1 | n/a | # Defines classes that provide synchronization objects. Note that use of |
---|
2 | n/a | # this module requires that your Python support threads. |
---|
3 | n/a | # |
---|
4 | n/a | # condition(lock=None) # a POSIX-like condition-variable object |
---|
5 | n/a | # barrier(n) # an n-thread barrier |
---|
6 | n/a | # event() # an event object |
---|
7 | n/a | # semaphore(n=1) # a semaphore object, with initial count n |
---|
8 | n/a | # mrsw() # a multiple-reader single-writer lock |
---|
9 | n/a | # |
---|
10 | n/a | # CONDITIONS |
---|
11 | n/a | # |
---|
12 | n/a | # A condition object is created via |
---|
13 | n/a | # import this_module |
---|
14 | n/a | # your_condition_object = this_module.condition(lock=None) |
---|
15 | n/a | # |
---|
16 | n/a | # As explained below, a condition object has a lock associated with it, |
---|
17 | n/a | # used in the protocol to protect condition data. You can specify a |
---|
18 | n/a | # lock to use in the constructor, else the constructor will allocate |
---|
19 | n/a | # an anonymous lock for you. Specifying a lock explicitly can be useful |
---|
20 | n/a | # when more than one condition keys off the same set of shared data. |
---|
21 | n/a | # |
---|
22 | n/a | # Methods: |
---|
23 | n/a | # .acquire() |
---|
24 | n/a | # acquire the lock associated with the condition |
---|
25 | n/a | # .release() |
---|
26 | n/a | # release the lock associated with the condition |
---|
27 | n/a | # .wait() |
---|
28 | n/a | # block the thread until such time as some other thread does a |
---|
29 | n/a | # .signal or .broadcast on the same condition, and release the |
---|
30 | n/a | # lock associated with the condition. The lock associated with |
---|
31 | n/a | # the condition MUST be in the acquired state at the time |
---|
32 | n/a | # .wait is invoked. |
---|
33 | n/a | # .signal() |
---|
34 | n/a | # wake up exactly one thread (if any) that previously did a .wait |
---|
35 | n/a | # on the condition; that thread will awaken with the lock associated |
---|
36 | n/a | # with the condition in the acquired state. If no threads are |
---|
37 | n/a | # .wait'ing, this is a nop. If more than one thread is .wait'ing on |
---|
38 | n/a | # the condition, any of them may be awakened. |
---|
39 | n/a | # .broadcast() |
---|
40 | n/a | # wake up all threads (if any) that are .wait'ing on the condition; |
---|
41 | n/a | # the threads are woken up serially, each with the lock in the |
---|
42 | n/a | # acquired state, so should .release() as soon as possible. If no |
---|
43 | n/a | # threads are .wait'ing, this is a nop. |
---|
44 | n/a | # |
---|
45 | n/a | # Note that if a thread does a .wait *while* a signal/broadcast is |
---|
46 | n/a | # in progress, it's guaranteed to block until a subsequent
---|
47 | n/a | # signal/broadcast. |
---|
48 | n/a | # |
---|
49 | n/a | # Secret feature: `broadcast' actually takes an integer argument, |
---|
50 | n/a | # and will wake up exactly that many waiting threads (or the total |
---|
51 | n/a | # number waiting, if that's less). Use of this is dubious, though, |
---|
52 | n/a | # and probably won't be supported if this form of condition is |
---|
53 | n/a | # reimplemented in C. |
---|
54 | n/a | # |
---|
55 | n/a | # DIFFERENCES FROM POSIX |
---|
56 | n/a | # |
---|
57 | n/a | # + A separate mutex is not needed to guard condition data. Instead, a |
---|
58 | n/a | # condition object can (must) be .acquire'ed and .release'ed directly. |
---|
59 | n/a | # This eliminates a common error in using POSIX conditions. |
---|
60 | n/a | # |
---|
61 | n/a | # + Because of implementation difficulties, a POSIX `signal' wakes up |
---|
62 | n/a | # _at least_ one .wait'ing thread. Race conditions make it difficult |
---|
63 | n/a | # to stop that. This implementation guarantees to wake up only one, |
---|
64 | n/a | # but you probably shouldn't rely on that. |
---|
65 | n/a | # |
---|
66 | n/a | # PROTOCOL |
---|
67 | n/a | # |
---|
68 | n/a | # Condition objects are used to block threads until "some condition" is |
---|
69 | n/a | # true. E.g., a thread may wish to wait until a producer pumps out data |
---|
70 | n/a | # for it to consume, or a server may wish to wait until someone requests |
---|
71 | n/a | # its services, or perhaps a whole bunch of threads want to wait until a |
---|
72 | n/a | # preceding pass over the data is complete. Early models for conditions |
---|
73 | n/a | # relied on some other thread figuring out when a blocked thread's |
---|
74 | n/a | # condition was true, and made the other thread responsible both for |
---|
75 | n/a | # waking up the blocked thread and guaranteeing that it woke up with all |
---|
76 | n/a | # data in a correct state. This proved to be very delicate in practice, |
---|
77 | n/a | # and gave conditions a bad name in some circles. |
---|
78 | n/a | # |
---|
79 | n/a | # The POSIX model addresses these problems by making a thread responsible |
---|
80 | n/a | # for ensuring that its own state is correct when it wakes, and relies |
---|
81 | n/a | # on a rigid protocol to make this easy; so long as you stick to the |
---|
82 | n/a | # protocol, POSIX conditions are easy to "get right": |
---|
83 | n/a | # |
---|
84 | n/a | # A) The thread that's waiting for some arbitrarily-complex condition |
---|
85 | n/a | # (ACC) to become true does: |
---|
86 | n/a | # |
---|
87 | n/a | # condition.acquire() |
---|
88 | n/a | # while not (code to evaluate the ACC): |
---|
89 | n/a | # condition.wait() |
---|
90 | n/a | # # That blocks the thread, *and* releases the lock. When a |
---|
91 | n/a | # # condition.signal() happens, it will wake up some thread that |
---|
92 | n/a | # # did a .wait, *and* acquire the lock again before .wait |
---|
93 | n/a | # # returns. |
---|
94 | n/a | # # |
---|
95 | n/a | # # Because the lock is acquired at this point, the state used |
---|
96 | n/a | # # in evaluating the ACC is frozen, so it's safe to go back & |
---|
97 | n/a | # # reevaluate the ACC. |
---|
98 | n/a | # |
---|
99 | n/a | # # At this point, ACC is true, and the thread has the condition |
---|
100 | n/a | # # locked. |
---|
101 | n/a | # # So code here can safely muck with the shared state that |
---|
102 | n/a | # # went into evaluating the ACC -- if it wants to. |
---|
103 | n/a | # # When done mucking with the shared state, do |
---|
104 | n/a | # condition.release() |
---|
105 | n/a | # |
---|
106 | n/a | # B) Threads that are mucking with shared state that may affect the |
---|
107 | n/a | # ACC do: |
---|
108 | n/a | # |
---|
109 | n/a | # condition.acquire() |
---|
110 | n/a | # # muck with shared state |
---|
111 | n/a | # condition.release() |
---|
112 | n/a | # if it's possible that ACC is true now: |
---|
113 | n/a | # condition.signal() # or .broadcast() |
---|
114 | n/a | # |
---|
115 | n/a | # Note: You may prefer to put the "if" clause before the release(). |
---|
116 | n/a | # That's fine, but do note that anyone waiting on the signal will |
---|
117 | n/a | # stay blocked until the release() is done (since acquiring the |
---|
118 | n/a | # condition is part of what .wait() does before it returns). |
---|
119 | n/a | # |
---|
120 | n/a | # TRICK OF THE TRADE |
---|
121 | n/a | # |
---|
122 | n/a | # With simpler forms of conditions, it can be impossible to know when |
---|
123 | n/a | # a thread that's supposed to do a .wait has actually done it. But |
---|
124 | n/a | # because this form of condition releases a lock as _part_ of doing a |
---|
125 | n/a | # wait, the state of that lock can be used to guarantee it. |
---|
126 | n/a | # |
---|
127 | n/a | # E.g., suppose thread A spawns thread B and later wants to wait for B to |
---|
128 | n/a | # complete: |
---|
129 | n/a | # |
---|
130 | n/a | # In A: In B: |
---|
131 | n/a | # |
---|
132 | n/a | # B_done = condition() ... do work ... |
---|
133 | n/a | # B_done.acquire() B_done.acquire(); B_done.release() |
---|
134 | n/a | # spawn B B_done.signal() |
---|
135 | n/a | # ... some time later ... ... and B exits ... |
---|
136 | n/a | # B_done.wait() |
---|
137 | n/a | # |
---|
138 | n/a | # Because B_done was in the acquire'd state at the time B was spawned, |
---|
139 | n/a | # B's attempt to acquire B_done can't succeed until A has done its |
---|
140 | n/a | # B_done.wait() (which releases B_done). So B's B_done.signal() is |
---|
141 | n/a | # guaranteed to be seen by the .wait(). Without the lock trick, B |
---|
142 | n/a | # may signal before A .waits, and then A would wait forever. |
---|
143 | n/a | # |
---|
144 | n/a | # BARRIERS |
---|
145 | n/a | # |
---|
146 | n/a | # A barrier object is created via |
---|
147 | n/a | # import this_module |
---|
148 | n/a | # your_barrier = this_module.barrier(num_threads) |
---|
149 | n/a | # |
---|
150 | n/a | # Methods: |
---|
151 | n/a | # .enter() |
---|
152 | n/a | # the thread blocks until num_threads threads in all have done |
---|
153 | n/a | # .enter(). Then the num_threads threads that .enter'ed resume, |
---|
154 | n/a | # and the barrier resets to capture the next num_threads threads |
---|
155 | n/a | # that .enter it. |
---|
156 | n/a | # |
---|
157 | n/a | # EVENTS |
---|
158 | n/a | # |
---|
159 | n/a | # An event object is created via |
---|
160 | n/a | # import this_module |
---|
161 | n/a | # your_event = this_module.event() |
---|
162 | n/a | # |
---|
163 | n/a | # An event has two states, `posted' and `cleared'. An event is |
---|
164 | n/a | # created in the cleared state. |
---|
165 | n/a | # |
---|
166 | n/a | # Methods: |
---|
167 | n/a | # |
---|
168 | n/a | # .post() |
---|
169 | n/a | # Put the event in the posted state, and resume all threads |
---|
170 | n/a | # .wait'ing on the event (if any). |
---|
171 | n/a | # |
---|
172 | n/a | # .clear() |
---|
173 | n/a | # Put the event in the cleared state. |
---|
174 | n/a | # |
---|
175 | n/a | # .is_posted() |
---|
176 | n/a | # Returns 0 if the event is in the cleared state, or 1 if the event |
---|
177 | n/a | # is in the posted state. |
---|
178 | n/a | # |
---|
179 | n/a | # .wait() |
---|
180 | n/a | # If the event is in the posted state, returns immediately. |
---|
181 | n/a | # If the event is in the cleared state, blocks the calling thread |
---|
182 | n/a | # until the event is .post'ed by another thread. |
---|
183 | n/a | # |
---|
184 | n/a | # Note that an event, once posted, remains posted until explicitly |
---|
185 | n/a | # cleared. Relative to conditions, this is both the strength & weakness |
---|
186 | n/a | # of events. It's a strength because the .post'ing thread doesn't have to |
---|
187 | n/a | # worry about whether the threads it's trying to communicate with have |
---|
188 | n/a | # already done a .wait (a condition .signal is seen only by threads that |
---|
189 | n/a | # do a .wait _prior_ to the .signal; a .signal does not persist). But |
---|
190 | n/a | # it's a weakness because .clear'ing an event is error-prone: it's easy |
---|
191 | n/a | # to mistakenly .clear an event before all the threads you intended to |
---|
192 | n/a | # see the event get around to .wait'ing on it. But so long as you don't |
---|
193 | n/a | # need to .clear an event, events are easy to use safely. |
---|
194 | n/a | # |
---|
195 | n/a | # SEMAPHORES |
---|
196 | n/a | # |
---|
197 | n/a | # A semaphore object is created via |
---|
198 | n/a | # import this_module |
---|
199 | n/a | # your_semaphore = this_module.semaphore(count=1) |
---|
200 | n/a | # |
---|
201 | n/a | # A semaphore has an integer count associated with it. The initial value |
---|
202 | n/a | # of the count is specified by the optional argument (which defaults to |
---|
203 | n/a | # 1) passed to the semaphore constructor. |
---|
204 | n/a | # |
---|
205 | n/a | # Methods: |
---|
206 | n/a | # |
---|
207 | n/a | # .p() |
---|
208 | n/a | # If the semaphore's count is greater than 0, decrements the count |
---|
209 | n/a | # by 1 and returns. |
---|
210 | n/a | # Else if the semaphore's count is 0, blocks the calling thread |
---|
211 | n/a | # until a subsequent .v() increases the count. When that happens, |
---|
212 | n/a | # the count will be decremented by 1 and the calling thread resumed. |
---|
213 | n/a | # |
---|
214 | n/a | # .v() |
---|
215 | n/a | # Increments the semaphore's count by 1, and wakes up a thread (if |
---|
216 | n/a | # any) blocked by a .p(). It's a (detected) error for a .v() to
---|
217 | n/a | # increase the semaphore's count to a value larger than the initial |
---|
218 | n/a | # count. |
---|
219 | n/a | # |
---|
220 | n/a | # MULTIPLE-READER SINGLE-WRITER LOCKS |
---|
221 | n/a | # |
---|
222 | n/a | # A mrsw lock is created via |
---|
223 | n/a | # import this_module |
---|
224 | n/a | # your_mrsw_lock = this_module.mrsw() |
---|
225 | n/a | # |
---|
226 | n/a | # This kind of lock is often useful with complex shared data structures. |
---|
227 | n/a | # The object lets any number of "readers" proceed, so long as no thread |
---|
228 | n/a | # wishes to "write". When a (one or more) thread declares its intention |
---|
229 | n/a | # to "write" (e.g., to update a shared structure), all current readers |
---|
230 | n/a | # are allowed to finish, and then a writer gets exclusive access; all |
---|
231 | n/a | # other readers & writers are blocked until the current writer completes. |
---|
232 | n/a | # Finally, if some thread is waiting to write and another is waiting to |
---|
233 | n/a | # read, the writer takes precedence. |
---|
234 | n/a | # |
---|
235 | n/a | # Methods: |
---|
236 | n/a | # |
---|
237 | n/a | # .read_in() |
---|
238 | n/a | # If no thread is writing or waiting to write, returns immediately. |
---|
239 | n/a | # Else blocks until no thread is writing or waiting to write. So |
---|
240 | n/a | # long as some thread has completed a .read_in but not a .read_out, |
---|
241 | n/a | # writers are blocked. |
---|
242 | n/a | # |
---|
243 | n/a | # .read_out() |
---|
244 | n/a | # Use sometime after a .read_in to declare that the thread is done |
---|
245 | n/a | # reading. When all threads complete reading, a writer can proceed. |
---|
246 | n/a | # |
---|
247 | n/a | # .write_in() |
---|
248 | n/a | # If no thread is writing (has completed a .write_in, but hasn't yet |
---|
249 | n/a | # done a .write_out) or reading (similarly), returns immediately. |
---|
250 | n/a | # Else blocks the calling thread, and threads waiting to read, until |
---|
251 | n/a | # the current writer completes writing or all the current readers |
---|
252 | n/a | # complete reading; if then more than one thread is waiting to |
---|
253 | n/a | # write, one of them is allowed to proceed, but which one is not |
---|
254 | n/a | # specified. |
---|
255 | n/a | # |
---|
256 | n/a | # .write_out() |
---|
257 | n/a | # Use sometime after a .write_in to declare that the thread is done |
---|
258 | n/a | # writing. Then if some other thread is waiting to write, it's |
---|
259 | n/a | # allowed to proceed. Else all threads (if any) waiting to read are |
---|
260 | n/a | # allowed to proceed. |
---|
261 | n/a | # |
---|
262 | n/a | # .write_to_read() |
---|
263 | n/a | # Use instead of a .write_out to declare that the thread is done
---|
264 | n/a | # writing but wants to continue reading without other writers |
---|
265 | n/a | # intervening. If there are other threads waiting to write, they |
---|
266 | n/a | # are allowed to proceed only if the current thread calls |
---|
267 | n/a | # .read_out; threads waiting to read are only allowed to proceed |
---|
268 | n/a | # if there are no threads waiting to write. (This is a
---|
269 | n/a | # weakness of the interface!) |
---|
270 | n/a | |
---|
271 | n/a | import _thread as thread |
---|
272 | n/a | |
---|
class condition:
    """A POSIX-like condition variable with a built-in mutex.

    The mutex (either the `lock` passed in, or an anonymous one) must be
    .acquire()'d before .wait() is called; .wait() releases it while
    blocked and reacquires it before returning.  .signal()/.broadcast()
    wake waiters that did a .wait() *before* the signal; a signal does
    not persist.
    """

    def __init__(self, lock=None):
        # the lock actually used by .acquire() and .release()
        if lock is None:
            self.mutex = thread.allocate_lock()
        else:
            # duck-typed check: anything with acquire/release will do
            if hasattr(lock, 'acquire') and \
               hasattr(lock, 'release'):
                self.mutex = lock
            else:
                raise TypeError('condition constructor requires ' \
                                'a lock argument')

        # lock used to block threads until a signal; created acquired so
        # the first .wait() blocks on it
        self.checkout = thread.allocate_lock()
        self.checkout.acquire()

        # internal critical-section lock, & the data it protects
        self.idlock = thread.allocate_lock()
        self.id = 0         # generation counter; bumped by each release cycle
        self.waiting = 0    # num waiters subject to current release
        self.pending = 0    # num waiters awaiting next signal
        self.torelease = 0  # num waiters to release
        self.releasing = 0  # 1 iff release is in progress

    def acquire(self):
        # acquire the mutex associated with the condition
        self.mutex.acquire()

    def release(self):
        # release the mutex associated with the condition
        self.mutex.release()

    def wait(self):
        """Release the mutex and block until a later signal/broadcast.

        Raises ValueError if the mutex is not held.  On return the mutex
        is held again.
        """
        mutex, checkout, idlock = self.mutex, self.checkout, self.idlock
        if not mutex.locked():
            raise ValueError("condition must be .acquire'd when .wait() invoked")

        # register under the current generation; a later release only
        # applies to waiters whose myid is older than self.id
        idlock.acquire()
        myid = self.id
        self.pending = self.pending + 1
        idlock.release()

        mutex.release()

        while 1:
            # checkout serializes departing waiters: only `torelease`
            # of them may pass per release cycle
            checkout.acquire(); idlock.acquire()
            if myid < self.id:
                break
            checkout.release(); idlock.release()

        # NOTE: the loop exits via `break` with BOTH checkout and idlock
        # still held; the bookkeeping below relies on that.
        self.waiting = self.waiting - 1
        self.torelease = self.torelease - 1
        if self.torelease:
            # more waiters to free this cycle: pass the baton along
            checkout.release()
        else:
            self.releasing = 0
            if self.waiting == self.pending == 0:
                # no one left at all: safe to reset the generation counter
                self.id = 0
        idlock.release()
        mutex.acquire()

    def signal(self):
        # wake exactly one waiter (if any)
        self.broadcast(1)

    def broadcast(self, num = -1):
        """Wake `num` waiters (all of them when num == -1).

        Raises ValueError for num < -1; a num of 0 is a no-op.
        """
        if num < -1:
            raise ValueError('.broadcast called with num %r' % (num,))
        if num == 0:
            return
        self.idlock.acquire()
        if self.pending:
            # start a new generation: pending waiters become releasable
            self.waiting = self.waiting + self.pending
            self.pending = 0
            self.id = self.id + 1
        if num == -1:
            self.torelease = self.waiting
        else:
            # never release more than are actually waiting
            self.torelease = min( self.waiting,
                                  self.torelease + num )
        if self.torelease and not self.releasing:
            # kick off the release chain; waiters hand checkout along
            self.releasing = 1
            self.checkout.release()
        self.idlock.release()
---|
355 | n/a | |
---|
class barrier:
    """An n-thread barrier: .enter() blocks until n threads have arrived."""

    def __init__(self, n):
        self.n = n          # barrier capacity
        self.togo = n       # arrivals still needed this cycle
        self.full = condition()

    def enter(self):
        """Block until n threads in all have entered, then reset the barrier."""
        gate = self.full
        gate.acquire()
        self.togo -= 1
        if self.togo == 0:
            # last arrival: rearm for the next cycle and free everyone
            self.togo = self.n
            gate.broadcast()
        else:
            gate.wait()
        gate.release()
---|
372 | n/a | |
---|
class event:
    """A two-state (posted/cleared) event; created cleared.

    Once posted, an event stays posted until explicitly .clear()'ed,
    so waiters arriving after the post return immediately.
    """

    def __init__(self):
        self.state = 0              # 1 iff posted
        self.posted = condition()

    def post(self):
        """Move to the posted state and wake every waiter."""
        cond = self.posted
        cond.acquire()
        self.state = 1
        cond.broadcast()
        cond.release()

    def clear(self):
        """Move back to the cleared state."""
        cond = self.posted
        cond.acquire()
        self.state = 0
        cond.release()

    def is_posted(self):
        """Return 1 if posted, else 0."""
        cond = self.posted
        cond.acquire()
        result = self.state
        cond.release()
        return result

    def wait(self):
        """Return at once if posted, else block until some thread posts."""
        cond = self.posted
        cond.acquire()
        if not self.state:
            cond.wait()
        cond.release()
---|
400 | n/a | |
---|
class semaphore:
    """A counting semaphore whose count may never exceed its initial value."""

    def __init__(self, count=1):
        if count <= 0:
            raise ValueError('semaphore count %d; must be >= 1' % count)
        self.count = count      # current value
        self.maxcount = count   # ceiling enforced by .v()
        self.nonzero = condition()

    def p(self):
        """Decrement the count by 1, blocking while it is zero."""
        cond = self.nonzero
        cond.acquire()
        while not self.count:
            cond.wait()
        self.count -= 1
        cond.release()

    def v(self):
        """Increment the count by 1 and wake one blocked .p(), if any.

        Raises ValueError if the increment would exceed the initial count.
        """
        cond = self.nonzero
        cond.acquire()
        if self.count == self.maxcount:
            raise ValueError('.v() tried to raise semaphore count above '
                             'initial value %r' % self.maxcount)
        self.count += 1
        cond.signal()
        cond.release()
---|
424 | n/a | |
---|
class mrsw:
    """A multiple-reader single-writer lock; waiting writers take precedence."""

    def __init__(self):
        # one mutex guards all the bookkeeping below
        self.rwOK = thread.allocate_lock()
        self.nr = 0       # number readers actively reading (not just waiting)
        self.nw = 0       # number writers either waiting to write or writing
        self.writing = 0  # 1 iff some thread is writing

        # two condition variables sharing the single mutex
        self.readOK = condition(self.rwOK)   # OK to unblock readers
        self.writeOK = condition(self.rwOK)  # OK to unblock writers

    def read_in(self):
        """Block until no thread is writing or waiting to write, then read."""
        guard = self.rwOK
        guard.acquire()
        while self.nw:
            self.readOK.wait()
        self.nr += 1
        guard.release()

    def read_out(self):
        """Declare this reader done; the last reader out admits a writer."""
        guard = self.rwOK
        guard.acquire()
        if self.nr <= 0:
            raise ValueError('.read_out() invoked without an active reader')
        self.nr -= 1
        if not self.nr:
            self.writeOK.signal()
        guard.release()

    def write_in(self):
        """Block until exclusive write access is obtained."""
        guard = self.rwOK
        guard.acquire()
        self.nw += 1
        while self.writing or self.nr:
            self.writeOK.wait()
        self.writing = 1
        guard.release()

    def write_out(self):
        """Declare the writer done; admit a waiting writer, else all readers."""
        guard = self.rwOK
        guard.acquire()
        if not self.writing:
            raise ValueError('.write_out() invoked without an active writer')
        self.writing = 0
        self.nw -= 1
        if self.nw:
            self.writeOK.signal()
        else:
            self.readOK.broadcast()
        guard.release()

    def write_to_read(self):
        """Atomically convert write access into read access.

        Other readers are admitted only if no writers remain waiting.
        """
        guard = self.rwOK
        guard.acquire()
        if not self.writing:
            raise ValueError('.write_to_read() invoked without an active writer')
        self.writing = 0
        self.nw -= 1
        self.nr += 1
        if not self.nw:
            self.readOK.broadcast()
        guard.release()
---|
483 | n/a | |
---|
484 | n/a | # The rest of the file is a test case, that runs a number of parallelized |
---|
485 | n/a | # quicksorts in parallel. If it works, you'll get about 600 lines of |
---|
486 | n/a | # tracing output, with a line like |
---|
487 | n/a | # test passed! 209 threads created in all |
---|
488 | n/a | # as the last line. The content and order of preceding lines will |
---|
489 | n/a | # vary across runs. |
---|
490 | n/a | |
---|
def _new_thread(func, *args):
    """Allocate a fresh thread id, log the birth, and start func(id, *args)."""
    global TID
    tid.acquire()
    TID = TID + 1
    newid = TID
    tid.release()
    io.acquire()
    alive.append(newid)
    print('starting thread', newid, '--', len(alive), 'alive')
    io.release()
    thread.start_new_thread(func, (newid,) + args)
---|
498 | n/a | |
---|
def _qsort(tid, a, l, r, finished):
    """Sort a[l:r] in place with a parallel quicksort; post `finished` when done."""
    io.acquire()
    print('thread', tid, 'qsort', l, r)
    io.release()

    if r - l > 1:
        pivot = a[l]
        # partition: make a[l:split] <= pivot, and a[split:r] > pivot
        split = l + 1
        for k in range(split, r):
            if a[k] <= pivot:
                a[split], a[k] = a[k], a[split]
                split += 1
        a[l], a[split - 1] = a[split - 1], pivot

        # sort the two halves concurrently, then wait for both
        left_done = event()
        right_done = event()
        _new_thread(_qsort, a, l, split - 1, left_done)
        _new_thread(_qsort, a, split, r, right_done)
        left_done.wait()
        right_done.wait()

    io.acquire()
    print('thread', tid, 'qsort done')
    alive.remove(tid)
    io.release()
    finished.post()
---|
521 | n/a | |
---|
def _randarray(tid, a, finished):
    """Shuffle `a` in place (Fisher-Yates) and post `finished` when done."""
    io.acquire()
    print('thread', tid, 'randomizing array')
    io.release()
    for i in range(1, len(a)):
        # wh serializes access to the shared random generator
        wh.acquire()
        other = randint(0, i)
        wh.release()
        a[i], a[other] = a[other], a[i]
    io.acquire()
    print('thread', tid, 'randomizing done')
    alive.remove(tid)
    io.release()
    finished.post()
---|
531 | n/a | |
---|
532 | n/a | def _check_sort(a): |
---|
533 | n/a | if a != range(len(a)): |
---|
534 | n/a | raise ValueError('a not sorted', a) |
---|
535 | n/a | |
---|
def _run_one_sort(tid, a, bar, done):
    """Randomize `a`, quicksort it, rendezvous at `bar`, then post `done`.

    All threads running this meet at the barrier before `done` is posted.
    """
    io.acquire()
    print('thread', tid, 'randomizing', a)
    io.release()
    step_done = event()
    _new_thread(_randarray, a, step_done)
    step_done.wait()

    io.acquire()
    print('thread', tid, 'sorting', a)
    io.release()
    step_done.clear()
    _new_thread(_qsort, a, 0, len(a), step_done)
    step_done.wait()
    _check_sort(a)

    io.acquire()
    print('thread', tid, 'entering barrier')
    io.release()
    bar.enter()
    io.acquire()
    print('thread', tid, 'leaving barrier')
    io.release()
    io.acquire()
    alive.remove(tid)
    io.release()
    # make sure they've all removed themselves from alive
    # before 'done' is posted
    bar.enter()
    bar.enter()  # just to be cruel
    done.post()
---|
562 | n/a | |
---|
def test():
    """Run NSORTS parallel randomize+quicksort jobs and verify the results.

    Sets up the module-level locks and bookkeeping used by the worker
    functions, spawns one _run_one_sort thread per array, waits for the
    shared `finished` event, then checks every array came back sorted.
    """
    global TID, tid, io, wh, randint, alive
    import random
    randint = random.randint

    TID = 0                      # thread ID (1, 2, ...)
    tid = thread.allocate_lock() # for changing TID
    io = thread.allocate_lock()  # for printing, and 'alive'
    wh = thread.allocate_lock()  # for calls to random
    alive = []                   # IDs of active threads

    NSORTS = 5
    arrays = []
    for i in range(NSORTS):
        # Bug fix: list(...) is required in Python 3 -- a bare range
        # object is immutable, and the sort/shuffle threads swap
        # elements in place.
        arrays.append(list(range((i+1)*10)))

    bar = barrier(NSORTS)
    finished = event()
    for i in range(NSORTS):
        _new_thread(_run_one_sort, arrays[i], bar, finished)
    finished.wait()

    print('all threads done, and checking results ...')
    if alive:
        raise ValueError('threads still alive at end', alive)
    for i in range(NSORTS):
        a = arrays[i]
        if len(a) != (i+1)*10:
            raise ValueError('length of array', i, 'screwed up')
        _check_sort(a)

    print('test passed!', TID, 'threads created in all')
---|
595 | n/a | |
---|
# Run the self-test when executed as a script.
if __name__ == '__main__':
    test()
---|
598 | n/a | |
---|
599 | n/a | # end of module |
---|