
Python code coverage for Modules/_heapqmodule.c

Coverage counts: n/a for all lines of this file.

/* Drop-in replacement for heapq.py

C implementation derived directly from heapq.py in Py2.3
which was written by Kevin O'Connor, augmented by Tim Peters,
annotated by François Pinard, and converted to C by Raymond Hettinger.

*/

#include "Python.h"

static int
siftdown(PyListObject *heap, Py_ssize_t startpos, Py_ssize_t pos)
{
    PyObject *newitem, *parent, **arr;
    Py_ssize_t parentpos, size;
    int cmp;

    assert(PyList_Check(heap));
    size = PyList_GET_SIZE(heap);
    if (pos >= size) {
        PyErr_SetString(PyExc_IndexError, "index out of range");
        return -1;
    }

    /* Follow the path to the root, moving parents down until finding
       a place newitem fits. */
    arr = _PyList_ITEMS(heap);
    newitem = arr[pos];
    while (pos > startpos) {
        parentpos = (pos - 1) >> 1;
        parent = arr[parentpos];
        cmp = PyObject_RichCompareBool(newitem, parent, Py_LT);
        if (cmp < 0)
            return -1;
        if (size != PyList_GET_SIZE(heap)) {
            PyErr_SetString(PyExc_RuntimeError,
                            "list changed size during iteration");
            return -1;
        }
        if (cmp == 0)
            break;
        arr = _PyList_ITEMS(heap);
        parent = arr[parentpos];
        newitem = arr[pos];
        arr[parentpos] = newitem;
        arr[pos] = parent;
        pos = parentpos;
    }
    return 0;
}
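
/* Index arithmetic used above, shown on a small example: the parent of
   position pos is (pos - 1) >> 1 and the children of position k sit at
   2*k+1 and 2*k+2.  So an item appended at position 6 is compared with
   its parent at position 2 and then, while it keeps winning the Py_LT
   comparison, with the root at position 0. */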

static int
siftup(PyListObject *heap, Py_ssize_t pos)
{
    Py_ssize_t startpos, endpos, childpos, limit;
    PyObject *tmp1, *tmp2, **arr;
    int cmp;

    assert(PyList_Check(heap));
    endpos = PyList_GET_SIZE(heap);
    startpos = pos;
    if (pos >= endpos) {
        PyErr_SetString(PyExc_IndexError, "index out of range");
        return -1;
    }

    /* Bubble up the smaller child until hitting a leaf. */
    arr = _PyList_ITEMS(heap);
    limit = endpos >> 1;         /* smallest pos that has no child */
    while (pos < limit) {
        /* Set childpos to index of smaller child. */
        childpos = 2*pos + 1;    /* leftmost child position */
        if (childpos + 1 < endpos) {
            cmp = PyObject_RichCompareBool(
                arr[childpos],
                arr[childpos + 1],
                Py_LT);
            if (cmp < 0)
                return -1;
            childpos += ((unsigned)cmp ^ 1);   /* increment when cmp==0 */
            arr = _PyList_ITEMS(heap);         /* arr may have changed */
            if (endpos != PyList_GET_SIZE(heap)) {
                PyErr_SetString(PyExc_RuntimeError,
                                "list changed size during iteration");
                return -1;
            }
        }
        /* Move the smaller child up. */
        tmp1 = arr[childpos];
        tmp2 = arr[pos];
        arr[childpos] = tmp2;
        arr[pos] = tmp1;
        pos = childpos;
    }
    /* Bubble it up to its final resting place (by sifting its parents down). */
    return siftdown(heap, startpos, pos);
}
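
/* Note on the structure of siftup(): instead of stopping as soon as the
   moved item is <= both children, it walks the smaller child up all the
   way to a leaf and then calls siftdown() to float the moved item back
   into place.  The moved item was previously the last leaf, so it usually
   belongs near the bottom; in practice this tends to cost fewer
   comparisons than testing the item against both children at every level
   (the same strategy is discussed at length in Lib/heapq.py). */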

static PyObject *
heappush(PyObject *self, PyObject *args)
{
    PyObject *heap, *item;

    if (!PyArg_UnpackTuple(args, "heappush", 2, 2, &heap, &item))
        return NULL;

    if (!PyList_Check(heap)) {
        PyErr_SetString(PyExc_TypeError, "heap argument must be a list");
        return NULL;
    }

    if (PyList_Append(heap, item))
        return NULL;

    if (siftdown((PyListObject *)heap, 0, PyList_GET_SIZE(heap)-1))
        return NULL;
    Py_RETURN_NONE;
}

PyDoc_STRVAR(heappush_doc,
"heappush(heap, item) -> None. Push item onto heap, maintaining the heap invariant.");

static PyObject *
heappop_internal(PyObject *heap, int siftup_func(PyListObject *, Py_ssize_t))
{
    PyObject *lastelt, *returnitem;
    Py_ssize_t n;

    if (!PyList_Check(heap)) {
        PyErr_SetString(PyExc_TypeError, "heap argument must be a list");
        return NULL;
    }

    /* raises IndexError if the heap is empty */
    n = PyList_GET_SIZE(heap);
    if (n == 0) {
        PyErr_SetString(PyExc_IndexError, "index out of range");
        return NULL;
    }

    lastelt = PyList_GET_ITEM(heap, n-1);
    Py_INCREF(lastelt);
    if (PyList_SetSlice(heap, n-1, n, NULL)) {
        Py_DECREF(lastelt);
        return NULL;
    }
    n--;

    if (!n)
        return lastelt;
    returnitem = PyList_GET_ITEM(heap, 0);
    PyList_SET_ITEM(heap, 0, lastelt);
    if (siftup_func((PyListObject *)heap, 0)) {
        Py_DECREF(returnitem);
        return NULL;
    }
    return returnitem;
}

static PyObject *
heappop(PyObject *self, PyObject *heap)
{
    return heappop_internal(heap, siftup);
}

PyDoc_STRVAR(heappop_doc,
"Pop the smallest item off the heap, maintaining the heap invariant.");

static PyObject *
heapreplace_internal(PyObject *args, int siftup_func(PyListObject *, Py_ssize_t))
{
    PyObject *heap, *item, *returnitem;

    if (!PyArg_UnpackTuple(args, "heapreplace", 2, 2, &heap, &item))
        return NULL;

    if (!PyList_Check(heap)) {
        PyErr_SetString(PyExc_TypeError, "heap argument must be a list");
        return NULL;
    }

    if (PyList_GET_SIZE(heap) == 0) {
        PyErr_SetString(PyExc_IndexError, "index out of range");
        return NULL;
    }

    returnitem = PyList_GET_ITEM(heap, 0);
    Py_INCREF(item);
    PyList_SET_ITEM(heap, 0, item);
    if (siftup_func((PyListObject *)heap, 0)) {
        Py_DECREF(returnitem);
        return NULL;
    }
    return returnitem;
}

static PyObject *
heapreplace(PyObject *self, PyObject *args)
{
    return heapreplace_internal(args, siftup);
}

PyDoc_STRVAR(heapreplace_doc,
"heapreplace(heap, item) -> value. Pop and return the current smallest value, and add the new item.\n\
\n\
This is more efficient than heappop() followed by heappush(), and can be\n\
more appropriate when using a fixed-size heap. Note that the value\n\
returned may be larger than item! That constrains reasonable uses of\n\
this routine unless written as part of a conditional replacement:\n\n\
    if item > heap[0]:\n\
        item = heapreplace(heap, item)\n");
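
/* A short worked example of the caveat above (values chosen for
   illustration): with heap == [1, 3, 2], heapreplace(heap, 0) returns 1
   and leaves [0, 3, 2]; the value returned (1) is larger than the item
   pushed (0).  The guarded form shown in the docstring, or heappushpop()
   below, returns 0 and leaves the heap unchanged in that case. */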

static PyObject *
heappushpop(PyObject *self, PyObject *args)
{
    PyObject *heap, *item, *returnitem;
    int cmp;

    if (!PyArg_UnpackTuple(args, "heappushpop", 2, 2, &heap, &item))
        return NULL;

    if (!PyList_Check(heap)) {
        PyErr_SetString(PyExc_TypeError, "heap argument must be a list");
        return NULL;
    }

    if (PyList_GET_SIZE(heap) == 0) {
        Py_INCREF(item);
        return item;
    }

    cmp = PyObject_RichCompareBool(PyList_GET_ITEM(heap, 0), item, Py_LT);
    if (cmp < 0)
        return NULL;
    if (cmp == 0) {
        Py_INCREF(item);
        return item;
    }

    if (PyList_GET_SIZE(heap) == 0) {
        PyErr_SetString(PyExc_IndexError, "index out of range");
        return NULL;
    }

    returnitem = PyList_GET_ITEM(heap, 0);
    Py_INCREF(item);
    PyList_SET_ITEM(heap, 0, item);
    if (siftup((PyListObject *)heap, 0)) {
        Py_DECREF(returnitem);
        return NULL;
    }
    return returnitem;
}

PyDoc_STRVAR(heappushpop_doc,
"heappushpop(heap, item) -> value. Push item on the heap, then pop and return the smallest item\n\
from the heap. The combined action runs more efficiently than\n\
heappush() followed by a separate call to heappop().");
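
/* For example (values chosen for illustration): with heap == [1, 3, 2],
   heappushpop(heap, 5) returns 1 and leaves [2, 3, 5], the same outcome
   as heappush(heap, 5) followed by heappop(heap), but without growing
   the list and with a single sift. */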

static Py_ssize_t
keep_top_bit(Py_ssize_t n)
{
    int i = 0;

    while (n > 1) {
        n >>= 1;
        i++;
    }
    return n << i;
}
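
/* keep_top_bit(n) isolates the most significant bit of n, i.e. the largest
   power of two not exceeding n (for n >= 1).  For example:
   keep_top_bit(1) == 1, keep_top_bit(5) == 4, keep_top_bit(16) == 16. */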

/* Cache friendly version of heapify()
   -----------------------------------

   Build up a heap in O(n) time by performing siftup() operations
   on nodes whose children are already heaps.

   The simplest way is to sift the nodes in reverse order from
   n//2-1 to 0 inclusive. The downside is that children may be
   out of cache by the time their parent is reached.

   A better way is to not wait for the children to go out of cache.
   Once a sibling pair of child nodes have been sifted, immediately
   sift their parent node (while the children are still in cache).

   Both ways build child heaps before their parents, so both ways
   do the exact same number of comparisons and produce exactly
   the same heap. The only difference is that the traversal
   order is optimized for cache efficiency.
*/
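
/* Worked example of the traversal order (derived from the code below):
   for a 31-element heap, cache_friendly_heapify() sifts positions in the
   order 14, 13, 6, 12, 11, 5, 2, 10, 9, 4, 8, 7, 3, 1, 0, so each parent
   is sifted as soon as both of its children have been, whereas the simple
   loop in heapify_internal() would sift 14, 13, ..., 1, 0. */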

static PyObject *
cache_friendly_heapify(PyObject *heap, int siftup_func(PyListObject *, Py_ssize_t))
{
    Py_ssize_t i, j, m, mhalf, leftmost;

    m = PyList_GET_SIZE(heap) >> 1;         /* index of first childless node */
    leftmost = keep_top_bit(m + 1) - 1;     /* leftmost node in row of m */
    mhalf = m >> 1;                         /* parent of first childless node */

    for (i = leftmost - 1 ; i >= mhalf ; i--) {
        j = i;
        while (1) {
            if (siftup_func((PyListObject *)heap, j))
                return NULL;
            if (!(j & 1))
                break;
            j >>= 1;
        }
    }

    for (i = m - 1 ; i >= leftmost ; i--) {
        j = i;
        while (1) {
            if (siftup_func((PyListObject *)heap, j))
                return NULL;
            if (!(j & 1))
                break;
            j >>= 1;
        }
    }
    Py_RETURN_NONE;
}

static PyObject *
heapify_internal(PyObject *heap, int siftup_func(PyListObject *, Py_ssize_t))
{
    Py_ssize_t i, n;

    if (!PyList_Check(heap)) {
        PyErr_SetString(PyExc_TypeError, "heap argument must be a list");
        return NULL;
    }

    /* For heaps likely to be bigger than L1 cache, we use the cache
       friendly heapify function. For smaller heaps that fit entirely
       in cache, we prefer the simpler algorithm with less branching.
    */
    n = PyList_GET_SIZE(heap);
    if (n > 2500)
        return cache_friendly_heapify(heap, siftup_func);

    /* Transform bottom-up. The largest index there's any point to
       looking at is the largest with a child index in-range, so must
       have 2*i + 1 < n, or i < (n-1)/2. If n is even = 2*j, this is
       (2*j-1)/2 = j-1/2 so j-1 is the largest, which is n//2 - 1. If
       n is odd = 2*j+1, this is (2*j+1-1)/2 = j so j-1 is the largest,
       and that's again n//2-1.
    */
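    /* Concretely: n == 10 gives a last interior node at index 4 (left
       child 9), and n == 11 also gives index 4 (children 9 and 10); both
       match n//2 - 1, the starting index of the loop below. */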
    for (i = (n >> 1) - 1 ; i >= 0 ; i--)
        if (siftup_func((PyListObject *)heap, i))
            return NULL;
    Py_RETURN_NONE;
}

static PyObject *
heapify(PyObject *self, PyObject *heap)
{
    return heapify_internal(heap, siftup);
}

PyDoc_STRVAR(heapify_doc,
"Transform list into a heap, in-place, in O(len(heap)) time.");

static int
siftdown_max(PyListObject *heap, Py_ssize_t startpos, Py_ssize_t pos)
{
    PyObject *newitem, *parent, **arr;
    Py_ssize_t parentpos, size;
    int cmp;

    assert(PyList_Check(heap));
    size = PyList_GET_SIZE(heap);
    if (pos >= size) {
        PyErr_SetString(PyExc_IndexError, "index out of range");
        return -1;
    }

    /* Follow the path to the root, moving parents down until finding
       a place newitem fits. */
    arr = _PyList_ITEMS(heap);
    newitem = arr[pos];
    while (pos > startpos) {
        parentpos = (pos - 1) >> 1;
        parent = arr[parentpos];
        cmp = PyObject_RichCompareBool(parent, newitem, Py_LT);
        if (cmp < 0)
            return -1;
        if (size != PyList_GET_SIZE(heap)) {
            PyErr_SetString(PyExc_RuntimeError,
                            "list changed size during iteration");
            return -1;
        }
        if (cmp == 0)
            break;
        arr = _PyList_ITEMS(heap);
        parent = arr[parentpos];
        newitem = arr[pos];
        arr[parentpos] = newitem;
        arr[pos] = parent;
        pos = parentpos;
    }
    return 0;
}

static int
siftup_max(PyListObject *heap, Py_ssize_t pos)
{
    Py_ssize_t startpos, endpos, childpos, limit;
    PyObject *tmp1, *tmp2, **arr;
    int cmp;

    assert(PyList_Check(heap));
    endpos = PyList_GET_SIZE(heap);
    startpos = pos;
    if (pos >= endpos) {
        PyErr_SetString(PyExc_IndexError, "index out of range");
        return -1;
    }

    /* Bubble up the larger child until hitting a leaf. */
    arr = _PyList_ITEMS(heap);
    limit = endpos >> 1;         /* smallest pos that has no child */
    while (pos < limit) {
        /* Set childpos to index of larger child. */
        childpos = 2*pos + 1;    /* leftmost child position */
        if (childpos + 1 < endpos) {
            cmp = PyObject_RichCompareBool(
                arr[childpos + 1],
                arr[childpos],
                Py_LT);
            if (cmp < 0)
                return -1;
            childpos += ((unsigned)cmp ^ 1);   /* increment when cmp==0 */
            arr = _PyList_ITEMS(heap);         /* arr may have changed */
            if (endpos != PyList_GET_SIZE(heap)) {
                PyErr_SetString(PyExc_RuntimeError,
                                "list changed size during iteration");
                return -1;
            }
        }
        /* Move the larger child up. */
        tmp1 = arr[childpos];
        tmp2 = arr[pos];
        arr[childpos] = tmp2;
        arr[pos] = tmp1;
        pos = childpos;
    }
    /* Bubble it up to its final resting place (by sifting its parents down). */
    return siftdown_max(heap, startpos, pos);
}

static PyObject *
heappop_max(PyObject *self, PyObject *heap)
{
    return heappop_internal(heap, siftup_max);
}

PyDoc_STRVAR(heappop_max_doc, "Maxheap variant of heappop.");

static PyObject *
heapreplace_max(PyObject *self, PyObject *args)
{
    return heapreplace_internal(args, siftup_max);
}

PyDoc_STRVAR(heapreplace_max_doc, "Maxheap variant of heapreplace.");

static PyObject *
heapify_max(PyObject *self, PyObject *heap)
{
    return heapify_internal(heap, siftup_max);
}

PyDoc_STRVAR(heapify_max_doc, "Maxheap variant of heapify.");

static PyMethodDef heapq_methods[] = {
    {"heappush",         (PyCFunction)heappush,
     METH_VARARGS,       heappush_doc},
    {"heappushpop",      (PyCFunction)heappushpop,
     METH_VARARGS,       heappushpop_doc},
    {"heappop",          (PyCFunction)heappop,
     METH_O,             heappop_doc},
    {"heapreplace",      (PyCFunction)heapreplace,
     METH_VARARGS,       heapreplace_doc},
    {"heapify",          (PyCFunction)heapify,
     METH_O,             heapify_doc},
    {"_heappop_max",     (PyCFunction)heappop_max,
     METH_O,             heappop_max_doc},
    {"_heapreplace_max", (PyCFunction)heapreplace_max,
     METH_VARARGS,       heapreplace_max_doc},
    {"_heapify_max",     (PyCFunction)heapify_max,
     METH_O,             heapify_max_doc},
    {NULL, NULL}         /* sentinel */
};

PyDoc_STRVAR(module_doc,
"Heap queue algorithm (a.k.a. priority queue).\n\
\n\
Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for\n\
all k, counting elements from 0. For the sake of comparison,\n\
non-existing elements are considered to be infinite. The interesting\n\
property of a heap is that a[0] is always its smallest element.\n\
\n\
Usage:\n\
\n\
heap = []            # creates an empty heap\n\
heappush(heap, item) # pushes a new item on the heap\n\
item = heappop(heap) # pops the smallest item from the heap\n\
item = heap[0]       # smallest item on the heap without popping it\n\
heapify(x)           # transforms list into a heap, in-place, in linear time\n\
item = heapreplace(heap, item) # pops and returns smallest item, and adds\n\
                               # new item; the heap size is unchanged\n\
\n\
Our API differs from textbook heap algorithms as follows:\n\
\n\
- We use 0-based indexing. This makes the relationship between the\n\
  index for a node and the indexes for its children slightly less\n\
  obvious, but is more suitable since Python uses 0-based indexing.\n\
\n\
- Our heappop() method returns the smallest item, not the largest.\n\
\n\
These two make it possible to view the heap as a regular Python list\n\
without surprises: heap[0] is the smallest item, and heap.sort()\n\
maintains the heap invariant!\n");


PyDoc_STRVAR(__about__,
"Heap queues\n\
\n\
[explanation by Fran\xc3\xa7ois Pinard]\n\
\n\
Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for\n\
all k, counting elements from 0. For the sake of comparison,\n\
non-existing elements are considered to be infinite. The interesting\n\
property of a heap is that a[0] is always its smallest element.\n"
"\n\
The strange invariant above is meant to be an efficient memory\n\
representation for a tournament. The numbers below are `k', not a[k]:\n\
\n\
                                   0\n\
\n\
                  1                                 2\n\
\n\
          3               4                5               6\n\
\n\
      7       8       9       10      11      12      13      14\n\
\n\
    15 16   17 18   19 20   21 22   23 24   25 26   27 28   29 30\n\
\n\
\n\
In the tree above, each cell `k' is topping `2*k+1' and `2*k+2'. In\n\
a usual binary tournament we see in sports, each cell is the winner\n\
over the two cells it tops, and we can trace the winner down the tree\n\
to see all opponents s/he had. However, in many computer applications\n\
of such tournaments, we do not need to trace the history of a winner.\n\
To be more memory efficient, when a winner is promoted, we try to\n\
replace it by something else at a lower level, and the rule becomes\n\
that a cell and the two cells it tops contain three different items,\n\
but the top cell \"wins\" over the two topped cells.\n"
"\n\
If this heap invariant is protected at all times, index 0 is clearly\n\
the overall winner. The simplest algorithmic way to remove it and\n\
find the \"next\" winner is to move some loser (let's say cell 30 in the\n\
diagram above) into the 0 position, and then percolate this new 0 down\n\
the tree, exchanging values, until the invariant is re-established.\n\
This is clearly logarithmic on the total number of items in the tree.\n\
By iterating over all items, you get an O(n ln n) sort.\n"
"\n\
A nice feature of this sort is that you can efficiently insert new\n\
items while the sort is going on, provided that the inserted items are\n\
not \"better\" than the last 0'th element you extracted. This is\n\
especially useful in simulation contexts, where the tree holds all\n\
incoming events, and the \"win\" condition means the smallest scheduled\n\
time. When an event schedules other events for execution, they are\n\
scheduled into the future, so they can easily go into the heap. So, a\n\
heap is a good structure for implementing schedulers (this is what I\n\
used for my MIDI sequencer :-).\n"
"\n\
Various structures for implementing schedulers have been extensively\n\
studied, and heaps are good for this, as they are reasonably speedy,\n\
the speed is almost constant, and the worst case is not much different\n\
than the average case. However, there are other representations which\n\
are more efficient overall, yet the worst cases might be terrible.\n"
"\n\
Heaps are also very useful in big disk sorts. You most probably all\n\
know that a big sort implies producing \"runs\" (which are pre-sorted\n\
sequences, whose size is usually related to the amount of CPU memory),\n\
followed by merging passes for these runs, which merging is often\n\
very cleverly organised[1]. It is very important that the initial\n\
sort produces the longest runs possible. Tournaments are a good way\n\
to do that. If, using all the memory available to hold a tournament,\n\
you replace and percolate items that happen to fit the current run,\n\
you'll produce runs which are twice the size of the memory for random\n\
input, and much better for input fuzzily ordered.\n"
"\n\
Moreover, if you output the 0'th item on disk and get an input which\n\
may not fit in the current tournament (because the value \"wins\" over\n\
the last output value), it cannot fit in the heap, so the size of the\n\
heap decreases. The freed memory could be cleverly reused immediately\n\
for progressively building a second heap, which grows at exactly the\n\
same rate the first heap is melting. When the first heap completely\n\
vanishes, you switch heaps and start a new run. Clever and quite\n\
effective!\n\
\n\
In a word, heaps are useful memory structures to know. I use them in\n\
a few applications, and I think it is good to keep a `heap' module\n\
around. :-)\n"
"\n\
--------------------\n\
[1] The disk balancing algorithms which are current, nowadays, are\n\
more annoying than clever, and this is a consequence of the seeking\n\
capabilities of the disks. On devices which cannot seek, like big\n\
tape drives, the story was quite different, and one had to be very\n\
clever to ensure (far in advance) that each tape movement will be the\n\
most effective possible (that is, will best participate at\n\
\"progressing\" the merge). Some tapes were even able to read\n\
backwards, and this was also used to avoid the rewinding time.\n\
Believe me, real good tape sorts were quite spectacular to watch!\n\
From all times, sorting has always been a Great Art! :-)\n");


static struct PyModuleDef _heapqmodule = {
    PyModuleDef_HEAD_INIT,
    "_heapq",
    module_doc,
    -1,
    heapq_methods,
    NULL,
    NULL,
    NULL,
    NULL
};

PyMODINIT_FUNC
PyInit__heapq(void)
{
    PyObject *m, *about;

    m = PyModule_Create(&_heapqmodule);
    if (m == NULL)
        return NULL;
    about = PyUnicode_DecodeUTF8(__about__, strlen(__about__), NULL);
    PyModule_AddObject(m, "__about__", about);
    return m;
}

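Example: the functions exported above can be exercised from the embedding
C API. The following stand-alone sketch (illustrative only, not part of the
module; it assumes a CPython build with Python.h on the include path and
omits most error handling) pushes a few integers through _heapq and pops
them back in ascending order.

#include <Python.h>
#include <stdio.h>

int
main(void)
{
    Py_Initialize();

    /* import _heapq and create an empty list to serve as the heap */
    PyObject *heapq_mod = PyImport_ImportModule("_heapq");
    PyObject *heap = PyList_New(0);
    if (heapq_mod == NULL || heap == NULL) {
        PyErr_Print();
        return 1;
    }

    long values[] = {5, 1, 4, 2, 3};
    for (size_t i = 0; i < sizeof(values) / sizeof(values[0]); i++) {
        PyObject *item = PyLong_FromLong(values[i]);
        /* heappush(heap, item) keeps the heap invariant after each append */
        PyObject *none = PyObject_CallMethod(heapq_mod, "heappush", "OO",
                                             heap, item);
        Py_XDECREF(none);
        Py_DECREF(item);
    }

    /* heappop() always returns the smallest item: prints "1 2 3 4 5" */
    while (PyList_GET_SIZE(heap) > 0) {
        PyObject *smallest = PyObject_CallMethod(heapq_mod, "heappop", "O",
                                                 heap);
        printf("%ld ", PyLong_AsLong(smallest));
        Py_DECREF(smallest);
    }
    printf("\n");

    Py_DECREF(heap);
    Py_DECREF(heapq_mod);
    Py_FinalizeEx();
    return 0;
}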