»Core Development > Code coverage > Parser/node.c

# Python code coverage for Parser/node.c

# count | content
1n/a/* Parse tree node implementation */
2n/a
3n/a#include "Python.h"
4n/a#include "node.h"
5n/a#include "errcode.h"
6n/a
7n/anode *
8n/aPyNode_New(int type)
9n/a{
10n/a node *n = (node *) PyObject_MALLOC(1 * sizeof(node));
11n/a if (n == NULL)
12n/a return NULL;
13n/a n->n_type = type;
14n/a n->n_str = NULL;
15n/a n->n_lineno = 0;
16n/a n->n_nchildren = 0;
17n/a n->n_child = NULL;
18n/a return n;
19n/a}
20n/a
/* See comments at XXXROUNDUP below.  Returns -1 on overflow.
 *
 * Only called for n > 128 (asserted), so the result starts at 256 and is
 * doubled until it reaches n.
 */
static int
fancy_roundup(int n)
{
    /* Round up to the closest power of 2 >= n. */
    int result = 256;
    assert(n > 128);
    while (result < n) {
        /* Fix: the old code shifted first and tested `result <= 0`,
           which relies on signed overflow -- undefined behavior.  Test
           BEFORE shifting so the doubling can never overflow. */
        if (result > INT_MAX / 2)
            return -1;
        result <<= 1;
    }
    return result;
}
35n/a
/* A gimmick to make massive numbers of reallocs quicker.  The result is
 * a number >= the input.  In PyNode_AddChild, it's used like so, when
 * deciding whether the n_child array needs to grow:
 *
 * if XXXROUNDUP(current_size) < XXXROUNDUP(current_size + 1):
 *     allocate space for XXXROUNDUP(current_size + 1) total children
 * else:
 *     we already have enough space
 *
 * Since a node starts out empty, we must have
 *
 * XXXROUNDUP(0) < XXXROUNDUP(1)
 *
 * so that we allocate space for the first child.  One-child nodes are very
 * common (presumably that would change if we used a more abstract form
 * of syntax tree), so to avoid wasting memory it's desirable that
 * XXXROUNDUP(1) == 1.  That in turn forces XXXROUNDUP(0) == 0.
 *
 * Else for 2 <= n <= 128, we round up to the closest multiple of 4.  Why 4?
 * Rounding up to a multiple of an exact power of 2 is very efficient, and
 * most nodes with more than one child have <= 4 kids.
 *
 * Else we call fancy_roundup() to grow proportionately to n.  We've got an
 * extreme case then (like test_longexp.py), and on many platforms doing
 * anything less than proportional growth leads to exorbitant runtime
 * (e.g., MacPython), or extreme fragmentation of user address space (e.g.,
 * Win98).
 *
 * In a run of compileall across the 2.3a0 Lib directory, Andrew MacIntyre
 * reported that, with this scheme, 89% of PyObject_REALLOC calls in
 * PyNode_AddChild passed 1 for the size, and 9% passed 4.  So this usually
 * wastes very little memory, but is very effective at sidestepping
 * platform-realloc disasters on vulnerable platforms.
 *
 * Note that this would be straightforward if a node stored its current
 * capacity.  The code is tricky to avoid that.
 */
#define XXXROUNDUP(n) ((n) <= 1 ? (n) : \
               (n) <= 128 ? (int)_Py_SIZE_ROUND_UP((n), 4) : \
               fancy_roundup(n))
76n/a
77n/a
/* Append a new child node to n1, growing the n_child array if needed.
 *
 * The string `str` is stored by reference; ownership transfers to the tree
 * (freechildren releases it).  Returns 0 on success, E_OVERFLOW if the
 * child count or capacity computation would overflow an int, or E_NOMEM
 * on allocation failure.  On any error, n1 is left unchanged.
 *
 * The current capacity of n_child is not stored anywhere; it is re-derived
 * from the child count via XXXROUNDUP (see the big comment above).
 */
int
PyNode_AddChild(node *n1, int type, char *str, int lineno, int col_offset)
{
    const int nch = n1->n_nchildren;
    int current_capacity;
    int required_capacity;
    node *n;

    /* nch < 0 guards against a corrupted/overflowed child count. */
    if (nch == INT_MAX || nch < 0)
        return E_OVERFLOW;

    /* fancy_roundup (inside XXXROUNDUP) returns -1 on overflow. */
    current_capacity = XXXROUNDUP(nch);
    required_capacity = XXXROUNDUP(nch + 1);
    if (current_capacity < 0 || required_capacity < 0)
        return E_OVERFLOW;
    if (current_capacity < required_capacity) {
        /* Guard the byte-size multiplication below against size_t overflow. */
        if ((size_t)required_capacity > SIZE_MAX / sizeof(node)) {
            return E_NOMEM;
        }
        n = n1->n_child;
        n = (node *) PyObject_REALLOC(n,
                                      required_capacity * sizeof(node));
        if (n == NULL)
            return E_NOMEM;
        n1->n_child = n;
    }

    /* Initialize the new last child in place. */
    n = &n1->n_child[n1->n_nchildren++];
    n->n_type = type;
    n->n_str = str;
    n->n_lineno = lineno;
    n->n_col_offset = col_offset;
    n->n_nchildren = 0;
    n->n_child = NULL;
    return 0;
}
114n/a
115n/a/* Forward */
116n/astatic void freechildren(node *);
117n/astatic Py_ssize_t sizeofchildren(node *n);
118n/a
119n/a
120n/avoid
121n/aPyNode_Free(node *n)
122n/a{
123n/a if (n != NULL) {
124n/a freechildren(n);
125n/a PyObject_FREE(n);
126n/a }
127n/a}
128n/a
129n/aPy_ssize_t
130n/a_PyNode_SizeOf(node *n)
131n/a{
132n/a Py_ssize_t res = 0;
133n/a
134n/a if (n != NULL)
135n/a res = sizeof(node) + sizeofchildren(n);
136n/a return res;
137n/a}
138n/a
139n/astatic void
140n/afreechildren(node *n)
141n/a{
142n/a int i;
143n/a for (i = NCH(n); --i >= 0; )
144n/a freechildren(CHILD(n, i));
145n/a if (n->n_child != NULL)
146n/a PyObject_FREE(n->n_child);
147n/a if (STR(n) != NULL)
148n/a PyObject_FREE(STR(n));
149n/a}
150n/a
151n/astatic Py_ssize_t
152n/asizeofchildren(node *n)
153n/a{
154n/a Py_ssize_t res = 0;
155n/a int i;
156n/a for (i = NCH(n); --i >= 0; )
157n/a res += sizeofchildren(CHILD(n, i));
158n/a if (n->n_child != NULL)
159n/a /* allocated size of n->n_child array */
160n/a res += XXXROUNDUP(NCH(n)) * sizeof(node);
161n/a if (STR(n) != NULL)
162n/a res += strlen(STR(n)) + 1;
163n/a return res;
164n/a}