» Core Development > Code coverage > Python/pystate.c

Python code coverage for Python/pystate.c

#countcontent
1n/a
2n/a/* Thread and interpreter state structures and their interfaces */
3n/a
4n/a#include "Python.h"
5n/a
6n/a#define GET_TSTATE() \
7n/a ((PyThreadState*)_Py_atomic_load_relaxed(&_PyThreadState_Current))
8n/a#define SET_TSTATE(value) \
9n/a _Py_atomic_store_relaxed(&_PyThreadState_Current, (uintptr_t)(value))
10n/a#define GET_INTERP_STATE() \
11n/a (GET_TSTATE()->interp)
12n/a
13n/a
14n/a/* --------------------------------------------------------------------------
15n/aCAUTION
16n/a
17n/aAlways use PyMem_RawMalloc() and PyMem_RawFree() directly in this file. A
18n/anumber of these functions are advertised as safe to call when the GIL isn't
19n/aheld, and in a debug build Python redirects (e.g.) PyMem_NEW (etc) to Python's
20n/adebugging obmalloc functions. Those aren't thread-safe (they rely on the GIL
21n/ato avoid the expense of doing their own locking).
22n/a-------------------------------------------------------------------------- */
23n/a
24n/a#ifdef HAVE_DLOPEN
25n/a#ifdef HAVE_DLFCN_H
26n/a#include <dlfcn.h>
27n/a#endif
28n/a#if !HAVE_DECL_RTLD_LAZY
29n/a#define RTLD_LAZY 1
30n/a#endif
31n/a#endif
32n/a
33n/a#ifdef __cplusplus
34n/aextern "C" {
35n/a#endif
36n/a
37n/aint _PyGILState_check_enabled = 1;
38n/a
39n/a#ifdef WITH_THREAD
40n/a#include "pythread.h"
41n/astatic PyThread_type_lock head_mutex = NULL; /* Protects interp->tstate_head */
42n/a#define HEAD_INIT() (void)(head_mutex || (head_mutex = PyThread_allocate_lock()))
43n/a#define HEAD_LOCK() PyThread_acquire_lock(head_mutex, WAIT_LOCK)
44n/a#define HEAD_UNLOCK() PyThread_release_lock(head_mutex)
45n/a
46n/a/* The single PyInterpreterState used by this process'
47n/a GILState implementation
48n/a*/
49n/astatic PyInterpreterState *autoInterpreterState = NULL;
50n/astatic int autoTLSkey = -1;
51n/a#else
52n/a#define HEAD_INIT() /* Nothing */
53n/a#define HEAD_LOCK() /* Nothing */
54n/a#define HEAD_UNLOCK() /* Nothing */
55n/a#endif
56n/a
57n/astatic PyInterpreterState *interp_head = NULL;
58n/a
59n/a/* Assuming the current thread holds the GIL, this is the
60n/a PyThreadState for the current thread. */
61n/a_Py_atomic_address _PyThreadState_Current = {0};
62n/aPyThreadFrameGetter _PyThreadState_GetFrame = NULL;
63n/a
64n/a#ifdef WITH_THREAD
65n/astatic void _PyGILState_NoteThreadState(PyThreadState* tstate);
66n/a#endif
67n/a
68n/a
/* Allocate and initialize a fresh interpreter state and push it onto the
   global interp_head list.  Returns NULL on allocation failure.  Uses
   PyMem_RawMalloc() because callers may not hold the GIL (see the CAUTION
   comment at the top of this file). */
PyInterpreterState *
PyInterpreterState_New(void)
{
    PyInterpreterState *interp = (PyInterpreterState *)
                                 PyMem_RawMalloc(sizeof(PyInterpreterState));

    if (interp != NULL) {
        /* Lazily create head_mutex on first interpreter creation. */
        HEAD_INIT();
#ifdef WITH_THREAD
        if (head_mutex == NULL)
            Py_FatalError("Can't initialize threads for interpreter");
#endif
        interp->modules = NULL;
        interp->modules_by_index = NULL;
        interp->sysdict = NULL;
        interp->builtins = NULL;
        interp->builtins_copy = NULL;
        interp->tstate_head = NULL;
        interp->codec_search_path = NULL;
        interp->codec_search_cache = NULL;
        interp->codec_error_registry = NULL;
        interp->codecs_initialized = 0;
        interp->fscodec_initialized = 0;
        interp->importlib = NULL;
        interp->import_func = NULL;
        /* PEP 523: default frame-evaluation function. */
        interp->eval_frame = _PyEval_EvalFrameDefault;
#ifdef HAVE_DLOPEN
#if HAVE_DECL_RTLD_NOW
        interp->dlopenflags = RTLD_NOW;
#else
        interp->dlopenflags = RTLD_LAZY;
#endif
#endif

        /* Link at the head of the global interpreter list, under
           head_mutex since other threads may traverse the list. */
        HEAD_LOCK();
        interp->next = interp_head;
        interp_head = interp;
        HEAD_UNLOCK();
    }

    return interp;
}
111n/a
112n/a
/* Release all Python objects owned by *interp* (and by each of its thread
   states).  The interpreter struct itself and its thread state structs are
   freed later by PyInterpreterState_Delete(). */
void
PyInterpreterState_Clear(PyInterpreterState *interp)
{
    PyThreadState *p;
    HEAD_LOCK();
    for (p = interp->tstate_head; p != NULL; p = p->next)
        PyThreadState_Clear(p);
    HEAD_UNLOCK();
    Py_CLEAR(interp->codec_search_path);
    Py_CLEAR(interp->codec_search_cache);
    Py_CLEAR(interp->codec_error_registry);
    Py_CLEAR(interp->modules);
    Py_CLEAR(interp->modules_by_index);
    Py_CLEAR(interp->sysdict);
    Py_CLEAR(interp->builtins);
    Py_CLEAR(interp->builtins_copy);
    Py_CLEAR(interp->importlib);
    Py_CLEAR(interp->import_func);
}
132n/a
133n/a
134n/astatic void
135n/azapthreads(PyInterpreterState *interp)
136n/a{
137n/a PyThreadState *p;
138n/a /* No need to lock the mutex here because this should only happen
139n/a when the threads are all really dead (XXX famous last words). */
140n/a while ((p = interp->tstate_head) != NULL) {
141n/a PyThreadState_Delete(p);
142n/a }
143n/a}
144n/a
145n/a
/* Remove *interp* from the global interpreter list and free its memory.
   All of its thread states are deleted first via zapthreads().  When the
   last interpreter goes away, head_mutex is freed as well. */
void
PyInterpreterState_Delete(PyInterpreterState *interp)
{
    PyInterpreterState **p;
    zapthreads(interp);
    HEAD_LOCK();
    /* Walk the list via a pointer-to-pointer so unlinking works for the
       head element too; fatal error if interp isn't in the list. */
    for (p = &interp_head; ; p = &(*p)->next) {
        if (*p == NULL)
            Py_FatalError(
                "PyInterpreterState_Delete: invalid interp");
        if (*p == interp)
            break;
    }
    if (interp->tstate_head != NULL)
        Py_FatalError("PyInterpreterState_Delete: remaining threads");
    *p = interp->next;
    HEAD_UNLOCK();
    PyMem_RawFree(interp);
#ifdef WITH_THREAD
    /* Last interpreter gone: release the list mutex itself. */
    if (interp_head == NULL && head_mutex != NULL) {
        PyThread_free_lock(head_mutex);
        head_mutex = NULL;
    }
#endif
}
171n/a
172n/a
173n/a/* Default implementation for _PyThreadState_GetFrame */
174n/astatic struct _frame *
175n/athreadstate_getframe(PyThreadState *self)
176n/a{
177n/a return self->frame;
178n/a}
179n/a
/* Allocate a thread state bound to *interp* and link it at the head of
   interp->tstate_head.  If *init* is true, run _PyThreadState_Init() so
   the GILState machinery learns about it; _PyThreadState_Prealloc()
   passes init=0 and defers that step.  Returns NULL on allocation
   failure.  Raw allocator: may be called without the GIL. */
static PyThreadState *
new_threadstate(PyInterpreterState *interp, int init)
{
    PyThreadState *tstate = (PyThreadState *)PyMem_RawMalloc(sizeof(PyThreadState));

    /* Install the default frame-getter hook on first use. */
    if (_PyThreadState_GetFrame == NULL)
        _PyThreadState_GetFrame = threadstate_getframe;

    if (tstate != NULL) {
        tstate->interp = interp;

        tstate->frame = NULL;
        tstate->recursion_depth = 0;
        tstate->overflowed = 0;
        tstate->recursion_critical = 0;
        tstate->tracing = 0;
        tstate->use_tracing = 0;
        tstate->gilstate_counter = 0;
        tstate->async_exc = NULL;
#ifdef WITH_THREAD
        tstate->thread_id = PyThread_get_thread_ident();
#else
        tstate->thread_id = 0;
#endif

        tstate->dict = NULL;

        /* Current ("pending") exception triple. */
        tstate->curexc_type = NULL;
        tstate->curexc_value = NULL;
        tstate->curexc_traceback = NULL;

        /* sys.exc_info() triple. */
        tstate->exc_type = NULL;
        tstate->exc_value = NULL;
        tstate->exc_traceback = NULL;

        tstate->c_profilefunc = NULL;
        tstate->c_tracefunc = NULL;
        tstate->c_profileobj = NULL;
        tstate->c_traceobj = NULL;

        tstate->trash_delete_nesting = 0;
        tstate->trash_delete_later = NULL;
        tstate->on_delete = NULL;
        tstate->on_delete_data = NULL;

        tstate->coroutine_wrapper = NULL;
        tstate->in_coroutine_wrapper = 0;
        tstate->co_extra_user_count = 0;

        tstate->async_gen_firstiter = NULL;
        tstate->async_gen_finalizer = NULL;

        if (init)
            _PyThreadState_Init(tstate);

        /* Link into the doubly-linked per-interpreter list, under
           head_mutex since other threads may traverse it. */
        HEAD_LOCK();
        tstate->prev = NULL;
        tstate->next = interp->tstate_head;
        if (tstate->next)
            tstate->next->prev = tstate;
        interp->tstate_head = tstate;
        HEAD_UNLOCK();
    }

    return tstate;
}
246n/a
/* Public constructor: create a thread state for *interp* and register it
   with the GILState machinery immediately (init=1). */
PyThreadState *
PyThreadState_New(PyInterpreterState *interp)
{
    return new_threadstate(interp, 1);
}
252n/a
/* Create a thread state without registering it with the GILState
   machinery; the caller must arrange for _PyThreadState_Init() to run
   later (init=0). */
PyThreadState *
_PyThreadState_Prealloc(PyInterpreterState *interp)
{
    return new_threadstate(interp, 0);
}
258n/a
/* Finish initialization of a (possibly preallocated) thread state by
   letting the GILState machinery record it; a no-op without threads. */
void
_PyThreadState_Init(PyThreadState *tstate)
{
#ifdef WITH_THREAD
    _PyGILState_NoteThreadState(tstate);
#endif
}
266n/a
267n/aPyObject*
268n/aPyState_FindModule(struct PyModuleDef* module)
269n/a{
270n/a Py_ssize_t index = module->m_base.m_index;
271n/a PyInterpreterState *state = GET_INTERP_STATE();
272n/a PyObject *res;
273n/a if (module->m_slots) {
274n/a return NULL;
275n/a }
276n/a if (index == 0)
277n/a return NULL;
278n/a if (state->modules_by_index == NULL)
279n/a return NULL;
280n/a if (index >= PyList_GET_SIZE(state->modules_by_index))
281n/a return NULL;
282n/a res = PyList_GET_ITEM(state->modules_by_index, index);
283n/a return res==Py_None ? NULL : res;
284n/a}
285n/a
/* Record *module* at its definition's m_index slot in this interpreter's
   modules_by_index list, creating/growing the list as needed.  Returns 0
   on success, -1 with an exception set on failure. */
int
_PyState_AddModule(PyObject* module, struct PyModuleDef* def)
{
    PyInterpreterState *state;
    if (!def) {
        assert(PyErr_Occurred());
        return -1;
    }
    /* Multi-phase-init modules must not be registered in this cache. */
    if (def->m_slots) {
        PyErr_SetString(PyExc_SystemError,
                        "PyState_AddModule called on module with slots");
        return -1;
    }
    state = GET_INTERP_STATE();
    if (!state->modules_by_index) {
        state->modules_by_index = PyList_New(0);
        if (!state->modules_by_index)
            return -1;
    }
    /* Pad the list with Py_None until index m_index exists. */
    while(PyList_GET_SIZE(state->modules_by_index) <= def->m_base.m_index)
        if (PyList_Append(state->modules_by_index, Py_None) < 0)
            return -1;
    /* PyList_SetItem() steals a reference, hence the INCREF. */
    Py_INCREF(module);
    return PyList_SetItem(state->modules_by_index,
                          def->m_base.m_index, module);
}
312n/a
313n/aint
314n/aPyState_AddModule(PyObject* module, struct PyModuleDef* def)
315n/a{
316n/a Py_ssize_t index;
317n/a PyInterpreterState *state = GET_INTERP_STATE();
318n/a if (!def) {
319n/a Py_FatalError("PyState_AddModule: Module Definition is NULL");
320n/a return -1;
321n/a }
322n/a index = def->m_base.m_index;
323n/a if (state->modules_by_index) {
324n/a if(PyList_GET_SIZE(state->modules_by_index) >= index) {
325n/a if(module == PyList_GET_ITEM(state->modules_by_index, index)) {
326n/a Py_FatalError("PyState_AddModule: Module already added!");
327n/a return -1;
328n/a }
329n/a }
330n/a }
331n/a return _PyState_AddModule(module, def);
332n/a}
333n/a
334n/aint
335n/aPyState_RemoveModule(struct PyModuleDef* def)
336n/a{
337n/a PyInterpreterState *state;
338n/a Py_ssize_t index = def->m_base.m_index;
339n/a if (def->m_slots) {
340n/a PyErr_SetString(PyExc_SystemError,
341n/a "PyState_RemoveModule called on module with slots");
342n/a return -1;
343n/a }
344n/a state = GET_INTERP_STATE();
345n/a if (index == 0) {
346n/a Py_FatalError("PyState_RemoveModule: Module index invalid.");
347n/a return -1;
348n/a }
349n/a if (state->modules_by_index == NULL) {
350n/a Py_FatalError("PyState_RemoveModule: Interpreters module-list not acessible.");
351n/a return -1;
352n/a }
353n/a if (index > PyList_GET_SIZE(state->modules_by_index)) {
354n/a Py_FatalError("PyState_RemoveModule: Module index out of bounds.");
355n/a return -1;
356n/a }
357n/a return PyList_SetItem(state->modules_by_index, index, Py_None);
358n/a}
359n/a
360n/a/* used by import.c:PyImport_Cleanup */
/* used by import.c:PyImport_Cleanup */
/* Drop all cached modules for this interpreter.  For each module with a
   definition, also clear the saved copy of its dict (m_copy).  The list
   itself is emptied rather than set to NULL, since other code may hold
   a reference to it. */
void
_PyState_ClearModules(void)
{
    PyInterpreterState *state = GET_INTERP_STATE();
    if (state->modules_by_index) {
        Py_ssize_t i;
        for (i = 0; i < PyList_GET_SIZE(state->modules_by_index); i++) {
            PyObject *m = PyList_GET_ITEM(state->modules_by_index, i);
            if (PyModule_Check(m)) {
                /* cleanup the saved copy of module dicts */
                PyModuleDef *md = PyModule_GetDef(m);
                if (md)
                    Py_CLEAR(md->m_base.m_copy);
            }
        }
        /* Setting modules_by_index to NULL could be dangerous, so we
           clear the list instead. */
        if (PyList_SetSlice(state->modules_by_index,
                            0, PyList_GET_SIZE(state->modules_by_index),
                            NULL))
            PyErr_WriteUnraisable(state->modules_by_index);
    }
}
384n/a
/* Release every Python object referenced by *tstate*.  The struct itself
   is not freed (see tstate_delete_common()); the GIL must be held since
   destructors can run. */
void
PyThreadState_Clear(PyThreadState *tstate)
{
    /* A live frame at clear time usually indicates the thread was not
       shut down cleanly. */
    if (Py_VerboseFlag && tstate->frame != NULL)
        fprintf(stderr,
                "PyThreadState_Clear: warning: thread still has a frame\n");

    Py_CLEAR(tstate->frame);

    Py_CLEAR(tstate->dict);
    Py_CLEAR(tstate->async_exc);

    Py_CLEAR(tstate->curexc_type);
    Py_CLEAR(tstate->curexc_value);
    Py_CLEAR(tstate->curexc_traceback);

    Py_CLEAR(tstate->exc_type);
    Py_CLEAR(tstate->exc_value);
    Py_CLEAR(tstate->exc_traceback);

    /* Clear the C function pointers before dropping their companion
       objects so no stale hook can fire mid-teardown. */
    tstate->c_profilefunc = NULL;
    tstate->c_tracefunc = NULL;
    Py_CLEAR(tstate->c_profileobj);
    Py_CLEAR(tstate->c_traceobj);

    Py_CLEAR(tstate->coroutine_wrapper);
    Py_CLEAR(tstate->async_gen_firstiter);
    Py_CLEAR(tstate->async_gen_finalizer);
}
414n/a
415n/a
416n/a/* Common code for PyThreadState_Delete() and PyThreadState_DeleteCurrent() */
/* Common code for PyThreadState_Delete() and PyThreadState_DeleteCurrent() */
/* Unlink *tstate* from its interpreter's doubly-linked list (under
   head_mutex), invoke the on_delete callback if any, then free the
   struct with the raw allocator. */
static void
tstate_delete_common(PyThreadState *tstate)
{
    PyInterpreterState *interp;
    if (tstate == NULL)
        Py_FatalError("PyThreadState_Delete: NULL tstate");
    interp = tstate->interp;
    if (interp == NULL)
        Py_FatalError("PyThreadState_Delete: NULL interp");
    HEAD_LOCK();
    if (tstate->prev)
        tstate->prev->next = tstate->next;
    else
        interp->tstate_head = tstate->next;
    if (tstate->next)
        tstate->next->prev = tstate->prev;
    HEAD_UNLOCK();
    /* Used e.g. by the threading module to signal thread exit. */
    if (tstate->on_delete != NULL) {
        tstate->on_delete(tstate->on_delete_data);
    }
    PyMem_RawFree(tstate);
}
439n/a
440n/a
/* Delete a thread state that is NOT the current one (fatal otherwise).
   Also removes the GILState TLS association if *tstate* was the one
   stored for this OS thread. */
void
PyThreadState_Delete(PyThreadState *tstate)
{
    if (tstate == GET_TSTATE())
        Py_FatalError("PyThreadState_Delete: tstate is still current");
#ifdef WITH_THREAD
    if (autoInterpreterState && PyThread_get_key_value(autoTLSkey) == tstate)
        PyThread_delete_key_value(autoTLSkey);
#endif /* WITH_THREAD */
    tstate_delete_common(tstate);
}
452n/a
453n/a
454n/a#ifdef WITH_THREAD
/* Delete the current thread state and release the GIL.  Must be called
   with the GIL held; after this returns the caller has no thread state
   and no GIL. */
void
PyThreadState_DeleteCurrent()
{
    PyThreadState *tstate = GET_TSTATE();
    if (tstate == NULL)
        Py_FatalError(
            "PyThreadState_DeleteCurrent: no current tstate");
    tstate_delete_common(tstate);
    /* Drop the GILState TLS association for this OS thread, if any. */
    if (autoInterpreterState && PyThread_get_key_value(autoTLSkey) == tstate)
        PyThread_delete_key_value(autoTLSkey);
    SET_TSTATE(NULL);
    PyEval_ReleaseLock();
}
468n/a#endif /* WITH_THREAD */
469n/a
470n/a
471n/a/*
472n/a * Delete all thread states except the one passed as argument.
473n/a * Note that, if there is a current thread state, it *must* be the one
474n/a * passed as argument. Also, this won't touch any other interpreters
475n/a * than the current one, since we don't know which thread state should
476n/a * be kept in those other interpreteres.
477n/a */
/* Delete every thread state of tstate->interp except *tstate* itself.
   First unlink all of them under head_mutex, then clear and free the
   garbage list without the lock so that destructors (which may run
   Python code) cannot deadlock on head_mutex. */
void
_PyThreadState_DeleteExcept(PyThreadState *tstate)
{
    PyInterpreterState *interp = tstate->interp;
    PyThreadState *p, *next, *garbage;
    HEAD_LOCK();
    /* Remove all thread states, except tstate, from the linked list of
       thread states.  This will allow calling PyThreadState_Clear()
       without holding the lock. */
    garbage = interp->tstate_head;
    if (garbage == tstate)
        garbage = tstate->next;
    if (tstate->prev)
        tstate->prev->next = tstate->next;
    if (tstate->next)
        tstate->next->prev = tstate->prev;
    tstate->prev = tstate->next = NULL;
    interp->tstate_head = tstate;
    HEAD_UNLOCK();
    /* Clear and deallocate all stale thread states.  Even if this
       executes Python code, we should be safe since it executes
       in the current thread, not one of the stale threads. */
    for (p = garbage; p; p = next) {
        next = p->next;
        PyThreadState_Clear(p);
        PyMem_RawFree(p);
    }
}
506n/a
507n/a
/* Like PyThreadState_Get(), but returns NULL instead of aborting when
   there is no current thread state. */
PyThreadState *
_PyThreadState_UncheckedGet(void)
{
    return GET_TSTATE();
}
513n/a
514n/a
515n/aPyThreadState *
516n/aPyThreadState_Get(void)
517n/a{
518n/a PyThreadState *tstate = GET_TSTATE();
519n/a if (tstate == NULL)
520n/a Py_FatalError("PyThreadState_Get: no current thread");
521n/a
522n/a return tstate;
523n/a}
524n/a
525n/a
/* Install *newts* as the current thread state and return the previous
   one.  In debug builds with threads, sanity-check that no other thread
   state of the same interpreter is already bound to this OS thread. */
PyThreadState *
PyThreadState_Swap(PyThreadState *newts)
{
    PyThreadState *oldts = GET_TSTATE();

    SET_TSTATE(newts);
    /* It should not be possible for more than one thread state
       to be used for a thread.  Check this the best we can in debug
       builds.
    */
#if defined(Py_DEBUG) && defined(WITH_THREAD)
    if (newts) {
        /* This can be called from PyEval_RestoreThread(). Similar
           to it, we need to ensure errno doesn't change.
        */
        int err = errno;
        PyThreadState *check = PyGILState_GetThisThreadState();
        if (check && check->interp == newts->interp && check != newts)
            Py_FatalError("Invalid thread state for this thread");
        errno = err;
    }
#endif
    return oldts;
}
550n/a
551n/a/* An extension mechanism to store arbitrary additional per-thread state.
552n/a PyThreadState_GetDict() returns a dictionary that can be used to hold such
553n/a state; the caller should pick a unique key and store its state there. If
554n/a PyThreadState_GetDict() returns NULL, an exception has *not* been raised
555n/a and the caller should assume no per-thread state is available. */
556n/a
557n/aPyObject *
558n/aPyThreadState_GetDict(void)
559n/a{
560n/a PyThreadState *tstate = GET_TSTATE();
561n/a if (tstate == NULL)
562n/a return NULL;
563n/a
564n/a if (tstate->dict == NULL) {
565n/a PyObject *d;
566n/a tstate->dict = d = PyDict_New();
567n/a if (d == NULL)
568n/a PyErr_Clear();
569n/a }
570n/a return tstate->dict;
571n/a}
572n/a
573n/a
574n/a/* Asynchronously raise an exception in a thread.
575n/a Requested by Just van Rossum and Alex Martelli.
576n/a To prevent naive misuse, you must write your own extension
577n/a to call this, or use ctypes. Must be called with the GIL held.
578n/a Returns the number of tstates modified (normally 1, but 0 if `id` didn't
579n/a match any known thread id). Can be called with exc=NULL to clear an
580n/a existing async exception. This raises no exceptions. */
581n/a
/* Schedule exception *exc* to be raised asynchronously in the thread with
   thread id *id* (exc=NULL clears a pending async exception).  Returns
   the number of thread states modified (0 or 1).  Must be called with
   the GIL held; raises no exceptions. */
int
PyThreadState_SetAsyncExc(long id, PyObject *exc) {
    PyInterpreterState *interp = GET_INTERP_STATE();
    PyThreadState *p;

    /* Although the GIL is held, a few C API functions can be called
     * without the GIL held, and in particular some that create and
     * destroy thread and interpreter states.  Those can mutate the
     * list of thread states we're traversing, so to prevent that we lock
     * head_mutex for the duration.
     */
    HEAD_LOCK();
    for (p = interp->tstate_head; p != NULL; p = p->next) {
        if (p->thread_id == id) {
            /* Tricky:  we need to decref the current value
             * (if any) in p->async_exc, but that can in turn
             * allow arbitrary Python code to run, including
             * perhaps calls to this function.  To prevent
             * deadlock, we need to release head_mutex before
             * the decref.
             */
            PyObject *old_exc = p->async_exc;
            Py_XINCREF(exc);
            p->async_exc = exc;
            HEAD_UNLOCK();
            Py_XDECREF(old_exc);
            /* Nudge the eval loop so the target thread notices. */
            _PyEval_SignalAsyncExc();
            return 1;
        }
    }
    HEAD_UNLOCK();
    return 0;
}
615n/a
616n/a
617n/a/* Routines for advanced debuggers, requested by David Beazley.
618n/a Don't use unless you know what you are doing! */
619n/a
/* Debugger API: return the most recently created interpreter state
   (head of the global list). */
PyInterpreterState *
PyInterpreterState_Head(void)
{
    return interp_head;
}
625n/a
/* Debugger API: return the next interpreter state in the global list. */
PyInterpreterState *
PyInterpreterState_Next(PyInterpreterState *interp) {
    return interp->next;
}
630n/a
/* Debugger API: return the first thread state of *interp*. */
PyThreadState *
PyInterpreterState_ThreadHead(PyInterpreterState *interp) {
    return interp->tstate_head;
}
635n/a
/* Debugger API: return the next thread state in its interpreter's list. */
PyThreadState *
PyThreadState_Next(PyThreadState *tstate) {
    return tstate->next;
}
640n/a
641n/a/* The implementation of sys._current_frames(). This is intended to be
642n/a called with the GIL held, as it will be when called via
643n/a sys._current_frames(). It's possible it would work fine even without
644n/a the GIL held, but haven't thought enough about that.
645n/a*/
/* Implementation of sys._current_frames(): build a new dict mapping each
   thread id (int) to that thread's topmost frame, across all
   interpreters.  Intended to be called with the GIL held; returns a new
   reference, or NULL with an exception set on failure. */
PyObject *
_PyThread_CurrentFrames(void)
{
    PyObject *result;
    PyInterpreterState *i;

    result = PyDict_New();
    if (result == NULL)
        return NULL;

    /* for i in all interpreters:
     *     for t in all of i's thread states:
     *          if t's frame isn't NULL, map t's id to its frame
     * Because these lists can mutate even when the GIL is held, we
     * need to grab head_mutex for the duration.
     */
    HEAD_LOCK();
    for (i = interp_head; i != NULL; i = i->next) {
        PyThreadState *t;
        for (t = i->tstate_head; t != NULL; t = t->next) {
            PyObject *id;
            int stat;
            struct _frame *frame = t->frame;
            /* Threads with no frame (e.g. not yet running) are skipped. */
            if (frame == NULL)
                continue;
            id = PyLong_FromLong(t->thread_id);
            if (id == NULL)
                goto Fail;
            stat = PyDict_SetItem(result, id, (PyObject *)frame);
            Py_DECREF(id);
            if (stat < 0)
                goto Fail;
        }
    }
    HEAD_UNLOCK();
    return result;

 Fail:
    /* Unlock before touching refcounts on the way out. */
    HEAD_UNLOCK();
    Py_DECREF(result);
    return NULL;
}
688n/a
689n/a/* Python "auto thread state" API. */
690n/a#ifdef WITH_THREAD
691n/a
692n/a/* Keep this as a static, as it is not reliable! It can only
693n/a ever be compared to the state for the *current* thread.
694n/a * If not equal, then it doesn't matter that the actual
695n/a value may change immediately after comparison, as it can't
696n/a possibly change to the current thread's state.
697n/a * If equal, then the current thread holds the lock, so the value can't
698n/a change until we yield the lock.
699n/a*/
/* True iff *tstate* is the thread state currently installed (i.e. this
   thread holds the GIL).  Only meaningful for the calling thread's own
   tstate — see the comment block above. */
static int
PyThreadState_IsCurrent(PyThreadState *tstate)
{
    /* Must be the tstate for this thread */
    assert(PyGILState_GetThisThreadState()==tstate);
    return tstate == GET_TSTATE();
}
707n/a
708n/a/* Internal initialization/finalization functions called by
709n/a Py_Initialize/Py_FinalizeEx
710n/a*/
/* Set up the GILState machinery: allocate the TLS key, remember the
   interpreter that owns GILState, and register the initial thread state.
   Called from Py_Initialize() with the first interp/tstate pair. */
void
_PyGILState_Init(PyInterpreterState *i, PyThreadState *t)
{
    assert(i && t); /* must init with valid states */
    autoTLSkey = PyThread_create_key();
    if (autoTLSkey == -1)
        Py_FatalError("Could not allocate TLS entry");
    autoInterpreterState = i;
    assert(PyThread_get_key_value(autoTLSkey) == NULL);
    assert(t->gilstate_counter == 0);

    _PyGILState_NoteThreadState(t);
}
724n/a
/* Return the interpreter used by the GILState machinery.  "Unsafe"
   because the caller gets no guarantee the pointer stays valid. */
PyInterpreterState *
_PyGILState_GetInterpreterStateUnsafe(void)
{
    return autoInterpreterState;
}
730n/a
/* Tear down the GILState machinery (called from Py_FinalizeEx()):
   release the TLS key and reset the module-level state. */
void
_PyGILState_Fini(void)
{
    PyThread_delete_key(autoTLSkey);
    autoTLSkey = -1;
    autoInterpreterState = NULL;
}
738n/a
739n/a/* Reset the TLS key - called by PyOS_AfterFork().
740n/a * This should not be necessary, but some - buggy - pthread implementations
741n/a * don't reset TLS upon fork(), see issue #10517.
742n/a */
/* Reset the TLS key - called by PyOS_AfterFork().
 * This should not be necessary, but some - buggy - pthread implementations
 * don't reset TLS upon fork(), see issue #10517.
 */
void
_PyGILState_Reinit(void)
{
    /* Capture the current association (if any) before destroying the
       old key, so it can be re-established under the new key. */
    PyThreadState *tstate = PyGILState_GetThisThreadState();
    PyThread_delete_key(autoTLSkey);
    if ((autoTLSkey = PyThread_create_key()) == -1)
        Py_FatalError("Could not allocate TLS entry");

    /* If the thread had an associated auto thread state, reassociate it with
     * the new key. */
    if (tstate && PyThread_set_key_value(autoTLSkey, (void *)tstate) < 0)
        Py_FatalError("Couldn't create autoTLSkey mapping");
}
756n/a
757n/a/* When a thread state is created for a thread by some mechanism other than
758n/a PyGILState_Ensure, it's important that the GILState machinery knows about
759n/a it so it doesn't try to create another thread state for the thread (this is
760n/a a better fix for SF bug #1010677 than the first one attempted).
761n/a*/
/* Record *tstate* in thread-local storage so PyGILState_Ensure() reuses
   it instead of creating a second thread state for the same OS thread
   (SF bug #1010677). */
static void
_PyGILState_NoteThreadState(PyThreadState* tstate)
{
    /* If autoTLSkey isn't initialized, this must be the very first
       threadstate created in Py_Initialize().  Don't do anything for now
       (we'll be back here when _PyGILState_Init is called). */
    if (!autoInterpreterState)
        return;

    /* Stick the thread state for this thread in thread local storage.

       The only situation where you can legitimately have more than one
       thread state for an OS level thread is when there are multiple
       interpreters.

       You shouldn't really be using the PyGILState_ APIs anyway (see issues
       #10915 and #15751).

       The first thread state created for that given OS level thread will
       "win", which seems reasonable behaviour.
    */
    if (PyThread_get_key_value(autoTLSkey) == NULL) {
        if (PyThread_set_key_value(autoTLSkey, (void *)tstate) < 0)
            Py_FatalError("Couldn't create autoTLSkey mapping");
    }

    /* PyGILState_Release must not try to delete this thread state. */
    tstate->gilstate_counter = 1;
}
791n/a
792n/a/* The public functions */
/* The public functions */
/* Return the thread state associated with this OS thread via the
   GILState TLS key, or NULL if the machinery isn't initialized or no
   state was ever recorded for this thread. */
PyThreadState *
PyGILState_GetThisThreadState(void)
{
    if (autoInterpreterState == NULL)
        return NULL;
    return (PyThreadState *)PyThread_get_key_value(autoTLSkey);
}
800n/a
801n/aint
802n/aPyGILState_Check(void)
803n/a{
804n/a PyThreadState *tstate;
805n/a
806n/a if (!_PyGILState_check_enabled)
807n/a return 1;
808n/a
809n/a if (autoTLSkey == -1)
810n/a return 1;
811n/a
812n/a tstate = GET_TSTATE();
813n/a if (tstate == NULL)
814n/a return 0;
815n/a
816n/a return (tstate == PyGILState_GetThisThreadState());
817n/a}
818n/a
/* Acquire the GIL for this thread, creating a thread state (and even the
   GIL itself) on demand.  Returns PyGILState_LOCKED if this thread
   already held the GIL, else PyGILState_UNLOCKED; pass the value to the
   matching PyGILState_Release(). */
PyGILState_STATE
PyGILState_Ensure(void)
{
    int current;
    PyThreadState *tcur;
    /* Note that we do not auto-init Python here - apart from
       potential races with 2 threads auto-initializing, pep-311
       spells out other issues.  Embedders are expected to have
       called Py_Initialize() and usually PyEval_InitThreads().
    */
    assert(autoInterpreterState); /* Py_Initialize() hasn't been called! */
    tcur = (PyThreadState *)PyThread_get_key_value(autoTLSkey);
    if (tcur == NULL) {
        /* At startup, Python has no concrete GIL. If PyGILState_Ensure() is
           called from a new thread for the first time, we need the create the
           GIL. */
        PyEval_InitThreads();

        /* Create a new thread state for this thread */
        tcur = PyThreadState_New(autoInterpreterState);
        if (tcur == NULL)
            Py_FatalError("Couldn't create thread-state for new thread");
        /* This is our thread state!  We'll need to delete it in the
           matching call to PyGILState_Release(). */
        tcur->gilstate_counter = 0;
        current = 0; /* new thread state is never current */
    }
    else
        current = PyThreadState_IsCurrent(tcur);
    if (current == 0)
        PyEval_RestoreThread(tcur);
    /* Update our counter in the thread-state - no need for locks:
       - tcur will remain valid as we hold the GIL.
       - the counter is safe as we are the only thread "allowed"
         to modify this value
    */
    ++tcur->gilstate_counter;
    return current ? PyGILState_LOCKED : PyGILState_UNLOCKED;
}
858n/a
/* Undo one matching PyGILState_Ensure(): decrement the per-thread
   counter and, when it reaches zero, clear and delete the thread state
   created by Ensure (which also releases the GIL).  Otherwise release
   the GIL only if Ensure had to acquire it (*oldstate* == UNLOCKED). */
void
PyGILState_Release(PyGILState_STATE oldstate)
{
    PyThreadState *tcur = (PyThreadState *)PyThread_get_key_value(
                                                autoTLSkey);
    if (tcur == NULL)
        Py_FatalError("auto-releasing thread-state, "
                      "but no thread-state for this thread");
    /* We must hold the GIL and have our thread state current */
    /* XXX - remove the check - the assert should be fine,
       but while this is very new (April 2003), the extra check
       by release-only users can't hurt.
    */
    if (! PyThreadState_IsCurrent(tcur))
        Py_FatalError("This thread state must be current when releasing");
    assert(PyThreadState_IsCurrent(tcur));
    --tcur->gilstate_counter;
    assert(tcur->gilstate_counter >= 0); /* illegal counter value */

    /* If we're going to destroy this thread-state, we must
     * clear it while the GIL is held, as destructors may run.
     */
    if (tcur->gilstate_counter == 0) {
        /* can't have been locked when we created it */
        assert(oldstate == PyGILState_UNLOCKED);
        PyThreadState_Clear(tcur);
        /* Delete the thread-state.  Note this releases the GIL too!
         * It's vital that the GIL be held here, to avoid shutdown
         * races; see bugs 225673 and 1061968 (that nasty bug has a
         * habit of coming back).
         */
        PyThreadState_DeleteCurrent();
    }
    /* Release the lock if necessary */
    else if (oldstate == PyGILState_UNLOCKED)
        PyEval_SaveThread();
}
896n/a
897n/a#endif /* WITH_THREAD */
898n/a
899n/a#ifdef __cplusplus
900n/a}
901n/a#endif
902n/a
903n/a