
Python code coverage for Modules/_ctypes/libffi/src/ia64/ffi.c

#   count   content
1n/a/* -----------------------------------------------------------------------
2n/a ffi.c - Copyright (c) 1998, 2007, 2008, 2012 Red Hat, Inc.
3n/a Copyright (c) 2000 Hewlett Packard Company
4n/a Copyright (c) 2011 Anthony Green
5n/a
6n/a IA64 Foreign Function Interface
7n/a
8n/a Permission is hereby granted, free of charge, to any person obtaining
9n/a a copy of this software and associated documentation files (the
10n/a ``Software''), to deal in the Software without restriction, including
11n/a without limitation the rights to use, copy, modify, merge, publish,
12n/a distribute, sublicense, and/or sell copies of the Software, and to
13n/a permit persons to whom the Software is furnished to do so, subject to
14n/a the following conditions:
15n/a
16n/a The above copyright notice and this permission notice shall be included
17n/a in all copies or substantial portions of the Software.
18n/a
19n/a THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
20n/a EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21n/a MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
22n/a NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
23n/a HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
24n/a WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
25n/a OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
26n/a DEALINGS IN THE SOFTWARE.
27n/a ----------------------------------------------------------------------- */
28n/a
29n/a#include <ffi.h>
30n/a#include <ffi_common.h>
31n/a
32n/a#include <stdlib.h>
33n/a#include <stdbool.h>
34n/a#include <float.h>
35n/a
36n/a#include "ia64_flags.h"
37n/a
38n/a/* A 64-bit pointer value. In LP64 mode, this is effectively a plain
39n/a pointer. In ILP32 mode, it's a pointer that's been extended to
40n/a 64 bits by "addp4". */
41n/atypedef void *PTR64 __attribute__((mode(DI)));
42n/a
43n/a/* Memory image of fp register contents. This is the implementation
44n/a specific format used by ldf.fill/stf.spill. All we care about is
45n/a that it wants a 16 byte aligned slot. */
46n/atypedef struct
47n/a{
48n/a UINT64 x[2] __attribute__((aligned(16)));
49n/a} fpreg;
50n/a
51n/a
52n/a/* The stack layout given to ffi_call_unix and ffi_closure_unix_inner. */
53n/a
54n/astruct ia64_args
55n/a{
56n/a fpreg fp_regs[8]; /* Contents of 8 fp arg registers. */
57n/a UINT64 gp_regs[8]; /* Contents of 8 gp arg registers. */
58n/a UINT64 other_args[]; /* Arguments passed on stack, variable size. */
59n/a};
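
The offsets this layout implies drive the rest of the file: the eight fp spill slots are 16 bytes each, so the gp register slots begin 128 bytes into the frame and the in-memory arguments begin at 192 bytes. A minimal sketch, assuming only the definitions above (the check_ia64_args_layout helper is hypothetical):

    #include <assert.h>
    #include <stddef.h>

    static void check_ia64_args_layout (void)
    {
      /* Eight 16-byte fpreg slots come first ...  */
      assert (offsetof (struct ia64_args, gp_regs) == 8 * sizeof (fpreg));            /* 128 */
      /* ... then eight UINT64 gp slots, then the variable-sized stack portion.  */
      assert (offsetof (struct ia64_args, other_args) == 128 + 8 * sizeof (UINT64));  /* 192 */
    }

ffi_prep_cif_machdep below adds exactly this 128-byte prefix, offsetof (struct ia64_args, gp_regs[0]), to the generic estimate of the argument space.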
60n/a
61n/a
62n/a/* Adjust ADDR, a pointer to an 8 byte slot, to point to the low LEN bytes. */
63n/a
64n/astatic inline void *
65n/aendian_adjust (void *addr, size_t len)
66n/a{
67n/a#ifdef __BIG_ENDIAN__
68n/a return addr + (8 - len);
69n/a#else
70n/a return addr;
71n/a#endif
72n/a}
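
As an illustration of why the adjustment matters, a minimal sketch of reading a 1-byte argument back out of an 8-byte register slot (the read_low_byte helper is hypothetical):

    static char read_low_byte (void)
    {
      UINT64 slot = 'A';                    /* value promoted into a register slot */
      char *p = endian_adjust (&slot, 1);   /* point at the byte actually holding it */
      return *p;                            /* 'A' on either byte order */
    }

Without the adjustment, a big-endian reader would pick up the zero padding at the start of the slot instead of the value at its end.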
73n/a
74n/a/* Store VALUE to ADDR in the current cpu implementation's fp spill format.
75n/a This is a macro instead of a function, so that it works for all 3 floating
76n/a point types without type conversions. Type conversion to long double breaks
77n/a the denorm support. */
78n/a
79n/a#define stf_spill(addr, value) \
80n/a asm ("stf.spill %0 = %1%P0" : "=m" (*addr) : "f"(value));
81n/a
82n/a/* Load a value from ADDR, which is in the current cpu implementation's
83n/a fp spill format. As above, this must also be a macro. */
84n/a
85n/a#define ldf_fill(result, addr) \
86n/a asm ("ldf.fill %0 = %1%P1" : "=f"(result) : "m"(*addr));
87n/a
88n/a/* Return the size of the C type associated with TYPE, which will
89n/a be one of the FFI_IA64_TYPE_HFA_* values. */
90n/a
91n/astatic size_t
92n/ahfa_type_size (int type)
93n/a{
94n/a switch (type)
95n/a {
96n/a case FFI_IA64_TYPE_HFA_FLOAT:
97n/a return sizeof(float);
98n/a case FFI_IA64_TYPE_HFA_DOUBLE:
99n/a return sizeof(double);
100n/a case FFI_IA64_TYPE_HFA_LDOUBLE:
101n/a return sizeof(__float80);
102n/a default:
103n/a abort ();
104n/a }
105n/a}
106n/a
107n/a/* Load from ADDR a value indicated by TYPE, which will be one of
108n/a the FFI_IA64_TYPE_HFA_* values. */
109n/a
110n/astatic void
111n/ahfa_type_load (fpreg *fpaddr, int type, void *addr)
112n/a{
113n/a switch (type)
114n/a {
115n/a case FFI_IA64_TYPE_HFA_FLOAT:
116n/a stf_spill (fpaddr, *(float *) addr);
117n/a return;
118n/a case FFI_IA64_TYPE_HFA_DOUBLE:
119n/a stf_spill (fpaddr, *(double *) addr);
120n/a return;
121n/a case FFI_IA64_TYPE_HFA_LDOUBLE:
122n/a stf_spill (fpaddr, *(__float80 *) addr);
123n/a return;
124n/a default:
125n/a abort ();
126n/a }
127n/a}
128n/a
129n/a/* Store into ADDR the value held in FPADDR, as indicated by TYPE,
130n/a   which will be one of the FFI_IA64_TYPE_HFA_* values. */
131n/a
132n/astatic void
133n/ahfa_type_store (int type, void *addr, fpreg *fpaddr)
134n/a{
135n/a switch (type)
136n/a {
137n/a case FFI_IA64_TYPE_HFA_FLOAT:
138n/a {
139n/a float result;
140n/a ldf_fill (result, fpaddr);
141n/a *(float *) addr = result;
142n/a break;
143n/a }
144n/a case FFI_IA64_TYPE_HFA_DOUBLE:
145n/a {
146n/a double result;
147n/a ldf_fill (result, fpaddr);
148n/a *(double *) addr = result;
149n/a break;
150n/a }
151n/a case FFI_IA64_TYPE_HFA_LDOUBLE:
152n/a {
153n/a __float80 result;
154n/a ldf_fill (result, fpaddr);
155n/a *(__float80 *) addr = result;
156n/a break;
157n/a }
158n/a default:
159n/a abort ();
160n/a }
161n/a}
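
Taken together, the two helpers move one HFA element through an fp register slot; a minimal sketch (copy_float_via_fpreg is hypothetical):

    static float copy_float_via_fpreg (float in)
    {
      fpreg slot;
      float out;
      hfa_type_load (&slot, FFI_IA64_TYPE_HFA_FLOAT, &in);    /* spill into the slot */
      hfa_type_store (FFI_IA64_TYPE_HFA_FLOAT, &out, &slot);  /* fill back out */
      return out;                                             /* equals in */
    }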
162n/a
163n/a/* Is TYPE a struct containing floats, doubles, or extended doubles,
164n/a all of the same fp type? If so, return the element type. Return
165n/a FFI_TYPE_VOID if not. */
166n/a
167n/astatic int
168n/ahfa_element_type (ffi_type *type, int nested)
169n/a{
170n/a int element = FFI_TYPE_VOID;
171n/a
172n/a switch (type->type)
173n/a {
174n/a case FFI_TYPE_FLOAT:
175n/a /* We want to return VOID for raw floating-point types, but the
176n/a synthetic HFA type if we're nested within an aggregate. */
177n/a if (nested)
178n/a element = FFI_IA64_TYPE_HFA_FLOAT;
179n/a break;
180n/a
181n/a case FFI_TYPE_DOUBLE:
182n/a /* Similarly. */
183n/a if (nested)
184n/a element = FFI_IA64_TYPE_HFA_DOUBLE;
185n/a break;
186n/a
187n/a case FFI_TYPE_LONGDOUBLE:
188n/a /* Similarly, except that HFA is true for double extended,
189n/a but not quad precision. Both have sizeof == 16, so tell the
190n/a difference based on the precision. */
191n/a if (LDBL_MANT_DIG == 64 && nested)
192n/a element = FFI_IA64_TYPE_HFA_LDOUBLE;
193n/a break;
194n/a
195n/a case FFI_TYPE_STRUCT:
196n/a {
197n/a ffi_type **ptr = &type->elements[0];
198n/a
199n/a for (ptr = &type->elements[0]; *ptr ; ptr++)
200n/a {
201n/a int sub_element = hfa_element_type (*ptr, 1);
202n/a if (sub_element == FFI_TYPE_VOID)
203n/a return FFI_TYPE_VOID;
204n/a
205n/a if (element == FFI_TYPE_VOID)
206n/a element = sub_element;
207n/a else if (element != sub_element)
208n/a return FFI_TYPE_VOID;
209n/a }
210n/a }
211n/a break;
212n/a
213n/a default:
214n/a return FFI_TYPE_VOID;
215n/a }
216n/a
217n/a return element;
218n/a}
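
For example, a struct of two doubles classifies as a homogeneous aggregate, while mixing element types does not. A sketch, with a hypothetical point_type built from libffi's public type descriptors:

    static ffi_type *point_elements[] = { &ffi_type_double, &ffi_type_double, NULL };
    static ffi_type point_type = { 0, 0, FFI_TYPE_STRUCT, point_elements };

    /* hfa_element_type (&point_type, 0) returns FFI_IA64_TYPE_HFA_DOUBLE, so both
       members travel in fp argument registers.  Swapping one element for
       &ffi_type_float makes the element types disagree, and the function
       returns FFI_TYPE_VOID, i.e. "not an HFA".  */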
219n/a
220n/a
221n/a/* Perform machine dependent cif processing. */
222n/a
223n/affi_status
224n/affi_prep_cif_machdep(ffi_cif *cif)
225n/a{
226n/a int flags;
227n/a
228n/a /* Adjust cif->bytes to include space for the bits of the ia64_args frame
229n/a that precede the integer register portion. The estimate that the
230n/a generic bits did for the argument space required is good enough for the
231n/a integer component. */
232n/a cif->bytes += offsetof(struct ia64_args, gp_regs[0]);
233n/a if (cif->bytes < sizeof(struct ia64_args))
234n/a cif->bytes = sizeof(struct ia64_args);
235n/a
236n/a /* Set the return type flag. */
237n/a flags = cif->rtype->type;
238n/a switch (cif->rtype->type)
239n/a {
240n/a case FFI_TYPE_LONGDOUBLE:
241n/a /* Leave FFI_TYPE_LONGDOUBLE as meaning double extended precision,
242n/a and encode quad precision as a two-word integer structure. */
243n/a if (LDBL_MANT_DIG != 64)
244n/a flags = FFI_IA64_TYPE_SMALL_STRUCT | (16 << 8);
245n/a break;
246n/a
247n/a case FFI_TYPE_STRUCT:
248n/a {
249n/a size_t size = cif->rtype->size;
250n/a int hfa_type = hfa_element_type (cif->rtype, 0);
251n/a
252n/a if (hfa_type != FFI_TYPE_VOID)
253n/a {
254n/a size_t nelts = size / hfa_type_size (hfa_type);
255n/a if (nelts <= 8)
256n/a flags = hfa_type | (size << 8);
257n/a }
258n/a else
259n/a {
260n/a if (size <= 32)
261n/a flags = FFI_IA64_TYPE_SMALL_STRUCT | (size << 8);
262n/a }
263n/a }
264n/a break;
265n/a
266n/a default:
267n/a break;
268n/a }
269n/a cif->flags = flags;
270n/a
271n/a return FFI_OK;
272n/a}
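
The resulting flags word carries the return kind in its low byte and, for aggregates, the size in the bits above it. A sketch of pulling the two fields back apart (decode_return_flags is hypothetical):

    static void decode_return_flags (const ffi_cif *cif)
    {
      int    kind = cif->flags & 0xff;   /* FFI_TYPE_* or FFI_IA64_TYPE_* code */
      size_t size = cif->flags >> 8;     /* aggregate size in bytes, 0 for scalars */
      (void) kind;
      (void) size;
    }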
273n/a
274n/aextern int ffi_call_unix (struct ia64_args *, PTR64, void (*)(void), UINT64);
275n/a
276n/avoid
277n/affi_call(ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue)
278n/a{
279n/a struct ia64_args *stack;
280n/a long i, avn, gpcount, fpcount;
281n/a ffi_type **p_arg;
282n/a
283n/a FFI_ASSERT (cif->abi == FFI_UNIX);
284n/a
285n/a /* If we have no spot for a return value, make one. */
286n/a if (rvalue == NULL && cif->rtype->type != FFI_TYPE_VOID)
287n/a rvalue = alloca (cif->rtype->size);
288n/a
289n/a /* Allocate the stack frame. */
290n/a stack = alloca (cif->bytes);
291n/a
292n/a gpcount = fpcount = 0;
293n/a avn = cif->nargs;
294n/a for (i = 0, p_arg = cif->arg_types; i < avn; i++, p_arg++)
295n/a {
296n/a switch ((*p_arg)->type)
297n/a {
298n/a case FFI_TYPE_SINT8:
299n/a stack->gp_regs[gpcount++] = *(SINT8 *)avalue[i];
300n/a break;
301n/a case FFI_TYPE_UINT8:
302n/a stack->gp_regs[gpcount++] = *(UINT8 *)avalue[i];
303n/a break;
304n/a case FFI_TYPE_SINT16:
305n/a stack->gp_regs[gpcount++] = *(SINT16 *)avalue[i];
306n/a break;
307n/a case FFI_TYPE_UINT16:
308n/a stack->gp_regs[gpcount++] = *(UINT16 *)avalue[i];
309n/a break;
310n/a case FFI_TYPE_SINT32:
311n/a stack->gp_regs[gpcount++] = *(SINT32 *)avalue[i];
312n/a break;
313n/a case FFI_TYPE_UINT32:
314n/a stack->gp_regs[gpcount++] = *(UINT32 *)avalue[i];
315n/a break;
316n/a case FFI_TYPE_SINT64:
317n/a case FFI_TYPE_UINT64:
318n/a stack->gp_regs[gpcount++] = *(UINT64 *)avalue[i];
319n/a break;
320n/a
321n/a case FFI_TYPE_POINTER:
322n/a stack->gp_regs[gpcount++] = (UINT64)(PTR64) *(void **)avalue[i];
323n/a break;
324n/a
325n/a case FFI_TYPE_FLOAT:
326n/a if (gpcount < 8 && fpcount < 8)
327n/a stf_spill (&stack->fp_regs[fpcount++], *(float *)avalue[i]);
328n/a {
329n/a UINT32 tmp;
330n/a memcpy (&tmp, avalue[i], sizeof (UINT32));
331n/a stack->gp_regs[gpcount++] = tmp;
332n/a }
333n/a break;
334n/a
335n/a case FFI_TYPE_DOUBLE:
336n/a if (gpcount < 8 && fpcount < 8)
337n/a stf_spill (&stack->fp_regs[fpcount++], *(double *)avalue[i]);
338n/a memcpy (&stack->gp_regs[gpcount++], avalue[i], sizeof (UINT64));
339n/a break;
340n/a
341n/a case FFI_TYPE_LONGDOUBLE:
342n/a if (gpcount & 1)
343n/a gpcount++;
344n/a if (LDBL_MANT_DIG == 64 && gpcount < 8 && fpcount < 8)
345n/a stf_spill (&stack->fp_regs[fpcount++], *(__float80 *)avalue[i]);
346n/a memcpy (&stack->gp_regs[gpcount], avalue[i], 16);
347n/a gpcount += 2;
348n/a break;
349n/a
350n/a case FFI_TYPE_STRUCT:
351n/a {
352n/a size_t size = (*p_arg)->size;
353n/a size_t align = (*p_arg)->alignment;
354n/a int hfa_type = hfa_element_type (*p_arg, 0);
355n/a
356n/a FFI_ASSERT (align <= 16);
357n/a if (align == 16 && (gpcount & 1))
358n/a gpcount++;
359n/a
360n/a if (hfa_type != FFI_TYPE_VOID)
361n/a {
362n/a size_t hfa_size = hfa_type_size (hfa_type);
363n/a size_t offset = 0;
364n/a size_t gp_offset = gpcount * 8;
365n/a
366n/a while (fpcount < 8
367n/a && offset < size
368n/a && gp_offset < 8 * 8)
369n/a {
370n/a hfa_type_load (&stack->fp_regs[fpcount], hfa_type,
371n/a avalue[i] + offset);
372n/a offset += hfa_size;
373n/a gp_offset += hfa_size;
374n/a fpcount += 1;
375n/a }
376n/a }
377n/a
378n/a memcpy (&stack->gp_regs[gpcount], avalue[i], size);
379n/a gpcount += (size + 7) / 8;
380n/a }
381n/a break;
382n/a
383n/a default:
384n/a abort ();
385n/a }
386n/a }
387n/a
388n/a ffi_call_unix (stack, rvalue, fn, cif->flags);
389n/a}
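
ffi_call is normally reached through libffi's portable entry points rather than called for a hand-built frame; a minimal, architecture-independent usage sketch:

    #include <ffi.h>
    #include <stdio.h>

    static int add (int a, int b) { return a + b; }

    int main (void)
    {
      ffi_cif cif;
      ffi_type *argtypes[2] = { &ffi_type_sint, &ffi_type_sint };
      int a = 2, b = 40;
      void *argvalues[2] = { &a, &b };
      ffi_arg result;

      if (ffi_prep_cif (&cif, FFI_DEFAULT_ABI, 2, &ffi_type_sint, argtypes) != FFI_OK)
        return 1;
      ffi_call (&cif, FFI_FN (add), &result, argvalues);
      printf ("%ld\n", (long) result);   /* prints 42 */
      return 0;
    }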
390n/a
391n/a/* Closures represent a pair consisting of a function pointer, and
392n/a some user data. A closure is invoked by reinterpreting the closure
393n/a as a function pointer, and branching to it. Thus we can make an
394n/a interpreted function callable as a C function: We turn the
395n/a interpreter itself, together with a pointer specifying the
396n/a interpreted procedure, into a closure.
397n/a
398n/a For IA64, function pointers are already pairs consisting of a code
399n/a pointer, and a gp pointer. The latter is needed to access global
400n/a variables. Here we set up such a pair as the first two words of
401n/a the closure (in the "trampoline" area), but we replace the gp
402n/a pointer with a pointer to the closure itself. We also add the real
403n/a gp pointer to the closure. This allows the function entry code to
404n/a both retrieve the user data, and to restore the correct gp pointer. */
405n/a
406n/aextern void ffi_closure_unix ();
407n/a
408n/affi_status
409n/affi_prep_closure_loc (ffi_closure* closure,
410n/a ffi_cif* cif,
411n/a void (*fun)(ffi_cif*,void*,void**,void*),
412n/a void *user_data,
413n/a void *codeloc)
414n/a{
415n/a /* The layout of a function descriptor. A C function pointer really
416n/a points to one of these. */
417n/a struct ia64_fd
418n/a {
419n/a UINT64 code_pointer;
420n/a UINT64 gp;
421n/a };
422n/a
423n/a struct ffi_ia64_trampoline_struct
424n/a {
425n/a UINT64 code_pointer; /* Pointer to ffi_closure_unix. */
426n/a UINT64 fake_gp; /* Pointer to closure, installed as gp. */
427n/a UINT64 real_gp; /* Real gp value. */
428n/a };
429n/a
430n/a struct ffi_ia64_trampoline_struct *tramp;
431n/a struct ia64_fd *fd;
432n/a
433n/a if (cif->abi != FFI_UNIX)
434n/a return FFI_BAD_ABI;
435n/a
436n/a tramp = (struct ffi_ia64_trampoline_struct *)closure->tramp;
437n/a fd = (struct ia64_fd *)(void *)ffi_closure_unix;
438n/a
439n/a tramp->code_pointer = fd->code_pointer;
440n/a tramp->real_gp = fd->gp;
441n/a tramp->fake_gp = (UINT64)(PTR64)codeloc;
442n/a closure->cif = cif;
443n/a closure->user_data = user_data;
444n/a closure->fun = fun;
445n/a
446n/a return FFI_OK;
447n/a}
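
In practice this is reached through ffi_closure_alloc, which returns both the writable closure object and the executable address to pass as codeloc; a minimal sketch (add_bias and make_and_call_closure are hypothetical names):

    #include <ffi.h>

    /* Handler run whenever the synthesized function is called.  */
    static void add_bias (ffi_cif *cif, void *ret, void **args, void *user_data)
    {
      (void) cif;
      *(ffi_arg *) ret = *(int *) args[0] + *(int *) user_data;
    }

    static int make_and_call_closure (void)
    {
      ffi_cif cif;
      ffi_type *argtypes[1] = { &ffi_type_sint };
      void *code;
      ffi_closure *closure = ffi_closure_alloc (sizeof (ffi_closure), &code);
      int bias = 10, result = -1;

      if (closure
          && ffi_prep_cif (&cif, FFI_DEFAULT_ABI, 1, &ffi_type_sint, argtypes) == FFI_OK
          && ffi_prep_closure_loc (closure, &cif, add_bias, &bias, code) == FFI_OK)
        result = ((int (*) (int)) code) (32);   /* 42 */

      if (closure)
        ffi_closure_free (closure);
      return result;
    }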
448n/a
449n/a
450n/aUINT64
451n/affi_closure_unix_inner (ffi_closure *closure, struct ia64_args *stack,
452n/a void *rvalue, void *r8)
453n/a{
454n/a ffi_cif *cif;
455n/a void **avalue;
456n/a ffi_type **p_arg;
457n/a long i, avn, gpcount, fpcount;
458n/a
459n/a cif = closure->cif;
460n/a avn = cif->nargs;
461n/a avalue = alloca (avn * sizeof (void *));
462n/a
463n/a /* If the structure return value is passed in memory, get that location
464n/a from r8 so as to pass the value directly back to the caller. */
465n/a if (cif->flags == FFI_TYPE_STRUCT)
466n/a rvalue = r8;
467n/a
468n/a gpcount = fpcount = 0;
469n/a for (i = 0, p_arg = cif->arg_types; i < avn; i++, p_arg++)
470n/a {
471n/a switch ((*p_arg)->type)
472n/a {
473n/a case FFI_TYPE_SINT8:
474n/a case FFI_TYPE_UINT8:
475n/a avalue[i] = endian_adjust(&stack->gp_regs[gpcount++], 1);
476n/a break;
477n/a case FFI_TYPE_SINT16:
478n/a case FFI_TYPE_UINT16:
479n/a avalue[i] = endian_adjust(&stack->gp_regs[gpcount++], 2);
480n/a break;
481n/a case FFI_TYPE_SINT32:
482n/a case FFI_TYPE_UINT32:
483n/a avalue[i] = endian_adjust(&stack->gp_regs[gpcount++], 4);
484n/a break;
485n/a case FFI_TYPE_SINT64:
486n/a case FFI_TYPE_UINT64:
487n/a avalue[i] = &stack->gp_regs[gpcount++];
488n/a break;
489n/a case FFI_TYPE_POINTER:
490n/a avalue[i] = endian_adjust(&stack->gp_regs[gpcount++], sizeof(void*));
491n/a break;
492n/a
493n/a case FFI_TYPE_FLOAT:
494n/a if (gpcount < 8 && fpcount < 8)
495n/a {
496n/a fpreg *addr = &stack->fp_regs[fpcount++];
497n/a float result;
498n/a avalue[i] = addr;
499n/a ldf_fill (result, addr);
500n/a *(float *)addr = result;
501n/a }
502n/a else
503n/a avalue[i] = endian_adjust(&stack->gp_regs[gpcount], 4);
504n/a gpcount++;
505n/a break;
506n/a
507n/a case FFI_TYPE_DOUBLE:
508n/a if (gpcount < 8 && fpcount < 8)
509n/a {
510n/a fpreg *addr = &stack->fp_regs[fpcount++];
511n/a double result;
512n/a avalue[i] = addr;
513n/a ldf_fill (result, addr);
514n/a *(double *)addr = result;
515n/a }
516n/a else
517n/a avalue[i] = &stack->gp_regs[gpcount];
518n/a gpcount++;
519n/a break;
520n/a
521n/a case FFI_TYPE_LONGDOUBLE:
522n/a if (gpcount & 1)
523n/a gpcount++;
524n/a if (LDBL_MANT_DIG == 64 && gpcount < 8 && fpcount < 8)
525n/a {
526n/a fpreg *addr = &stack->fp_regs[fpcount++];
527n/a __float80 result;
528n/a avalue[i] = addr;
529n/a ldf_fill (result, addr);
530n/a *(__float80 *)addr = result;
531n/a }
532n/a else
533n/a avalue[i] = &stack->gp_regs[gpcount];
534n/a gpcount += 2;
535n/a break;
536n/a
537n/a case FFI_TYPE_STRUCT:
538n/a {
539n/a size_t size = (*p_arg)->size;
540n/a size_t align = (*p_arg)->alignment;
541n/a int hfa_type = hfa_element_type (*p_arg, 0);
542n/a
543n/a FFI_ASSERT (align <= 16);
544n/a if (align == 16 && (gpcount & 1))
545n/a gpcount++;
546n/a
547n/a if (hfa_type != FFI_TYPE_VOID)
548n/a {
549n/a size_t hfa_size = hfa_type_size (hfa_type);
550n/a size_t offset = 0;
551n/a size_t gp_offset = gpcount * 8;
552n/a void *addr = alloca (size);
553n/a
554n/a avalue[i] = addr;
555n/a
556n/a while (fpcount < 8
557n/a && offset < size
558n/a && gp_offset < 8 * 8)
559n/a {
560n/a hfa_type_store (hfa_type, addr + offset,
561n/a &stack->fp_regs[fpcount]);
562n/a offset += hfa_size;
563n/a gp_offset += hfa_size;
564n/a fpcount += 1;
565n/a }
566n/a
567n/a if (offset < size)
568n/a memcpy (addr + offset, (char *)stack->gp_regs + gp_offset,
569n/a size - offset);
570n/a }
571n/a else
572n/a avalue[i] = &stack->gp_regs[gpcount];
573n/a
574n/a gpcount += (size + 7) / 8;
575n/a }
576n/a break;
577n/a
578n/a default:
579n/a abort ();
580n/a }
581n/a }
582n/a
583n/a closure->fun (cif, rvalue, avalue, closure->user_data);
584n/a
585n/a return cif->flags;
586n/a}