» Core Development > Code coverage > Modules/_math.c

Python code coverage for Modules/_math.c

#   count   content
1n/a/* Definitions of some C99 math library functions, for those platforms
2n/a that don't implement these functions already. */
3n/a
4n/a#include "Python.h"
5n/a#include <float.h>
6n/a#include "_math.h"
7n/a
8n/a/* The following copyright notice applies to the original
9n/a implementations of acosh, asinh and atanh. */
10n/a
11n/a/*
12n/a * ====================================================
13n/a * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
14n/a *
15n/a * Developed at SunPro, a Sun Microsystems, Inc. business.
16n/a * Permission to use, copy, modify, and distribute this
17n/a * software is freely granted, provided that this notice
18n/a * is preserved.
19n/a * ====================================================
20n/a */
21n/a
/* Shared constants for the fdlibm-derived fallbacks below.  Each is guarded
   so it is only defined when at least one of the functions that uses it is
   actually being compiled (i.e. the platform lacks the C99 original). */
#if !defined(HAVE_ACOSH) || !defined(HAVE_ASINH)
static const double ln2 = 6.93147180559945286227E-01;
static const double two_pow_p28 = 268435456.0; /* 2**28 */
#endif
#if !defined(HAVE_ASINH) || !defined(HAVE_ATANH)
static const double two_pow_m28 = 3.7252902984619141E-09; /* 2**-28 */
#endif
/* 'zero' is only needed to manufacture a NaN via x/zero when the platform
   provides neither atanh nor a Py_NAN constant. */
#if !defined(HAVE_ATANH) && !defined(Py_NAN)
static const double zero = 0.0;
#endif
32n/a
33n/a
34n/a#ifndef HAVE_ACOSH
35n/a/* acosh(x)
36n/a * Method :
37n/a * Based on
38n/a * acosh(x) = log [ x + sqrt(x*x-1) ]
39n/a * we have
40n/a * acosh(x) := log(x)+ln2, if x is large; else
41n/a * acosh(x) := log(2x-1/(sqrt(x*x-1)+x)) if x>2; else
42n/a * acosh(x) := log1p(t+sqrt(2.0*t+t*t)); where t=x-1.
43n/a *
44n/a * Special cases:
45n/a * acosh(x) is NaN with signal if x<1.
46n/a * acosh(NaN) is NaN without signal.
47n/a */
48n/a
49n/adouble
50n/a_Py_acosh(double x)
51n/a{
52n/a if (Py_IS_NAN(x)) {
53n/a return x+x;
54n/a }
55n/a if (x < 1.) { /* x < 1; return a signaling NaN */
56n/a errno = EDOM;
57n/a#ifdef Py_NAN
58n/a return Py_NAN;
59n/a#else
60n/a return (x-x)/(x-x);
61n/a#endif
62n/a }
63n/a else if (x >= two_pow_p28) { /* x > 2**28 */
64n/a if (Py_IS_INFINITY(x)) {
65n/a return x+x;
66n/a }
67n/a else {
68n/a return log(x) + ln2; /* acosh(huge)=log(2x) */
69n/a }
70n/a }
71n/a else if (x == 1.) {
72n/a return 0.0; /* acosh(1) = 0 */
73n/a }
74n/a else if (x > 2.) { /* 2 < x < 2**28 */
75n/a double t = x * x;
76n/a return log(2.0 * x - 1.0 / (x + sqrt(t - 1.0)));
77n/a }
78n/a else { /* 1 < x <= 2 */
79n/a double t = x - 1.0;
80n/a return m_log1p(t + sqrt(2.0 * t + t * t));
81n/a }
82n/a}
83n/a#endif /* HAVE_ACOSH */
84n/a
85n/a
86n/a#ifndef HAVE_ASINH
87n/a/* asinh(x)
88n/a * Method :
89n/a * Based on
90n/a * asinh(x) = sign(x) * log [ |x| + sqrt(x*x+1) ]
91n/a * we have
92n/a * asinh(x) := x if 1+x*x=1,
93n/a * := sign(x)*(log(x)+ln2)) for large |x|, else
94n/a * := sign(x)*log(2|x|+1/(|x|+sqrt(x*x+1))) if|x|>2, else
95n/a * := sign(x)*log1p(|x| + x^2/(1 + sqrt(1+x^2)))
96n/a */
97n/a
98n/adouble
99n/a_Py_asinh(double x)
100n/a{
101n/a double w;
102n/a double absx = fabs(x);
103n/a
104n/a if (Py_IS_NAN(x) || Py_IS_INFINITY(x)) {
105n/a return x+x;
106n/a }
107n/a if (absx < two_pow_m28) { /* |x| < 2**-28 */
108n/a return x; /* return x inexact except 0 */
109n/a }
110n/a if (absx > two_pow_p28) { /* |x| > 2**28 */
111n/a w = log(absx) + ln2;
112n/a }
113n/a else if (absx > 2.0) { /* 2 < |x| < 2**28 */
114n/a w = log(2.0 * absx + 1.0 / (sqrt(x * x + 1.0) + absx));
115n/a }
116n/a else { /* 2**-28 <= |x| < 2= */
117n/a double t = x*x;
118n/a w = m_log1p(absx + t / (1.0 + sqrt(1.0 + t)));
119n/a }
120n/a return copysign(w, x);
121n/a
122n/a}
123n/a#endif /* HAVE_ASINH */
124n/a
125n/a
126n/a#ifndef HAVE_ATANH
127n/a/* atanh(x)
128n/a * Method :
129n/a * 1.Reduced x to positive by atanh(-x) = -atanh(x)
130n/a * 2.For x>=0.5
131n/a * 1 2x x
132n/a * atanh(x) = --- * log(1 + -------) = 0.5 * log1p(2 * -------)
133n/a * 2 1 - x 1 - x
134n/a *
135n/a * For x<0.5
136n/a * atanh(x) = 0.5*log1p(2x+2x*x/(1-x))
137n/a *
138n/a * Special cases:
139n/a * atanh(x) is NaN if |x| >= 1 with signal;
140n/a * atanh(NaN) is that NaN with no signal;
141n/a *
142n/a */
143n/a
144n/adouble
145n/a_Py_atanh(double x)
146n/a{
147n/a double absx;
148n/a double t;
149n/a
150n/a if (Py_IS_NAN(x)) {
151n/a return x+x;
152n/a }
153n/a absx = fabs(x);
154n/a if (absx >= 1.) { /* |x| >= 1 */
155n/a errno = EDOM;
156n/a#ifdef Py_NAN
157n/a return Py_NAN;
158n/a#else
159n/a return x / zero;
160n/a#endif
161n/a }
162n/a if (absx < two_pow_m28) { /* |x| < 2**-28 */
163n/a return x;
164n/a }
165n/a if (absx < 0.5) { /* |x| < 0.5 */
166n/a t = absx+absx;
167n/a t = 0.5 * m_log1p(t + t*absx / (1.0 - absx));
168n/a }
169n/a else { /* 0.5 <= |x| <= 1.0 */
170n/a t = 0.5 * m_log1p((absx + absx) / (1.0 - absx));
171n/a }
172n/a return copysign(t, x);
173n/a}
174n/a#endif /* HAVE_ATANH */
175n/a
176n/a
177n/a#ifndef HAVE_EXPM1
178n/a/* Mathematically, expm1(x) = exp(x) - 1. The expm1 function is designed
179n/a to avoid the significant loss of precision that arises from direct
180n/a evaluation of the expression exp(x) - 1, for x near 0. */
181n/a
182n/adouble
183n/a_Py_expm1(double x)
184n/a{
185n/a /* For abs(x) >= log(2), it's safe to evaluate exp(x) - 1 directly; this
186n/a also works fine for infinities and nans.
187n/a
188n/a For smaller x, we can use a method due to Kahan that achieves close to
189n/a full accuracy.
190n/a */
191n/a
192n/a if (fabs(x) < 0.7) {
193n/a double u;
194n/a u = exp(x);
195n/a if (u == 1.0)
196n/a return x;
197n/a else
198n/a return (u - 1.0) * x / log(u);
199n/a }
200n/a else
201n/a return exp(x) - 1.0;
202n/a}
203n/a#endif /* HAVE_EXPM1 */
204n/a
205n/a
206n/a/* log1p(x) = log(1+x). The log1p function is designed to avoid the
207n/a significant loss of precision that arises from direct evaluation when x is
208n/a small. */
209n/a
210n/adouble
211n/a_Py_log1p(double x)
212n/a{
213n/a#ifdef HAVE_LOG1P
214n/a /* Some platforms supply a log1p function but don't respect the sign of
215n/a zero: log1p(-0.0) gives 0.0 instead of the correct result of -0.0.
216n/a
217n/a To save fiddling with configure tests and platform checks, we handle the
218n/a special case of zero input directly on all platforms.
219n/a */
220n/a if (x == 0.0) {
221n/a return x;
222n/a }
223n/a else {
224n/a return log1p(x);
225n/a }
226n/a#else
227n/a /* For x small, we use the following approach. Let y be the nearest float
228n/a to 1+x, then
229n/a
230n/a 1+x = y * (1 - (y-1-x)/y)
231n/a
232n/a so log(1+x) = log(y) + log(1-(y-1-x)/y). Since (y-1-x)/y is tiny, the
233n/a second term is well approximated by (y-1-x)/y. If abs(x) >=
234n/a DBL_EPSILON/2 or the rounding-mode is some form of round-to-nearest
235n/a then y-1-x will be exactly representable, and is computed exactly by
236n/a (y-1)-x.
237n/a
238n/a If abs(x) < DBL_EPSILON/2 and the rounding mode is not known to be
239n/a round-to-nearest then this method is slightly dangerous: 1+x could be
240n/a rounded up to 1+DBL_EPSILON instead of down to 1, and in that case
241n/a y-1-x will not be exactly representable any more and the result can be
242n/a off by many ulps. But this is easily fixed: for a floating-point
243n/a number |x| < DBL_EPSILON/2., the closest floating-point number to
244n/a log(1+x) is exactly x.
245n/a */
246n/a
247n/a double y;
248n/a if (fabs(x) < DBL_EPSILON / 2.) {
249n/a return x;
250n/a }
251n/a else if (-0.5 <= x && x <= 1.) {
252n/a /* WARNING: it's possible that an overeager compiler
253n/a will incorrectly optimize the following two lines
254n/a to the equivalent of "return log(1.+x)". If this
255n/a happens, then results from log1p will be inaccurate
256n/a for small x. */
257n/a y = 1.+x;
258n/a return log(y) - ((y - 1.) - x) / y;
259n/a }
260n/a else {
261n/a /* NaNs and infinities should end up here */
262n/a return log(1.+x);
263n/a }
264n/a#endif /* ifdef HAVE_LOG1P */
265n/a}
266n/a