» Core Development > Code coverage > Modules/mathmodule.c

Python code coverage for Modules/mathmodule.c

#  count  content
1n/a/* Math module -- standard C math library functions, pi and e */
2n/a
3n/a/* Here are some comments from Tim Peters, extracted from the
4n/a discussion attached to http://bugs.python.org/issue1640. They
5n/a describe the general aims of the math module with respect to
6n/a special values, IEEE-754 floating-point exceptions, and Python
7n/a exceptions.
8n/a
9n/aThese are the "spirit of 754" rules:
10n/a
11n/a1. If the mathematical result is a real number, but of magnitude too
12n/alarge to approximate by a machine float, overflow is signaled and the
13n/aresult is an infinity (with the appropriate sign).
14n/a
15n/a2. If the mathematical result is a real number, but of magnitude too
16n/asmall to approximate by a machine float, underflow is signaled and the
17n/aresult is a zero (with the appropriate sign).
18n/a
19n/a3. At a singularity (a value x such that the limit of f(y) as y
20n/aapproaches x exists and is an infinity), "divide by zero" is signaled
21n/aand the result is an infinity (with the appropriate sign). This is
22n/acomplicated a little by that the left-side and right-side limits may
23n/anot be the same; e.g., 1/x approaches +inf or -inf as x approaches 0
24n/afrom the positive or negative directions. In that specific case, the
25n/asign of the zero determines the result of 1/0.
26n/a
27n/a4. At a point where a function has no defined result in the extended
28n/areals (i.e., the reals plus an infinity or two), invalid operation is
29n/asignaled and a NaN is returned.
30n/a
31n/aAnd these are what Python has historically /tried/ to do (but not
32n/aalways successfully, as platform libm behavior varies a lot):
33n/a
34n/aFor #1, raise OverflowError.
35n/a
36n/aFor #2, return a zero (with the appropriate sign if that happens by
37n/aaccident ;-)).
38n/a
39n/aFor #3 and #4, raise ValueError. It may have made sense to raise
40n/aPython's ZeroDivisionError in #3, but historically that's only been
41n/araised for division by zero and mod by zero.
42n/a
43n/a*/
44n/a
45n/a/*
46n/a In general, on an IEEE-754 platform the aim is to follow the C99
47n/a standard, including Annex 'F', whenever possible. Where the
48n/a standard recommends raising the 'divide-by-zero' or 'invalid'
49n/a floating-point exceptions, Python should raise a ValueError. Where
50n/a the standard recommends raising 'overflow', Python should raise an
51n/a OverflowError. In all other circumstances a value should be
52n/a returned.
53n/a */
54n/a
55n/a#include "Python.h"
56n/a#include "_math.h"
57n/a
58n/a#include "clinic/mathmodule.c.h"
59n/a
60n/a/*[clinic input]
61n/amodule math
62n/a[clinic start generated code]*/
63n/a/*[clinic end generated code: output=da39a3ee5e6b4b0d input=76bc7002685dd942]*/
64n/a
65n/a
66n/a/*
67n/a sin(pi*x), giving accurate results for all finite x (especially x
68n/a integral or close to an integer). This is here for use in the
69n/a reflection formula for the gamma function. It conforms to IEEE
70n/a 754-2008 for finite arguments, but not for infinities or nans.
71n/a*/
72n/a
/* High-precision mathematical constants shared by the functions below. */
static const double pi = 3.141592653589793238462643383279502884197;
static const double sqrtpi = 1.772453850905516027298167483341145182798;
static const double logpi = 1.144729885849400174143427351353058711647;
76n/a
77n/astatic double
78n/asinpi(double x)
79n/a{
80n/a double y, r;
81n/a int n;
82n/a /* this function should only ever be called for finite arguments */
83n/a assert(Py_IS_FINITE(x));
84n/a y = fmod(fabs(x), 2.0);
85n/a n = (int)round(2.0*y);
86n/a assert(0 <= n && n <= 4);
87n/a switch (n) {
88n/a case 0:
89n/a r = sin(pi*y);
90n/a break;
91n/a case 1:
92n/a r = cos(pi*(y-0.5));
93n/a break;
94n/a case 2:
95n/a /* N.B. -sin(pi*(y-1.0)) is *not* equivalent: it would give
96n/a -0.0 instead of 0.0 when y == 1.0. */
97n/a r = sin(pi*(1.0-y));
98n/a break;
99n/a case 3:
100n/a r = -cos(pi*(y-1.5));
101n/a break;
102n/a case 4:
103n/a r = sin(pi*(y-2.0));
104n/a break;
105n/a default:
106n/a assert(0); /* should never get here */
107n/a r = -1.23e200; /* silence gcc warning */
108n/a }
109n/a return copysign(1.0, x)*r;
110n/a}
111n/a
112n/a/* Implementation of the real gamma function. In extensive but non-exhaustive
113n/a random tests, this function proved accurate to within <= 10 ulps across the
114n/a entire float domain. Note that accuracy may depend on the quality of the
115n/a system math functions, the pow function in particular. Special cases
116n/a follow C99 annex F. The parameters and method are tailored to platforms
117n/a whose double format is the IEEE 754 binary64 format.
118n/a
119n/a Method: for x > 0.0 we use the Lanczos approximation with parameters N=13
120n/a and g=6.024680040776729583740234375; these parameters are amongst those
121n/a used by the Boost library. Following Boost (again), we re-express the
122n/a Lanczos sum as a rational function, and compute it that way. The
123n/a coefficients below were computed independently using MPFR, and have been
124n/a double-checked against the coefficients in the Boost source code.
125n/a
126n/a For x < 0.0 we use the reflection formula.
127n/a
128n/a There's one minor tweak that deserves explanation: Lanczos' formula for
129n/a Gamma(x) involves computing pow(x+g-0.5, x-0.5) / exp(x+g-0.5). For many x
130n/a values, x+g-0.5 can be represented exactly. However, in cases where it
131n/a can't be represented exactly the small error in x+g-0.5 can be magnified
132n/a significantly by the pow and exp calls, especially for large x. A cheap
133n/a correction is to multiply by (1 + e*g/(x+g-0.5)), where e is the error
134n/a involved in the computation of x+g-0.5 (that is, e = computed value of
135n/a x+g-0.5 - exact value of x+g-0.5). Here's the proof:
136n/a
137n/a Correction factor
138n/a -----------------
139n/a Write x+g-0.5 = y-e, where y is exactly representable as an IEEE 754
140n/a double, and e is tiny. Then:
141n/a
142n/a pow(x+g-0.5,x-0.5)/exp(x+g-0.5) = pow(y-e, x-0.5)/exp(y-e)
143n/a = pow(y, x-0.5)/exp(y) * C,
144n/a
145n/a where the correction_factor C is given by
146n/a
147n/a C = pow(1-e/y, x-0.5) * exp(e)
148n/a
149n/a   Since e is tiny, pow(1-e/y, x-0.5) ~ 1-(x-0.5)*e/y, and exp(e) ~ 1+e, so:
150n/a
151n/a C ~ (1-(x-0.5)*e/y) * (1+e) ~ 1 + e*(y-(x-0.5))/y
152n/a
153n/a But y-(x-0.5) = g+e, and g+e ~ g. So we get C ~ 1 + e*g/y, and
154n/a
155n/a pow(x+g-0.5,x-0.5)/exp(x+g-0.5) ~ pow(y, x-0.5)/exp(y) * (1 + e*g/y),
156n/a
157n/a Note that for accuracy, when computing r*C it's better to do
158n/a
159n/a r + e*g/y*r;
160n/a
161n/a than
162n/a
163n/a r * (1 + e*g/y);
164n/a
165n/a since the addition in the latter throws away most of the bits of
166n/a information in e*g/y.
167n/a*/
168n/a
#define LANCZOS_N 13
static const double lanczos_g = 6.024680040776729583740234375;
static const double lanczos_g_minus_half = 5.524680040776729583740234375;
/* Numerator coefficients of the rational-function form of the Lanczos sum;
   computed with MPFR and cross-checked against Boost. */
static const double lanczos_num_coeffs[LANCZOS_N] = {
    23531376880.410759688572007674451636754734846804940,
    42919803642.649098768957899047001988850926355848959,
    35711959237.355668049440185451547166705960488635843,
    17921034426.037209699919755754458931112671403265390,
    6039542586.3520280050642916443072979210699388420708,
    1439720407.3117216736632230727949123939715485786772,
    248874557.86205415651146038641322942321632125127801,
    31426415.585400194380614231628318205362874684987640,
    2876370.6289353724412254090516208496135991145378768,
    186056.26539522349504029498971604569928220784236328,
    8071.6720023658162106380029022722506138218516325024,
    210.82427775157934587250973392071336271166969580291,
    2.5066282746310002701649081771338373386264310793408
};

/* denominator is x*(x+1)*...*(x+LANCZOS_N-2) */
static const double lanczos_den_coeffs[LANCZOS_N] = {
    0.0, 39916800.0, 120543840.0, 150917976.0, 105258076.0, 45995730.0,
    13339535.0, 2637558.0, 357423.0, 32670.0, 1925.0, 66.0, 1.0};

/* gamma values for small positive integers, 1 through NGAMMA_INTEGRAL */
#define NGAMMA_INTEGRAL 23
static const double gamma_integral[NGAMMA_INTEGRAL] = {
    1.0, 1.0, 2.0, 6.0, 24.0, 120.0, 720.0, 5040.0, 40320.0, 362880.0,
    3628800.0, 39916800.0, 479001600.0, 6227020800.0, 87178291200.0,
    1307674368000.0, 20922789888000.0, 355687428096000.0,
    6402373705728000.0, 121645100408832000.0, 2432902008176640000.0,
    51090942171709440000.0, 1124000727777607680000.0,
};
202n/a
203n/a/* Lanczos' sum L_g(x), for positive x */
204n/a
205n/astatic double
206n/alanczos_sum(double x)
207n/a{
208n/a double num = 0.0, den = 0.0;
209n/a int i;
210n/a assert(x > 0.0);
211n/a /* evaluate the rational function lanczos_sum(x). For large
212n/a x, the obvious algorithm risks overflow, so we instead
213n/a rescale the denominator and numerator of the rational
214n/a function by x**(1-LANCZOS_N) and treat this as a
215n/a rational function in 1/x. This also reduces the error for
216n/a larger x values. The choice of cutoff point (5.0 below) is
217n/a somewhat arbitrary; in tests, smaller cutoff values than
218n/a this resulted in lower accuracy. */
219n/a if (x < 5.0) {
220n/a for (i = LANCZOS_N; --i >= 0; ) {
221n/a num = num * x + lanczos_num_coeffs[i];
222n/a den = den * x + lanczos_den_coeffs[i];
223n/a }
224n/a }
225n/a else {
226n/a for (i = 0; i < LANCZOS_N; i++) {
227n/a num = num / x + lanczos_num_coeffs[i];
228n/a den = den / x + lanczos_den_coeffs[i];
229n/a }
230n/a }
231n/a return num/den;
232n/a}
233n/a
234n/a/* Constant for +infinity, generated in the same way as float('inf'). */
235n/a
/* Return +infinity, produced the same way float('inf') produces it. */
static double
m_inf(void)
{
#ifndef PY_NO_SHORT_FLOAT_REPR
    return _Py_dg_infinity(0);
#else
    return Py_HUGE_VAL;
#endif
}
245n/a
246n/a/* Constant nan value, generated in the same way as float('nan'). */
247n/a/* We don't currently assume that Py_NAN is defined everywhere. */
248n/a
#if !defined(PY_NO_SHORT_FLOAT_REPR) || defined(Py_NAN)

/* Return a quiet NaN, produced the same way float('nan') produces it. */
static double
m_nan(void)
{
#ifndef PY_NO_SHORT_FLOAT_REPR
    return _Py_dg_stdnan(0);
#else
    return Py_NAN;
#endif
}

#endif
262n/a
263n/astatic double
264n/am_tgamma(double x)
265n/a{
266n/a double absx, r, y, z, sqrtpow;
267n/a
268n/a /* special cases */
269n/a if (!Py_IS_FINITE(x)) {
270n/a if (Py_IS_NAN(x) || x > 0.0)
271n/a return x; /* tgamma(nan) = nan, tgamma(inf) = inf */
272n/a else {
273n/a errno = EDOM;
274n/a return Py_NAN; /* tgamma(-inf) = nan, invalid */
275n/a }
276n/a }
277n/a if (x == 0.0) {
278n/a errno = EDOM;
279n/a /* tgamma(+-0.0) = +-inf, divide-by-zero */
280n/a return copysign(Py_HUGE_VAL, x);
281n/a }
282n/a
283n/a /* integer arguments */
284n/a if (x == floor(x)) {
285n/a if (x < 0.0) {
286n/a errno = EDOM; /* tgamma(n) = nan, invalid for */
287n/a return Py_NAN; /* negative integers n */
288n/a }
289n/a if (x <= NGAMMA_INTEGRAL)
290n/a return gamma_integral[(int)x - 1];
291n/a }
292n/a absx = fabs(x);
293n/a
294n/a /* tiny arguments: tgamma(x) ~ 1/x for x near 0 */
295n/a if (absx < 1e-20) {
296n/a r = 1.0/x;
297n/a if (Py_IS_INFINITY(r))
298n/a errno = ERANGE;
299n/a return r;
300n/a }
301n/a
302n/a /* large arguments: assuming IEEE 754 doubles, tgamma(x) overflows for
303n/a x > 200, and underflows to +-0.0 for x < -200, not a negative
304n/a integer. */
305n/a if (absx > 200.0) {
306n/a if (x < 0.0) {
307n/a return 0.0/sinpi(x);
308n/a }
309n/a else {
310n/a errno = ERANGE;
311n/a return Py_HUGE_VAL;
312n/a }
313n/a }
314n/a
315n/a y = absx + lanczos_g_minus_half;
316n/a /* compute error in sum */
317n/a if (absx > lanczos_g_minus_half) {
318n/a /* note: the correction can be foiled by an optimizing
319n/a compiler that (incorrectly) thinks that an expression like
320n/a a + b - a - b can be optimized to 0.0. This shouldn't
321n/a happen in a standards-conforming compiler. */
322n/a double q = y - absx;
323n/a z = q - lanczos_g_minus_half;
324n/a }
325n/a else {
326n/a double q = y - lanczos_g_minus_half;
327n/a z = q - absx;
328n/a }
329n/a z = z * lanczos_g / y;
330n/a if (x < 0.0) {
331n/a r = -pi / sinpi(absx) / absx * exp(y) / lanczos_sum(absx);
332n/a r -= z * r;
333n/a if (absx < 140.0) {
334n/a r /= pow(y, absx - 0.5);
335n/a }
336n/a else {
337n/a sqrtpow = pow(y, absx / 2.0 - 0.25);
338n/a r /= sqrtpow;
339n/a r /= sqrtpow;
340n/a }
341n/a }
342n/a else {
343n/a r = lanczos_sum(absx) / exp(y);
344n/a r += z * r;
345n/a if (absx < 140.0) {
346n/a r *= pow(y, absx - 0.5);
347n/a }
348n/a else {
349n/a sqrtpow = pow(y, absx / 2.0 - 0.25);
350n/a r *= sqrtpow;
351n/a r *= sqrtpow;
352n/a }
353n/a }
354n/a if (Py_IS_INFINITY(r))
355n/a errno = ERANGE;
356n/a return r;
357n/a}
358n/a
359n/a/*
360n/a lgamma: natural log of the absolute value of the Gamma function.
361n/a For large arguments, Lanczos' formula works extremely well here.
362n/a*/
363n/a
364n/astatic double
365n/am_lgamma(double x)
366n/a{
367n/a double r, absx;
368n/a
369n/a /* special cases */
370n/a if (!Py_IS_FINITE(x)) {
371n/a if (Py_IS_NAN(x))
372n/a return x; /* lgamma(nan) = nan */
373n/a else
374n/a return Py_HUGE_VAL; /* lgamma(+-inf) = +inf */
375n/a }
376n/a
377n/a /* integer arguments */
378n/a if (x == floor(x) && x <= 2.0) {
379n/a if (x <= 0.0) {
380n/a errno = EDOM; /* lgamma(n) = inf, divide-by-zero for */
381n/a return Py_HUGE_VAL; /* integers n <= 0 */
382n/a }
383n/a else {
384n/a return 0.0; /* lgamma(1) = lgamma(2) = 0.0 */
385n/a }
386n/a }
387n/a
388n/a absx = fabs(x);
389n/a /* tiny arguments: lgamma(x) ~ -log(fabs(x)) for small x */
390n/a if (absx < 1e-20)
391n/a return -log(absx);
392n/a
393n/a /* Lanczos' formula. We could save a fraction of a ulp in accuracy by
394n/a having a second set of numerator coefficients for lanczos_sum that
395n/a absorbed the exp(-lanczos_g) term, and throwing out the lanczos_g
396n/a subtraction below; it's probably not worth it. */
397n/a r = log(lanczos_sum(absx)) - lanczos_g;
398n/a r += (absx - 0.5) * (log(absx + lanczos_g - 0.5) - 1);
399n/a if (x < 0.0)
400n/a /* Use reflection formula to get value for negative x. */
401n/a r = logpi - log(fabs(sinpi(absx))) - log(absx) - r;
402n/a if (Py_IS_INFINITY(r))
403n/a errno = ERANGE;
404n/a return r;
405n/a}
406n/a
407n/a/*
408n/a Implementations of the error function erf(x) and the complementary error
409n/a function erfc(x).
410n/a
411n/a Method: we use a series approximation for erf for small x, and a continued
412n/a fraction approximation for erfc(x) for larger x;
413n/a combined with the relations erf(-x) = -erf(x) and erfc(x) = 1.0 - erf(x),
414n/a this gives us erf(x) and erfc(x) for all x.
415n/a
416n/a The series expansion used is:
417n/a
418n/a erf(x) = x*exp(-x*x)/sqrt(pi) * [
419n/a 2/1 + 4/3 x**2 + 8/15 x**4 + 16/105 x**6 + ...]
420n/a
421n/a The coefficient of x**(2k-2) here is 4**k*factorial(k)/factorial(2*k).
422n/a This series converges well for smallish x, but slowly for larger x.
423n/a
424n/a The continued fraction expansion used is:
425n/a
426n/a erfc(x) = x*exp(-x*x)/sqrt(pi) * [1/(0.5 + x**2 -) 0.5/(2.5 + x**2 - )
427n/a 3.0/(4.5 + x**2 - ) 7.5/(6.5 + x**2 - ) ...]
428n/a
429n/a after the first term, the general term has the form:
430n/a
431n/a k*(k-0.5)/(2*k+0.5 + x**2 - ...).
432n/a
433n/a This expansion converges fast for larger x, but convergence becomes
434n/a infinitely slow as x approaches 0.0. The (somewhat naive) continued
435n/a fraction evaluation algorithm used below also risks overflow for large x;
436n/a but for large x, erfc(x) == 0.0 to within machine precision. (For
437n/a example, erfc(30.0) is approximately 2.56e-393).
438n/a
439n/a Parameters: use series expansion for abs(x) < ERF_SERIES_CUTOFF and
440n/a continued fraction expansion for ERF_SERIES_CUTOFF <= abs(x) <
441n/a ERFC_CONTFRAC_CUTOFF. ERFC_SERIES_TERMS and ERFC_CONTFRAC_TERMS are the
442n/a numbers of terms to use for the relevant expansions. */
443n/a
/* Tuning parameters for erf/erfc: the power series is used below
   ERF_SERIES_CUTOFF, the continued fraction between the two cutoffs, and
   erfc underflows to 0.0 above ERFC_CONTFRAC_CUTOFF. */
#define ERF_SERIES_CUTOFF 1.5
#define ERF_SERIES_TERMS 25
#define ERFC_CONTFRAC_CUTOFF 30.0
#define ERFC_CONTFRAC_TERMS 50
448n/a
449n/a/*
450n/a Error function, via power series.
451n/a
452n/a Given a finite float x, return an approximation to erf(x).
453n/a Converges reasonably fast for small x.
454n/a*/
455n/a
456n/astatic double
457n/am_erf_series(double x)
458n/a{
459n/a double x2, acc, fk, result;
460n/a int i, saved_errno;
461n/a
462n/a x2 = x * x;
463n/a acc = 0.0;
464n/a fk = (double)ERF_SERIES_TERMS + 0.5;
465n/a for (i = 0; i < ERF_SERIES_TERMS; i++) {
466n/a acc = 2.0 + x2 * acc / fk;
467n/a fk -= 1.0;
468n/a }
469n/a /* Make sure the exp call doesn't affect errno;
470n/a see m_erfc_contfrac for more. */
471n/a saved_errno = errno;
472n/a result = acc * x * exp(-x2) / sqrtpi;
473n/a errno = saved_errno;
474n/a return result;
475n/a}
476n/a
477n/a/*
478n/a Complementary error function, via continued fraction expansion.
479n/a
480n/a Given a positive float x, return an approximation to erfc(x). Converges
481n/a reasonably fast for x large (say, x > 2.0), and should be safe from
482n/a overflow if x and nterms are not too large. On an IEEE 754 machine, with x
483n/a <= 30.0, we're safe up to nterms = 100. For x >= 30.0, erfc(x) is smaller
484n/a than the smallest representable nonzero float. */
485n/a
486n/astatic double
487n/am_erfc_contfrac(double x)
488n/a{
489n/a double x2, a, da, p, p_last, q, q_last, b, result;
490n/a int i, saved_errno;
491n/a
492n/a if (x >= ERFC_CONTFRAC_CUTOFF)
493n/a return 0.0;
494n/a
495n/a x2 = x*x;
496n/a a = 0.0;
497n/a da = 0.5;
498n/a p = 1.0; p_last = 0.0;
499n/a q = da + x2; q_last = 1.0;
500n/a for (i = 0; i < ERFC_CONTFRAC_TERMS; i++) {
501n/a double temp;
502n/a a += da;
503n/a da += 2.0;
504n/a b = da + x2;
505n/a temp = p; p = b*p - a*p_last; p_last = temp;
506n/a temp = q; q = b*q - a*q_last; q_last = temp;
507n/a }
508n/a /* Issue #8986: On some platforms, exp sets errno on underflow to zero;
509n/a save the current errno value so that we can restore it later. */
510n/a saved_errno = errno;
511n/a result = p / q * x * exp(-x2) / sqrtpi;
512n/a errno = saved_errno;
513n/a return result;
514n/a}
515n/a
516n/a/* Error function erf(x), for general x */
517n/a
518n/astatic double
519n/am_erf(double x)
520n/a{
521n/a double absx, cf;
522n/a
523n/a if (Py_IS_NAN(x))
524n/a return x;
525n/a absx = fabs(x);
526n/a if (absx < ERF_SERIES_CUTOFF)
527n/a return m_erf_series(x);
528n/a else {
529n/a cf = m_erfc_contfrac(absx);
530n/a return x > 0.0 ? 1.0 - cf : cf - 1.0;
531n/a }
532n/a}
533n/a
534n/a/* Complementary error function erfc(x), for general x. */
535n/a
536n/astatic double
537n/am_erfc(double x)
538n/a{
539n/a double absx, cf;
540n/a
541n/a if (Py_IS_NAN(x))
542n/a return x;
543n/a absx = fabs(x);
544n/a if (absx < ERF_SERIES_CUTOFF)
545n/a return 1.0 - m_erf_series(x);
546n/a else {
547n/a cf = m_erfc_contfrac(absx);
548n/a return x > 0.0 ? cf : 2.0 - cf;
549n/a }
550n/a}
551n/a
552n/a/*
553n/a wrapper for atan2 that deals directly with special cases before
554n/a delegating to the platform libm for the remaining cases. This
555n/a is necessary to get consistent behaviour across platforms.
556n/a Windows, FreeBSD and alpha Tru64 are amongst platforms that don't
557n/a always follow C99.
558n/a*/
559n/a
560n/astatic double
561n/am_atan2(double y, double x)
562n/a{
563n/a if (Py_IS_NAN(x) || Py_IS_NAN(y))
564n/a return Py_NAN;
565n/a if (Py_IS_INFINITY(y)) {
566n/a if (Py_IS_INFINITY(x)) {
567n/a if (copysign(1., x) == 1.)
568n/a /* atan2(+-inf, +inf) == +-pi/4 */
569n/a return copysign(0.25*Py_MATH_PI, y);
570n/a else
571n/a /* atan2(+-inf, -inf) == +-pi*3/4 */
572n/a return copysign(0.75*Py_MATH_PI, y);
573n/a }
574n/a /* atan2(+-inf, x) == +-pi/2 for finite x */
575n/a return copysign(0.5*Py_MATH_PI, y);
576n/a }
577n/a if (Py_IS_INFINITY(x) || y == 0.) {
578n/a if (copysign(1., x) == 1.)
579n/a /* atan2(+-y, +inf) = atan2(+-0, +x) = +-0. */
580n/a return copysign(0., y);
581n/a else
582n/a /* atan2(+-y, -inf) = atan2(+-0., -x) = +-pi. */
583n/a return copysign(Py_MATH_PI, y);
584n/a }
585n/a return atan2(y, x);
586n/a}
587n/a
588n/a/*
589n/a Various platforms (Solaris, OpenBSD) do nonstandard things for log(0),
590n/a log(-ve), log(NaN). Here are wrappers for log and log10 that deal with
591n/a special values directly, passing positive non-special values through to
592n/a the system log/log10.
593n/a */
594n/a
595n/astatic double
596n/am_log(double x)
597n/a{
598n/a if (Py_IS_FINITE(x)) {
599n/a if (x > 0.0)
600n/a return log(x);
601n/a errno = EDOM;
602n/a if (x == 0.0)
603n/a return -Py_HUGE_VAL; /* log(0) = -inf */
604n/a else
605n/a return Py_NAN; /* log(-ve) = nan */
606n/a }
607n/a else if (Py_IS_NAN(x))
608n/a return x; /* log(nan) = nan */
609n/a else if (x > 0.0)
610n/a return x; /* log(inf) = inf */
611n/a else {
612n/a errno = EDOM;
613n/a return Py_NAN; /* log(-inf) = nan */
614n/a }
615n/a}
616n/a
617n/a/*
618n/a log2: log to base 2.
619n/a
620n/a Uses an algorithm that should:
621n/a
622n/a (a) produce exact results for powers of 2, and
623n/a (b) give a monotonic log2 (for positive finite floats),
624n/a assuming that the system log is monotonic.
625n/a*/
626n/a
627n/astatic double
628n/am_log2(double x)
629n/a{
630n/a if (!Py_IS_FINITE(x)) {
631n/a if (Py_IS_NAN(x))
632n/a return x; /* log2(nan) = nan */
633n/a else if (x > 0.0)
634n/a return x; /* log2(+inf) = +inf */
635n/a else {
636n/a errno = EDOM;
637n/a return Py_NAN; /* log2(-inf) = nan, invalid-operation */
638n/a }
639n/a }
640n/a
641n/a if (x > 0.0) {
642n/a#ifdef HAVE_LOG2
643n/a return log2(x);
644n/a#else
645n/a double m;
646n/a int e;
647n/a m = frexp(x, &e);
648n/a /* We want log2(m * 2**e) == log(m) / log(2) + e. Care is needed when
649n/a * x is just greater than 1.0: in that case e is 1, log(m) is negative,
650n/a * and we get significant cancellation error from the addition of
651n/a * log(m) / log(2) to e. The slight rewrite of the expression below
652n/a * avoids this problem.
653n/a */
654n/a if (x >= 1.0) {
655n/a return log(2.0 * m) / log(2.0) + (e - 1);
656n/a }
657n/a else {
658n/a return log(m) / log(2.0) + e;
659n/a }
660n/a#endif
661n/a }
662n/a else if (x == 0.0) {
663n/a errno = EDOM;
664n/a return -Py_HUGE_VAL; /* log2(0) = -inf, divide-by-zero */
665n/a }
666n/a else {
667n/a errno = EDOM;
668n/a return Py_NAN; /* log2(-inf) = nan, invalid-operation */
669n/a }
670n/a}
671n/a
672n/astatic double
673n/am_log10(double x)
674n/a{
675n/a if (Py_IS_FINITE(x)) {
676n/a if (x > 0.0)
677n/a return log10(x);
678n/a errno = EDOM;
679n/a if (x == 0.0)
680n/a return -Py_HUGE_VAL; /* log10(0) = -inf */
681n/a else
682n/a return Py_NAN; /* log10(-ve) = nan */
683n/a }
684n/a else if (Py_IS_NAN(x))
685n/a return x; /* log10(nan) = nan */
686n/a else if (x > 0.0)
687n/a return x; /* log10(inf) = inf */
688n/a else {
689n/a errno = EDOM;
690n/a return Py_NAN; /* log10(-inf) = nan */
691n/a }
692n/a}
693n/a
694n/a
695n/a/*[clinic input]
696n/amath.gcd
697n/a
698n/a x as a: object
699n/a y as b: object
700n/a /
701n/a
702n/agreatest common divisor of x and y
703n/a[clinic start generated code]*/
704n/a
705n/astatic PyObject *
706n/amath_gcd_impl(PyObject *module, PyObject *a, PyObject *b)
707n/a/*[clinic end generated code: output=7b2e0c151bd7a5d8 input=c2691e57fb2a98fa]*/
708n/a{
709n/a PyObject *g;
710n/a
711n/a a = PyNumber_Index(a);
712n/a if (a == NULL)
713n/a return NULL;
714n/a b = PyNumber_Index(b);
715n/a if (b == NULL) {
716n/a Py_DECREF(a);
717n/a return NULL;
718n/a }
719n/a g = _PyLong_GCD(a, b);
720n/a Py_DECREF(a);
721n/a Py_DECREF(b);
722n/a return g;
723n/a}
724n/a
725n/a
726n/a/* Call is_error when errno != 0, and where x is the result libm
727n/a * returned. is_error will usually set up an exception and return
728n/a * true (1), but may return false (0) without setting up an exception.
729n/a */
730n/astatic int
731n/ais_error(double x)
732n/a{
733n/a int result = 1; /* presumption of guilt */
734n/a assert(errno); /* non-zero errno is a precondition for calling */
735n/a if (errno == EDOM)
736n/a PyErr_SetString(PyExc_ValueError, "math domain error");
737n/a
738n/a else if (errno == ERANGE) {
739n/a /* ANSI C generally requires libm functions to set ERANGE
740n/a * on overflow, but also generally *allows* them to set
741n/a * ERANGE on underflow too. There's no consistency about
742n/a * the latter across platforms.
743n/a * Alas, C99 never requires that errno be set.
744n/a * Here we suppress the underflow errors (libm functions
745n/a * should return a zero on underflow, and +- HUGE_VAL on
746n/a * overflow, so testing the result for zero suffices to
747n/a * distinguish the cases).
748n/a *
749n/a * On some platforms (Ubuntu/ia64) it seems that errno can be
750n/a * set to ERANGE for subnormal results that do *not* underflow
751n/a * to zero. So to be safe, we'll ignore ERANGE whenever the
752n/a * function result is less than one in absolute value.
753n/a */
754n/a if (fabs(x) < 1.0)
755n/a result = 0;
756n/a else
757n/a PyErr_SetString(PyExc_OverflowError,
758n/a "math range error");
759n/a }
760n/a else
761n/a /* Unexpected math error */
762n/a PyErr_SetFromErrno(PyExc_ValueError);
763n/a return result;
764n/a}
765n/a
766n/a/*
767n/a math_1 is used to wrap a libm function f that takes a double
768n/a argument and returns a double.
769n/a
770n/a The error reporting follows these rules, which are designed to do
771n/a the right thing on C89/C99 platforms and IEEE 754/non IEEE 754
772n/a platforms.
773n/a
774n/a - a NaN result from non-NaN inputs causes ValueError to be raised
775n/a - an infinite result from finite inputs causes OverflowError to be
776n/a raised if can_overflow is 1, or raises ValueError if can_overflow
777n/a is 0.
778n/a - if the result is finite and errno == EDOM then ValueError is
779n/a raised
780n/a - if the result is finite and nonzero and errno == ERANGE then
781n/a OverflowError is raised
782n/a
783n/a The last rule is used to catch overflow on platforms which follow
784n/a C89 but for which HUGE_VAL is not an infinity.
785n/a
786n/a For the majority of one-argument functions these rules are enough
787n/a to ensure that Python's functions behave as specified in 'Annex F'
788n/a of the C99 standard, with the 'invalid' and 'divide-by-zero'
789n/a floating-point exceptions mapping to Python's ValueError and the
790n/a 'overflow' floating-point exception mapping to OverflowError.
791n/a math_1 only works for functions that don't have singularities *and*
792n/a the possibility of overflow; fortunately, that covers everything we
793n/a care about right now.
794n/a*/
795n/a
796n/astatic PyObject *
797n/amath_1_to_whatever(PyObject *arg, double (*func) (double),
798n/a PyObject *(*from_double_func) (double),
799n/a int can_overflow)
800n/a{
801n/a double x, r;
802n/a x = PyFloat_AsDouble(arg);
803n/a if (x == -1.0 && PyErr_Occurred())
804n/a return NULL;
805n/a errno = 0;
806n/a PyFPE_START_PROTECT("in math_1", return 0);
807n/a r = (*func)(x);
808n/a PyFPE_END_PROTECT(r);
809n/a if (Py_IS_NAN(r) && !Py_IS_NAN(x)) {
810n/a PyErr_SetString(PyExc_ValueError,
811n/a "math domain error"); /* invalid arg */
812n/a return NULL;
813n/a }
814n/a if (Py_IS_INFINITY(r) && Py_IS_FINITE(x)) {
815n/a if (can_overflow)
816n/a PyErr_SetString(PyExc_OverflowError,
817n/a "math range error"); /* overflow */
818n/a else
819n/a PyErr_SetString(PyExc_ValueError,
820n/a "math domain error"); /* singularity */
821n/a return NULL;
822n/a }
823n/a if (Py_IS_FINITE(r) && errno && is_error(r))
824n/a /* this branch unnecessary on most platforms */
825n/a return NULL;
826n/a
827n/a return (*from_double_func)(r);
828n/a}
829n/a
830n/a/* variant of math_1, to be used when the function being wrapped is known to
831n/a set errno properly (that is, errno = EDOM for invalid or divide-by-zero,
832n/a errno = ERANGE for overflow). */
833n/a
834n/astatic PyObject *
835n/amath_1a(PyObject *arg, double (*func) (double))
836n/a{
837n/a double x, r;
838n/a x = PyFloat_AsDouble(arg);
839n/a if (x == -1.0 && PyErr_Occurred())
840n/a return NULL;
841n/a errno = 0;
842n/a PyFPE_START_PROTECT("in math_1a", return 0);
843n/a r = (*func)(x);
844n/a PyFPE_END_PROTECT(r);
845n/a if (errno && is_error(r))
846n/a return NULL;
847n/a return PyFloat_FromDouble(r);
848n/a}
849n/a
850n/a/*
851n/a math_2 is used to wrap a libm function f that takes two double
852n/a arguments and returns a double.
853n/a
854n/a The error reporting follows these rules, which are designed to do
855n/a the right thing on C89/C99 platforms and IEEE 754/non IEEE 754
856n/a platforms.
857n/a
858n/a - a NaN result from non-NaN inputs causes ValueError to be raised
859n/a - an infinite result from finite inputs causes OverflowError to be
860n/a raised.
861n/a - if the result is finite and errno == EDOM then ValueError is
862n/a raised
863n/a - if the result is finite and nonzero and errno == ERANGE then
864n/a OverflowError is raised
865n/a
866n/a The last rule is used to catch overflow on platforms which follow
867n/a C89 but for which HUGE_VAL is not an infinity.
868n/a
869n/a For most two-argument functions (copysign, fmod, hypot, atan2)
870n/a these rules are enough to ensure that Python's functions behave as
871n/a specified in 'Annex F' of the C99 standard, with the 'invalid' and
872n/a 'divide-by-zero' floating-point exceptions mapping to Python's
873n/a ValueError and the 'overflow' floating-point exception mapping to
874n/a OverflowError.
875n/a*/
876n/a
877n/astatic PyObject *
878n/amath_1(PyObject *arg, double (*func) (double), int can_overflow)
879n/a{
880n/a return math_1_to_whatever(arg, func, PyFloat_FromDouble, can_overflow);
881n/a}
882n/a
883n/astatic PyObject *
884n/amath_1_to_int(PyObject *arg, double (*func) (double), int can_overflow)
885n/a{
886n/a return math_1_to_whatever(arg, func, PyLong_FromDouble, can_overflow);
887n/a}
888n/a
889n/astatic PyObject *
890n/amath_2(PyObject *args, double (*func) (double, double), const char *funcname)
891n/a{
892n/a PyObject *ox, *oy;
893n/a double x, y, r;
894n/a if (! PyArg_UnpackTuple(args, funcname, 2, 2, &ox, &oy))
895n/a return NULL;
896n/a x = PyFloat_AsDouble(ox);
897n/a y = PyFloat_AsDouble(oy);
898n/a if ((x == -1.0 || y == -1.0) && PyErr_Occurred())
899n/a return NULL;
900n/a errno = 0;
901n/a PyFPE_START_PROTECT("in math_2", return 0);
902n/a r = (*func)(x, y);
903n/a PyFPE_END_PROTECT(r);
904n/a if (Py_IS_NAN(r)) {
905n/a if (!Py_IS_NAN(x) && !Py_IS_NAN(y))
906n/a errno = EDOM;
907n/a else
908n/a errno = 0;
909n/a }
910n/a else if (Py_IS_INFINITY(r)) {
911n/a if (Py_IS_FINITE(x) && Py_IS_FINITE(y))
912n/a errno = ERANGE;
913n/a else
914n/a errno = 0;
915n/a }
916n/a if (errno && is_error(r))
917n/a return NULL;
918n/a else
919n/a return PyFloat_FromDouble(r);
920n/a}
921n/a
/* Boilerplate generators for the module-level wrappers: FUNC1 wraps a
   one-argument libm-style function via math_1, FUNC1A via math_1a (for
   functions that set errno themselves), and FUNC2 wraps a two-argument
   function via math_2.  Each also emits the docstring variable. */
#define FUNC1(funcname, func, can_overflow, docstring) \
    static PyObject * math_##funcname(PyObject *self, PyObject *args) { \
        return math_1(args, func, can_overflow); \
    }\
    PyDoc_STRVAR(math_##funcname##_doc, docstring);

#define FUNC1A(funcname, func, docstring) \
    static PyObject * math_##funcname(PyObject *self, PyObject *args) { \
        return math_1a(args, func); \
    }\
    PyDoc_STRVAR(math_##funcname##_doc, docstring);

#define FUNC2(funcname, func, docstring) \
    static PyObject * math_##funcname(PyObject *self, PyObject *args) { \
        return math_2(args, func, #funcname); \
    }\
    PyDoc_STRVAR(math_##funcname##_doc, docstring);
939n/a
940n/aFUNC1(acos, acos, 0,
941n/a "acos($module, x, /)\n--\n\n"
942n/a "Return the arc cosine (measured in radians) of x.")
943n/aFUNC1(acosh, m_acosh, 0,
944n/a "acosh($module, x, /)\n--\n\n"
945n/a "Return the inverse hyperbolic cosine of x.")
946n/aFUNC1(asin, asin, 0,
947n/a "asin($module, x, /)\n--\n\n"
948n/a "Return the arc sine (measured in radians) of x.")
949n/aFUNC1(asinh, m_asinh, 0,
950n/a "asinh($module, x, /)\n--\n\n"
951n/a "Return the inverse hyperbolic sine of x.")
952n/aFUNC1(atan, atan, 0,
953n/a "atan($module, x, /)\n--\n\n"
954n/a "Return the arc tangent (measured in radians) of x.")
955n/aFUNC2(atan2, m_atan2,
956n/a "atan2($module, y, x, /)\n--\n\n"
957n/a "Return the arc tangent (measured in radians) of y/x.\n\n"
958n/a "Unlike atan(y/x), the signs of both x and y are considered.")
959n/aFUNC1(atanh, m_atanh, 0,
960n/a "atanh($module, x, /)\n--\n\n"
961n/a "Return the inverse hyperbolic tangent of x.")
962n/a
963n/a/*[clinic input]
964n/amath.ceil
965n/a
966n/a x as number: object
967n/a /
968n/a
969n/aReturn the ceiling of x as an Integral.
970n/a
971n/aThis is the smallest integer >= x.
972n/a[clinic start generated code]*/
973n/a
974n/astatic PyObject *
975n/amath_ceil(PyObject *module, PyObject *number)
976n/a/*[clinic end generated code: output=6c3b8a78bc201c67 input=2725352806399cab]*/
977n/a{
978n/a _Py_IDENTIFIER(__ceil__);
979n/a PyObject *method, *result;
980n/a
981n/a method = _PyObject_LookupSpecial(number, &PyId___ceil__);
982n/a if (method == NULL) {
983n/a if (PyErr_Occurred())
984n/a return NULL;
985n/a return math_1_to_int(number, ceil, 0);
986n/a }
987n/a result = _PyObject_CallNoArg(method);
988n/a Py_DECREF(method);
989n/a return result;
990n/a}
991n/a
992n/aFUNC2(copysign, copysign,
993n/a "copysign($module, x, y, /)\n--\n\n"
994n/a "Return a float with the magnitude (absolute value) of x but the sign of y.\n\n"
995n/a "On platforms that support signed zeros, copysign(1.0, -0.0)\n"
996n/a "returns -1.0.\n")
997n/aFUNC1(cos, cos, 0,
998n/a "cos($module, x, /)\n--\n\n"
999n/a "Return the cosine of x (measured in radians).")
1000n/aFUNC1(cosh, cosh, 1,
1001n/a "cosh($module, x, /)\n--\n\n"
1002n/a "Return the hyperbolic cosine of x.")
1003n/aFUNC1A(erf, m_erf,
1004n/a "erf($module, x, /)\n--\n\n"
1005n/a "Error function at x.")
1006n/aFUNC1A(erfc, m_erfc,
1007n/a "erfc($module, x, /)\n--\n\n"
1008n/a "Complementary error function at x.")
1009n/aFUNC1(exp, exp, 1,
1010n/a "exp($module, x, /)\n--\n\n"
1011n/a "Return e raised to the power of x.")
1012n/aFUNC1(expm1, m_expm1, 1,
1013n/a "expm1($module, x, /)\n--\n\n"
1014n/a "Return exp(x)-1.\n\n"
1015n/a "This function avoids the loss of precision involved in the direct "
1016n/a "evaluation of exp(x)-1 for small x.")
1017n/aFUNC1(fabs, fabs, 0,
1018n/a "fabs($module, x, /)\n--\n\n"
1019n/a "Return the absolute value of the float x.")
1020n/a
1021n/a/*[clinic input]
1022n/amath.floor
1023n/a
1024n/a x as number: object
1025n/a /
1026n/a
1027n/aReturn the floor of x as an Integral.
1028n/a
1029n/aThis is the largest integer <= x.
1030n/a[clinic start generated code]*/
1031n/a
1032n/astatic PyObject *
1033n/amath_floor(PyObject *module, PyObject *number)
1034n/a/*[clinic end generated code: output=c6a65c4884884b8a input=63af6b5d7ebcc3d6]*/
1035n/a{
1036n/a _Py_IDENTIFIER(__floor__);
1037n/a PyObject *method, *result;
1038n/a
1039n/a method = _PyObject_LookupSpecial(number, &PyId___floor__);
1040n/a if (method == NULL) {
1041n/a if (PyErr_Occurred())
1042n/a return NULL;
1043n/a return math_1_to_int(number, floor, 0);
1044n/a }
1045n/a result = _PyObject_CallNoArg(method);
1046n/a Py_DECREF(method);
1047n/a return result;
1048n/a}
1049n/a
1050n/aFUNC1A(gamma, m_tgamma,
1051n/a "gamma($module, x, /)\n--\n\n"
1052n/a "Gamma function at x.")
1053n/aFUNC1A(lgamma, m_lgamma,
1054n/a "lgamma($module, x, /)\n--\n\n"
1055n/a "Natural logarithm of absolute value of Gamma function at x.")
1056n/aFUNC1(log1p, m_log1p, 0,
1057n/a "log1p($module, x, /)\n--\n\n"
1058n/a "Return the natural logarithm of 1+x (base e).\n\n"
1059n/a "The result is computed in a way which is accurate for x near zero.")
1060n/aFUNC1(sin, sin, 0,
1061n/a "sin($module, x, /)\n--\n\n"
1062n/a "Return the sine of x (measured in radians).")
1063n/aFUNC1(sinh, sinh, 1,
1064n/a "sinh($module, x, /)\n--\n\n"
1065n/a "Return the hyperbolic sine of x.")
1066n/aFUNC1(sqrt, sqrt, 0,
1067n/a "sqrt($module, x, /)\n--\n\n"
1068n/a "Return the square root of x.")
1069n/aFUNC1(tan, tan, 0,
1070n/a "tan($module, x, /)\n--\n\n"
1071n/a "Return the tangent of x (measured in radians).")
1072n/aFUNC1(tanh, tanh, 0,
1073n/a "tanh($module, x, /)\n--\n\n"
1074n/a "Return the hyperbolic tangent of x.")
1075n/a
1076n/a/* Precision summation function as msum() by Raymond Hettinger in
1077n/a <http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/393090>,
1078n/a enhanced with the exact partials sum and roundoff from Mark
1079n/a Dickinson's post at <http://bugs.python.org/file10357/msum4.py>.
1080n/a See those links for more details, proofs and other references.
1081n/a
1082n/a Note 1: IEEE 754R floating point semantics are assumed,
1083n/a but the current implementation does not re-establish special
1084n/a value semantics across iterations (i.e. handling -Inf + Inf).
1085n/a
1086n/a Note 2: No provision is made for intermediate overflow handling;
1087n/a therefore, sum([1e+308, 1e-308, 1e+308]) returns 1e+308 while
1088n/a sum([1e+308, 1e+308, 1e-308]) raises an OverflowError due to the
1089n/a overflow of the first partial sum.
1090n/a
1091n/a Note 3: The intermediate values lo, yr, and hi are declared volatile so
1092n/a aggressive compilers won't algebraically reduce lo to always be exactly 0.0.
1093n/a Also, the volatile declaration forces the values to be stored in memory as
1094n/a regular doubles instead of extended long precision (80-bit) values. This
1095n/a prevents double rounding because any addition or subtraction of two doubles
1096n/a can be resolved exactly into double-sized hi and lo values. As long as the
1097n/a hi value gets forced into a double before yr and lo are computed, the extra
1098n/a bits in downstream extended precision operations (x87 for example) will be
1099n/a exactly zero and therefore can be losslessly stored back into a double,
1100n/a thereby preventing double rounding.
1101n/a
1102n/a Note 4: A similar implementation is in Modules/cmathmodule.c.
1103n/a Be sure to update both when making changes.
1104n/a
1105n/a Note 5: The signature of math.fsum() differs from builtins.sum()
1106n/a because the start argument doesn't make sense in the context of
1107n/a accurate summation. Since the partials table is collapsed before
1108n/a returning a result, sum(seq2, start=sum(seq1)) may not equal the
1109n/a accurate result returned by sum(itertools.chain(seq1, seq2)).
1110n/a*/
1111n/a
1112n/a#define NUM_PARTIALS 32 /* initial partials array size, on stack */
1113n/a
1114n/a/* Extend the partials array p[] by doubling its size. */
1115n/astatic int /* non-zero on error */
1116n/a_fsum_realloc(double **p_ptr, Py_ssize_t n,
1117n/a double *ps, Py_ssize_t *m_ptr)
1118n/a{
1119n/a void *v = NULL;
1120n/a Py_ssize_t m = *m_ptr;
1121n/a
1122n/a m += m; /* double */
1123n/a if (n < m && (size_t)m < ((size_t)PY_SSIZE_T_MAX / sizeof(double))) {
1124n/a double *p = *p_ptr;
1125n/a if (p == ps) {
1126n/a v = PyMem_Malloc(sizeof(double) * m);
1127n/a if (v != NULL)
1128n/a memcpy(v, ps, sizeof(double) * n);
1129n/a }
1130n/a else
1131n/a v = PyMem_Realloc(p, sizeof(double) * m);
1132n/a }
1133n/a if (v == NULL) { /* size overflow or no memory */
1134n/a PyErr_SetString(PyExc_MemoryError, "math.fsum partials");
1135n/a return 1;
1136n/a }
1137n/a *p_ptr = (double*) v;
1138n/a *m_ptr = m;
1139n/a return 0;
1140n/a}
1141n/a
1142n/a/* Full precision summation of a sequence of floats.
1143n/a
1144n/a def msum(iterable):
1145n/a partials = [] # sorted, non-overlapping partial sums
1146n/a for x in iterable:
1147n/a i = 0
1148n/a for y in partials:
1149n/a if abs(x) < abs(y):
1150n/a x, y = y, x
1151n/a hi = x + y
1152n/a lo = y - (hi - x)
1153n/a if lo:
1154n/a partials[i] = lo
1155n/a i += 1
1156n/a x = hi
1157n/a partials[i:] = [x]
1158n/a return sum_exact(partials)
1159n/a
1160n/a Rounded x+y stored in hi with the roundoff stored in lo. Together hi+lo
1161n/a are exactly equal to x+y. The inner loop applies hi/lo summation to each
1162n/a partial so that the list of partial sums remains exact.
1163n/a
1164n/a Sum_exact() adds the partial sums exactly and correctly rounds the final
1165n/a result (using the round-half-to-even rule). The items in partials remain
1166n/a non-zero, non-special, non-overlapping and strictly increasing in
1167n/a magnitude, but possibly not all having the same sign.
1168n/a
1169n/a Depends on IEEE 754 arithmetic guarantees and half-even rounding.
1170n/a*/
1171n/a
1172n/a/*[clinic input]
1173n/amath.fsum
1174n/a
1175n/a seq: object
1176n/a /
1177n/a
1178n/aReturn an accurate floating point sum of values in the iterable seq.
1179n/a
1180n/aAssumes IEEE-754 floating point arithmetic.
1181n/a[clinic start generated code]*/
1182n/a
1183n/astatic PyObject *
1184n/amath_fsum(PyObject *module, PyObject *seq)
1185n/a/*[clinic end generated code: output=ba5c672b87fe34fc input=c51b7d8caf6f6e82]*/
1186n/a{
1187n/a PyObject *item, *iter, *sum = NULL;
1188n/a Py_ssize_t i, j, n = 0, m = NUM_PARTIALS;
1189n/a double x, y, t, ps[NUM_PARTIALS], *p = ps;
1190n/a double xsave, special_sum = 0.0, inf_sum = 0.0;
1191n/a volatile double hi, yr, lo;
1192n/a
1193n/a iter = PyObject_GetIter(seq);
1194n/a if (iter == NULL)
1195n/a return NULL;
1196n/a
1197n/a PyFPE_START_PROTECT("fsum", Py_DECREF(iter); return NULL)
1198n/a
1199n/a for(;;) { /* for x in iterable */
1200n/a assert(0 <= n && n <= m);
1201n/a assert((m == NUM_PARTIALS && p == ps) ||
1202n/a (m > NUM_PARTIALS && p != NULL));
1203n/a
1204n/a item = PyIter_Next(iter);
1205n/a if (item == NULL) {
1206n/a if (PyErr_Occurred())
1207n/a goto _fsum_error;
1208n/a break;
1209n/a }
1210n/a x = PyFloat_AsDouble(item);
1211n/a Py_DECREF(item);
1212n/a if (PyErr_Occurred())
1213n/a goto _fsum_error;
1214n/a
1215n/a xsave = x;
1216n/a for (i = j = 0; j < n; j++) { /* for y in partials */
1217n/a y = p[j];
1218n/a if (fabs(x) < fabs(y)) {
1219n/a t = x; x = y; y = t;
1220n/a }
1221n/a hi = x + y;
1222n/a yr = hi - x;
1223n/a lo = y - yr;
1224n/a if (lo != 0.0)
1225n/a p[i++] = lo;
1226n/a x = hi;
1227n/a }
1228n/a
1229n/a n = i; /* ps[i:] = [x] */
1230n/a if (x != 0.0) {
1231n/a if (! Py_IS_FINITE(x)) {
1232n/a /* a nonfinite x could arise either as
1233n/a a result of intermediate overflow, or
1234n/a as a result of a nan or inf in the
1235n/a summands */
1236n/a if (Py_IS_FINITE(xsave)) {
1237n/a PyErr_SetString(PyExc_OverflowError,
1238n/a "intermediate overflow in fsum");
1239n/a goto _fsum_error;
1240n/a }
1241n/a if (Py_IS_INFINITY(xsave))
1242n/a inf_sum += xsave;
1243n/a special_sum += xsave;
1244n/a /* reset partials */
1245n/a n = 0;
1246n/a }
1247n/a else if (n >= m && _fsum_realloc(&p, n, ps, &m))
1248n/a goto _fsum_error;
1249n/a else
1250n/a p[n++] = x;
1251n/a }
1252n/a }
1253n/a
1254n/a if (special_sum != 0.0) {
1255n/a if (Py_IS_NAN(inf_sum))
1256n/a PyErr_SetString(PyExc_ValueError,
1257n/a "-inf + inf in fsum");
1258n/a else
1259n/a sum = PyFloat_FromDouble(special_sum);
1260n/a goto _fsum_error;
1261n/a }
1262n/a
1263n/a hi = 0.0;
1264n/a if (n > 0) {
1265n/a hi = p[--n];
1266n/a /* sum_exact(ps, hi) from the top, stop when the sum becomes
1267n/a inexact. */
1268n/a while (n > 0) {
1269n/a x = hi;
1270n/a y = p[--n];
1271n/a assert(fabs(y) < fabs(x));
1272n/a hi = x + y;
1273n/a yr = hi - x;
1274n/a lo = y - yr;
1275n/a if (lo != 0.0)
1276n/a break;
1277n/a }
1278n/a /* Make half-even rounding work across multiple partials.
1279n/a Needed so that sum([1e-16, 1, 1e16]) will round-up the last
1280n/a digit to two instead of down to zero (the 1e-16 makes the 1
1281n/a slightly closer to two). With a potential 1 ULP rounding
1282n/a error fixed-up, math.fsum() can guarantee commutativity. */
1283n/a if (n > 0 && ((lo < 0.0 && p[n-1] < 0.0) ||
1284n/a (lo > 0.0 && p[n-1] > 0.0))) {
1285n/a y = lo * 2.0;
1286n/a x = hi + y;
1287n/a yr = x - hi;
1288n/a if (y == yr)
1289n/a hi = x;
1290n/a }
1291n/a }
1292n/a sum = PyFloat_FromDouble(hi);
1293n/a
1294n/a_fsum_error:
1295n/a PyFPE_END_PROTECT(hi)
1296n/a Py_DECREF(iter);
1297n/a if (p != ps)
1298n/a PyMem_Free(p);
1299n/a return sum;
1300n/a}
1301n/a
1302n/a#undef NUM_PARTIALS
1303n/a
1304n/a
1305n/a/* Return the smallest integer k such that n < 2**k, or 0 if n == 0.
1306n/a * Equivalent to floor(lg(x))+1. Also equivalent to: bitwidth_of_type -
1307n/a * count_leading_zero_bits(x)
1308n/a */
1309n/a
1310n/a/* XXX: This routine does more or less the same thing as
1311n/a * bits_in_digit() in Objects/longobject.c. Someday it would be nice to
1312n/a * consolidate them. On BSD, there's a library function called fls()
1313n/a * that we could use, and GCC provides __builtin_clz().
1314n/a */
1315n/a
1316n/astatic unsigned long
1317n/abit_length(unsigned long n)
1318n/a{
1319n/a unsigned long len = 0;
1320n/a while (n != 0) {
1321n/a ++len;
1322n/a n >>= 1;
1323n/a }
1324n/a return len;
1325n/a}
1326n/a
1327n/astatic unsigned long
1328n/acount_set_bits(unsigned long n)
1329n/a{
1330n/a unsigned long count = 0;
1331n/a while (n != 0) {
1332n/a ++count;
1333n/a n &= n - 1; /* clear least significant bit */
1334n/a }
1335n/a return count;
1336n/a}
1337n/a
1338n/a/* Divide-and-conquer factorial algorithm
1339n/a *
1340n/a * Based on the formula and pseudo-code provided at:
1341n/a * http://www.luschny.de/math/factorial/binarysplitfact.html
1342n/a *
1343n/a * Faster algorithms exist, but they're more complicated and depend on
1344n/a * a fast prime factorization algorithm.
1345n/a *
1346n/a * Notes on the algorithm
1347n/a * ----------------------
1348n/a *
1349n/a * factorial(n) is written in the form 2**k * m, with m odd. k and m are
1350n/a * computed separately, and then combined using a left shift.
1351n/a *
1352n/a * The function factorial_odd_part computes the odd part m (i.e., the greatest
1353n/a * odd divisor) of factorial(n), using the formula:
1354n/a *
1355n/a * factorial_odd_part(n) =
1356n/a *
1357n/a * product_{i >= 0} product_{0 < j <= n / 2**i, j odd} j
1358n/a *
1359n/a * Example: factorial_odd_part(20) =
1360n/a *
1361n/a * (1) *
1362n/a * (1) *
1363n/a * (1 * 3 * 5) *
1364n/a * (1 * 3 * 5 * 7 * 9)
1365n/a * (1 * 3 * 5 * 7 * 9 * 11 * 13 * 15 * 17 * 19)
1366n/a *
1367n/a * Here i goes from large to small: the first term corresponds to i=4 (any
1368n/a * larger i gives an empty product), and the last term corresponds to i=0.
1369n/a * Each term can be computed from the last by multiplying by the extra odd
1370n/a * numbers required: e.g., to get from the penultimate term to the last one,
1371n/a * we multiply by (11 * 13 * 15 * 17 * 19).
1372n/a *
1373n/a * To see a hint of why this formula works, here are the same numbers as above
1374n/a * but with the even parts (i.e., the appropriate powers of 2) included. For
1375n/a * each subterm in the product for i, we multiply that subterm by 2**i:
1376n/a *
1377n/a * factorial(20) =
1378n/a *
1379n/a * (16) *
1380n/a * (8) *
1381n/a * (4 * 12 * 20) *
1382n/a * (2 * 6 * 10 * 14 * 18) *
1383n/a * (1 * 3 * 5 * 7 * 9 * 11 * 13 * 15 * 17 * 19)
1384n/a *
1385n/a * The factorial_partial_product function computes the product of all odd j in
1386n/a * range(start, stop) for given start and stop. It's used to compute the
1387n/a * partial products like (11 * 13 * 15 * 17 * 19) in the example above. It
1388n/a * operates recursively, repeatedly splitting the range into two roughly equal
1389n/a * pieces until the subranges are small enough to be computed using only C
1390n/a * integer arithmetic.
1391n/a *
1392n/a * The two-valuation k (i.e., the exponent of the largest power of 2 dividing
1393n/a * the factorial) is computed independently in the main math_factorial
1394n/a * function. By standard results, its value is:
1395n/a *
1396n/a * two_valuation = n//2 + n//4 + n//8 + ....
1397n/a *
1398n/a * It can be shown (e.g., by complete induction on n) that two_valuation is
1399n/a * equal to n - count_set_bits(n), where count_set_bits(n) gives the number of
1400n/a * '1'-bits in the binary expansion of n.
1401n/a */
1402n/a
1403n/a/* factorial_partial_product: Compute product(range(start, stop, 2)) using
1404n/a * divide and conquer. Assumes start and stop are odd and stop > start.
1405n/a * max_bits must be >= bit_length(stop - 2). */
1406n/a
1407n/astatic PyObject *
1408n/afactorial_partial_product(unsigned long start, unsigned long stop,
1409n/a unsigned long max_bits)
1410n/a{
1411n/a unsigned long midpoint, num_operands;
1412n/a PyObject *left = NULL, *right = NULL, *result = NULL;
1413n/a
1414n/a /* If the return value will fit an unsigned long, then we can
1415n/a * multiply in a tight, fast loop where each multiply is O(1).
1416n/a * Compute an upper bound on the number of bits required to store
1417n/a * the answer.
1418n/a *
1419n/a * Storing some integer z requires floor(lg(z))+1 bits, which is
1420n/a * conveniently the value returned by bit_length(z). The
1421n/a * product x*y will require at most
1422n/a * bit_length(x) + bit_length(y) bits to store, based
1423n/a * on the idea that lg product = lg x + lg y.
1424n/a *
1425n/a * We know that stop - 2 is the largest number to be multiplied. From
1426n/a * there, we have: bit_length(answer) <= num_operands *
1427n/a * bit_length(stop - 2)
1428n/a */
1429n/a
1430n/a num_operands = (stop - start) / 2;
1431n/a /* The "num_operands <= 8 * SIZEOF_LONG" check guards against the
1432n/a * unlikely case of an overflow in num_operands * max_bits. */
1433n/a if (num_operands <= 8 * SIZEOF_LONG &&
1434n/a num_operands * max_bits <= 8 * SIZEOF_LONG) {
1435n/a unsigned long j, total;
1436n/a for (total = start, j = start + 2; j < stop; j += 2)
1437n/a total *= j;
1438n/a return PyLong_FromUnsignedLong(total);
1439n/a }
1440n/a
1441n/a /* find midpoint of range(start, stop), rounded up to next odd number. */
1442n/a midpoint = (start + num_operands) | 1;
1443n/a left = factorial_partial_product(start, midpoint,
1444n/a bit_length(midpoint - 2));
1445n/a if (left == NULL)
1446n/a goto error;
1447n/a right = factorial_partial_product(midpoint, stop, max_bits);
1448n/a if (right == NULL)
1449n/a goto error;
1450n/a result = PyNumber_Multiply(left, right);
1451n/a
1452n/a error:
1453n/a Py_XDECREF(left);
1454n/a Py_XDECREF(right);
1455n/a return result;
1456n/a}
1457n/a
1458n/a/* factorial_odd_part: compute the odd part of factorial(n). */
1459n/a
1460n/astatic PyObject *
1461n/afactorial_odd_part(unsigned long n)
1462n/a{
1463n/a long i;
1464n/a unsigned long v, lower, upper;
1465n/a PyObject *partial, *tmp, *inner, *outer;
1466n/a
1467n/a inner = PyLong_FromLong(1);
1468n/a if (inner == NULL)
1469n/a return NULL;
1470n/a outer = inner;
1471n/a Py_INCREF(outer);
1472n/a
1473n/a upper = 3;
1474n/a for (i = bit_length(n) - 2; i >= 0; i--) {
1475n/a v = n >> i;
1476n/a if (v <= 2)
1477n/a continue;
1478n/a lower = upper;
1479n/a /* (v + 1) | 1 = least odd integer strictly larger than n / 2**i */
1480n/a upper = (v + 1) | 1;
1481n/a /* Here inner is the product of all odd integers j in the range (0,
1482n/a n/2**(i+1)]. The factorial_partial_product call below gives the
1483n/a product of all odd integers j in the range (n/2**(i+1), n/2**i]. */
1484n/a partial = factorial_partial_product(lower, upper, bit_length(upper-2));
1485n/a /* inner *= partial */
1486n/a if (partial == NULL)
1487n/a goto error;
1488n/a tmp = PyNumber_Multiply(inner, partial);
1489n/a Py_DECREF(partial);
1490n/a if (tmp == NULL)
1491n/a goto error;
1492n/a Py_DECREF(inner);
1493n/a inner = tmp;
1494n/a /* Now inner is the product of all odd integers j in the range (0,
1495n/a n/2**i], giving the inner product in the formula above. */
1496n/a
1497n/a /* outer *= inner; */
1498n/a tmp = PyNumber_Multiply(outer, inner);
1499n/a if (tmp == NULL)
1500n/a goto error;
1501n/a Py_DECREF(outer);
1502n/a outer = tmp;
1503n/a }
1504n/a Py_DECREF(inner);
1505n/a return outer;
1506n/a
1507n/a error:
1508n/a Py_DECREF(outer);
1509n/a Py_DECREF(inner);
1510n/a return NULL;
1511n/a}
1512n/a
1513n/a
1514n/a/* Lookup table for small factorial values */
1515n/a
1516n/astatic const unsigned long SmallFactorials[] = {
1517n/a 1, 1, 2, 6, 24, 120, 720, 5040, 40320,
1518n/a 362880, 3628800, 39916800, 479001600,
1519n/a#if SIZEOF_LONG >= 8
1520n/a 6227020800, 87178291200, 1307674368000,
1521n/a 20922789888000, 355687428096000, 6402373705728000,
1522n/a 121645100408832000, 2432902008176640000
1523n/a#endif
1524n/a};
1525n/a
1526n/a/*[clinic input]
1527n/amath.factorial
1528n/a
1529n/a x as arg: object
1530n/a /
1531n/a
1532n/aFind x!.
1533n/a
1534n/aRaise a ValueError if x is negative or non-integral.
1535n/a[clinic start generated code]*/
1536n/a
1537n/astatic PyObject *
1538n/amath_factorial(PyObject *module, PyObject *arg)
1539n/a/*[clinic end generated code: output=6686f26fae00e9ca input=6d1c8105c0d91fb4]*/
1540n/a{
1541n/a long x;
1542n/a int overflow;
1543n/a PyObject *result, *odd_part, *two_valuation;
1544n/a
1545n/a if (PyFloat_Check(arg)) {
1546n/a PyObject *lx;
1547n/a double dx = PyFloat_AS_DOUBLE((PyFloatObject *)arg);
1548n/a if (!(Py_IS_FINITE(dx) && dx == floor(dx))) {
1549n/a PyErr_SetString(PyExc_ValueError,
1550n/a "factorial() only accepts integral values");
1551n/a return NULL;
1552n/a }
1553n/a lx = PyLong_FromDouble(dx);
1554n/a if (lx == NULL)
1555n/a return NULL;
1556n/a x = PyLong_AsLongAndOverflow(lx, &overflow);
1557n/a Py_DECREF(lx);
1558n/a }
1559n/a else
1560n/a x = PyLong_AsLongAndOverflow(arg, &overflow);
1561n/a
1562n/a if (x == -1 && PyErr_Occurred()) {
1563n/a return NULL;
1564n/a }
1565n/a else if (overflow == 1) {
1566n/a PyErr_Format(PyExc_OverflowError,
1567n/a "factorial() argument should not exceed %ld",
1568n/a LONG_MAX);
1569n/a return NULL;
1570n/a }
1571n/a else if (overflow == -1 || x < 0) {
1572n/a PyErr_SetString(PyExc_ValueError,
1573n/a "factorial() not defined for negative values");
1574n/a return NULL;
1575n/a }
1576n/a
1577n/a /* use lookup table if x is small */
1578n/a if (x < (long)Py_ARRAY_LENGTH(SmallFactorials))
1579n/a return PyLong_FromUnsignedLong(SmallFactorials[x]);
1580n/a
1581n/a /* else express in the form odd_part * 2**two_valuation, and compute as
1582n/a odd_part << two_valuation. */
1583n/a odd_part = factorial_odd_part(x);
1584n/a if (odd_part == NULL)
1585n/a return NULL;
1586n/a two_valuation = PyLong_FromLong(x - count_set_bits(x));
1587n/a if (two_valuation == NULL) {
1588n/a Py_DECREF(odd_part);
1589n/a return NULL;
1590n/a }
1591n/a result = PyNumber_Lshift(odd_part, two_valuation);
1592n/a Py_DECREF(two_valuation);
1593n/a Py_DECREF(odd_part);
1594n/a return result;
1595n/a}
1596n/a
1597n/a
1598n/a/*[clinic input]
1599n/amath.trunc
1600n/a
1601n/a x: object
1602n/a /
1603n/a
1604n/aTruncates the Real x to the nearest Integral toward 0.
1605n/a
1606n/aUses the __trunc__ magic method.
1607n/a[clinic start generated code]*/
1608n/a
1609n/astatic PyObject *
1610n/amath_trunc(PyObject *module, PyObject *x)
1611n/a/*[clinic end generated code: output=34b9697b707e1031 input=2168b34e0a09134d]*/
1612n/a{
1613n/a _Py_IDENTIFIER(__trunc__);
1614n/a PyObject *trunc, *result;
1615n/a
1616n/a if (Py_TYPE(x)->tp_dict == NULL) {
1617n/a if (PyType_Ready(Py_TYPE(x)) < 0)
1618n/a return NULL;
1619n/a }
1620n/a
1621n/a trunc = _PyObject_LookupSpecial(x, &PyId___trunc__);
1622n/a if (trunc == NULL) {
1623n/a if (!PyErr_Occurred())
1624n/a PyErr_Format(PyExc_TypeError,
1625n/a "type %.100s doesn't define __trunc__ method",
1626n/a Py_TYPE(x)->tp_name);
1627n/a return NULL;
1628n/a }
1629n/a result = _PyObject_CallNoArg(trunc);
1630n/a Py_DECREF(trunc);
1631n/a return result;
1632n/a}
1633n/a
1634n/a
1635n/a/*[clinic input]
1636n/amath.frexp
1637n/a
1638n/a x: double
1639n/a /
1640n/a
1641n/aReturn the mantissa and exponent of x, as pair (m, e).
1642n/a
1643n/am is a float and e is an int, such that x = m * 2.**e.
1644n/aIf x is 0, m and e are both 0. Else 0.5 <= abs(m) < 1.0.
1645n/a[clinic start generated code]*/
1646n/a
1647n/astatic PyObject *
1648n/amath_frexp_impl(PyObject *module, double x)
1649n/a/*[clinic end generated code: output=03e30d252a15ad4a input=96251c9e208bc6e9]*/
1650n/a{
1651n/a int i;
1652n/a /* deal with special cases directly, to sidestep platform
1653n/a differences */
1654n/a if (Py_IS_NAN(x) || Py_IS_INFINITY(x) || !x) {
1655n/a i = 0;
1656n/a }
1657n/a else {
1658n/a PyFPE_START_PROTECT("in math_frexp", return 0);
1659n/a x = frexp(x, &i);
1660n/a PyFPE_END_PROTECT(x);
1661n/a }
1662n/a return Py_BuildValue("(di)", x, i);
1663n/a}
1664n/a
1665n/a
1666n/a/*[clinic input]
1667n/amath.ldexp
1668n/a
1669n/a x: double
1670n/a i: object
1671n/a /
1672n/a
1673n/aReturn x * (2**i).
1674n/a
1675n/aThis is essentially the inverse of frexp().
1676n/a[clinic start generated code]*/
1677n/a
1678n/astatic PyObject *
1679n/amath_ldexp_impl(PyObject *module, double x, PyObject *i)
1680n/a/*[clinic end generated code: output=b6892f3c2df9cc6a input=17d5970c1a40a8c1]*/
1681n/a{
1682n/a double r;
1683n/a long exp;
1684n/a int overflow;
1685n/a
1686n/a if (PyLong_Check(i)) {
1687n/a /* on overflow, replace exponent with either LONG_MAX
1688n/a or LONG_MIN, depending on the sign. */
1689n/a exp = PyLong_AsLongAndOverflow(i, &overflow);
1690n/a if (exp == -1 && PyErr_Occurred())
1691n/a return NULL;
1692n/a if (overflow)
1693n/a exp = overflow < 0 ? LONG_MIN : LONG_MAX;
1694n/a }
1695n/a else {
1696n/a PyErr_SetString(PyExc_TypeError,
1697n/a "Expected an int as second argument to ldexp.");
1698n/a return NULL;
1699n/a }
1700n/a
1701n/a if (x == 0. || !Py_IS_FINITE(x)) {
1702n/a /* NaNs, zeros and infinities are returned unchanged */
1703n/a r = x;
1704n/a errno = 0;
1705n/a } else if (exp > INT_MAX) {
1706n/a /* overflow */
1707n/a r = copysign(Py_HUGE_VAL, x);
1708n/a errno = ERANGE;
1709n/a } else if (exp < INT_MIN) {
1710n/a /* underflow to +-0 */
1711n/a r = copysign(0., x);
1712n/a errno = 0;
1713n/a } else {
1714n/a errno = 0;
1715n/a PyFPE_START_PROTECT("in math_ldexp", return 0);
1716n/a r = ldexp(x, (int)exp);
1717n/a PyFPE_END_PROTECT(r);
1718n/a if (Py_IS_INFINITY(r))
1719n/a errno = ERANGE;
1720n/a }
1721n/a
1722n/a if (errno && is_error(r))
1723n/a return NULL;
1724n/a return PyFloat_FromDouble(r);
1725n/a}
1726n/a
1727n/a
1728n/a/*[clinic input]
1729n/amath.modf
1730n/a
1731n/a x: double
1732n/a /
1733n/a
1734n/aReturn the fractional and integer parts of x.
1735n/a
1736n/aBoth results carry the sign of x and are floats.
1737n/a[clinic start generated code]*/
1738n/a
1739n/astatic PyObject *
1740n/amath_modf_impl(PyObject *module, double x)
1741n/a/*[clinic end generated code: output=90cee0260014c3c0 input=b4cfb6786afd9035]*/
1742n/a{
1743n/a double y;
1744n/a /* some platforms don't do the right thing for NaNs and
1745n/a infinities, so we take care of special cases directly. */
1746n/a if (!Py_IS_FINITE(x)) {
1747n/a if (Py_IS_INFINITY(x))
1748n/a return Py_BuildValue("(dd)", copysign(0., x), x);
1749n/a else if (Py_IS_NAN(x))
1750n/a return Py_BuildValue("(dd)", x, x);
1751n/a }
1752n/a
1753n/a errno = 0;
1754n/a PyFPE_START_PROTECT("in math_modf", return 0);
1755n/a x = modf(x, &y);
1756n/a PyFPE_END_PROTECT(x);
1757n/a return Py_BuildValue("(dd)", x, y);
1758n/a}
1759n/a
1760n/a
1761n/a/* A decent logarithm is easy to compute even for huge ints, but libm can't
1762n/a do that by itself -- loghelper can. func is log or log10, and name is
1763n/a "log" or "log10". Note that overflow of the result isn't possible: an int
1764n/a can contain no more than INT_MAX * SHIFT bits, so has value certainly less
1765n/a than 2**(2**64 * 2**16) == 2**2**80, and log2 of that is 2**80, which is
1766n/a small enough to fit in an IEEE single. log and log10 are even smaller.
1767n/a However, intermediate overflow is possible for an int if the number of bits
1768n/a in that int is larger than PY_SSIZE_T_MAX. */
1769n/a
1770n/astatic PyObject*
1771n/aloghelper(PyObject* arg, double (*func)(double), const char *funcname)
1772n/a{
1773n/a /* If it is int, do it ourselves. */
1774n/a if (PyLong_Check(arg)) {
1775n/a double x, result;
1776n/a Py_ssize_t e;
1777n/a
1778n/a /* Negative or zero inputs give a ValueError. */
1779n/a if (Py_SIZE(arg) <= 0) {
1780n/a PyErr_SetString(PyExc_ValueError,
1781n/a "math domain error");
1782n/a return NULL;
1783n/a }
1784n/a
1785n/a x = PyLong_AsDouble(arg);
1786n/a if (x == -1.0 && PyErr_Occurred()) {
1787n/a if (!PyErr_ExceptionMatches(PyExc_OverflowError))
1788n/a return NULL;
1789n/a /* Here the conversion to double overflowed, but it's possible
1790n/a to compute the log anyway. Clear the exception and continue. */
1791n/a PyErr_Clear();
1792n/a x = _PyLong_Frexp((PyLongObject *)arg, &e);
1793n/a if (x == -1.0 && PyErr_Occurred())
1794n/a return NULL;
1795n/a /* Value is ~= x * 2**e, so the log ~= log(x) + log(2) * e. */
1796n/a result = func(x) + func(2.0) * e;
1797n/a }
1798n/a else
1799n/a /* Successfully converted x to a double. */
1800n/a result = func(x);
1801n/a return PyFloat_FromDouble(result);
1802n/a }
1803n/a
1804n/a /* Else let libm handle it by itself. */
1805n/a return math_1(arg, func, 0);
1806n/a}
1807n/a
1808n/a
1809n/a/*[clinic input]
1810n/amath.log
1811n/a
1812n/a x: object
1813n/a [
1814n/a base: object(c_default="NULL") = math.e
1815n/a ]
1816n/a /
1817n/a
1818n/aReturn the logarithm of x to the given base.
1819n/a
1820n/aIf the base not specified, returns the natural logarithm (base e) of x.
1821n/a[clinic start generated code]*/
1822n/a
1823n/astatic PyObject *
1824n/amath_log_impl(PyObject *module, PyObject *x, int group_right_1,
1825n/a PyObject *base)
1826n/a/*[clinic end generated code: output=7b5a39e526b73fc9 input=0f62d5726cbfebbd]*/
1827n/a{
1828n/a PyObject *num, *den;
1829n/a PyObject *ans;
1830n/a
1831n/a num = loghelper(x, m_log, "log");
1832n/a if (num == NULL || base == NULL)
1833n/a return num;
1834n/a
1835n/a den = loghelper(base, m_log, "log");
1836n/a if (den == NULL) {
1837n/a Py_DECREF(num);
1838n/a return NULL;
1839n/a }
1840n/a
1841n/a ans = PyNumber_TrueDivide(num, den);
1842n/a Py_DECREF(num);
1843n/a Py_DECREF(den);
1844n/a return ans;
1845n/a}
1846n/a
1847n/a
1848n/a/*[clinic input]
1849n/amath.log2
1850n/a
1851n/a x: object
1852n/a /
1853n/a
1854n/aReturn the base 2 logarithm of x.
1855n/a[clinic start generated code]*/
1856n/a
1857n/astatic PyObject *
1858n/amath_log2(PyObject *module, PyObject *x)
1859n/a/*[clinic end generated code: output=5425899a4d5d6acb input=08321262bae4f39b]*/
1860n/a{
1861n/a return loghelper(x, m_log2, "log2");
1862n/a}
1863n/a
1864n/a
1865n/a/*[clinic input]
1866n/amath.log10
1867n/a
1868n/a x: object
1869n/a /
1870n/a
1871n/aReturn the base 10 logarithm of x.
1872n/a[clinic start generated code]*/
1873n/a
1874n/astatic PyObject *
1875n/amath_log10(PyObject *module, PyObject *x)
1876n/a/*[clinic end generated code: output=be72a64617df9c6f input=b2469d02c6469e53]*/
1877n/a{
1878n/a return loghelper(x, m_log10, "log10");
1879n/a}
1880n/a
1881n/a
1882n/a/*[clinic input]
1883n/amath.fmod
1884n/a
1885n/a x: double
1886n/a y: double
1887n/a /
1888n/a
1889n/aReturn fmod(x, y), according to platform C.
1890n/a
1891n/ax % y may differ.
1892n/a[clinic start generated code]*/
1893n/a
1894n/astatic PyObject *
1895n/amath_fmod_impl(PyObject *module, double x, double y)
1896n/a/*[clinic end generated code: output=7559d794343a27b5 input=4f84caa8cfc26a03]*/
1897n/a{
1898n/a double r;
1899n/a /* fmod(x, +/-Inf) returns x for finite x. */
1900n/a if (Py_IS_INFINITY(y) && Py_IS_FINITE(x))
1901n/a return PyFloat_FromDouble(x);
1902n/a errno = 0;
1903n/a PyFPE_START_PROTECT("in math_fmod", return 0);
1904n/a r = fmod(x, y);
1905n/a PyFPE_END_PROTECT(r);
1906n/a if (Py_IS_NAN(r)) {
1907n/a if (!Py_IS_NAN(x) && !Py_IS_NAN(y))
1908n/a errno = EDOM;
1909n/a else
1910n/a errno = 0;
1911n/a }
1912n/a if (errno && is_error(r))
1913n/a return NULL;
1914n/a else
1915n/a return PyFloat_FromDouble(r);
1916n/a}
1917n/a
1918n/a
1919n/a/*[clinic input]
1920n/amath.hypot
1921n/a
1922n/a x: double
1923n/a y: double
1924n/a /
1925n/a
1926n/aReturn the Euclidean distance, sqrt(x*x + y*y).
1927n/a[clinic start generated code]*/
1928n/a
1929n/astatic PyObject *
1930n/amath_hypot_impl(PyObject *module, double x, double y)
1931n/a/*[clinic end generated code: output=b7686e5be468ef87 input=7f8eea70406474aa]*/
1932n/a{
1933n/a double r;
1934n/a /* hypot(x, +/-Inf) returns Inf, even if x is a NaN. */
1935n/a if (Py_IS_INFINITY(x))
1936n/a return PyFloat_FromDouble(fabs(x));
1937n/a if (Py_IS_INFINITY(y))
1938n/a return PyFloat_FromDouble(fabs(y));
1939n/a errno = 0;
1940n/a PyFPE_START_PROTECT("in math_hypot", return 0);
1941n/a r = hypot(x, y);
1942n/a PyFPE_END_PROTECT(r);
1943n/a if (Py_IS_NAN(r)) {
1944n/a if (!Py_IS_NAN(x) && !Py_IS_NAN(y))
1945n/a errno = EDOM;
1946n/a else
1947n/a errno = 0;
1948n/a }
1949n/a else if (Py_IS_INFINITY(r)) {
1950n/a if (Py_IS_FINITE(x) && Py_IS_FINITE(y))
1951n/a errno = ERANGE;
1952n/a else
1953n/a errno = 0;
1954n/a }
1955n/a if (errno && is_error(r))
1956n/a return NULL;
1957n/a else
1958n/a return PyFloat_FromDouble(r);
1959n/a}
1960n/a
1961n/a
1962n/a/* pow can't use math_2, but needs its own wrapper: the problem is
1963n/a that an infinite result can arise either as a result of overflow
1964n/a (in which case OverflowError should be raised) or as a result of
1965n/a e.g. 0.**-5. (for which ValueError needs to be raised.)
1966n/a*/
1967n/a
1968n/a/*[clinic input]
1969n/amath.pow
1970n/a
1971n/a x: double
1972n/a y: double
1973n/a /
1974n/a
1975n/aReturn x**y (x to the power of y).
1976n/a[clinic start generated code]*/
1977n/a
1978n/astatic PyObject *
1979n/amath_pow_impl(PyObject *module, double x, double y)
1980n/a/*[clinic end generated code: output=fff93e65abccd6b0 input=c26f1f6075088bfd]*/
1981n/a{
1982n/a double r;
1983n/a int odd_y;
1984n/a
1985n/a /* deal directly with IEEE specials, to cope with problems on various
1986n/a platforms whose semantics don't exactly match C99 */
1987n/a r = 0.; /* silence compiler warning */
1988n/a if (!Py_IS_FINITE(x) || !Py_IS_FINITE(y)) {
1989n/a errno = 0;
1990n/a if (Py_IS_NAN(x))
1991n/a r = y == 0. ? 1. : x; /* NaN**0 = 1 */
1992n/a else if (Py_IS_NAN(y))
1993n/a r = x == 1. ? 1. : y; /* 1**NaN = 1 */
1994n/a else if (Py_IS_INFINITY(x)) {
1995n/a odd_y = Py_IS_FINITE(y) && fmod(fabs(y), 2.0) == 1.0;
1996n/a if (y > 0.)
1997n/a r = odd_y ? x : fabs(x);
1998n/a else if (y == 0.)
1999n/a r = 1.;
2000n/a else /* y < 0. */
2001n/a r = odd_y ? copysign(0., x) : 0.;
2002n/a }
2003n/a else if (Py_IS_INFINITY(y)) {
2004n/a if (fabs(x) == 1.0)
2005n/a r = 1.;
2006n/a else if (y > 0. && fabs(x) > 1.0)
2007n/a r = y;
2008n/a else if (y < 0. && fabs(x) < 1.0) {
2009n/a r = -y; /* result is +inf */
2010n/a if (x == 0.) /* 0**-inf: divide-by-zero */
2011n/a errno = EDOM;
2012n/a }
2013n/a else
2014n/a r = 0.;
2015n/a }
2016n/a }
2017n/a else {
2018n/a /* let libm handle finite**finite */
2019n/a errno = 0;
2020n/a PyFPE_START_PROTECT("in math_pow", return 0);
2021n/a r = pow(x, y);
2022n/a PyFPE_END_PROTECT(r);
2023n/a /* a NaN result should arise only from (-ve)**(finite
2024n/a non-integer); in this case we want to raise ValueError. */
2025n/a if (!Py_IS_FINITE(r)) {
2026n/a if (Py_IS_NAN(r)) {
2027n/a errno = EDOM;
2028n/a }
2029n/a /*
2030n/a an infinite result here arises either from:
2031n/a (A) (+/-0.)**negative (-> divide-by-zero)
2032n/a (B) overflow of x**y with x and y finite
2033n/a */
2034n/a else if (Py_IS_INFINITY(r)) {
2035n/a if (x == 0.)
2036n/a errno = EDOM;
2037n/a else
2038n/a errno = ERANGE;
2039n/a }
2040n/a }
2041n/a }
2042n/a
2043n/a if (errno && is_error(r))
2044n/a return NULL;
2045n/a else
2046n/a return PyFloat_FromDouble(r);
2047n/a}
2048n/a
2049n/a
2050n/astatic const double degToRad = Py_MATH_PI / 180.0;
2051n/astatic const double radToDeg = 180.0 / Py_MATH_PI;
2052n/a
2053n/a/*[clinic input]
2054n/amath.degrees
2055n/a
2056n/a x: double
2057n/a /
2058n/a
2059n/aConvert angle x from radians to degrees.
2060n/a[clinic start generated code]*/
2061n/a
2062n/astatic PyObject *
2063n/amath_degrees_impl(PyObject *module, double x)
2064n/a/*[clinic end generated code: output=7fea78b294acd12f input=81e016555d6e3660]*/
2065n/a{
2066n/a return PyFloat_FromDouble(x * radToDeg);
2067n/a}
2068n/a
2069n/a
2070n/a/*[clinic input]
2071n/amath.radians
2072n/a
2073n/a x: double
2074n/a /
2075n/a
2076n/aConvert angle x from degrees to radians.
2077n/a[clinic start generated code]*/
2078n/a
2079n/astatic PyObject *
2080n/amath_radians_impl(PyObject *module, double x)
2081n/a/*[clinic end generated code: output=34daa47caf9b1590 input=91626fc489fe3d63]*/
2082n/a{
2083n/a return PyFloat_FromDouble(x * degToRad);
2084n/a}
2085n/a
2086n/a
2087n/a/*[clinic input]
2088n/amath.isfinite
2089n/a
2090n/a x: double
2091n/a /
2092n/a
2093n/aReturn True if x is neither an infinity nor a NaN, and False otherwise.
2094n/a[clinic start generated code]*/
2095n/a
2096n/astatic PyObject *
2097n/amath_isfinite_impl(PyObject *module, double x)
2098n/a/*[clinic end generated code: output=8ba1f396440c9901 input=46967d254812e54a]*/
2099n/a{
2100n/a return PyBool_FromLong((long)Py_IS_FINITE(x));
2101n/a}
2102n/a
2103n/a
2104n/a/*[clinic input]
2105n/amath.isnan
2106n/a
2107n/a x: double
2108n/a /
2109n/a
2110n/aReturn True if x is a NaN (not a number), and False otherwise.
2111n/a[clinic start generated code]*/
2112n/a
2113n/astatic PyObject *
2114n/amath_isnan_impl(PyObject *module, double x)
2115n/a/*[clinic end generated code: output=f537b4d6df878c3e input=935891e66083f46a]*/
2116n/a{
2117n/a return PyBool_FromLong((long)Py_IS_NAN(x));
2118n/a}
2119n/a
2120n/a
2121n/a/*[clinic input]
2122n/amath.isinf
2123n/a
2124n/a x: double
2125n/a /
2126n/a
2127n/aReturn True if x is a positive or negative infinity, and False otherwise.
2128n/a[clinic start generated code]*/
2129n/a
2130n/astatic PyObject *
2131n/amath_isinf_impl(PyObject *module, double x)
2132n/a/*[clinic end generated code: output=9f00cbec4de7b06b input=32630e4212cf961f]*/
2133n/a{
2134n/a return PyBool_FromLong((long)Py_IS_INFINITY(x));
2135n/a}
2136n/a
2137n/a
2138n/a/*[clinic input]
2139n/amath.isclose -> bool
2140n/a
2141n/a a: double
2142n/a b: double
2143n/a *
2144n/a rel_tol: double = 1e-09
2145n/a maximum difference for being considered "close", relative to the
2146n/a magnitude of the input values
2147n/a abs_tol: double = 0.0
2148n/a maximum difference for being considered "close", regardless of the
2149n/a magnitude of the input values
2150n/a
2151n/aDetermine whether two floating point numbers are close in value.
2152n/a
2153n/aReturn True if a is close in value to b, and False otherwise.
2154n/a
2155n/aFor the values to be considered close, the difference between them
2156n/amust be smaller than at least one of the tolerances.
2157n/a
2158n/a-inf, inf and NaN behave similarly to the IEEE 754 Standard. That
2159n/ais, NaN is not close to anything, even itself. inf and -inf are
2160n/aonly close to themselves.
2161n/a[clinic start generated code]*/
2162n/a
2163n/astatic int
2164n/amath_isclose_impl(PyObject *module, double a, double b, double rel_tol,
2165n/a double abs_tol)
2166n/a/*[clinic end generated code: output=b73070207511952d input=f28671871ea5bfba]*/
2167n/a{
2168n/a double diff = 0.0;
2169n/a
2170n/a /* sanity check on the inputs */
2171n/a if (rel_tol < 0.0 || abs_tol < 0.0 ) {
2172n/a PyErr_SetString(PyExc_ValueError,
2173n/a "tolerances must be non-negative");
2174n/a return -1;
2175n/a }
2176n/a
2177n/a if ( a == b ) {
2178n/a /* short circuit exact equality -- needed to catch two infinities of
2179n/a the same sign. And perhaps speeds things up a bit sometimes.
2180n/a */
2181n/a return 1;
2182n/a }
2183n/a
2184n/a /* This catches the case of two infinities of opposite sign, or
2185n/a one infinity and one finite number. Two infinities of opposite
2186n/a sign would otherwise have an infinite relative tolerance.
2187n/a Two infinities of the same sign are caught by the equality check
2188n/a above.
2189n/a */
2190n/a
2191n/a if (Py_IS_INFINITY(a) || Py_IS_INFINITY(b)) {
2192n/a return 0;
2193n/a }
2194n/a
2195n/a /* now do the regular computation
2196n/a this is essentially the "weak" test from the Boost library
2197n/a */
2198n/a
2199n/a diff = fabs(b - a);
2200n/a
2201n/a return (((diff <= fabs(rel_tol * b)) ||
2202n/a (diff <= fabs(rel_tol * a))) ||
2203n/a (diff <= abs_tol));
2204n/a}
2205n/a
2206n/a
2207n/astatic PyMethodDef math_methods[] = {
2208n/a {"acos", math_acos, METH_O, math_acos_doc},
2209n/a {"acosh", math_acosh, METH_O, math_acosh_doc},
2210n/a {"asin", math_asin, METH_O, math_asin_doc},
2211n/a {"asinh", math_asinh, METH_O, math_asinh_doc},
2212n/a {"atan", math_atan, METH_O, math_atan_doc},
2213n/a {"atan2", math_atan2, METH_VARARGS, math_atan2_doc},
2214n/a {"atanh", math_atanh, METH_O, math_atanh_doc},
2215n/a MATH_CEIL_METHODDEF
2216n/a {"copysign", math_copysign, METH_VARARGS, math_copysign_doc},
2217n/a {"cos", math_cos, METH_O, math_cos_doc},
2218n/a {"cosh", math_cosh, METH_O, math_cosh_doc},
2219n/a MATH_DEGREES_METHODDEF
2220n/a {"erf", math_erf, METH_O, math_erf_doc},
2221n/a {"erfc", math_erfc, METH_O, math_erfc_doc},
2222n/a {"exp", math_exp, METH_O, math_exp_doc},
2223n/a {"expm1", math_expm1, METH_O, math_expm1_doc},
2224n/a {"fabs", math_fabs, METH_O, math_fabs_doc},
2225n/a MATH_FACTORIAL_METHODDEF
2226n/a MATH_FLOOR_METHODDEF
2227n/a MATH_FMOD_METHODDEF
2228n/a MATH_FREXP_METHODDEF
2229n/a MATH_FSUM_METHODDEF
2230n/a {"gamma", math_gamma, METH_O, math_gamma_doc},
2231n/a MATH_GCD_METHODDEF
2232n/a MATH_HYPOT_METHODDEF
2233n/a MATH_ISCLOSE_METHODDEF
2234n/a MATH_ISFINITE_METHODDEF
2235n/a MATH_ISINF_METHODDEF
2236n/a MATH_ISNAN_METHODDEF
2237n/a MATH_LDEXP_METHODDEF
2238n/a {"lgamma", math_lgamma, METH_O, math_lgamma_doc},
2239n/a MATH_LOG_METHODDEF
2240n/a {"log1p", math_log1p, METH_O, math_log1p_doc},
2241n/a MATH_LOG10_METHODDEF
2242n/a MATH_LOG2_METHODDEF
2243n/a MATH_MODF_METHODDEF
2244n/a MATH_POW_METHODDEF
2245n/a MATH_RADIANS_METHODDEF
2246n/a {"sin", math_sin, METH_O, math_sin_doc},
2247n/a {"sinh", math_sinh, METH_O, math_sinh_doc},
2248n/a {"sqrt", math_sqrt, METH_O, math_sqrt_doc},
2249n/a {"tan", math_tan, METH_O, math_tan_doc},
2250n/a {"tanh", math_tanh, METH_O, math_tanh_doc},
2251n/a MATH_TRUNC_METHODDEF
2252n/a {NULL, NULL} /* sentinel */
2253n/a};
2254n/a
2255n/a
2256n/aPyDoc_STRVAR(module_doc,
2257n/a"This module is always available. It provides access to the\n"
2258n/a"mathematical functions defined by the C standard.");
2259n/a
2260n/a
2261n/astatic struct PyModuleDef mathmodule = {
2262n/a PyModuleDef_HEAD_INIT,
2263n/a "math",
2264n/a module_doc,
2265n/a -1,
2266n/a math_methods,
2267n/a NULL,
2268n/a NULL,
2269n/a NULL,
2270n/a NULL
2271n/a};
2272n/a
2273n/aPyMODINIT_FUNC
2274n/aPyInit_math(void)
2275n/a{
2276n/a PyObject *m;
2277n/a
2278n/a m = PyModule_Create(&mathmodule);
2279n/a if (m == NULL)
2280n/a goto finally;
2281n/a
2282n/a PyModule_AddObject(m, "pi", PyFloat_FromDouble(Py_MATH_PI));
2283n/a PyModule_AddObject(m, "e", PyFloat_FromDouble(Py_MATH_E));
2284n/a PyModule_AddObject(m, "tau", PyFloat_FromDouble(Py_MATH_TAU)); /* 2pi */
2285n/a PyModule_AddObject(m, "inf", PyFloat_FromDouble(m_inf()));
2286n/a#if !defined(PY_NO_SHORT_FLOAT_REPR) || defined(Py_NAN)
2287n/a PyModule_AddObject(m, "nan", PyFloat_FromDouble(m_nan()));
2288n/a#endif
2289n/a
2290n/a finally:
2291n/a return m;
2292n/a}