
Python code coverage for Modules/audioop.c

#    count    content
1n/a
2n/a/* audioopmodule - Module to detect peak values in arrays */
3n/a
4n/a#define PY_SSIZE_T_CLEAN
5n/a
6n/a#include "Python.h"
7n/a
8n/a#if defined(__CHAR_UNSIGNED__)
9n/a#if defined(signed)
10n/a/* This module currently does not work on systems where only unsigned
11n/a characters are available. Take it out of Setup. Sorry. */
12n/a#endif
13n/a#endif
14n/a
15n/astatic const int maxvals[] = {0, 0x7F, 0x7FFF, 0x7FFFFF, 0x7FFFFFFF};
16n/a/* -1 trick is needed on Windows to support -0x80000000 without a warning */
17n/astatic const int minvals[] = {0, -0x80, -0x8000, -0x800000, -0x7FFFFFFF-1};
18n/astatic const unsigned int masks[] = {0, 0xFF, 0xFFFF, 0xFFFFFF, 0xFFFFFFFF};
19n/a
20n/astatic int
21n/afbound(double val, double minval, double maxval)
22n/a{
23n/a if (val > maxval)
24n/a val = maxval;
25n/a else if (val < minval + 1)
26n/a val = minval;
27n/a return (int)val;
28n/a}
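/*
** Editorial sketch, not part of the original module: how fbound() and the
** maxvals/minvals tables above are combined elsewhere in this file to clamp
** a scaled sample of a given byte width.  The AUDIOOP_EXAMPLES guard and the
** example_* name are hypothetical and keep the sketch out of a normal build.
*/
#ifdef AUDIOOP_EXAMPLES
static int
example_clamp_scaled(double val, int width)     /* width is 1, 2, 3 or 4 */
{
    double maxval = (double) maxvals[width];    /* e.g.  0x7FFF for width 2 */
    double minval = (double) minvals[width];    /* e.g. -0x8000 for width 2 */
    /* mul/tomono/tostereo below additionally wrap this in floor() */
    return fbound(val, minval, maxval);
}
#endif /* AUDIOOP_EXAMPLES */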
29n/a
30n/a
31n/a/* Code shamelessly stolen from sox, 12.17.7, g711.c
32n/a** (c) Craig Reese, Joe Campbell and Jeff Poskanzer 1989 */
33n/a
34n/a/* From g711.c:
35n/a *
36n/a * December 30, 1994:
37n/a * Functions linear2alaw, linear2ulaw have been updated to correctly
38n/a * convert unquantized 16 bit values.
39n/a * Tables for direct u- to A-law and A- to u-law conversions have been
40n/a * corrected.
41n/a * Borge Lindberg, Center for PersonKommunikation, Aalborg University.
42n/a * bli@cpk.auc.dk
43n/a *
44n/a */
45n/a#define BIAS 0x84 /* define the add-in bias for 16 bit samples */
46n/a#define CLIP 32635
47n/a#define SIGN_BIT (0x80) /* Sign bit for an A-law byte. */
48n/a#define QUANT_MASK (0xf) /* Quantization field mask. */
49n/a#define SEG_SHIFT (4) /* Left shift for segment number. */
50n/a#define SEG_MASK (0x70) /* Segment field mask. */
51n/a
52n/astatic const int16_t seg_aend[8] = {
53n/a 0x1F, 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, 0xFFF
54n/a};
55n/astatic const int16_t seg_uend[8] = {
56n/a 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF
57n/a};
58n/a
59n/astatic int16_t
60n/asearch(int16_t val, const int16_t *table, int size)
61n/a{
62n/a int i;
63n/a
64n/a for (i = 0; i < size; i++) {
65n/a if (val <= *table++)
66n/a return (i);
67n/a }
68n/a return (size);
69n/a}
70n/a#define st_ulaw2linear16(uc) (_st_ulaw2linear16[uc])
71n/a#define st_alaw2linear16(uc) (_st_alaw2linear16[uc])
72n/a
73n/astatic const int16_t _st_ulaw2linear16[256] = {
74n/a -32124, -31100, -30076, -29052, -28028, -27004, -25980,
75n/a -24956, -23932, -22908, -21884, -20860, -19836, -18812,
76n/a -17788, -16764, -15996, -15484, -14972, -14460, -13948,
77n/a -13436, -12924, -12412, -11900, -11388, -10876, -10364,
78n/a -9852, -9340, -8828, -8316, -7932, -7676, -7420,
79n/a -7164, -6908, -6652, -6396, -6140, -5884, -5628,
80n/a -5372, -5116, -4860, -4604, -4348, -4092, -3900,
81n/a -3772, -3644, -3516, -3388, -3260, -3132, -3004,
82n/a -2876, -2748, -2620, -2492, -2364, -2236, -2108,
83n/a -1980, -1884, -1820, -1756, -1692, -1628, -1564,
84n/a -1500, -1436, -1372, -1308, -1244, -1180, -1116,
85n/a -1052, -988, -924, -876, -844, -812, -780,
86n/a -748, -716, -684, -652, -620, -588, -556,
87n/a -524, -492, -460, -428, -396, -372, -356,
88n/a -340, -324, -308, -292, -276, -260, -244,
89n/a -228, -212, -196, -180, -164, -148, -132,
90n/a -120, -112, -104, -96, -88, -80, -72,
91n/a -64, -56, -48, -40, -32, -24, -16,
92n/a -8, 0, 32124, 31100, 30076, 29052, 28028,
93n/a 27004, 25980, 24956, 23932, 22908, 21884, 20860,
94n/a 19836, 18812, 17788, 16764, 15996, 15484, 14972,
95n/a 14460, 13948, 13436, 12924, 12412, 11900, 11388,
96n/a 10876, 10364, 9852, 9340, 8828, 8316, 7932,
97n/a 7676, 7420, 7164, 6908, 6652, 6396, 6140,
98n/a 5884, 5628, 5372, 5116, 4860, 4604, 4348,
99n/a 4092, 3900, 3772, 3644, 3516, 3388, 3260,
100n/a 3132, 3004, 2876, 2748, 2620, 2492, 2364,
101n/a 2236, 2108, 1980, 1884, 1820, 1756, 1692,
102n/a 1628, 1564, 1500, 1436, 1372, 1308, 1244,
103n/a 1180, 1116, 1052, 988, 924, 876, 844,
104n/a 812, 780, 748, 716, 684, 652, 620,
105n/a 588, 556, 524, 492, 460, 428, 396,
106n/a 372, 356, 340, 324, 308, 292, 276,
107n/a 260, 244, 228, 212, 196, 180, 164,
108n/a 148, 132, 120, 112, 104, 96, 88,
109n/a 80, 72, 64, 56, 48, 40, 32,
110n/a 24, 16, 8, 0
111n/a};
112n/a
113n/a/*
114n/a * linear2ulaw() accepts a 14-bit signed integer and encodes it as u-law data
115n/a * stored in an unsigned char. This function should only be called with
116n/a * the data shifted such that it only contains information in the lower
117n/a * 14-bits.
118n/a *
119n/a * In order to simplify the encoding process, the original linear magnitude
120n/a * is biased by adding 33 which shifts the encoding range from (0 - 8158) to
121n/a * (33 - 8191). The result can be seen in the following encoding table:
122n/a *
123n/a * Biased Linear Input Code Compressed Code
124n/a * ------------------------ ---------------
125n/a * 00000001wxyza 000wxyz
126n/a * 0000001wxyzab 001wxyz
127n/a * 000001wxyzabc 010wxyz
128n/a * 00001wxyzabcd 011wxyz
129n/a * 0001wxyzabcde 100wxyz
130n/a * 001wxyzabcdef 101wxyz
131n/a * 01wxyzabcdefg 110wxyz
132n/a * 1wxyzabcdefgh 111wxyz
133n/a *
134n/a * Each biased linear code has a leading 1 which identifies the segment
135n/a * number. The value of the segment number is equal to 7 minus the number
136n/a * of leading 0's. The quantization interval is directly available as the
137n/a * four bits wxyz. The trailing bits (a - h) are ignored.
138n/a *
139n/a * Ordinarily the complement of the resulting code word is used for
140n/a * transmission, and so the code word is complemented before it is returned.
141n/a *
142n/a * For further information see John C. Bellamy's Digital Telephony, 1982,
143n/a * John Wiley & Sons, pps 98-111 and 472-476.
144n/a */
145n/astatic unsigned char
146n/ast_14linear2ulaw(int16_t pcm_val) /* 2's complement (14-bit range) */
147n/a{
148n/a int16_t mask;
149n/a int16_t seg;
150n/a unsigned char uval;
151n/a
152n/a /* u-law inverts all bits */
153n/a /* Get the sign and the magnitude of the value. */
154n/a if (pcm_val < 0) {
155n/a pcm_val = -pcm_val;
156n/a mask = 0x7F;
157n/a } else {
158n/a mask = 0xFF;
159n/a }
160n/a if ( pcm_val > CLIP ) pcm_val = CLIP; /* clip the magnitude */
161n/a pcm_val += (BIAS >> 2);
162n/a
163n/a /* Convert the scaled magnitude to segment number. */
164n/a seg = search(pcm_val, seg_uend, 8);
165n/a
166n/a /*
167n/a * Combine the sign, segment, quantization bits;
168n/a * and complement the code word.
169n/a */
170n/a if (seg >= 8) /* out of range, return maximum value. */
171n/a return (unsigned char) (0x7F ^ mask);
172n/a else {
173n/a uval = (unsigned char) (seg << 4) | ((pcm_val >> (seg + 1)) & 0xF);
174n/a return (uval ^ mask);
175n/a }
176n/a
177n/a}
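/*
** Editorial sketch, not part of the original module: a u-law round trip.
** lin2ulaw() further down feeds st_14linear2ulaw() a 32-bit-normalized
** sample shifted right by 18, i.e. a 16-bit sample shifted right by 2,
** which is the 14-bit range the comment above requires; st_ulaw2linear16()
** maps the encoded byte back to a full 16-bit value.  AUDIOOP_EXAMPLES and
** example_* are hypothetical names used only for this illustration.
*/
#ifdef AUDIOOP_EXAMPLES
static int16_t
example_ulaw_roundtrip(int16_t pcm16)
{
    unsigned char u = st_14linear2ulaw((int16_t)(pcm16 >> 2));
    return st_ulaw2linear16(u);     /* approximately pcm16, quantized */
}
#endif /* AUDIOOP_EXAMPLES */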
178n/a
179n/astatic const int16_t _st_alaw2linear16[256] = {
180n/a -5504, -5248, -6016, -5760, -4480, -4224, -4992,
181n/a -4736, -7552, -7296, -8064, -7808, -6528, -6272,
182n/a -7040, -6784, -2752, -2624, -3008, -2880, -2240,
183n/a -2112, -2496, -2368, -3776, -3648, -4032, -3904,
184n/a -3264, -3136, -3520, -3392, -22016, -20992, -24064,
185n/a -23040, -17920, -16896, -19968, -18944, -30208, -29184,
186n/a -32256, -31232, -26112, -25088, -28160, -27136, -11008,
187n/a -10496, -12032, -11520, -8960, -8448, -9984, -9472,
188n/a -15104, -14592, -16128, -15616, -13056, -12544, -14080,
189n/a -13568, -344, -328, -376, -360, -280, -264,
190n/a -312, -296, -472, -456, -504, -488, -408,
191n/a -392, -440, -424, -88, -72, -120, -104,
192n/a -24, -8, -56, -40, -216, -200, -248,
193n/a -232, -152, -136, -184, -168, -1376, -1312,
194n/a -1504, -1440, -1120, -1056, -1248, -1184, -1888,
195n/a -1824, -2016, -1952, -1632, -1568, -1760, -1696,
196n/a -688, -656, -752, -720, -560, -528, -624,
197n/a -592, -944, -912, -1008, -976, -816, -784,
198n/a -880, -848, 5504, 5248, 6016, 5760, 4480,
199n/a 4224, 4992, 4736, 7552, 7296, 8064, 7808,
200n/a 6528, 6272, 7040, 6784, 2752, 2624, 3008,
201n/a 2880, 2240, 2112, 2496, 2368, 3776, 3648,
202n/a 4032, 3904, 3264, 3136, 3520, 3392, 22016,
203n/a 20992, 24064, 23040, 17920, 16896, 19968, 18944,
204n/a 30208, 29184, 32256, 31232, 26112, 25088, 28160,
205n/a 27136, 11008, 10496, 12032, 11520, 8960, 8448,
206n/a 9984, 9472, 15104, 14592, 16128, 15616, 13056,
207n/a 12544, 14080, 13568, 344, 328, 376, 360,
208n/a 280, 264, 312, 296, 472, 456, 504,
209n/a 488, 408, 392, 440, 424, 88, 72,
210n/a 120, 104, 24, 8, 56, 40, 216,
211n/a 200, 248, 232, 152, 136, 184, 168,
212n/a 1376, 1312, 1504, 1440, 1120, 1056, 1248,
213n/a 1184, 1888, 1824, 2016, 1952, 1632, 1568,
214n/a 1760, 1696, 688, 656, 752, 720, 560,
215n/a 528, 624, 592, 944, 912, 1008, 976,
216n/a 816, 784, 880, 848
217n/a};
218n/a
219n/a/*
220n/a * linear2alaw() accepts a 13-bit signed integer and encodes it as A-law data
221n/a * stored in an unsigned char. This function should only be called with
222n/a * the data shifted such that it only contains information in the lower
223n/a * 13-bits.
224n/a *
225n/a * Linear Input Code Compressed Code
226n/a * ------------------------ ---------------
227n/a * 0000000wxyza 000wxyz
228n/a * 0000001wxyza 001wxyz
229n/a * 000001wxyzab 010wxyz
230n/a * 00001wxyzabc 011wxyz
231n/a * 0001wxyzabcd 100wxyz
232n/a * 001wxyzabcde 101wxyz
233n/a * 01wxyzabcdef 110wxyz
234n/a * 1wxyzabcdefg 111wxyz
235n/a *
236n/a * For further information see John C. Bellamy's Digital Telephony, 1982,
237n/a * John Wiley & Sons, pps 98-111 and 472-476.
238n/a */
239n/astatic unsigned char
240n/ast_linear2alaw(int16_t pcm_val) /* 2's complement (13-bit range) */
241n/a{
242n/a int16_t mask;
243n/a int16_t seg;
244n/a unsigned char aval;
245n/a
246n/a /* A-law using even bit inversion */
247n/a if (pcm_val >= 0) {
248n/a mask = 0xD5; /* sign (7th) bit = 1 */
249n/a } else {
250n/a mask = 0x55; /* sign bit = 0 */
251n/a pcm_val = -pcm_val - 1;
252n/a }
253n/a
254n/a /* Convert the scaled magnitude to segment number. */
255n/a seg = search(pcm_val, seg_aend, 8);
256n/a
257n/a /* Combine the sign, segment, and quantization bits. */
258n/a
259n/a if (seg >= 8) /* out of range, return maximum value. */
260n/a return (unsigned char) (0x7F ^ mask);
261n/a else {
262n/a aval = (unsigned char) seg << SEG_SHIFT;
263n/a if (seg < 2)
264n/a aval |= (pcm_val >> 1) & QUANT_MASK;
265n/a else
266n/a aval |= (pcm_val >> seg) & QUANT_MASK;
267n/a return (aval ^ mask);
268n/a }
269n/a}
270n/a/* End of code taken from sox */
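/*
** Editorial sketch, not part of the original module: the matching A-law
** round trip.  lin2alaw() further down passes st_linear2alaw() a
** 32-bit-normalized sample shifted right by 19, i.e. a 16-bit sample
** shifted right by 3, giving the 13-bit range documented above.
** AUDIOOP_EXAMPLES and example_* are hypothetical names for this sketch.
*/
#ifdef AUDIOOP_EXAMPLES
static int16_t
example_alaw_roundtrip(int16_t pcm16)
{
    unsigned char a = st_linear2alaw((int16_t)(pcm16 >> 3));
    return st_alaw2linear16(a);     /* approximately pcm16, quantized */
}
#endif /* AUDIOOP_EXAMPLES */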
271n/a
272n/a/* Intel ADPCM step variation table */
273n/astatic const int indexTable[16] = {
274n/a -1, -1, -1, -1, 2, 4, 6, 8,
275n/a -1, -1, -1, -1, 2, 4, 6, 8,
276n/a};
277n/a
278n/astatic const int stepsizeTable[89] = {
279n/a 7, 8, 9, 10, 11, 12, 13, 14, 16, 17,
280n/a 19, 21, 23, 25, 28, 31, 34, 37, 41, 45,
281n/a 50, 55, 60, 66, 73, 80, 88, 97, 107, 118,
282n/a 130, 143, 157, 173, 190, 209, 230, 253, 279, 307,
283n/a 337, 371, 408, 449, 494, 544, 598, 658, 724, 796,
284n/a 876, 963, 1060, 1166, 1282, 1411, 1552, 1707, 1878, 2066,
285n/a 2272, 2499, 2749, 3024, 3327, 3660, 4026, 4428, 4871, 5358,
286n/a 5894, 6484, 7132, 7845, 8630, 9493, 10442, 11487, 12635, 13899,
287n/a 15289, 16818, 18500, 20350, 22385, 24623, 27086, 29794, 32767
288n/a};
289n/a
290n/a#define GETINTX(T, cp, i) (*(T *)((unsigned char *)(cp) + (i)))
291n/a#define SETINTX(T, cp, i, val) do { \
292n/a *(T *)((unsigned char *)(cp) + (i)) = (T)(val); \
293n/a } while (0)
294n/a
295n/a
296n/a#define GETINT8(cp, i) GETINTX(signed char, (cp), (i))
297n/a#define GETINT16(cp, i) GETINTX(int16_t, (cp), (i))
298n/a#define GETINT32(cp, i) GETINTX(int32_t, (cp), (i))
299n/a
300n/a#if WORDS_BIGENDIAN
301n/a#define GETINT24(cp, i) ( \
302n/a ((unsigned char *)(cp) + (i))[2] + \
303n/a (((unsigned char *)(cp) + (i))[1] << 8) + \
304n/a (((signed char *)(cp) + (i))[0] << 16) )
305n/a#else
306n/a#define GETINT24(cp, i) ( \
307n/a ((unsigned char *)(cp) + (i))[0] + \
308n/a (((unsigned char *)(cp) + (i))[1] << 8) + \
309n/a (((signed char *)(cp) + (i))[2] << 16) )
310n/a#endif
311n/a
312n/a
313n/a#define SETINT8(cp, i, val) SETINTX(signed char, (cp), (i), (val))
314n/a#define SETINT16(cp, i, val) SETINTX(int16_t, (cp), (i), (val))
315n/a#define SETINT32(cp, i, val) SETINTX(int32_t, (cp), (i), (val))
316n/a
317n/a#if WORDS_BIGENDIAN
318n/a#define SETINT24(cp, i, val) do { \
319n/a ((unsigned char *)(cp) + (i))[2] = (int)(val); \
320n/a ((unsigned char *)(cp) + (i))[1] = (int)(val) >> 8; \
321n/a ((signed char *)(cp) + (i))[0] = (int)(val) >> 16; \
322n/a } while (0)
323n/a#else
324n/a#define SETINT24(cp, i, val) do { \
325n/a ((unsigned char *)(cp) + (i))[0] = (int)(val); \
326n/a ((unsigned char *)(cp) + (i))[1] = (int)(val) >> 8; \
327n/a ((signed char *)(cp) + (i))[2] = (int)(val) >> 16; \
328n/a } while (0)
329n/a#endif
330n/a
331n/a
332n/a#define GETRAWSAMPLE(size, cp, i) ( \
333n/a (size == 1) ? (int)GETINT8((cp), (i)) : \
334n/a (size == 2) ? (int)GETINT16((cp), (i)) : \
335n/a (size == 3) ? (int)GETINT24((cp), (i)) : \
336n/a (int)GETINT32((cp), (i)))
337n/a
338n/a#define SETRAWSAMPLE(size, cp, i, val) do { \
339n/a if (size == 1) \
340n/a SETINT8((cp), (i), (val)); \
341n/a else if (size == 2) \
342n/a SETINT16((cp), (i), (val)); \
343n/a else if (size == 3) \
344n/a SETINT24((cp), (i), (val)); \
345n/a else \
346n/a SETINT32((cp), (i), (val)); \
347n/a } while(0)
348n/a
349n/a
350n/a#define GETSAMPLE32(size, cp, i) ( \
351n/a (size == 1) ? (int)GETINT8((cp), (i)) << 24 : \
352n/a (size == 2) ? (int)GETINT16((cp), (i)) << 16 : \
353n/a (size == 3) ? (int)GETINT24((cp), (i)) << 8 : \
354n/a (int)GETINT32((cp), (i)))
355n/a
356n/a#define SETSAMPLE32(size, cp, i, val) do { \
357n/a if (size == 1) \
358n/a SETINT8((cp), (i), (val) >> 24); \
359n/a else if (size == 2) \
360n/a SETINT16((cp), (i), (val) >> 16); \
361n/a else if (size == 3) \
362n/a SETINT24((cp), (i), (val) >> 8); \
363n/a else \
364n/a SETINT32((cp), (i), (val)); \
365n/a } while(0)
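/*
** Editorial sketch, not part of the original module: the GETSAMPLE32 and
** SETSAMPLE32 macros above give every sample width a common left-justified
** 32-bit form, which is how lin2lin() further down converts between widths.
** The AUDIOOP_EXAMPLES guard and example_* name are hypothetical.
*/
#ifdef AUDIOOP_EXAMPLES
static void
example_convert_one_sample(int width, void *src, int newwidth, void *dst)
{
    int val = GETSAMPLE32(width, src, 0);   /* widen to 32 bits */
    SETSAMPLE32(newwidth, dst, 0, val);     /* narrow back to newwidth */
}
#endif /* AUDIOOP_EXAMPLES */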
366n/a
367n/a
368n/astatic PyObject *AudioopError;
369n/a
370n/astatic int
371n/aaudioop_check_size(int size)
372n/a{
373n/a if (size < 1 || size > 4) {
374n/a PyErr_SetString(AudioopError, "Size should be 1, 2, 3 or 4");
375n/a return 0;
376n/a }
377n/a else
378n/a return 1;
379n/a}
380n/a
381n/astatic int
382n/aaudioop_check_parameters(Py_ssize_t len, int size)
383n/a{
384n/a if (!audioop_check_size(size))
385n/a return 0;
386n/a if (len % size != 0) {
387n/a PyErr_SetString(AudioopError, "not a whole number of frames");
388n/a return 0;
389n/a }
390n/a return 1;
391n/a}
392n/a
393n/a/*[clinic input]
394n/amodule audioop
395n/a[clinic start generated code]*/
396n/a/*[clinic end generated code: output=da39a3ee5e6b4b0d input=8fa8f6611be3591a]*/
397n/a
398n/a/*[clinic input]
399n/aaudioop.getsample
400n/a
401n/a fragment: Py_buffer
402n/a width: int
403n/a index: Py_ssize_t
404n/a /
405n/a
406n/aReturn the value of sample index from the fragment.
407n/a[clinic start generated code]*/
408n/a
409n/astatic PyObject *
410n/aaudioop_getsample_impl(PyObject *module, Py_buffer *fragment, int width,
411n/a Py_ssize_t index)
412n/a/*[clinic end generated code: output=8fe1b1775134f39a input=88edbe2871393549]*/
413n/a{
414n/a int val;
415n/a
416n/a if (!audioop_check_parameters(fragment->len, width))
417n/a return NULL;
418n/a if (index < 0 || index >= fragment->len/width) {
419n/a PyErr_SetString(AudioopError, "Index out of range");
420n/a return NULL;
421n/a }
422n/a val = GETRAWSAMPLE(width, fragment->buf, index*width);
423n/a return PyLong_FromLong(val);
424n/a}
425n/a
426n/a/*[clinic input]
427n/aaudioop.max
428n/a
429n/a fragment: Py_buffer
430n/a width: int
431n/a /
432n/a
433n/aReturn the maximum of the absolute value of all samples in a fragment.
434n/a[clinic start generated code]*/
435n/a
436n/astatic PyObject *
437n/aaudioop_max_impl(PyObject *module, Py_buffer *fragment, int width)
438n/a/*[clinic end generated code: output=e6c5952714f1c3f0 input=32bea5ea0ac8c223]*/
439n/a{
440n/a Py_ssize_t i;
441n/a unsigned int absval, max = 0;
442n/a
443n/a if (!audioop_check_parameters(fragment->len, width))
444n/a return NULL;
445n/a for (i = 0; i < fragment->len; i += width) {
446n/a int val = GETRAWSAMPLE(width, fragment->buf, i);
447n/a /* Cast to unsigned before negating. Unsigned overflow is well-
448n/a defined, but signed overflow is not. */
449n/a if (val < 0) absval = (unsigned int)-(int64_t)val;
450n/a else absval = val;
451n/a if (absval > max) max = absval;
452n/a }
453n/a return PyLong_FromUnsignedLong(max);
454n/a}
455n/a
456n/a/*[clinic input]
457n/aaudioop.minmax
458n/a
459n/a fragment: Py_buffer
460n/a width: int
461n/a /
462n/a
463n/aReturn the minimum and maximum values of all samples in the sound fragment.
464n/a[clinic start generated code]*/
465n/a
466n/astatic PyObject *
467n/aaudioop_minmax_impl(PyObject *module, Py_buffer *fragment, int width)
468n/a/*[clinic end generated code: output=473fda66b15c836e input=89848e9b927a0696]*/
469n/a{
470n/a Py_ssize_t i;
471n/a /* -1 trick below is needed on Windows to support -0x80000000 without
472n/a a warning */
473n/a int min = 0x7fffffff, max = -0x7FFFFFFF-1;
474n/a
475n/a if (!audioop_check_parameters(fragment->len, width))
476n/a return NULL;
477n/a for (i = 0; i < fragment->len; i += width) {
478n/a int val = GETRAWSAMPLE(width, fragment->buf, i);
479n/a if (val > max) max = val;
480n/a if (val < min) min = val;
481n/a }
482n/a return Py_BuildValue("(ii)", min, max);
483n/a}
484n/a
485n/a/*[clinic input]
486n/aaudioop.avg
487n/a
488n/a fragment: Py_buffer
489n/a width: int
490n/a /
491n/a
492n/aReturn the average over all samples in the fragment.
493n/a[clinic start generated code]*/
494n/a
495n/astatic PyObject *
496n/aaudioop_avg_impl(PyObject *module, Py_buffer *fragment, int width)
497n/a/*[clinic end generated code: output=4410a4c12c3586e6 input=1114493c7611334d]*/
498n/a{
499n/a Py_ssize_t i;
500n/a int avg;
501n/a double sum = 0.0;
502n/a
503n/a if (!audioop_check_parameters(fragment->len, width))
504n/a return NULL;
505n/a for (i = 0; i < fragment->len; i += width)
506n/a sum += GETRAWSAMPLE(width, fragment->buf, i);
507n/a if (fragment->len == 0)
508n/a avg = 0;
509n/a else
510n/a avg = (int)floor(sum / (double)(fragment->len/width));
511n/a return PyLong_FromLong(avg);
512n/a}
513n/a
514n/a/*[clinic input]
515n/aaudioop.rms
516n/a
517n/a fragment: Py_buffer
518n/a width: int
519n/a /
520n/a
521n/aReturn the root-mean-square of the fragment, i.e. sqrt(sum(S_i^2)/n).
522n/a[clinic start generated code]*/
523n/a
524n/astatic PyObject *
525n/aaudioop_rms_impl(PyObject *module, Py_buffer *fragment, int width)
526n/a/*[clinic end generated code: output=1e7871c826445698 input=4cc57c6c94219d78]*/
527n/a{
528n/a Py_ssize_t i;
529n/a unsigned int res;
530n/a double sum_squares = 0.0;
531n/a
532n/a if (!audioop_check_parameters(fragment->len, width))
533n/a return NULL;
534n/a for (i = 0; i < fragment->len; i += width) {
535n/a double val = GETRAWSAMPLE(width, fragment->buf, i);
536n/a sum_squares += val*val;
537n/a }
538n/a if (fragment->len == 0)
539n/a res = 0;
540n/a else
541n/a res = (unsigned int)sqrt(sum_squares / (double)(fragment->len/width));
542n/a return PyLong_FromUnsignedLong(res);
543n/a}
544n/a
545n/astatic double _sum2(const int16_t *a, const int16_t *b, Py_ssize_t len)
546n/a{
547n/a Py_ssize_t i;
548n/a double sum = 0.0;
549n/a
550n/a for( i=0; i<len; i++) {
551n/a sum = sum + (double)a[i]*(double)b[i];
552n/a }
553n/a return sum;
554n/a}
555n/a
556n/a/*
557n/a** Findfit tries to locate a sample within another sample. Its main use
558n/a** is in echo-cancellation (to find the feedback of the output signal in
559n/a** the input signal).
560n/a** The method used is as follows:
561n/a**
562n/a** let R be the reference signal (length n) and A the input signal (length N)
563n/a** with N > n, and let all sums be over i from 0 to n-1.
564n/a**
565n/a** Now, for each j in {0..N-n} we compute a factor fj so that -fj*R matches A
566n/a** as well as possible, i.e. sum( (A[j+i]+fj*R[i])^2 ) is minimal. This
567n/a** equation gives fj = sum( A[j+i]R[i] ) / sum(R[i]^2).
568n/a**
569n/a** Next, we compute the relative distance between the original signal and
570n/a** the modified signal and minimize that over j:
571n/a** vj = sum( (A[j+i]-fj*R[i])^2 ) / sum( A[j+i]^2 ) =>
572n/a** vj = ( sum(A[j+i]^2)*sum(R[i]^2) - sum(A[j+i]R[i])^2 ) / sum( A[j+i]^2 )
573n/a**
574n/a** In the code variables correspond as follows:
575n/a** cp1 A
576n/a** cp2 R
577n/a** len1 N
578n/a** len2 n
579n/a** aj_m1 A[j-1]
580n/a** aj_lm1 A[j+n-1]
581n/a** sum_ri_2 sum(R[i]^2)
582n/a** sum_aij_2 sum(A[i+j]^2)
583n/a** sum_aij_ri sum(A[i+j]R[i])
584n/a**
585n/a** sum_ri is calculated once, sum_aij_2 is updated each step and sum_aij_ri
586n/a** is completely recalculated each step.
587n/a*/
588n/a/*[clinic input]
589n/aaudioop.findfit
590n/a
591n/a fragment: Py_buffer
592n/a reference: Py_buffer
593n/a /
594n/a
595n/aTry to match reference as well as possible to a portion of fragment.
596n/a[clinic start generated code]*/
597n/a
598n/astatic PyObject *
599n/aaudioop_findfit_impl(PyObject *module, Py_buffer *fragment,
600n/a Py_buffer *reference)
601n/a/*[clinic end generated code: output=5752306d83cbbada input=62c305605e183c9a]*/
602n/a{
603n/a const int16_t *cp1, *cp2;
604n/a Py_ssize_t len1, len2;
605n/a Py_ssize_t j, best_j;
606n/a double aj_m1, aj_lm1;
607n/a double sum_ri_2, sum_aij_2, sum_aij_ri, result, best_result, factor;
608n/a
609n/a if (fragment->len & 1 || reference->len & 1) {
610n/a PyErr_SetString(AudioopError, "Strings should be even-sized");
611n/a return NULL;
612n/a }
613n/a cp1 = (const int16_t *)fragment->buf;
614n/a len1 = fragment->len >> 1;
615n/a cp2 = (const int16_t *)reference->buf;
616n/a len2 = reference->len >> 1;
617n/a
618n/a if (len1 < len2) {
619n/a PyErr_SetString(AudioopError, "First sample should be longer");
620n/a return NULL;
621n/a }
622n/a sum_ri_2 = _sum2(cp2, cp2, len2);
623n/a sum_aij_2 = _sum2(cp1, cp1, len2);
624n/a sum_aij_ri = _sum2(cp1, cp2, len2);
625n/a
626n/a result = (sum_ri_2*sum_aij_2 - sum_aij_ri*sum_aij_ri) / sum_aij_2;
627n/a
628n/a best_result = result;
629n/a best_j = 0;
630n/a
631n/a for ( j=1; j<=len1-len2; j++) {
632n/a aj_m1 = (double)cp1[j-1];
633n/a aj_lm1 = (double)cp1[j+len2-1];
634n/a
635n/a sum_aij_2 = sum_aij_2 + aj_lm1*aj_lm1 - aj_m1*aj_m1;
636n/a sum_aij_ri = _sum2(cp1+j, cp2, len2);
637n/a
638n/a result = (sum_ri_2*sum_aij_2 - sum_aij_ri*sum_aij_ri)
639n/a / sum_aij_2;
640n/a
641n/a if ( result < best_result ) {
642n/a best_result = result;
643n/a best_j = j;
644n/a }
645n/a
646n/a }
647n/a
648n/a factor = _sum2(cp1+best_j, cp2, len2) / sum_ri_2;
649n/a
650n/a return Py_BuildValue("(nf)", best_j, factor);
651n/a}
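/*
** Editorial sketch, not part of the original module: a direct (quadratic
** time) restatement of the error term that findfit() minimizes, evaluated
** at a single offset j, in the notation of the comment above:
**     fj = sum(A[j+i]*R[i]) / sum(R[i]^2)
**     vj = ( sum(A[j+i]^2)*sum(R[i]^2) - sum(A[j+i]*R[i])^2 ) / sum(A[j+i]^2)
** findfit() computes the same vj but updates sum(A[j+i]^2) incrementally as
** j advances.  AUDIOOP_EXAMPLES and example_* are hypothetical names.
*/
#ifdef AUDIOOP_EXAMPLES
static double
example_fit_error_at(const int16_t *A, const int16_t *R,
                     Py_ssize_t n, Py_ssize_t j)
{
    double sum_ri_2 = _sum2(R, R, n);
    double sum_aij_2 = _sum2(A + j, A + j, n);
    double sum_aij_ri = _sum2(A + j, R, n);
    return (sum_ri_2 * sum_aij_2 - sum_aij_ri * sum_aij_ri) / sum_aij_2;
}
#endif /* AUDIOOP_EXAMPLES */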
652n/a
653n/a/*
654n/a** findfactor finds a factor f so that the energy in A-fB is minimal.
655n/a** See the comment for findfit for details.
656n/a*/
657n/a/*[clinic input]
658n/aaudioop.findfactor
659n/a
660n/a fragment: Py_buffer
661n/a reference: Py_buffer
662n/a /
663n/a
664n/aReturn a factor F such that rms(add(fragment, mul(reference, -F))) is minimal.
665n/a[clinic start generated code]*/
666n/a
667n/astatic PyObject *
668n/aaudioop_findfactor_impl(PyObject *module, Py_buffer *fragment,
669n/a Py_buffer *reference)
670n/a/*[clinic end generated code: output=14ea95652c1afcf8 input=816680301d012b21]*/
671n/a{
672n/a const int16_t *cp1, *cp2;
673n/a Py_ssize_t len;
674n/a double sum_ri_2, sum_aij_ri, result;
675n/a
676n/a if (fragment->len & 1 || reference->len & 1) {
677n/a PyErr_SetString(AudioopError, "Strings should be even-sized");
678n/a return NULL;
679n/a }
680n/a if (fragment->len != reference->len) {
681n/a PyErr_SetString(AudioopError, "Samples should be same size");
682n/a return NULL;
683n/a }
684n/a cp1 = (const int16_t *)fragment->buf;
685n/a cp2 = (const int16_t *)reference->buf;
686n/a len = fragment->len >> 1;
687n/a sum_ri_2 = _sum2(cp2, cp2, len);
688n/a sum_aij_ri = _sum2(cp1, cp2, len);
689n/a
690n/a result = sum_aij_ri / sum_ri_2;
691n/a
692n/a return PyFloat_FromDouble(result);
693n/a}
694n/a
695n/a/*
696n/a** findmax returns the index of the n-sized segment of the input sample
697n/a** that contains the most energy.
698n/a*/
699n/a/*[clinic input]
700n/aaudioop.findmax
701n/a
702n/a fragment: Py_buffer
703n/a length: Py_ssize_t
704n/a /
705n/a
706n/aSearch fragment for a slice of specified number of samples with maximum energy.
707n/a[clinic start generated code]*/
708n/a
709n/astatic PyObject *
710n/aaudioop_findmax_impl(PyObject *module, Py_buffer *fragment,
711n/a Py_ssize_t length)
712n/a/*[clinic end generated code: output=f008128233523040 input=2f304801ed42383c]*/
713n/a{
714n/a const int16_t *cp1;
715n/a Py_ssize_t len1;
716n/a Py_ssize_t j, best_j;
717n/a double aj_m1, aj_lm1;
718n/a double result, best_result;
719n/a
720n/a if (fragment->len & 1) {
721n/a PyErr_SetString(AudioopError, "Strings should be even-sized");
722n/a return NULL;
723n/a }
724n/a cp1 = (const int16_t *)fragment->buf;
725n/a len1 = fragment->len >> 1;
726n/a
727n/a if (length < 0 || len1 < length) {
728n/a PyErr_SetString(AudioopError, "Input sample should be longer");
729n/a return NULL;
730n/a }
731n/a
732n/a result = _sum2(cp1, cp1, length);
733n/a
734n/a best_result = result;
735n/a best_j = 0;
736n/a
737n/a for ( j=1; j<=len1-length; j++) {
738n/a aj_m1 = (double)cp1[j-1];
739n/a aj_lm1 = (double)cp1[j+length-1];
740n/a
741n/a result = result + aj_lm1*aj_lm1 - aj_m1*aj_m1;
742n/a
743n/a if ( result > best_result ) {
744n/a best_result = result;
745n/a best_j = j;
746n/a }
747n/a
748n/a }
749n/a
750n/a return PyLong_FromSsize_t(best_j);
751n/a}
752n/a
753n/a/*[clinic input]
754n/aaudioop.avgpp
755n/a
756n/a fragment: Py_buffer
757n/a width: int
758n/a /
759n/a
760n/aReturn the average peak-peak value over all samples in the fragment.
761n/a[clinic start generated code]*/
762n/a
763n/astatic PyObject *
764n/aaudioop_avgpp_impl(PyObject *module, Py_buffer *fragment, int width)
765n/a/*[clinic end generated code: output=269596b0d5ae0b2b input=0b3cceeae420a7d9]*/
766n/a{
767n/a Py_ssize_t i;
768n/a int prevval, prevextremevalid = 0, prevextreme = 0;
769n/a double sum = 0.0;
770n/a unsigned int avg;
771n/a int diff, prevdiff, nextreme = 0;
772n/a
773n/a if (!audioop_check_parameters(fragment->len, width))
774n/a return NULL;
775n/a if (fragment->len <= width)
776n/a return PyLong_FromLong(0);
777n/a prevval = GETRAWSAMPLE(width, fragment->buf, 0);
778n/a prevdiff = 17; /* Anything != 0, 1 */
779n/a for (i = width; i < fragment->len; i += width) {
780n/a int val = GETRAWSAMPLE(width, fragment->buf, i);
781n/a if (val != prevval) {
782n/a diff = val < prevval;
783n/a if (prevdiff == !diff) {
784n/a /* Derivative changed sign. Compute difference to last
785n/a ** extreme value and remember.
786n/a */
787n/a if (prevextremevalid) {
788n/a if (prevval < prevextreme)
789n/a sum += (double)((unsigned int)prevextreme -
790n/a (unsigned int)prevval);
791n/a else
792n/a sum += (double)((unsigned int)prevval -
793n/a (unsigned int)prevextreme);
794n/a nextreme++;
795n/a }
796n/a prevextremevalid = 1;
797n/a prevextreme = prevval;
798n/a }
799n/a prevval = val;
800n/a prevdiff = diff;
801n/a }
802n/a }
803n/a if ( nextreme == 0 )
804n/a avg = 0;
805n/a else
806n/a avg = (unsigned int)(sum / (double)nextreme);
807n/a return PyLong_FromUnsignedLong(avg);
808n/a}
809n/a
810n/a/*[clinic input]
811n/aaudioop.maxpp
812n/a
813n/a fragment: Py_buffer
814n/a width: int
815n/a /
816n/a
817n/aReturn the maximum peak-peak value in the sound fragment.
818n/a[clinic start generated code]*/
819n/a
820n/astatic PyObject *
821n/aaudioop_maxpp_impl(PyObject *module, Py_buffer *fragment, int width)
822n/a/*[clinic end generated code: output=5b918ed5dbbdb978 input=671a13e1518f80a1]*/
823n/a{
824n/a Py_ssize_t i;
825n/a int prevval, prevextremevalid = 0, prevextreme = 0;
826n/a unsigned int max = 0, extremediff;
827n/a int diff, prevdiff;
828n/a
829n/a if (!audioop_check_parameters(fragment->len, width))
830n/a return NULL;
831n/a if (fragment->len <= width)
832n/a return PyLong_FromLong(0);
833n/a prevval = GETRAWSAMPLE(width, fragment->buf, 0);
834n/a prevdiff = 17; /* Anything != 0, 1 */
835n/a for (i = width; i < fragment->len; i += width) {
836n/a int val = GETRAWSAMPLE(width, fragment->buf, i);
837n/a if (val != prevval) {
838n/a diff = val < prevval;
839n/a if (prevdiff == !diff) {
840n/a /* Derivative changed sign. Compute difference to
841n/a ** last extreme value and remember.
842n/a */
843n/a if (prevextremevalid) {
844n/a if (prevval < prevextreme)
845n/a extremediff = (unsigned int)prevextreme -
846n/a (unsigned int)prevval;
847n/a else
848n/a extremediff = (unsigned int)prevval -
849n/a (unsigned int)prevextreme;
850n/a if ( extremediff > max )
851n/a max = extremediff;
852n/a }
853n/a prevextremevalid = 1;
854n/a prevextreme = prevval;
855n/a }
856n/a prevval = val;
857n/a prevdiff = diff;
858n/a }
859n/a }
860n/a return PyLong_FromUnsignedLong(max);
861n/a}
862n/a
863n/a/*[clinic input]
864n/aaudioop.cross
865n/a
866n/a fragment: Py_buffer
867n/a width: int
868n/a /
869n/a
870n/aReturn the number of zero crossings in the fragment passed as an argument.
871n/a[clinic start generated code]*/
872n/a
873n/astatic PyObject *
874n/aaudioop_cross_impl(PyObject *module, Py_buffer *fragment, int width)
875n/a/*[clinic end generated code: output=5938dcdd74a1f431 input=b1b3f15b83f6b41a]*/
876n/a{
877n/a Py_ssize_t i;
878n/a int prevval;
879n/a Py_ssize_t ncross;
880n/a
881n/a if (!audioop_check_parameters(fragment->len, width))
882n/a return NULL;
883n/a ncross = -1;
884n/a prevval = 17; /* Anything <> 0,1 */
885n/a for (i = 0; i < fragment->len; i += width) {
886n/a int val = GETRAWSAMPLE(width, fragment->buf, i) < 0;
887n/a if (val != prevval) ncross++;
888n/a prevval = val;
889n/a }
890n/a return PyLong_FromSsize_t(ncross);
891n/a}
892n/a
893n/a/*[clinic input]
894n/aaudioop.mul
895n/a
896n/a fragment: Py_buffer
897n/a width: int
898n/a factor: double
899n/a /
900n/a
901n/aReturn a fragment that has all samples in the original fragment multiplied by the floating-point value factor.
902n/a[clinic start generated code]*/
903n/a
904n/astatic PyObject *
905n/aaudioop_mul_impl(PyObject *module, Py_buffer *fragment, int width,
906n/a double factor)
907n/a/*[clinic end generated code: output=6cd48fe796da0ea4 input=c726667baa157d3c]*/
908n/a{
909n/a signed char *ncp;
910n/a Py_ssize_t i;
911n/a double maxval, minval;
912n/a PyObject *rv;
913n/a
914n/a if (!audioop_check_parameters(fragment->len, width))
915n/a return NULL;
916n/a
917n/a maxval = (double) maxvals[width];
918n/a minval = (double) minvals[width];
919n/a
920n/a rv = PyBytes_FromStringAndSize(NULL, fragment->len);
921n/a if (rv == NULL)
922n/a return NULL;
923n/a ncp = (signed char *)PyBytes_AsString(rv);
924n/a
925n/a for (i = 0; i < fragment->len; i += width) {
926n/a double val = GETRAWSAMPLE(width, fragment->buf, i);
927n/a val *= factor;
928n/a val = floor(fbound(val, minval, maxval));
929n/a SETRAWSAMPLE(width, ncp, i, (int)val);
930n/a }
931n/a return rv;
932n/a}
933n/a
934n/a/*[clinic input]
935n/aaudioop.tomono
936n/a
937n/a fragment: Py_buffer
938n/a width: int
939n/a lfactor: double
940n/a rfactor: double
941n/a /
942n/a
943n/aConvert a stereo fragment to a mono fragment.
944n/a[clinic start generated code]*/
945n/a
946n/astatic PyObject *
947n/aaudioop_tomono_impl(PyObject *module, Py_buffer *fragment, int width,
948n/a double lfactor, double rfactor)
949n/a/*[clinic end generated code: output=235c8277216d4e4e input=c4ec949b3f4dddfa]*/
950n/a{
951n/a signed char *cp, *ncp;
952n/a Py_ssize_t len, i;
953n/a double maxval, minval;
954n/a PyObject *rv;
955n/a
956n/a cp = fragment->buf;
957n/a len = fragment->len;
958n/a if (!audioop_check_parameters(len, width))
959n/a return NULL;
960n/a if (((len / width) & 1) != 0) {
961n/a PyErr_SetString(AudioopError, "not a whole number of frames");
962n/a return NULL;
963n/a }
964n/a
965n/a maxval = (double) maxvals[width];
966n/a minval = (double) minvals[width];
967n/a
968n/a rv = PyBytes_FromStringAndSize(NULL, len/2);
969n/a if (rv == NULL)
970n/a return NULL;
971n/a ncp = (signed char *)PyBytes_AsString(rv);
972n/a
973n/a for (i = 0; i < len; i += width*2) {
974n/a double val1 = GETRAWSAMPLE(width, cp, i);
975n/a double val2 = GETRAWSAMPLE(width, cp, i + width);
976n/a double val = val1*lfactor + val2*rfactor;
977n/a val = floor(fbound(val, minval, maxval));
978n/a SETRAWSAMPLE(width, ncp, i/2, val);
979n/a }
980n/a return rv;
981n/a}
982n/a
983n/a/*[clinic input]
984n/aaudioop.tostereo
985n/a
986n/a fragment: Py_buffer
987n/a width: int
988n/a lfactor: double
989n/a rfactor: double
990n/a /
991n/a
992n/aGenerate a stereo fragment from a mono fragment.
993n/a[clinic start generated code]*/
994n/a
995n/astatic PyObject *
996n/aaudioop_tostereo_impl(PyObject *module, Py_buffer *fragment, int width,
997n/a double lfactor, double rfactor)
998n/a/*[clinic end generated code: output=046f13defa5f1595 input=27b6395ebfdff37a]*/
999n/a{
1000n/a signed char *ncp;
1001n/a Py_ssize_t i;
1002n/a double maxval, minval;
1003n/a PyObject *rv;
1004n/a
1005n/a if (!audioop_check_parameters(fragment->len, width))
1006n/a return NULL;
1007n/a
1008n/a maxval = (double) maxvals[width];
1009n/a minval = (double) minvals[width];
1010n/a
1011n/a if (fragment->len > PY_SSIZE_T_MAX/2) {
1012n/a PyErr_SetString(PyExc_MemoryError,
1013n/a "not enough memory for output buffer");
1014n/a return NULL;
1015n/a }
1016n/a
1017n/a rv = PyBytes_FromStringAndSize(NULL, fragment->len*2);
1018n/a if (rv == NULL)
1019n/a return NULL;
1020n/a ncp = (signed char *)PyBytes_AsString(rv);
1021n/a
1022n/a for (i = 0; i < fragment->len; i += width) {
1023n/a double val = GETRAWSAMPLE(width, fragment->buf, i);
1024n/a int val1 = (int)floor(fbound(val*lfactor, minval, maxval));
1025n/a int val2 = (int)floor(fbound(val*rfactor, minval, maxval));
1026n/a SETRAWSAMPLE(width, ncp, i*2, val1);
1027n/a SETRAWSAMPLE(width, ncp, i*2 + width, val2);
1028n/a }
1029n/a return rv;
1030n/a}
1031n/a
1032n/a/*[clinic input]
1033n/aaudioop.add
1034n/a
1035n/a fragment1: Py_buffer
1036n/a fragment2: Py_buffer
1037n/a width: int
1038n/a /
1039n/a
1040n/aReturn a fragment which is the addition of the two samples passed as parameters.
1041n/a[clinic start generated code]*/
1042n/a
1043n/astatic PyObject *
1044n/aaudioop_add_impl(PyObject *module, Py_buffer *fragment1,
1045n/a Py_buffer *fragment2, int width)
1046n/a/*[clinic end generated code: output=60140af4d1aab6f2 input=4a8d4bae4c1605c7]*/
1047n/a{
1048n/a signed char *ncp;
1049n/a Py_ssize_t i;
1050n/a int minval, maxval, newval;
1051n/a PyObject *rv;
1052n/a
1053n/a if (!audioop_check_parameters(fragment1->len, width))
1054n/a return NULL;
1055n/a if (fragment1->len != fragment2->len) {
1056n/a PyErr_SetString(AudioopError, "Lengths should be the same");
1057n/a return NULL;
1058n/a }
1059n/a
1060n/a maxval = maxvals[width];
1061n/a minval = minvals[width];
1062n/a
1063n/a rv = PyBytes_FromStringAndSize(NULL, fragment1->len);
1064n/a if (rv == NULL)
1065n/a return NULL;
1066n/a ncp = (signed char *)PyBytes_AsString(rv);
1067n/a
1068n/a for (i = 0; i < fragment1->len; i += width) {
1069n/a int val1 = GETRAWSAMPLE(width, fragment1->buf, i);
1070n/a int val2 = GETRAWSAMPLE(width, fragment2->buf, i);
1071n/a
1072n/a if (width < 4) {
1073n/a newval = val1 + val2;
1074n/a /* truncate in case of overflow */
1075n/a if (newval > maxval)
1076n/a newval = maxval;
1077n/a else if (newval < minval)
1078n/a newval = minval;
1079n/a }
1080n/a else {
1081n/a double fval = (double)val1 + (double)val2;
1082n/a /* truncate in case of overflow */
1083n/a newval = (int)floor(fbound(fval, minval, maxval));
1084n/a }
1085n/a
1086n/a SETRAWSAMPLE(width, ncp, i, newval);
1087n/a }
1088n/a return rv;
1089n/a}
1090n/a
1091n/a/*[clinic input]
1092n/aaudioop.bias
1093n/a
1094n/a fragment: Py_buffer
1095n/a width: int
1096n/a bias: int
1097n/a /
1098n/a
1099n/aReturn a fragment that is the original fragment with a bias added to each sample.
1100n/a[clinic start generated code]*/
1101n/a
1102n/astatic PyObject *
1103n/aaudioop_bias_impl(PyObject *module, Py_buffer *fragment, int width, int bias)
1104n/a/*[clinic end generated code: output=6e0aa8f68f045093 input=2b5cce5c3bb4838c]*/
1105n/a{
1106n/a signed char *ncp;
1107n/a Py_ssize_t i;
1108n/a unsigned int val = 0, mask;
1109n/a PyObject *rv;
1110n/a
1111n/a if (!audioop_check_parameters(fragment->len, width))
1112n/a return NULL;
1113n/a
1114n/a rv = PyBytes_FromStringAndSize(NULL, fragment->len);
1115n/a if (rv == NULL)
1116n/a return NULL;
1117n/a ncp = (signed char *)PyBytes_AsString(rv);
1118n/a
1119n/a mask = masks[width];
1120n/a
1121n/a for (i = 0; i < fragment->len; i += width) {
1122n/a if (width == 1)
1123n/a val = GETINTX(unsigned char, fragment->buf, i);
1124n/a else if (width == 2)
1125n/a val = GETINTX(uint16_t, fragment->buf, i);
1126n/a else if (width == 3)
1127n/a val = ((unsigned int)GETINT24(fragment->buf, i)) & 0xffffffu;
1128n/a else {
1129n/a assert(width == 4);
1130n/a val = GETINTX(uint32_t, fragment->buf, i);
1131n/a }
1132n/a
1133n/a val += (unsigned int)bias;
1134n/a /* wrap around in case of overflow */
1135n/a val &= mask;
1136n/a
1137n/a if (width == 1)
1138n/a SETINTX(unsigned char, ncp, i, val);
1139n/a else if (width == 2)
1140n/a SETINTX(uint16_t, ncp, i, val);
1141n/a else if (width == 3)
1142n/a SETINT24(ncp, i, (int)val);
1143n/a else {
1144n/a assert(width == 4);
1145n/a SETINTX(uint32_t, ncp, i, val);
1146n/a }
1147n/a }
1148n/a return rv;
1149n/a}
1150n/a
1151n/a/*[clinic input]
1152n/aaudioop.reverse
1153n/a
1154n/a fragment: Py_buffer
1155n/a width: int
1156n/a /
1157n/a
1158n/aReverse the samples in a fragment and return the modified fragment.
1159n/a[clinic start generated code]*/
1160n/a
1161n/astatic PyObject *
1162n/aaudioop_reverse_impl(PyObject *module, Py_buffer *fragment, int width)
1163n/a/*[clinic end generated code: output=b44135698418da14 input=668f890cf9f9d225]*/
1164n/a{
1165n/a unsigned char *ncp;
1166n/a Py_ssize_t i;
1167n/a PyObject *rv;
1168n/a
1169n/a if (!audioop_check_parameters(fragment->len, width))
1170n/a return NULL;
1171n/a
1172n/a rv = PyBytes_FromStringAndSize(NULL, fragment->len);
1173n/a if (rv == NULL)
1174n/a return NULL;
1175n/a ncp = (unsigned char *)PyBytes_AsString(rv);
1176n/a
1177n/a for (i = 0; i < fragment->len; i += width) {
1178n/a int val = GETRAWSAMPLE(width, fragment->buf, i);
1179n/a SETRAWSAMPLE(width, ncp, fragment->len - i - width, val);
1180n/a }
1181n/a return rv;
1182n/a}
1183n/a
1184n/a/*[clinic input]
1185n/aaudioop.byteswap
1186n/a
1187n/a fragment: Py_buffer
1188n/a width: int
1189n/a /
1190n/a
1191n/aConvert big-endian samples to little-endian and vice versa.
1192n/a[clinic start generated code]*/
1193n/a
1194n/astatic PyObject *
1195n/aaudioop_byteswap_impl(PyObject *module, Py_buffer *fragment, int width)
1196n/a/*[clinic end generated code: output=50838a9e4b87cd4d input=fae7611ceffa5c82]*/
1197n/a{
1198n/a unsigned char *ncp;
1199n/a Py_ssize_t i;
1200n/a PyObject *rv;
1201n/a
1202n/a if (!audioop_check_parameters(fragment->len, width))
1203n/a return NULL;
1204n/a
1205n/a rv = PyBytes_FromStringAndSize(NULL, fragment->len);
1206n/a if (rv == NULL)
1207n/a return NULL;
1208n/a ncp = (unsigned char *)PyBytes_AsString(rv);
1209n/a
1210n/a for (i = 0; i < fragment->len; i += width) {
1211n/a int j;
1212n/a for (j = 0; j < width; j++)
1213n/a ncp[i + width - 1 - j] = ((unsigned char *)fragment->buf)[i + j];
1214n/a }
1215n/a return rv;
1216n/a}
1217n/a
1218n/a/*[clinic input]
1219n/aaudioop.lin2lin
1220n/a
1221n/a fragment: Py_buffer
1222n/a width: int
1223n/a newwidth: int
1224n/a /
1225n/a
1226n/aConvert samples between 1-, 2-, 3- and 4-byte formats.
1227n/a[clinic start generated code]*/
1228n/a
1229n/astatic PyObject *
1230n/aaudioop_lin2lin_impl(PyObject *module, Py_buffer *fragment, int width,
1231n/a int newwidth)
1232n/a/*[clinic end generated code: output=17b14109248f1d99 input=5ce08c8aa2f24d96]*/
1233n/a{
1234n/a unsigned char *ncp;
1235n/a Py_ssize_t i, j;
1236n/a PyObject *rv;
1237n/a
1238n/a if (!audioop_check_parameters(fragment->len, width))
1239n/a return NULL;
1240n/a if (!audioop_check_size(newwidth))
1241n/a return NULL;
1242n/a
1243n/a if (fragment->len/width > PY_SSIZE_T_MAX/newwidth) {
1244n/a PyErr_SetString(PyExc_MemoryError,
1245n/a "not enough memory for output buffer");
1246n/a return NULL;
1247n/a }
1248n/a rv = PyBytes_FromStringAndSize(NULL, (fragment->len/width)*newwidth);
1249n/a if (rv == NULL)
1250n/a return NULL;
1251n/a ncp = (unsigned char *)PyBytes_AsString(rv);
1252n/a
1253n/a for (i = j = 0; i < fragment->len; i += width, j += newwidth) {
1254n/a int val = GETSAMPLE32(width, fragment->buf, i);
1255n/a SETSAMPLE32(newwidth, ncp, j, val);
1256n/a }
1257n/a return rv;
1258n/a}
1259n/a
1260n/astatic int
1261n/agcd(int a, int b)
1262n/a{
1263n/a while (b > 0) {
1264n/a int tmp = a % b;
1265n/a a = b;
1266n/a b = tmp;
1267n/a }
1268n/a return a;
1269n/a}
1270n/a
1271n/a/*[clinic input]
1272n/aaudioop.ratecv
1273n/a
1274n/a fragment: Py_buffer
1275n/a width: int
1276n/a nchannels: int
1277n/a inrate: int
1278n/a outrate: int
1279n/a state: object
1280n/a weightA: int = 1
1281n/a weightB: int = 0
1282n/a /
1283n/a
1284n/aConvert the frame rate of the input fragment.
1285n/a[clinic start generated code]*/
1286n/a
1287n/astatic PyObject *
1288n/aaudioop_ratecv_impl(PyObject *module, Py_buffer *fragment, int width,
1289n/a int nchannels, int inrate, int outrate, PyObject *state,
1290n/a int weightA, int weightB)
1291n/a/*[clinic end generated code: output=624038e843243139 input=aff3acdc94476191]*/
1292n/a{
1293n/a char *cp, *ncp;
1294n/a Py_ssize_t len;
1295n/a int chan, d, *prev_i, *cur_i, cur_o;
1296n/a PyObject *samps, *str, *rv = NULL;
1297n/a int bytes_per_frame;
1298n/a
1299n/a if (!audioop_check_size(width))
1300n/a return NULL;
1301n/a if (nchannels < 1) {
1302n/a PyErr_SetString(AudioopError, "# of channels should be >= 1");
1303n/a return NULL;
1304n/a }
1305n/a if (width > INT_MAX / nchannels) {
1306n/a /* This overflow test is rigorously correct because
1307n/a both multiplicands are >= 1. Use the argument names
1308n/a from the docs for the error msg. */
1309n/a PyErr_SetString(PyExc_OverflowError,
1310n/a "width * nchannels too big for a C int");
1311n/a return NULL;
1312n/a }
1313n/a bytes_per_frame = width * nchannels;
1314n/a if (weightA < 1 || weightB < 0) {
1315n/a PyErr_SetString(AudioopError,
1316n/a "weightA should be >= 1, weightB should be >= 0");
1317n/a return NULL;
1318n/a }
1319n/a assert(fragment->len >= 0);
1320n/a if (fragment->len % bytes_per_frame != 0) {
1321n/a PyErr_SetString(AudioopError, "not a whole number of frames");
1322n/a return NULL;
1323n/a }
1324n/a if (inrate <= 0 || outrate <= 0) {
1325n/a PyErr_SetString(AudioopError, "sampling rate not > 0");
1326n/a return NULL;
1327n/a }
1328n/a /* divide inrate and outrate by their greatest common divisor */
1329n/a d = gcd(inrate, outrate);
1330n/a inrate /= d;
1331n/a outrate /= d;
1332n/a /* divide weightA and weightB by their greatest common divisor */
1333n/a d = gcd(weightA, weightB);
1334n/a weightA /= d;
1335n/a weightB /= d;
1336n/a
1337n/a if ((size_t)nchannels > SIZE_MAX/sizeof(int)) {
1338n/a PyErr_SetString(PyExc_MemoryError,
1339n/a "not enough memory for output buffer");
1340n/a return NULL;
1341n/a }
1342n/a prev_i = (int *) PyMem_Malloc(nchannels * sizeof(int));
1343n/a cur_i = (int *) PyMem_Malloc(nchannels * sizeof(int));
1344n/a if (prev_i == NULL || cur_i == NULL) {
1345n/a (void) PyErr_NoMemory();
1346n/a goto exit;
1347n/a }
1348n/a
1349n/a len = fragment->len / bytes_per_frame; /* # of frames */
1350n/a
1351n/a if (state == Py_None) {
1352n/a d = -outrate;
1353n/a for (chan = 0; chan < nchannels; chan++)
1354n/a prev_i[chan] = cur_i[chan] = 0;
1355n/a }
1356n/a else {
1357n/a if (!PyArg_ParseTuple(state,
1358n/a "iO!;audioop.ratecv: illegal state argument",
1359n/a &d, &PyTuple_Type, &samps))
1360n/a goto exit;
1361n/a if (PyTuple_Size(samps) != nchannels) {
1362n/a PyErr_SetString(AudioopError,
1363n/a "illegal state argument");
1364n/a goto exit;
1365n/a }
1366n/a for (chan = 0; chan < nchannels; chan++) {
1367n/a if (!PyArg_ParseTuple(PyTuple_GetItem(samps, chan),
1368n/a "ii:ratecv", &prev_i[chan],
1369n/a &cur_i[chan]))
1370n/a goto exit;
1371n/a }
1372n/a }
1373n/a
1374n/a /* str <- Space for the output buffer. */
1375n/a if (len == 0)
1376n/a str = PyBytes_FromStringAndSize(NULL, 0);
1377n/a else {
1378n/a /* There are len input frames, so we need (mathematically)
1379n/a ceiling(len*outrate/inrate) output frames, and each frame
1380n/a requires bytes_per_frame bytes. Computing this
1381n/a without spurious overflow is the challenge; we can
1382n/a settle for a reasonable upper bound, though, in this
1383n/a case ceiling(len/inrate) * outrate. */
1384n/a
1385n/a /* compute ceiling(len/inrate) without overflow */
1386n/a Py_ssize_t q = 1 + (len - 1) / inrate;
1387n/a if (outrate > PY_SSIZE_T_MAX / q / bytes_per_frame)
1388n/a str = NULL;
1389n/a else
1390n/a str = PyBytes_FromStringAndSize(NULL,
1391n/a q * outrate * bytes_per_frame);
1392n/a }
1393n/a if (str == NULL) {
1394n/a PyErr_SetString(PyExc_MemoryError,
1395n/a "not enough memory for output buffer");
1396n/a goto exit;
1397n/a }
1398n/a ncp = PyBytes_AsString(str);
1399n/a cp = fragment->buf;
1400n/a
1401n/a for (;;) {
1402n/a while (d < 0) {
1403n/a if (len == 0) {
1404n/a samps = PyTuple_New(nchannels);
1405n/a if (samps == NULL)
1406n/a goto exit;
1407n/a for (chan = 0; chan < nchannels; chan++)
1408n/a PyTuple_SetItem(samps, chan,
1409n/a Py_BuildValue("(ii)",
1410n/a prev_i[chan],
1411n/a cur_i[chan]));
1412n/a if (PyErr_Occurred())
1413n/a goto exit;
1414n/a /* We have checked before that the length
1415n/a * of the string fits into int. */
1416n/a len = (Py_ssize_t)(ncp - PyBytes_AsString(str));
1417n/a rv = PyBytes_FromStringAndSize
1418n/a (PyBytes_AsString(str), len);
1419n/a Py_DECREF(str);
1420n/a str = rv;
1421n/a if (str == NULL)
1422n/a goto exit;
1423n/a rv = Py_BuildValue("(O(iO))", str, d, samps);
1424n/a Py_DECREF(samps);
1425n/a Py_DECREF(str);
1426n/a goto exit; /* return rv */
1427n/a }
1428n/a for (chan = 0; chan < nchannels; chan++) {
1429n/a prev_i[chan] = cur_i[chan];
1430n/a cur_i[chan] = GETSAMPLE32(width, cp, 0);
1431n/a cp += width;
1432n/a /* implements a simple digital filter */
1433n/a cur_i[chan] = (int)(
1434n/a ((double)weightA * (double)cur_i[chan] +
1435n/a (double)weightB * (double)prev_i[chan]) /
1436n/a ((double)weightA + (double)weightB));
1437n/a }
1438n/a len--;
1439n/a d += outrate;
1440n/a }
1441n/a while (d >= 0) {
1442n/a for (chan = 0; chan < nchannels; chan++) {
1443n/a cur_o = (int)(((double)prev_i[chan] * (double)d +
1444n/a (double)cur_i[chan] * (double)(outrate - d)) /
1445n/a (double)outrate);
1446n/a SETSAMPLE32(width, ncp, 0, cur_o);
1447n/a ncp += width;
1448n/a }
1449n/a d -= inrate;
1450n/a }
1451n/a }
1452n/a exit:
1453n/a PyMem_Free(prev_i);
1454n/a PyMem_Free(cur_i);
1455n/a return rv;
1456n/a}
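/*
** Editorial sketch, not part of the original module: the inner loop of
** ratecv() above is plain linear interpolation between the previous and
** current input sample of a channel, where d is a phase accumulator that
** is advanced by outrate for every input frame consumed and decremented by
** inrate for every output frame produced.  This is a scalar restatement of
** the cur_o expression; AUDIOOP_EXAMPLES and example_* are hypothetical.
*/
#ifdef AUDIOOP_EXAMPLES
static int
example_ratecv_interpolate(int prev, int cur, int d, int outrate)
{
    return (int)(((double)prev * (double)d +
                  (double)cur * (double)(outrate - d)) / (double)outrate);
}
#endif /* AUDIOOP_EXAMPLES */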
1457n/a
1458n/a/*[clinic input]
1459n/aaudioop.lin2ulaw
1460n/a
1461n/a fragment: Py_buffer
1462n/a width: int
1463n/a /
1464n/a
1465n/aConvert samples in the audio fragment to u-LAW encoding.
1466n/a[clinic start generated code]*/
1467n/a
1468n/astatic PyObject *
1469n/aaudioop_lin2ulaw_impl(PyObject *module, Py_buffer *fragment, int width)
1470n/a/*[clinic end generated code: output=14fb62b16fe8ea8e input=2450d1b870b6bac2]*/
1471n/a{
1472n/a unsigned char *ncp;
1473n/a Py_ssize_t i;
1474n/a PyObject *rv;
1475n/a
1476n/a if (!audioop_check_parameters(fragment->len, width))
1477n/a return NULL;
1478n/a
1479n/a rv = PyBytes_FromStringAndSize(NULL, fragment->len/width);
1480n/a if (rv == NULL)
1481n/a return NULL;
1482n/a ncp = (unsigned char *)PyBytes_AsString(rv);
1483n/a
1484n/a for (i = 0; i < fragment->len; i += width) {
1485n/a int val = GETSAMPLE32(width, fragment->buf, i);
1486n/a *ncp++ = st_14linear2ulaw(val >> 18);
1487n/a }
1488n/a return rv;
1489n/a}
1490n/a
1491n/a/*[clinic input]
1492n/aaudioop.ulaw2lin
1493n/a
1494n/a fragment: Py_buffer
1495n/a width: int
1496n/a /
1497n/a
1498n/aConvert sound fragments in u-LAW encoding to linearly encoded sound fragments.
1499n/a[clinic start generated code]*/
1500n/a
1501n/astatic PyObject *
1502n/aaudioop_ulaw2lin_impl(PyObject *module, Py_buffer *fragment, int width)
1503n/a/*[clinic end generated code: output=378356b047521ba2 input=45d53ddce5be7d06]*/
1504n/a{
1505n/a unsigned char *cp;
1506n/a signed char *ncp;
1507n/a Py_ssize_t i;
1508n/a PyObject *rv;
1509n/a
1510n/a if (!audioop_check_size(width))
1511n/a return NULL;
1512n/a
1513n/a if (fragment->len > PY_SSIZE_T_MAX/width) {
1514n/a PyErr_SetString(PyExc_MemoryError,
1515n/a "not enough memory for output buffer");
1516n/a return NULL;
1517n/a }
1518n/a rv = PyBytes_FromStringAndSize(NULL, fragment->len*width);
1519n/a if (rv == NULL)
1520n/a return NULL;
1521n/a ncp = (signed char *)PyBytes_AsString(rv);
1522n/a
1523n/a cp = fragment->buf;
1524n/a for (i = 0; i < fragment->len*width; i += width) {
1525n/a int val = st_ulaw2linear16(*cp++) << 16;
1526n/a SETSAMPLE32(width, ncp, i, val);
1527n/a }
1528n/a return rv;
1529n/a}
1530n/a
1531n/a/*[clinic input]
1532n/aaudioop.lin2alaw
1533n/a
1534n/a fragment: Py_buffer
1535n/a width: int
1536n/a /
1537n/a
1538n/aConvert samples in the audio fragment to a-LAW encoding.
1539n/a[clinic start generated code]*/
1540n/a
1541n/astatic PyObject *
1542n/aaudioop_lin2alaw_impl(PyObject *module, Py_buffer *fragment, int width)
1543n/a/*[clinic end generated code: output=d076f130121a82f0 input=ffb1ef8bb39da945]*/
1544n/a{
1545n/a unsigned char *ncp;
1546n/a Py_ssize_t i;
1547n/a PyObject *rv;
1548n/a
1549n/a if (!audioop_check_parameters(fragment->len, width))
1550n/a return NULL;
1551n/a
1552n/a rv = PyBytes_FromStringAndSize(NULL, fragment->len/width);
1553n/a if (rv == NULL)
1554n/a return NULL;
1555n/a ncp = (unsigned char *)PyBytes_AsString(rv);
1556n/a
1557n/a for (i = 0; i < fragment->len; i += width) {
1558n/a int val = GETSAMPLE32(width, fragment->buf, i);
1559n/a *ncp++ = st_linear2alaw(val >> 19);
1560n/a }
1561n/a return rv;
1562n/a}
1563n/a
1564n/a/*[clinic input]
1565n/aaudioop.alaw2lin
1566n/a
1567n/a fragment: Py_buffer
1568n/a width: int
1569n/a /
1570n/a
1571n/aConvert sound fragments in a-LAW encoding to linearly encoded sound fragments.
1572n/a[clinic start generated code]*/
1573n/a
1574n/astatic PyObject *
1575n/aaudioop_alaw2lin_impl(PyObject *module, Py_buffer *fragment, int width)
1576n/a/*[clinic end generated code: output=85c365ec559df647 input=4140626046cd1772]*/
1577n/a{
1578n/a unsigned char *cp;
1579n/a signed char *ncp;
1580n/a Py_ssize_t i;
1581n/a int val;
1582n/a PyObject *rv;
1583n/a
1584n/a if (!audioop_check_size(width))
1585n/a return NULL;
1586n/a
1587n/a if (fragment->len > PY_SSIZE_T_MAX/width) {
1588n/a PyErr_SetString(PyExc_MemoryError,
1589n/a "not enough memory for output buffer");
1590n/a return NULL;
1591n/a }
1592n/a rv = PyBytes_FromStringAndSize(NULL, fragment->len*width);
1593n/a if (rv == NULL)
1594n/a return NULL;
1595n/a ncp = (signed char *)PyBytes_AsString(rv);
1596n/a cp = fragment->buf;
1597n/a
1598n/a for (i = 0; i < fragment->len*width; i += width) {
1599n/a val = st_alaw2linear16(*cp++) << 16;
1600n/a SETSAMPLE32(width, ncp, i, val);
1601n/a }
1602n/a return rv;
1603n/a}
1604n/a
1605n/a/*[clinic input]
1606n/aaudioop.lin2adpcm
1607n/a
1608n/a fragment: Py_buffer
1609n/a width: int
1610n/a state: object
1611n/a /
1612n/a
1613n/aConvert samples to 4 bit Intel/DVI ADPCM encoding.
1614n/a[clinic start generated code]*/
1615n/a
1616n/astatic PyObject *
1617n/aaudioop_lin2adpcm_impl(PyObject *module, Py_buffer *fragment, int width,
1618n/a PyObject *state)
1619n/a/*[clinic end generated code: output=cc19f159f16c6793 input=12919d549b90c90a]*/
1620n/a{
1621n/a signed char *ncp;
1622n/a Py_ssize_t i;
1623n/a int step, valpred, delta,
1624n/a index, sign, vpdiff, diff;
1625n/a PyObject *rv = NULL, *str;
1626n/a int outputbuffer = 0, bufferstep;
1627n/a
1628n/a if (!audioop_check_parameters(fragment->len, width))
1629n/a return NULL;
1630n/a
1631n/a /* Decode state, should have (value, step) */
1632n/a if ( state == Py_None ) {
1633n/a /* First time, it seems. Set defaults */
1634n/a valpred = 0;
1635n/a index = 0;
1636n/a }
1637n/a else if (!PyTuple_Check(state)) {
1638n/a PyErr_SetString(PyExc_TypeError, "state must be a tuple or None");
1639n/a return NULL;
1640n/a }
1641n/a else if (!PyArg_ParseTuple(state, "ii", &valpred, &index)) {
1642n/a return NULL;
1643n/a }
1644n/a else if (valpred >= 0x8000 || valpred < -0x8000 ||
1645n/a (size_t)index >= Py_ARRAY_LENGTH(stepsizeTable)) {
1646n/a PyErr_SetString(PyExc_ValueError, "bad state");
1647n/a return NULL;
1648n/a }
1649n/a
1650n/a str = PyBytes_FromStringAndSize(NULL, fragment->len/(width*2));
1651n/a if (str == NULL)
1652n/a return NULL;
1653n/a ncp = (signed char *)PyBytes_AsString(str);
1654n/a
1655n/a step = stepsizeTable[index];
1656n/a bufferstep = 1;
1657n/a
1658n/a for (i = 0; i < fragment->len; i += width) {
1659n/a int val = GETSAMPLE32(width, fragment->buf, i) >> 16;
1660n/a
1661n/a /* Step 1 - compute difference with previous value */
1662n/a if (val < valpred) {
1663n/a diff = valpred - val;
1664n/a sign = 8;
1665n/a }
1666n/a else {
1667n/a diff = val - valpred;
1668n/a sign = 0;
1669n/a }
1670n/a
1671n/a /* Step 2 - Divide and clamp */
1672n/a /* Note:
1673n/a ** This code *approximately* computes:
1674n/a ** delta = diff*4/step;
1675n/a ** vpdiff = (delta+0.5)*step/4;
1676n/a ** but in shift step bits are dropped. The net result of this
1677n/a ** is that even if you have fast mul/div hardware you cannot
1678n/a ** put it to good use since the fixup would be too expensive.
1679n/a */
1680n/a delta = 0;
1681n/a vpdiff = (step >> 3);
1682n/a
1683n/a if ( diff >= step ) {
1684n/a delta = 4;
1685n/a diff -= step;
1686n/a vpdiff += step;
1687n/a }
1688n/a step >>= 1;
1689n/a if ( diff >= step ) {
1690n/a delta |= 2;
1691n/a diff -= step;
1692n/a vpdiff += step;
1693n/a }
1694n/a step >>= 1;
1695n/a if ( diff >= step ) {
1696n/a delta |= 1;
1697n/a vpdiff += step;
1698n/a }
1699n/a
1700n/a /* Step 3 - Update previous value */
1701n/a if ( sign )
1702n/a valpred -= vpdiff;
1703n/a else
1704n/a valpred += vpdiff;
1705n/a
1706n/a /* Step 4 - Clamp previous value to 16 bits */
1707n/a if ( valpred > 32767 )
1708n/a valpred = 32767;
1709n/a else if ( valpred < -32768 )
1710n/a valpred = -32768;
1711n/a
1712n/a /* Step 5 - Assemble value, update index and step values */
1713n/a delta |= sign;
1714n/a
1715n/a index += indexTable[delta];
1716n/a if ( index < 0 ) index = 0;
1717n/a if ( index > 88 ) index = 88;
1718n/a step = stepsizeTable[index];
1719n/a
1720n/a /* Step 6 - Output value */
1721n/a if ( bufferstep ) {
1722n/a outputbuffer = (delta << 4) & 0xf0;
1723n/a } else {
1724n/a *ncp++ = (delta & 0x0f) | outputbuffer;
1725n/a }
1726n/a bufferstep = !bufferstep;
1727n/a }
1728n/a rv = Py_BuildValue("(O(ii))", str, valpred, index);
1729n/a Py_DECREF(str);
1730n/a return rv;
1731n/a}
1732n/a
1733n/a/*[clinic input]
1734n/aaudioop.adpcm2lin
1735n/a
1736n/a fragment: Py_buffer
1737n/a width: int
1738n/a state: object
1739n/a /
1740n/a
1741n/aDecode an Intel/DVI ADPCM coded fragment to a linear fragment.
1742n/a[clinic start generated code]*/
1743n/a
1744n/astatic PyObject *
1745n/aaudioop_adpcm2lin_impl(PyObject *module, Py_buffer *fragment, int width,
1746n/a PyObject *state)
1747n/a/*[clinic end generated code: output=3440ea105acb3456 input=f5221144f5ca9ef0]*/
1748n/a{
1749n/a signed char *cp;
1750n/a signed char *ncp;
1751n/a Py_ssize_t i, outlen;
1752n/a int valpred, step, delta, index, sign, vpdiff;
1753n/a PyObject *rv, *str;
1754n/a int inputbuffer = 0, bufferstep;
1755n/a
1756n/a if (!audioop_check_size(width))
1757n/a return NULL;
1758n/a
1759n/a /* Decode state, should have (value, step) */
1760n/a if ( state == Py_None ) {
1761n/a /* First time, it seems. Set defaults */
1762n/a valpred = 0;
1763n/a index = 0;
1764n/a }
1765n/a else if (!PyTuple_Check(state)) {
1766n/a PyErr_SetString(PyExc_TypeError, "state must be a tuple or None");
1767n/a return NULL;
1768n/a }
1769n/a else if (!PyArg_ParseTuple(state, "ii", &valpred, &index)) {
1770n/a return NULL;
1771n/a }
1772n/a else if (valpred >= 0x8000 || valpred < -0x8000 ||
1773n/a (size_t)index >= Py_ARRAY_LENGTH(stepsizeTable)) {
1774n/a PyErr_SetString(PyExc_ValueError, "bad state");
1775n/a return NULL;
1776n/a }
1777n/a
1778n/a if (fragment->len > (PY_SSIZE_T_MAX/2)/width) {
1779n/a PyErr_SetString(PyExc_MemoryError,
1780n/a "not enough memory for output buffer");
1781n/a return NULL;
1782n/a }
1783n/a outlen = fragment->len*width*2;
1784n/a str = PyBytes_FromStringAndSize(NULL, outlen);
1785n/a if (str == NULL)
1786n/a return NULL;
1787n/a ncp = (signed char *)PyBytes_AsString(str);
1788n/a cp = fragment->buf;
1789n/a
1790n/a step = stepsizeTable[index];
1791n/a bufferstep = 0;
1792n/a
1793n/a for (i = 0; i < outlen; i += width) {
1794n/a /* Step 1 - get the delta value and compute next index */
1795n/a if ( bufferstep ) {
1796n/a delta = inputbuffer & 0xf;
1797n/a } else {
1798n/a inputbuffer = *cp++;
1799n/a delta = (inputbuffer >> 4) & 0xf;
1800n/a }
1801n/a
1802n/a bufferstep = !bufferstep;
1803n/a
1804n/a /* Step 2 - Find new index value (for later) */
1805n/a index += indexTable[delta];
1806n/a if ( index < 0 ) index = 0;
1807n/a if ( index > 88 ) index = 88;
1808n/a
1809n/a /* Step 3 - Separate sign and magnitude */
1810n/a sign = delta & 8;
1811n/a delta = delta & 7;
1812n/a
1813n/a /* Step 4 - Compute difference and new predicted value */
1814n/a /*
1815n/a ** Computes 'vpdiff = (delta+0.5)*step/4', but see comment
1816n/a ** in adpcm_coder.
1817n/a */
1818n/a vpdiff = step >> 3;
1819n/a if ( delta & 4 ) vpdiff += step;
1820n/a if ( delta & 2 ) vpdiff += step>>1;
1821n/a if ( delta & 1 ) vpdiff += step>>2;
1822n/a
1823n/a if ( sign )
1824n/a valpred -= vpdiff;
1825n/a else
1826n/a valpred += vpdiff;
1827n/a
1828n/a /* Step 5 - clamp output value */
1829n/a if ( valpred > 32767 )
1830n/a valpred = 32767;
1831n/a else if ( valpred < -32768 )
1832n/a valpred = -32768;
1833n/a
1834n/a /* Step 6 - Update step value */
1835n/a step = stepsizeTable[index];
1836n/a
1837n/a /* Step 7 - Output value */
1838n/a SETSAMPLE32(width, ncp, i, valpred << 16);
1839n/a }
1840n/a
1841n/a rv = Py_BuildValue("(O(ii))", str, valpred, index);
1842n/a Py_DECREF(str);
1843n/a return rv;
1844n/a}
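/*
** Editorial sketch, not part of the original module: one ADPCM difference
** reconstruction in isolation.  Both lin2adpcm() and adpcm2lin() above
** approximate vpdiff = (delta + 0.5) * step / 4 with shifts, where the
** 4-bit code delta carries the sign in bit 3 and the magnitude in bits
** 0..2.  The AUDIOOP_EXAMPLES guard and example_* name are hypothetical.
*/
#ifdef AUDIOOP_EXAMPLES
static int
example_adpcm_vpdiff(int delta, int step)
{
    int vpdiff = step >> 3;
    if (delta & 4) vpdiff += step;
    if (delta & 2) vpdiff += step >> 1;
    if (delta & 1) vpdiff += step >> 2;
    return (delta & 8) ? -vpdiff : vpdiff;  /* bit 3 is the sign */
}
#endif /* AUDIOOP_EXAMPLES */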
1845n/a
1846n/a#include "clinic/audioop.c.h"
1847n/a
1848n/astatic PyMethodDef audioop_methods[] = {
1849n/a AUDIOOP_MAX_METHODDEF
1850n/a AUDIOOP_MINMAX_METHODDEF
1851n/a AUDIOOP_AVG_METHODDEF
1852n/a AUDIOOP_MAXPP_METHODDEF
1853n/a AUDIOOP_AVGPP_METHODDEF
1854n/a AUDIOOP_RMS_METHODDEF
1855n/a AUDIOOP_FINDFIT_METHODDEF
1856n/a AUDIOOP_FINDMAX_METHODDEF
1857n/a AUDIOOP_FINDFACTOR_METHODDEF
1858n/a AUDIOOP_CROSS_METHODDEF
1859n/a AUDIOOP_MUL_METHODDEF
1860n/a AUDIOOP_ADD_METHODDEF
1861n/a AUDIOOP_BIAS_METHODDEF
1862n/a AUDIOOP_ULAW2LIN_METHODDEF
1863n/a AUDIOOP_LIN2ULAW_METHODDEF
1864n/a AUDIOOP_ALAW2LIN_METHODDEF
1865n/a AUDIOOP_LIN2ALAW_METHODDEF
1866n/a AUDIOOP_LIN2LIN_METHODDEF
1867n/a AUDIOOP_ADPCM2LIN_METHODDEF
1868n/a AUDIOOP_LIN2ADPCM_METHODDEF
1869n/a AUDIOOP_TOMONO_METHODDEF
1870n/a AUDIOOP_TOSTEREO_METHODDEF
1871n/a AUDIOOP_GETSAMPLE_METHODDEF
1872n/a AUDIOOP_REVERSE_METHODDEF
1873n/a AUDIOOP_BYTESWAP_METHODDEF
1874n/a AUDIOOP_RATECV_METHODDEF
1875n/a { 0, 0 }
1876n/a};
1877n/a
1878n/a
1879n/astatic struct PyModuleDef audioopmodule = {
1880n/a PyModuleDef_HEAD_INIT,
1881n/a "audioop",
1882n/a NULL,
1883n/a -1,
1884n/a audioop_methods,
1885n/a NULL,
1886n/a NULL,
1887n/a NULL,
1888n/a NULL
1889n/a};
1890n/a
1891n/aPyMODINIT_FUNC
1892n/aPyInit_audioop(void)
1893n/a{
1894n/a PyObject *m, *d;
1895n/a m = PyModule_Create(&audioopmodule);
1896n/a if (m == NULL)
1897n/a return NULL;
1898n/a d = PyModule_GetDict(m);
1899n/a if (d == NULL)
1900n/a return NULL;
1901n/a AudioopError = PyErr_NewException("audioop.error", NULL, NULL);
1902n/a if (AudioopError != NULL)
1903n/a PyDict_SetItemString(d,"error",AudioopError);
1904n/a return m;
1905n/a}