Vector Optimized Library of Kernels 3.1.0
Architecture-tuned implementations of math kernels

volk_32fc_x2_dot_prod_32fc.h

/* -*- c++ -*- */
/*
 * Copyright 2012, 2013, 2014 Free Software Foundation, Inc.
 *
 * This file is part of VOLK
 *
 * SPDX-License-Identifier: LGPL-3.0-or-later
 */

#ifndef INCLUDED_volk_32fc_x2_dot_prod_32fc_u_H
#define INCLUDED_volk_32fc_x2_dot_prod_32fc_u_H

#include <stdio.h>
#include <string.h>
#include <volk/volk_common.h>
#include <volk/volk_complex.h>


#ifdef LV_HAVE_RISCV64
extern void volk_32fc_x2_dot_prod_32fc_sifive_u74(lv_32fc_t* result,
                                                  const lv_32fc_t* input,
                                                  const lv_32fc_t* taps,
                                                  unsigned int num_points);
#endif

#ifdef LV_HAVE_GENERIC


static inline void volk_32fc_x2_dot_prod_32fc_generic(lv_32fc_t* result,
                                                      const lv_32fc_t* input,
                                                      const lv_32fc_t* taps,
                                                      unsigned int num_points)
{

    float* res = (float*)result;
    float* in = (float*)input;
    float* tp = (float*)taps;
    unsigned int n_2_ccomplex_blocks = num_points / 2;

    float sum0[2] = { 0, 0 };
    float sum1[2] = { 0, 0 };
    unsigned int i = 0;

    for (i = 0; i < n_2_ccomplex_blocks; ++i) {
        sum0[0] += in[0] * tp[0] - in[1] * tp[1];
        sum0[1] += in[0] * tp[1] + in[1] * tp[0];
        sum1[0] += in[2] * tp[2] - in[3] * tp[3];
        sum1[1] += in[2] * tp[3] + in[3] * tp[2];

        in += 4;
        tp += 4;
    }

    res[0] = sum0[0] + sum1[0];
    res[1] = sum0[1] + sum1[1];

    // Cleanup if we had an odd number of points
    if (num_points & 1) {
        *result += input[num_points - 1] * taps[num_points - 1];
    }
}

#endif /*LV_HAVE_GENERIC*/
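
/*
 * A minimal usage sketch (illustrative; the vector length of 8 and the
 * sample values are assumptions for this example). lv_cmake() is the
 * complex constructor from volk_complex.h.
 *
 * \code
 *     lv_32fc_t input[8], taps[8], result;
 *     unsigned int k;
 *     for (k = 0; k < 8; k++) {
 *         input[k] = lv_cmake((float)k, 1.f); // k + 1j
 *         taps[k] = lv_cmake(1.f, 0.f);       // unity tap
 *     }
 *     volk_32fc_x2_dot_prod_32fc_generic(&result, input, taps, 8);
 *     // result == sum over k of input[k] * taps[k] == 28 + 8j
 * \endcode
 */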


#if LV_HAVE_SSE && LV_HAVE_64

static inline void volk_32fc_x2_dot_prod_32fc_u_sse_64(lv_32fc_t* result,
                                                       const lv_32fc_t* input,
                                                       const lv_32fc_t* taps,
                                                       unsigned int num_points)
{

    const unsigned int num_bytes = num_points * 8;
    unsigned int isodd = num_points & 1;

    __VOLK_ASM __VOLK_VOLATILE(
        "# ccomplex_dotprod_generic (float* result, const float *input,\n\t"
        "#                           const float *taps, unsigned num_bytes)\n\t"
        "# float sum0 = 0;\n\t"
        "# float sum1 = 0;\n\t"
        "# float sum2 = 0;\n\t"
        "# float sum3 = 0;\n\t"
        "# do {\n\t"
        "# sum0 += input[0] * taps[0] - input[1] * taps[1];\n\t"
        "# sum1 += input[0] * taps[1] + input[1] * taps[0];\n\t"
        "# sum2 += input[2] * taps[2] - input[3] * taps[3];\n\t"
        "# sum3 += input[2] * taps[3] + input[3] * taps[2];\n\t"
        "# input += 4;\n\t"
        "# taps += 4; \n\t"
        "# } while (--n_2_ccomplex_blocks != 0);\n\t"
        "# result[0] = sum0 + sum2;\n\t"
        "# result[1] = sum1 + sum3;\n\t"
        "# TODO: prefetch and better scheduling\n\t"
        " xor %%r9, %%r9\n\t"
        " xor %%r10, %%r10\n\t"
        " movq %%rcx, %%rax\n\t"
        " movq %%rcx, %%r8\n\t"
        " movq %[rsi], %%r9\n\t"
        " movq %[rdx], %%r10\n\t"
        " xorps %%xmm6, %%xmm6 # zero accumulators\n\t"
        " xorps %%xmm7, %%xmm7 # zero accumulators\n\t"
        " shr $5, %%rax # rax = n_2_ccomplex_blocks / 2\n\t"
        " shr $4, %%r8\n\t"
        " jmp .%=L1_test\n\t"
        " # 4 taps / loop\n\t"
        " # something like ?? cycles / loop\n\t"
        ".%=Loop1: \n\t"
        "# complex prod: C += A * B, w/ temp Z & Y (or B), xmmPN=$0x8000000080000000\n\t"
        "# movups (%%r9), %%xmmA\n\t"
        "# movups (%%r10), %%xmmB\n\t"
        "# movups %%xmmA, %%xmmZ\n\t"
        "# shufps $0xb1, %%xmmZ, %%xmmZ # swap internals\n\t"
        "# mulps %%xmmB, %%xmmA\n\t"
        "# mulps %%xmmZ, %%xmmB\n\t"
        "# # SSE replacement for: pfpnacc %%xmmB, %%xmmA\n\t"
        "# xorps %%xmmPN, %%xmmA\n\t"
        "# movups %%xmmA, %%xmmZ\n\t"
        "# unpcklps %%xmmB, %%xmmA\n\t"
        "# unpckhps %%xmmB, %%xmmZ\n\t"
        "# movups %%xmmZ, %%xmmY\n\t"
        "# shufps $0x44, %%xmmA, %%xmmZ # b01000100\n\t"
        "# shufps $0xee, %%xmmY, %%xmmA # b11101110\n\t"
        "# addps %%xmmZ, %%xmmA\n\t"
        "# addps %%xmmA, %%xmmC\n\t"
        "# A=xmm0, B=xmm2, Z=xmm4\n\t"
        "# A'=xmm1, B'=xmm3, Z'=xmm5\n\t"
        " movups 0(%%r9), %%xmm0\n\t"
        " movups 16(%%r9), %%xmm1\n\t"
        " movups %%xmm0, %%xmm4\n\t"
        " movups 0(%%r10), %%xmm2\n\t"
        " mulps %%xmm2, %%xmm0\n\t"
        " shufps $0xb1, %%xmm4, %%xmm4 # swap internals\n\t"
        " movups 16(%%r10), %%xmm3\n\t"
        " movups %%xmm1, %%xmm5\n\t"
        " addps %%xmm0, %%xmm6\n\t"
        " mulps %%xmm3, %%xmm1\n\t"
        " shufps $0xb1, %%xmm5, %%xmm5 # swap internals\n\t"
        " addps %%xmm1, %%xmm6\n\t"
        " mulps %%xmm4, %%xmm2\n\t"
        " addps %%xmm2, %%xmm7\n\t"
        " mulps %%xmm5, %%xmm3\n\t"
        " add $32, %%r9\n\t"
        " addps %%xmm3, %%xmm7\n\t"
        " add $32, %%r10\n\t"
        ".%=L1_test:\n\t"
        " dec %%rax\n\t"
        " jge .%=Loop1\n\t"
        " # We've handled the bulk of multiplies up to here.\n\t"
        " # Let's see if original n_2_ccomplex_blocks was odd.\n\t"
        " # If so, we've got 2 more taps to do.\n\t"
        " and $1, %%r8\n\t"
        " je .%=Leven\n\t"
        " # The count was odd, do 2 more taps.\n\t"
        " movups 0(%%r9), %%xmm0\n\t"
        " movups %%xmm0, %%xmm4\n\t"
        " movups 0(%%r10), %%xmm2\n\t"
        " mulps %%xmm2, %%xmm0\n\t"
        " shufps $0xb1, %%xmm4, %%xmm4 # swap internals\n\t"
        " addps %%xmm0, %%xmm6\n\t"
        " mulps %%xmm4, %%xmm2\n\t"
        " addps %%xmm2, %%xmm7\n\t"
        ".%=Leven:\n\t"
        " # neg inversor\n\t"
        " xorps %%xmm1, %%xmm1\n\t"
        " mov $0x80000000, %%r9\n\t"
        " movd %%r9, %%xmm1\n\t"
        " shufps $0x11, %%xmm1, %%xmm1 # b00010001 # 0 -0 0 -0\n\t"
        " # pfpnacc\n\t"
        " xorps %%xmm1, %%xmm6\n\t"
        " movups %%xmm6, %%xmm2\n\t"
        " unpcklps %%xmm7, %%xmm6\n\t"
        " unpckhps %%xmm7, %%xmm2\n\t"
        " movups %%xmm2, %%xmm3\n\t"
        " shufps $0x44, %%xmm6, %%xmm2 # b01000100\n\t"
        " shufps $0xee, %%xmm3, %%xmm6 # b11101110\n\t"
        " addps %%xmm2, %%xmm6\n\t"
        " # xmm6 = r1 i2 r3 i4\n\t"
        " movhlps %%xmm6, %%xmm4 # xmm4 = r3 i4 ?? ??\n\t"
        " addps %%xmm4, %%xmm6 # xmm6 = r1+r3 i2+i4 ?? ??\n\t"
        " movlps %%xmm6, (%[rdi]) # store low 2x32 bits (complex) "
        "to memory\n\t"
        :
        : [rsi] "r"(input), [rdx] "r"(taps), "c"(num_bytes), [rdi] "r"(result)
        : "rax", "r8", "r9", "r10");


    if (isodd) {
        *result += input[num_points - 1] * taps[num_points - 1];
    }

    return;
}

#endif /* LV_HAVE_SSE && LV_HAVE_64 */


#ifdef LV_HAVE_SSE3

#include <pmmintrin.h>

static inline void volk_32fc_x2_dot_prod_32fc_u_sse3(lv_32fc_t* result,
                                                     const lv_32fc_t* input,
                                                     const lv_32fc_t* taps,
                                                     unsigned int num_points)
{

    lv_32fc_t dotProduct;
    memset(&dotProduct, 0x0, 2 * sizeof(float));

    unsigned int number = 0;
    const unsigned int halfPoints = num_points / 2;
    unsigned int isodd = num_points & 1;

    __m128 x, y, yl, yh, z, tmp1, tmp2, dotProdVal;

    const lv_32fc_t* a = input;
    const lv_32fc_t* b = taps;

    dotProdVal = _mm_setzero_ps();

    for (; number < halfPoints; number++) {

        x = _mm_loadu_ps((float*)a); // Load the ar + ai, br + bi as ar,ai,br,bi
        y = _mm_loadu_ps((float*)b); // Load the cr + ci, dr + di as cr,ci,dr,di

        yl = _mm_moveldup_ps(y); // Load yl with cr,cr,dr,dr
        yh = _mm_movehdup_ps(y); // Load yh with ci,ci,di,di

        tmp1 = _mm_mul_ps(x, yl); // tmp1 = ar*cr,ai*cr,br*dr,bi*dr

        x = _mm_shuffle_ps(x, x, 0xB1); // Re-arrange x to be ai,ar,bi,br

        tmp2 = _mm_mul_ps(x, yh); // tmp2 = ai*ci,ar*ci,bi*di,br*di

        z = _mm_addsub_ps(tmp1,
                          tmp2); // ar*cr-ai*ci, ai*cr+ar*ci, br*dr-bi*di, bi*dr+br*di

        dotProdVal =
            _mm_add_ps(dotProdVal, z); // Add the complex multiplication results together

        a += 2;
        b += 2;
    }

    __VOLK_ATTR_ALIGNED(16) lv_32fc_t dotProductVector[2];

    _mm_storeu_ps((float*)dotProductVector,
                  dotProdVal); // Store the results back into the dot product vector

    dotProduct += (dotProductVector[0] + dotProductVector[1]);

    if (isodd) {
        dotProduct += input[num_points - 1] * taps[num_points - 1];
    }

    *result = dotProduct;
}

#endif /*LV_HAVE_SSE3*/
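
/*
 * Note on the SSE3 approach: the kernel above computes the complex product
 *   (ar + j*ai)(br + j*bi) = (ar*br - ai*bi) + j*(ar*bi + ai*br)
 * by duplicating the real parts of the taps (_mm_moveldup_ps) and the
 * imaginary parts (_mm_movehdup_ps), multiplying each against the input,
 * and combining with _mm_addsub_ps, which subtracts in the even (real)
 * lanes and adds in the odd (imaginary) lanes.
 */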

// #ifdef LV_HAVE_SSE4_1

// #include <smmintrin.h>

// static inline void volk_32fc_x2_dot_prod_32fc_u_sse4_1(lv_32fc_t* result,
//                                                        const lv_32fc_t* input,
//                                                        const lv_32fc_t* taps,
//                                                        unsigned int num_points)
// {

//     unsigned int i = 0;
//     const unsigned int qtr_points = num_points / 4;
//     const unsigned int isodd = num_points & 3;

//     __m128 xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, real0, real1, im0, im1;
//     float *p_input, *p_taps;
//     __m64* p_result;

//     p_result = (__m64*)result;
//     p_input = (float*)input;
//     p_taps = (float*)taps;

//     static const __m128i neg = { 0x000000000000000080000000 };

//     real0 = _mm_setzero_ps();
//     real1 = _mm_setzero_ps();
//     im0 = _mm_setzero_ps();
//     im1 = _mm_setzero_ps();

//     for (; i < qtr_points; ++i) {
//         xmm0 = _mm_loadu_ps(p_input);
//         xmm1 = _mm_loadu_ps(p_taps);

//         p_input += 4;
//         p_taps += 4;

//         xmm2 = _mm_loadu_ps(p_input);
//         xmm3 = _mm_loadu_ps(p_taps);

//         p_input += 4;
//         p_taps += 4;

//         xmm4 = _mm_unpackhi_ps(xmm0, xmm2);
//         xmm5 = _mm_unpackhi_ps(xmm1, xmm3);
//         xmm0 = _mm_unpacklo_ps(xmm0, xmm2);
//         xmm2 = _mm_unpacklo_ps(xmm1, xmm3);

//         // imaginary vector from input
//         xmm1 = _mm_unpackhi_ps(xmm0, xmm4);
//         // real vector from input
//         xmm3 = _mm_unpacklo_ps(xmm0, xmm4);
//         // imaginary vector from taps
//         xmm0 = _mm_unpackhi_ps(xmm2, xmm5);
//         // real vector from taps
//         xmm2 = _mm_unpacklo_ps(xmm2, xmm5);

//         xmm4 = _mm_dp_ps(xmm3, xmm2, 0xf1);
//         xmm5 = _mm_dp_ps(xmm1, xmm0, 0xf1);

//         xmm6 = _mm_dp_ps(xmm3, xmm0, 0xf2);
//         xmm7 = _mm_dp_ps(xmm1, xmm2, 0xf2);

//         real0 = _mm_add_ps(xmm4, real0);
//         real1 = _mm_add_ps(xmm5, real1);
//         im0 = _mm_add_ps(xmm6, im0);
//         im1 = _mm_add_ps(xmm7, im1);
//     }

//     real1 = _mm_xor_ps(real1, bit128_p(&neg)->float_vec);

//     im0 = _mm_add_ps(im0, im1);
//     real0 = _mm_add_ps(real0, real1);

//     im0 = _mm_add_ps(im0, real0);

//     _mm_storel_pi(p_result, im0);

//     for (i = num_points - isodd; i < num_points; i++) {
//         *result += input[i] * taps[i];
//     }
// }

// #endif /*LV_HAVE_SSE4_1*/

#ifdef LV_HAVE_AVX

#include <immintrin.h>

static inline void volk_32fc_x2_dot_prod_32fc_u_avx(lv_32fc_t* result,
                                                    const lv_32fc_t* input,
                                                    const lv_32fc_t* taps,
                                                    unsigned int num_points)
{

    unsigned int isodd = num_points & 3;
    unsigned int i = 0;
    lv_32fc_t dotProduct;
    memset(&dotProduct, 0x0, 2 * sizeof(float));

    unsigned int number = 0;
    const unsigned int quarterPoints = num_points / 4;

    __m256 x, y, yl, yh, z, tmp1, tmp2, dotProdVal;

    const lv_32fc_t* a = input;
    const lv_32fc_t* b = taps;

    dotProdVal = _mm256_setzero_ps();

    for (; number < quarterPoints; number++) {
        x = _mm256_loadu_ps((float*)a); // Load a,b,e,f as ar,ai,br,bi,er,ei,fr,fi
        y = _mm256_loadu_ps((float*)b); // Load c,d,g,h as cr,ci,dr,di,gr,gi,hr,hi

        yl = _mm256_moveldup_ps(y); // Load yl with cr,cr,dr,dr,gr,gr,hr,hr
        yh = _mm256_movehdup_ps(y); // Load yh with ci,ci,di,di,gi,gi,hi,hi

        tmp1 = _mm256_mul_ps(x, yl); // tmp1 = ar*cr,ai*cr,br*dr,bi*dr ...

        x = _mm256_shuffle_ps(x, x, 0xB1); // Re-arrange x to be ai,ar,bi,br,ei,er,fi,fr

        tmp2 = _mm256_mul_ps(x, yh); // tmp2 = ai*ci,ar*ci,bi*di,br*di ...

        z = _mm256_addsub_ps(tmp1,
                             tmp2); // ar*cr-ai*ci, ai*cr+ar*ci, br*dr-bi*di, bi*dr+br*di

        dotProdVal = _mm256_add_ps(dotProdVal,
                                   z); // Add the complex multiplication results together

        a += 4;
        b += 4;
    }

    __VOLK_ATTR_ALIGNED(32) lv_32fc_t dotProductVector[4];

    _mm256_storeu_ps((float*)dotProductVector,
                     dotProdVal); // Store the results back into the dot product vector

    dotProduct += (dotProductVector[0] + dotProductVector[1] + dotProductVector[2] +
                   dotProductVector[3]);

    for (i = num_points - isodd; i < num_points; i++) {
        dotProduct += input[i] * taps[i];
    }

    *result = dotProduct;
}

#endif /*LV_HAVE_AVX*/

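/*
 * The FMA variant below matches the AVX kernel above, except that the
 * multiply of the real-part duplicates and the add/subtract combine are
 * fused into a single _mm256_fmaddsub_ps(tmp1, yl, tmp2), saving an
 * instruction and an intermediate rounding per iteration.
 */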
#if LV_HAVE_AVX && LV_HAVE_FMA
#include <immintrin.h>

static inline void volk_32fc_x2_dot_prod_32fc_u_avx_fma(lv_32fc_t* result,
                                                        const lv_32fc_t* input,
                                                        const lv_32fc_t* taps,
                                                        unsigned int num_points)
{

    unsigned int isodd = num_points & 3;
    unsigned int i = 0;
    lv_32fc_t dotProduct;
    memset(&dotProduct, 0x0, 2 * sizeof(float));

    unsigned int number = 0;
    const unsigned int quarterPoints = num_points / 4;

    __m256 x, y, yl, yh, z, tmp1, tmp2, dotProdVal;

    const lv_32fc_t* a = input;
    const lv_32fc_t* b = taps;

    dotProdVal = _mm256_setzero_ps();

    for (; number < quarterPoints; number++) {

        x = _mm256_loadu_ps((float*)a); // Load a,b,e,f as ar,ai,br,bi,er,ei,fr,fi
        y = _mm256_loadu_ps((float*)b); // Load c,d,g,h as cr,ci,dr,di,gr,gi,hr,hi

        yl = _mm256_moveldup_ps(y); // Load yl with cr,cr,dr,dr,gr,gr,hr,hr
        yh = _mm256_movehdup_ps(y); // Load yh with ci,ci,di,di,gi,gi,hi,hi

        tmp1 = x;

        x = _mm256_shuffle_ps(x, x, 0xB1); // Re-arrange x to be ai,ar,bi,br,ei,er,fi,fr

        tmp2 = _mm256_mul_ps(x, yh); // tmp2 = ai*ci,ar*ci,bi*di,br*di ...

        z = _mm256_fmaddsub_ps(
            tmp1, yl, tmp2); // ar*cr-ai*ci, ai*cr+ar*ci, br*dr-bi*di, bi*dr+br*di

        dotProdVal = _mm256_add_ps(dotProdVal,
                                   z); // Add the complex multiplication results together

        a += 4;
        b += 4;
    }

    __VOLK_ATTR_ALIGNED(32) lv_32fc_t dotProductVector[4];

    _mm256_storeu_ps((float*)dotProductVector,
                     dotProdVal); // Store the results back into the dot product vector

    dotProduct += (dotProductVector[0] + dotProductVector[1] + dotProductVector[2] +
                   dotProductVector[3]);

    for (i = num_points - isodd; i < num_points; i++) {
        dotProduct += input[i] * taps[i];
    }

    *result = dotProduct;
}

#endif /*LV_HAVE_AVX && LV_HAVE_FMA*/

#endif /*INCLUDED_volk_32fc_x2_dot_prod_32fc_u_H*/

#ifndef INCLUDED_volk_32fc_x2_dot_prod_32fc_a_H
#define INCLUDED_volk_32fc_x2_dot_prod_32fc_a_H

#include <stdio.h>
#include <string.h>
#include <volk/volk_common.h>
#include <volk/volk_complex.h>


#if LV_HAVE_SSE && LV_HAVE_64


static inline void volk_32fc_x2_dot_prod_32fc_a_sse_64(lv_32fc_t* result,
                                                       const lv_32fc_t* input,
                                                       const lv_32fc_t* taps,
                                                       unsigned int num_points)
{

    const unsigned int num_bytes = num_points * 8;
    unsigned int isodd = num_points & 1;

    __VOLK_ASM __VOLK_VOLATILE(
        "# ccomplex_dotprod_generic (float* result, const float *input,\n\t"
        "#                           const float *taps, unsigned num_bytes)\n\t"
        "# float sum0 = 0;\n\t"
        "# float sum1 = 0;\n\t"
        "# float sum2 = 0;\n\t"
        "# float sum3 = 0;\n\t"
        "# do {\n\t"
        "# sum0 += input[0] * taps[0] - input[1] * taps[1];\n\t"
        "# sum1 += input[0] * taps[1] + input[1] * taps[0];\n\t"
        "# sum2 += input[2] * taps[2] - input[3] * taps[3];\n\t"
        "# sum3 += input[2] * taps[3] + input[3] * taps[2];\n\t"
        "# input += 4;\n\t"
        "# taps += 4; \n\t"
        "# } while (--n_2_ccomplex_blocks != 0);\n\t"
        "# result[0] = sum0 + sum2;\n\t"
        "# result[1] = sum1 + sum3;\n\t"
        "# TODO: prefetch and better scheduling\n\t"
        " xor %%r9, %%r9\n\t"
        " xor %%r10, %%r10\n\t"
        " movq %%rcx, %%rax\n\t"
        " movq %%rcx, %%r8\n\t"
        " movq %[rsi], %%r9\n\t"
        " movq %[rdx], %%r10\n\t"
        " xorps %%xmm6, %%xmm6 # zero accumulators\n\t"
        " xorps %%xmm7, %%xmm7 # zero accumulators\n\t"
        " shr $5, %%rax # rax = n_2_ccomplex_blocks / 2\n\t"
        " shr $4, %%r8\n\t"
        " jmp .%=L1_test\n\t"
        " # 4 taps / loop\n\t"
        " # something like ?? cycles / loop\n\t"
        ".%=Loop1: \n\t"
        "# complex prod: C += A * B, w/ temp Z & Y (or B), xmmPN=$0x8000000080000000\n\t"
        "# movaps (%%r9), %%xmmA\n\t"
        "# movaps (%%r10), %%xmmB\n\t"
        "# movaps %%xmmA, %%xmmZ\n\t"
        "# shufps $0xb1, %%xmmZ, %%xmmZ # swap internals\n\t"
        "# mulps %%xmmB, %%xmmA\n\t"
        "# mulps %%xmmZ, %%xmmB\n\t"
        "# # SSE replacement for: pfpnacc %%xmmB, %%xmmA\n\t"
        "# xorps %%xmmPN, %%xmmA\n\t"
        "# movaps %%xmmA, %%xmmZ\n\t"
        "# unpcklps %%xmmB, %%xmmA\n\t"
        "# unpckhps %%xmmB, %%xmmZ\n\t"
        "# movaps %%xmmZ, %%xmmY\n\t"
        "# shufps $0x44, %%xmmA, %%xmmZ # b01000100\n\t"
        "# shufps $0xee, %%xmmY, %%xmmA # b11101110\n\t"
        "# addps %%xmmZ, %%xmmA\n\t"
        "# addps %%xmmA, %%xmmC\n\t"
        "# A=xmm0, B=xmm2, Z=xmm4\n\t"
        "# A'=xmm1, B'=xmm3, Z'=xmm5\n\t"
        " movaps 0(%%r9), %%xmm0\n\t"
        " movaps 16(%%r9), %%xmm1\n\t"
        " movaps %%xmm0, %%xmm4\n\t"
        " movaps 0(%%r10), %%xmm2\n\t"
        " mulps %%xmm2, %%xmm0\n\t"
        " shufps $0xb1, %%xmm4, %%xmm4 # swap internals\n\t"
        " movaps 16(%%r10), %%xmm3\n\t"
        " movaps %%xmm1, %%xmm5\n\t"
        " addps %%xmm0, %%xmm6\n\t"
        " mulps %%xmm3, %%xmm1\n\t"
        " shufps $0xb1, %%xmm5, %%xmm5 # swap internals\n\t"
        " addps %%xmm1, %%xmm6\n\t"
        " mulps %%xmm4, %%xmm2\n\t"
        " addps %%xmm2, %%xmm7\n\t"
        " mulps %%xmm5, %%xmm3\n\t"
        " add $32, %%r9\n\t"
        " addps %%xmm3, %%xmm7\n\t"
        " add $32, %%r10\n\t"
        ".%=L1_test:\n\t"
        " dec %%rax\n\t"
        " jge .%=Loop1\n\t"
        " # We've handled the bulk of multiplies up to here.\n\t"
        " # Let's see if original n_2_ccomplex_blocks was odd.\n\t"
        " # If so, we've got 2 more taps to do.\n\t"
        " and $1, %%r8\n\t"
        " je .%=Leven\n\t"
        " # The count was odd, do 2 more taps.\n\t"
        " movaps 0(%%r9), %%xmm0\n\t"
        " movaps %%xmm0, %%xmm4\n\t"
        " movaps 0(%%r10), %%xmm2\n\t"
        " mulps %%xmm2, %%xmm0\n\t"
        " shufps $0xb1, %%xmm4, %%xmm4 # swap internals\n\t"
        " addps %%xmm0, %%xmm6\n\t"
        " mulps %%xmm4, %%xmm2\n\t"
        " addps %%xmm2, %%xmm7\n\t"
        ".%=Leven:\n\t"
        " # neg inversor\n\t"
        " xorps %%xmm1, %%xmm1\n\t"
        " mov $0x80000000, %%r9\n\t"
        " movd %%r9, %%xmm1\n\t"
        " shufps $0x11, %%xmm1, %%xmm1 # b00010001 # 0 -0 0 -0\n\t"
        " # pfpnacc\n\t"
        " xorps %%xmm1, %%xmm6\n\t"
        " movaps %%xmm6, %%xmm2\n\t"
        " unpcklps %%xmm7, %%xmm6\n\t"
        " unpckhps %%xmm7, %%xmm2\n\t"
        " movaps %%xmm2, %%xmm3\n\t"
        " shufps $0x44, %%xmm6, %%xmm2 # b01000100\n\t"
        " shufps $0xee, %%xmm3, %%xmm6 # b11101110\n\t"
        " addps %%xmm2, %%xmm6\n\t"
        " # xmm6 = r1 i2 r3 i4\n\t"
        " movhlps %%xmm6, %%xmm4 # xmm4 = r3 i4 ?? ??\n\t"
        " addps %%xmm4, %%xmm6 # xmm6 = r1+r3 i2+i4 ?? ??\n\t"
        " movlps %%xmm6, (%[rdi]) # store low 2x32 bits (complex) "
        "to memory\n\t"
        :
        : [rsi] "r"(input), [rdx] "r"(taps), "c"(num_bytes), [rdi] "r"(result)
        : "rax", "r8", "r9", "r10");


    if (isodd) {
        *result += input[num_points - 1] * taps[num_points - 1];
    }

    return;
}

#endif

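/*
 * The 32-bit SSE path below currently just forwards to the generic kernel;
 * the original i386 inline assembly is kept under "#if 0" for reference.
 */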
#if LV_HAVE_SSE && LV_HAVE_32

static inline void volk_32fc_x2_dot_prod_32fc_a_sse_32(lv_32fc_t* result,
                                                       const lv_32fc_t* input,
                                                       const lv_32fc_t* taps,
                                                       unsigned int num_points)
{

    volk_32fc_x2_dot_prod_32fc_generic(result, input, taps, num_points);

#if 0
    const unsigned int num_bytes = num_points*8;
    unsigned int isodd = num_points & 1;

    __VOLK_ASM __VOLK_VOLATILE
    (
        " #pushl %%ebp\n\t"
        " #movl %%esp, %%ebp\n\t"
        " movl 12(%%ebp), %%eax # input\n\t"
        " movl 16(%%ebp), %%edx # taps\n\t"
        " movl 20(%%ebp), %%ecx # n_bytes\n\t"
        " xorps %%xmm6, %%xmm6 # zero accumulators\n\t"
        " movaps 0(%%eax), %%xmm0\n\t"
        " xorps %%xmm7, %%xmm7 # zero accumulators\n\t"
        " movaps 0(%%edx), %%xmm2\n\t"
        " shrl $5, %%ecx # ecx = n_2_ccomplex_blocks / 2\n\t"
        " jmp .%=L1_test\n\t"
        " # 4 taps / loop\n\t"
        " # something like ?? cycles / loop\n\t"
        ".%=Loop1: \n\t"
        "# complex prod: C += A * B, w/ temp Z & Y (or B), xmmPN=$0x8000000080000000\n\t"
        "# movaps (%%eax), %%xmmA\n\t"
        "# movaps (%%edx), %%xmmB\n\t"
        "# movaps %%xmmA, %%xmmZ\n\t"
        "# shufps $0xb1, %%xmmZ, %%xmmZ # swap internals\n\t"
        "# mulps %%xmmB, %%xmmA\n\t"
        "# mulps %%xmmZ, %%xmmB\n\t"
        "# # SSE replacement for: pfpnacc %%xmmB, %%xmmA\n\t"
        "# xorps %%xmmPN, %%xmmA\n\t"
        "# movaps %%xmmA, %%xmmZ\n\t"
        "# unpcklps %%xmmB, %%xmmA\n\t"
        "# unpckhps %%xmmB, %%xmmZ\n\t"
        "# movaps %%xmmZ, %%xmmY\n\t"
        "# shufps $0x44, %%xmmA, %%xmmZ # b01000100\n\t"
        "# shufps $0xee, %%xmmY, %%xmmA # b11101110\n\t"
        "# addps %%xmmZ, %%xmmA\n\t"
        "# addps %%xmmA, %%xmmC\n\t"
        "# A=xmm0, B=xmm2, Z=xmm4\n\t"
        "# A'=xmm1, B'=xmm3, Z'=xmm5\n\t"
        " movaps 16(%%eax), %%xmm1\n\t"
        " movaps %%xmm0, %%xmm4\n\t"
        " mulps %%xmm2, %%xmm0\n\t"
        " shufps $0xb1, %%xmm4, %%xmm4 # swap internals\n\t"
        " movaps 16(%%edx), %%xmm3\n\t"
        " movaps %%xmm1, %%xmm5\n\t"
        " addps %%xmm0, %%xmm6\n\t"
        " mulps %%xmm3, %%xmm1\n\t"
        " shufps $0xb1, %%xmm5, %%xmm5 # swap internals\n\t"
        " addps %%xmm1, %%xmm6\n\t"
        " mulps %%xmm4, %%xmm2\n\t"
        " movaps 32(%%eax), %%xmm0\n\t"
        " addps %%xmm2, %%xmm7\n\t"
        " mulps %%xmm5, %%xmm3\n\t"
        " addl $32, %%eax\n\t"
        " movaps 32(%%edx), %%xmm2\n\t"
        " addps %%xmm3, %%xmm7\n\t"
        " addl $32, %%edx\n\t"
        ".%=L1_test:\n\t"
        " decl %%ecx\n\t"
        " jge .%=Loop1\n\t"
        " # We've handled the bulk of multiplies up to here.\n\t"
        " # Let's see if original n_2_ccomplex_blocks was odd.\n\t"
        " # If so, we've got 2 more taps to do.\n\t"
        " movl 20(%%ebp), %%ecx # n_2_ccomplex_blocks\n\t"
        " shrl $4, %%ecx\n\t"
        " andl $1, %%ecx\n\t"
        " je .%=Leven\n\t"
        " # The count was odd, do 2 more taps.\n\t"
        " # Note that we've already got mm0/mm2 preloaded\n\t"
        " # from the main loop.\n\t"
        " movaps %%xmm0, %%xmm4\n\t"
        " mulps %%xmm2, %%xmm0\n\t"
        " shufps $0xb1, %%xmm4, %%xmm4 # swap internals\n\t"
        " addps %%xmm0, %%xmm6\n\t"
        " mulps %%xmm4, %%xmm2\n\t"
        " addps %%xmm2, %%xmm7\n\t"
        ".%=Leven:\n\t"
        " # neg inversor\n\t"
        " movl 8(%%ebp), %%eax \n\t"
        " xorps %%xmm1, %%xmm1\n\t"
        " movl $0x80000000, (%%eax)\n\t"
        " movss (%%eax), %%xmm1\n\t"
        " shufps $0x11, %%xmm1, %%xmm1 # b00010001 # 0 -0 0 -0\n\t"
        " # pfpnacc\n\t"
        " xorps %%xmm1, %%xmm6\n\t"
        " movaps %%xmm6, %%xmm2\n\t"
        " unpcklps %%xmm7, %%xmm6\n\t"
        " unpckhps %%xmm7, %%xmm2\n\t"
        " movaps %%xmm2, %%xmm3\n\t"
        " shufps $0x44, %%xmm6, %%xmm2 # b01000100\n\t"
        " shufps $0xee, %%xmm3, %%xmm6 # b11101110\n\t"
        " addps %%xmm2, %%xmm6\n\t"
        " # xmm6 = r1 i2 r3 i4\n\t"
        " #movl 8(%%ebp), %%eax # @result\n\t"
        " movhlps %%xmm6, %%xmm4 # xmm4 = r3 i4 ?? ??\n\t"
        " addps %%xmm4, %%xmm6 # xmm6 = r1+r3 i2+i4 ?? ??\n\t"
        " movlps %%xmm6, (%%eax) # store low 2x32 bits (complex) to memory\n\t"
        " #popl %%ebp\n\t"
        :
        :
        : "eax", "ecx", "edx"
    );


    int getem = num_bytes % 16;

    if(isodd) {
        *result += (input[num_points - 1] * taps[num_points - 1]);
    }

    return;
#endif
}

#endif /*LV_HAVE_SSE*/

#ifdef LV_HAVE_SSE3

#include <pmmintrin.h>

static inline void volk_32fc_x2_dot_prod_32fc_a_sse3(lv_32fc_t* result,
                                                     const lv_32fc_t* input,
                                                     const lv_32fc_t* taps,
                                                     unsigned int num_points)
{

    const unsigned int num_bytes = num_points * 8;
    unsigned int isodd = num_points & 1;

    lv_32fc_t dotProduct;
    memset(&dotProduct, 0x0, 2 * sizeof(float));

    unsigned int number = 0;
    const unsigned int halfPoints = num_bytes >> 4;

    __m128 x, y, yl, yh, z, tmp1, tmp2, dotProdVal;

    const lv_32fc_t* a = input;
    const lv_32fc_t* b = taps;

    dotProdVal = _mm_setzero_ps();

    for (; number < halfPoints; number++) {

        x = _mm_load_ps((float*)a); // Load the ar + ai, br + bi as ar,ai,br,bi
        y = _mm_load_ps((float*)b); // Load the cr + ci, dr + di as cr,ci,dr,di

        yl = _mm_moveldup_ps(y); // Load yl with cr,cr,dr,dr
        yh = _mm_movehdup_ps(y); // Load yh with ci,ci,di,di

        tmp1 = _mm_mul_ps(x, yl); // tmp1 = ar*cr,ai*cr,br*dr,bi*dr

        x = _mm_shuffle_ps(x, x, 0xB1); // Re-arrange x to be ai,ar,bi,br

        tmp2 = _mm_mul_ps(x, yh); // tmp2 = ai*ci,ar*ci,bi*di,br*di

        z = _mm_addsub_ps(tmp1,
                          tmp2); // ar*cr-ai*ci, ai*cr+ar*ci, br*dr-bi*di, bi*dr+br*di

        dotProdVal =
            _mm_add_ps(dotProdVal, z); // Add the complex multiplication results together

        a += 2;
        b += 2;
    }

    __VOLK_ATTR_ALIGNED(16) lv_32fc_t dotProductVector[2];

    _mm_store_ps((float*)dotProductVector,
                 dotProdVal); // Store the results back into the dot product vector

    dotProduct += (dotProductVector[0] + dotProductVector[1]);

    if (isodd) {
        dotProduct += input[num_points - 1] * taps[num_points - 1];
    }

    *result = dotProduct;
}

#endif /*LV_HAVE_SSE3*/


// #ifdef LV_HAVE_SSE4_1

// #include <smmintrin.h>

// static inline void volk_32fc_x2_dot_prod_32fc_a_sse4_1(lv_32fc_t* result,
//                                                        const lv_32fc_t* input,
//                                                        const lv_32fc_t* taps,
//                                                        unsigned int num_points)
// {

//     unsigned int i = 0;
//     const unsigned int qtr_points = num_points / 4;
//     const unsigned int isodd = num_points & 3;

//     __m128 xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, real0, real1, im0, im1;
//     float *p_input, *p_taps;
//     __m64* p_result;

//     static const __m128i neg = { 0x000000000000000080000000 };

//     p_result = (__m64*)result;
//     p_input = (float*)input;
//     p_taps = (float*)taps;

//     real0 = _mm_setzero_ps();
//     real1 = _mm_setzero_ps();
//     im0 = _mm_setzero_ps();
//     im1 = _mm_setzero_ps();

//     for (; i < qtr_points; ++i) {
//         xmm0 = _mm_load_ps(p_input);
//         xmm1 = _mm_load_ps(p_taps);

//         p_input += 4;
//         p_taps += 4;

//         xmm2 = _mm_load_ps(p_input);
//         xmm3 = _mm_load_ps(p_taps);

//         p_input += 4;
//         p_taps += 4;

//         xmm4 = _mm_unpackhi_ps(xmm0, xmm2);
//         xmm5 = _mm_unpackhi_ps(xmm1, xmm3);
//         xmm0 = _mm_unpacklo_ps(xmm0, xmm2);
//         xmm2 = _mm_unpacklo_ps(xmm1, xmm3);

//         // imaginary vector from input
//         xmm1 = _mm_unpackhi_ps(xmm0, xmm4);
//         // real vector from input
//         xmm3 = _mm_unpacklo_ps(xmm0, xmm4);
//         // imaginary vector from taps
//         xmm0 = _mm_unpackhi_ps(xmm2, xmm5);
//         // real vector from taps
//         xmm2 = _mm_unpacklo_ps(xmm2, xmm5);

//         xmm4 = _mm_dp_ps(xmm3, xmm2, 0xf1);
//         xmm5 = _mm_dp_ps(xmm1, xmm0, 0xf1);

//         xmm6 = _mm_dp_ps(xmm3, xmm0, 0xf2);
//         xmm7 = _mm_dp_ps(xmm1, xmm2, 0xf2);

//         real0 = _mm_add_ps(xmm4, real0);
//         real1 = _mm_add_ps(xmm5, real1);
//         im0 = _mm_add_ps(xmm6, im0);
//         im1 = _mm_add_ps(xmm7, im1);
//     }

//     real1 = _mm_xor_ps(real1, bit128_p(&neg)->float_vec);

//     im0 = _mm_add_ps(im0, im1);
//     real0 = _mm_add_ps(real0, real1);

//     im0 = _mm_add_ps(im0, real0);

//     _mm_storel_pi(p_result, im0);

//     for (i = num_points - isodd; i < num_points; i++) {
//         *result += input[i] * taps[i];
//     }
// }

// #endif /*LV_HAVE_SSE4_1*/

#ifdef LV_HAVE_NEON
#include <arm_neon.h>

static inline void volk_32fc_x2_dot_prod_32fc_neon(lv_32fc_t* result,
                                                   const lv_32fc_t* input,
                                                   const lv_32fc_t* taps,
                                                   unsigned int num_points)
{

    unsigned int quarter_points = num_points / 4;
    unsigned int number;

    lv_32fc_t* a_ptr = (lv_32fc_t*)taps;
    lv_32fc_t* b_ptr = (lv_32fc_t*)input;
    // for 2-lane vectors, 1st lane holds the real part,
    // 2nd lane holds the imaginary part
    float32x4x2_t a_val, b_val, c_val, accumulator;
    float32x4x2_t tmp_real, tmp_imag;
    accumulator.val[0] = vdupq_n_f32(0);
    accumulator.val[1] = vdupq_n_f32(0);

    for (number = 0; number < quarter_points; ++number) {
        a_val = vld2q_f32((float*)a_ptr); // a0r|a1r|a2r|a3r || a0i|a1i|a2i|a3i
        b_val = vld2q_f32((float*)b_ptr); // b0r|b1r|b2r|b3r || b0i|b1i|b2i|b3i
        __VOLK_PREFETCH(a_ptr + 8);
        __VOLK_PREFETCH(b_ptr + 8);

        // multiply the real*real and imag*imag to get real result
        // a0r*b0r|a1r*b1r|a2r*b2r|a3r*b3r
        tmp_real.val[0] = vmulq_f32(a_val.val[0], b_val.val[0]);
        // a0i*b0i|a1i*b1i|a2i*b2i|a3i*b3i
        tmp_real.val[1] = vmulq_f32(a_val.val[1], b_val.val[1]);

        // Multiply cross terms to get the imaginary result
        // a0r*b0i|a1r*b1i|a2r*b2i|a3r*b3i
        tmp_imag.val[0] = vmulq_f32(a_val.val[0], b_val.val[1]);
        // a0i*b0r|a1i*b1r|a2i*b2r|a3i*b3r
        tmp_imag.val[1] = vmulq_f32(a_val.val[1], b_val.val[0]);

        c_val.val[0] = vsubq_f32(tmp_real.val[0], tmp_real.val[1]);
        c_val.val[1] = vaddq_f32(tmp_imag.val[0], tmp_imag.val[1]);

        accumulator.val[0] = vaddq_f32(accumulator.val[0], c_val.val[0]);
        accumulator.val[1] = vaddq_f32(accumulator.val[1], c_val.val[1]);

        a_ptr += 4;
        b_ptr += 4;
    }
    lv_32fc_t accum_result[4];
    vst2q_f32((float*)accum_result, accumulator);
    *result = accum_result[0] + accum_result[1] + accum_result[2] + accum_result[3];

    // tail case
    for (number = quarter_points * 4; number < num_points; ++number) {
        *result += (*a_ptr++) * (*b_ptr++);
    }
}
#endif /*LV_HAVE_NEON*/
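
/*
 * Note on the NEON variants: vld2q_f32 deinterleaves four complex samples
 * into one vector of real parts and one of imaginary parts, so the complex
 * product reduces to lane-wise float arithmetic:
 *   real = ar*br - ai*bi,  imag = ar*bi + ai*br
 * The kernels below compute the same sums but schedule the multiplies
 * differently: opttests folds them into multiply-accumulate/subtract
 * (vmlaq/vmlsq), optfma splits the work across two accumulators to break
 * data dependencies, and optfmaunroll unrolls further to eight accumulators
 * fed by vld4q_f32 loads.
 */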

#ifdef LV_HAVE_NEON
#include <arm_neon.h>
static inline void volk_32fc_x2_dot_prod_32fc_neon_opttests(lv_32fc_t* result,
                                                            const lv_32fc_t* input,
                                                            const lv_32fc_t* taps,
                                                            unsigned int num_points)
{

    unsigned int quarter_points = num_points / 4;
    unsigned int number;

    lv_32fc_t* a_ptr = (lv_32fc_t*)taps;
    lv_32fc_t* b_ptr = (lv_32fc_t*)input;
    // for 2-lane vectors, 1st lane holds the real part,
    // 2nd lane holds the imaginary part
    float32x4x2_t a_val, b_val, accumulator;
    float32x4x2_t tmp_imag;
    accumulator.val[0] = vdupq_n_f32(0);
    accumulator.val[1] = vdupq_n_f32(0);

    for (number = 0; number < quarter_points; ++number) {
        a_val = vld2q_f32((float*)a_ptr); // a0r|a1r|a2r|a3r || a0i|a1i|a2i|a3i
        b_val = vld2q_f32((float*)b_ptr); // b0r|b1r|b2r|b3r || b0i|b1i|b2i|b3i
        __VOLK_PREFETCH(a_ptr + 8);
        __VOLK_PREFETCH(b_ptr + 8);

        // do the first multiply
        tmp_imag.val[1] = vmulq_f32(a_val.val[1], b_val.val[0]);
        tmp_imag.val[0] = vmulq_f32(a_val.val[0], b_val.val[0]);

        // use multiply accumulate/subtract to get result
        tmp_imag.val[1] = vmlaq_f32(tmp_imag.val[1], a_val.val[0], b_val.val[1]);
        tmp_imag.val[0] = vmlsq_f32(tmp_imag.val[0], a_val.val[1], b_val.val[1]);

        accumulator.val[0] = vaddq_f32(accumulator.val[0], tmp_imag.val[0]);
        accumulator.val[1] = vaddq_f32(accumulator.val[1], tmp_imag.val[1]);

        // increment pointers
        a_ptr += 4;
        b_ptr += 4;
    }
    lv_32fc_t accum_result[4];
    vst2q_f32((float*)accum_result, accumulator);
    *result = accum_result[0] + accum_result[1] + accum_result[2] + accum_result[3];

    // tail case
    for (number = quarter_points * 4; number < num_points; ++number) {
        *result += (*a_ptr++) * (*b_ptr++);
    }
}
#endif /*LV_HAVE_NEON*/

#ifdef LV_HAVE_NEON
static inline void volk_32fc_x2_dot_prod_32fc_neon_optfma(lv_32fc_t* result,
                                                          const lv_32fc_t* input,
                                                          const lv_32fc_t* taps,
                                                          unsigned int num_points)
{

    unsigned int quarter_points = num_points / 4;
    unsigned int number;

    lv_32fc_t* a_ptr = (lv_32fc_t*)taps;
    lv_32fc_t* b_ptr = (lv_32fc_t*)input;
    // for 2-lane vectors, 1st lane holds the real part,
    // 2nd lane holds the imaginary part
    float32x4x2_t a_val, b_val, accumulator1, accumulator2;
    accumulator1.val[0] = vdupq_n_f32(0);
    accumulator1.val[1] = vdupq_n_f32(0);
    accumulator2.val[0] = vdupq_n_f32(0);
    accumulator2.val[1] = vdupq_n_f32(0);

    for (number = 0; number < quarter_points; ++number) {
        a_val = vld2q_f32((float*)a_ptr); // a0r|a1r|a2r|a3r || a0i|a1i|a2i|a3i
        b_val = vld2q_f32((float*)b_ptr); // b0r|b1r|b2r|b3r || b0i|b1i|b2i|b3i
        __VOLK_PREFETCH(a_ptr + 8);
        __VOLK_PREFETCH(b_ptr + 8);

        // use 2 accumulators to remove inter-instruction data dependencies
        accumulator1.val[0] = vmlaq_f32(accumulator1.val[0], a_val.val[0], b_val.val[0]);
        accumulator1.val[1] = vmlaq_f32(accumulator1.val[1], a_val.val[0], b_val.val[1]);
        accumulator2.val[0] = vmlsq_f32(accumulator2.val[0], a_val.val[1], b_val.val[1]);
        accumulator2.val[1] = vmlaq_f32(accumulator2.val[1], a_val.val[1], b_val.val[0]);
        // increment pointers
        a_ptr += 4;
        b_ptr += 4;
    }
    accumulator1.val[0] = vaddq_f32(accumulator1.val[0], accumulator2.val[0]);
    accumulator1.val[1] = vaddq_f32(accumulator1.val[1], accumulator2.val[1]);
    lv_32fc_t accum_result[4];
    vst2q_f32((float*)accum_result, accumulator1);
    *result = accum_result[0] + accum_result[1] + accum_result[2] + accum_result[3];

    // tail case
    for (number = quarter_points * 4; number < num_points; ++number) {
        *result += (*a_ptr++) * (*b_ptr++);
    }
}
#endif /*LV_HAVE_NEON*/

#ifdef LV_HAVE_NEON
static inline void volk_32fc_x2_dot_prod_32fc_neon_optfmaunroll(lv_32fc_t* result,
                                                                const lv_32fc_t* input,
                                                                const lv_32fc_t* taps,
                                                                unsigned int num_points)
{
    // NOTE: GCC does a poor job with this kernel, but the equivalent ASM code is very
    // fast

    unsigned int quarter_points = num_points / 8;
    unsigned int number;

    lv_32fc_t* a_ptr = (lv_32fc_t*)taps;
    lv_32fc_t* b_ptr = (lv_32fc_t*)input;
    // for 2-lane vectors, 1st lane holds the real part,
    // 2nd lane holds the imaginary part
    float32x4x4_t a_val, b_val, accumulator1, accumulator2;
    float32x4x2_t reduced_accumulator;
    accumulator1.val[0] = vdupq_n_f32(0);
    accumulator1.val[1] = vdupq_n_f32(0);
    accumulator1.val[2] = vdupq_n_f32(0);
    accumulator1.val[3] = vdupq_n_f32(0);
    accumulator2.val[0] = vdupq_n_f32(0);
    accumulator2.val[1] = vdupq_n_f32(0);
    accumulator2.val[2] = vdupq_n_f32(0);
    accumulator2.val[3] = vdupq_n_f32(0);

    // 8 input regs, 8 accumulators -> 16/16 neon regs are used
    for (number = 0; number < quarter_points; ++number) {
        // vld4q deinterleaves with stride 4:
        // val[0]=a0r|a2r|a4r|a6r, val[1]=a0i|a2i|a4i|a6i,
        // val[2]=a1r|a3r|a5r|a7r, val[3]=a1i|a3i|a5i|a7i (same layout for b_val)
        a_val = vld4q_f32((float*)a_ptr);
        b_val = vld4q_f32((float*)b_ptr);
        __VOLK_PREFETCH(a_ptr + 8);
        __VOLK_PREFETCH(b_ptr + 8);

        // use 2 accumulators to remove inter-instruction data dependencies
        accumulator1.val[0] = vmlaq_f32(accumulator1.val[0], a_val.val[0], b_val.val[0]);
        accumulator1.val[1] = vmlaq_f32(accumulator1.val[1], a_val.val[0], b_val.val[1]);

        accumulator1.val[2] = vmlaq_f32(accumulator1.val[2], a_val.val[2], b_val.val[2]);
        accumulator1.val[3] = vmlaq_f32(accumulator1.val[3], a_val.val[2], b_val.val[3]);

        accumulator2.val[0] = vmlsq_f32(accumulator2.val[0], a_val.val[1], b_val.val[1]);
        accumulator2.val[1] = vmlaq_f32(accumulator2.val[1], a_val.val[1], b_val.val[0]);

        accumulator2.val[2] = vmlsq_f32(accumulator2.val[2], a_val.val[3], b_val.val[3]);
        accumulator2.val[3] = vmlaq_f32(accumulator2.val[3], a_val.val[3], b_val.val[2]);
        // increment pointers
        a_ptr += 8;
        b_ptr += 8;
    }
    // reduce 8 accumulator lanes down to 2 (1 real and 1 imag)
    accumulator1.val[0] = vaddq_f32(accumulator1.val[0], accumulator1.val[2]);
    accumulator1.val[1] = vaddq_f32(accumulator1.val[1], accumulator1.val[3]);
    accumulator2.val[0] = vaddq_f32(accumulator2.val[0], accumulator2.val[2]);
    accumulator2.val[1] = vaddq_f32(accumulator2.val[1], accumulator2.val[3]);
    reduced_accumulator.val[0] = vaddq_f32(accumulator1.val[0], accumulator2.val[0]);
    reduced_accumulator.val[1] = vaddq_f32(accumulator1.val[1], accumulator2.val[1]);
    // now reduce accumulators to scalars
    lv_32fc_t accum_result[4];
    vst2q_f32((float*)accum_result, reduced_accumulator);
    *result = accum_result[0] + accum_result[1] + accum_result[2] + accum_result[3];

    // tail case
    for (number = quarter_points * 8; number < num_points; ++number) {
        *result += (*a_ptr++) * (*b_ptr++);
    }
}
#endif /*LV_HAVE_NEON*/


#ifdef LV_HAVE_AVX

#include <immintrin.h>

static inline void volk_32fc_x2_dot_prod_32fc_a_avx(lv_32fc_t* result,
                                                    const lv_32fc_t* input,
                                                    const lv_32fc_t* taps,
                                                    unsigned int num_points)
{

    unsigned int isodd = num_points & 3;
    unsigned int i = 0;
    lv_32fc_t dotProduct;
    memset(&dotProduct, 0x0, 2 * sizeof(float));

    unsigned int number = 0;
    const unsigned int quarterPoints = num_points / 4;

    __m256 x, y, yl, yh, z, tmp1, tmp2, dotProdVal;

    const lv_32fc_t* a = input;
    const lv_32fc_t* b = taps;

    dotProdVal = _mm256_setzero_ps();

    for (; number < quarterPoints; number++) {

        x = _mm256_load_ps((float*)a); // Load a,b,e,f as ar,ai,br,bi,er,ei,fr,fi
        y = _mm256_load_ps((float*)b); // Load c,d,g,h as cr,ci,dr,di,gr,gi,hr,hi

        yl = _mm256_moveldup_ps(y); // Load yl with cr,cr,dr,dr,gr,gr,hr,hr
        yh = _mm256_movehdup_ps(y); // Load yh with ci,ci,di,di,gi,gi,hi,hi

        tmp1 = _mm256_mul_ps(x, yl); // tmp1 = ar*cr,ai*cr,br*dr,bi*dr ...

        x = _mm256_shuffle_ps(x, x, 0xB1); // Re-arrange x to be ai,ar,bi,br,ei,er,fi,fr

        tmp2 = _mm256_mul_ps(x, yh); // tmp2 = ai*ci,ar*ci,bi*di,br*di ...

        z = _mm256_addsub_ps(tmp1,
                             tmp2); // ar*cr-ai*ci, ai*cr+ar*ci, br*dr-bi*di, bi*dr+br*di

        dotProdVal = _mm256_add_ps(dotProdVal,
                                   z); // Add the complex multiplication results together

        a += 4;
        b += 4;
    }

    __VOLK_ATTR_ALIGNED(32) lv_32fc_t dotProductVector[4];

    _mm256_store_ps((float*)dotProductVector,
                    dotProdVal); // Store the results back into the dot product vector

    dotProduct += (dotProductVector[0] + dotProductVector[1] + dotProductVector[2] +
                   dotProductVector[3]);

    for (i = num_points - isodd; i < num_points; i++) {
        dotProduct += input[i] * taps[i];
    }

    *result = dotProduct;
}

#endif /*LV_HAVE_AVX*/

#if LV_HAVE_AVX && LV_HAVE_FMA
#include <immintrin.h>

static inline void volk_32fc_x2_dot_prod_32fc_a_avx_fma(lv_32fc_t* result,
                                                        const lv_32fc_t* input,
                                                        const lv_32fc_t* taps,
                                                        unsigned int num_points)
{

    unsigned int isodd = num_points & 3;
    unsigned int i = 0;
    lv_32fc_t dotProduct;
    memset(&dotProduct, 0x0, 2 * sizeof(float));

    unsigned int number = 0;
    const unsigned int quarterPoints = num_points / 4;

    __m256 x, y, yl, yh, z, tmp1, tmp2, dotProdVal;

    const lv_32fc_t* a = input;
    const lv_32fc_t* b = taps;

    dotProdVal = _mm256_setzero_ps();

    for (; number < quarterPoints; number++) {

        x = _mm256_load_ps((float*)a); // Load a,b,e,f as ar,ai,br,bi,er,ei,fr,fi
        y = _mm256_load_ps((float*)b); // Load c,d,g,h as cr,ci,dr,di,gr,gi,hr,hi

        yl = _mm256_moveldup_ps(y); // Load yl with cr,cr,dr,dr,gr,gr,hr,hr
        yh = _mm256_movehdup_ps(y); // Load yh with ci,ci,di,di,gi,gi,hi,hi

        tmp1 = x;

        x = _mm256_shuffle_ps(x, x, 0xB1); // Re-arrange x to be ai,ar,bi,br,ei,er,fi,fr

        tmp2 = _mm256_mul_ps(x, yh); // tmp2 = ai*ci,ar*ci,bi*di,br*di ...

        z = _mm256_fmaddsub_ps(
            tmp1, yl, tmp2); // ar*cr-ai*ci, ai*cr+ar*ci, br*dr-bi*di, bi*dr+br*di

        dotProdVal = _mm256_add_ps(dotProdVal,
                                   z); // Add the complex multiplication results together

        a += 4;
        b += 4;
    }

    __VOLK_ATTR_ALIGNED(32) lv_32fc_t dotProductVector[4];

    _mm256_store_ps((float*)dotProductVector,
                    dotProdVal); // Store the results back into the dot product vector

    dotProduct += (dotProductVector[0] + dotProductVector[1] + dotProductVector[2] +
                   dotProductVector[3]);

    for (i = num_points - isodd; i < num_points; i++) {
        dotProduct += input[i] * taps[i];
    }

    *result = dotProduct;
}

#endif /*LV_HAVE_AVX && LV_HAVE_FMA*/


#endif /*INCLUDED_volk_32fc_x2_dot_prod_32fc_a_H*/