#ifndef INCLUDED_volk_16i_permute_and_scalar_add_a_H
#define INCLUDED_volk_16i_permute_and_scalar_add_a_H
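/*!
 * \brief Permutes the input vector by permute_indexes, then adds four
 * control-masked scalars to each element:
 *
 *   target[i] = src0[permute_indexes[i]] + (cntl0[i] & scalars[0])
 *             + (cntl1[i] & scalars[1]) + (cntl2[i] & scalars[2])
 *             + (cntl3[i] & scalars[3])
 *
 * All buffers hold 16-bit integers. This is the aligned (_a) variant, so
 * the SSE2 path assumes 16-byte-aligned buffers (e.g. allocated with
 * volk_malloc using volk_get_alignment()).
 */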
#include <inttypes.h>
#include <stdio.h>

#ifdef LV_HAVE_SSE2

#include <emmintrin.h>
#include <xmmintrin.h>

static inline void volk_16i_permute_and_scalar_add_a_sse2(short* target,
                                                          short* src0,
                                                          short* permute_indexes,
                                                          short* cntl0,
                                                          short* cntl1,
                                                          short* cntl2,
                                                          short* cntl3,
                                                          short* scalars,
                                                          unsigned int num_points)
{
    const unsigned int num_bytes = num_points * 2;

    __m128i xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7;

    __m128i *p_target, *p_cntl0, *p_cntl1, *p_cntl2, *p_cntl3, *p_scalars;

    short* p_permute_indexes = permute_indexes;

    p_target = (__m128i*)target;
    p_cntl0 = (__m128i*)cntl0;
    p_cntl1 = (__m128i*)cntl1;
    p_cntl2 = (__m128i*)cntl2;
    p_cntl3 = (__m128i*)cntl3;
    p_scalars = (__m128i*)scalars;
    int i = 0;

    /* Number of full 8-sample (16-byte) vector iterations, and the
       0-7 leftover samples handled by the scalar tail loop. */
    int bound = (num_bytes >> 4);
    int leftovers = (num_bytes >> 1) & 7;
    xmm0 = _mm_load_si128(p_scalars);
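    /* Broadcast scalars[0..3] into every 16-bit lane of xmm1..xmm4:
       shufflelo replicates the selected word across the low 64 bits,
       then shuffle_epi32 replicates that across the full register. */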
    xmm1 = _mm_shufflelo_epi16(xmm0, 0);
    xmm2 = _mm_shufflelo_epi16(xmm0, 0x55);
    xmm3 = _mm_shufflelo_epi16(xmm0, 0xaa);
    xmm4 = _mm_shufflelo_epi16(xmm0, 0xff);

    xmm1 = _mm_shuffle_epi32(xmm1, 0x00);
    xmm2 = _mm_shuffle_epi32(xmm2, 0x00);
    xmm3 = _mm_shuffle_epi32(xmm3, 0x00);
    xmm4 = _mm_shuffle_epi32(xmm4, 0x00);
    for (; i < bound; ++i) {
        xmm0 = _mm_setzero_si128();
        xmm5 = _mm_setzero_si128();
        xmm6 = _mm_setzero_si128();
        xmm7 = _mm_setzero_si128();
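        /* Gather eight permuted 16-bit samples from src0; the inserts are
           spread across four registers so they can issue independently. */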
        xmm0 = _mm_insert_epi16(xmm0, src0[p_permute_indexes[0]], 0);
        xmm5 = _mm_insert_epi16(xmm5, src0[p_permute_indexes[1]], 1);
        xmm6 = _mm_insert_epi16(xmm6, src0[p_permute_indexes[2]], 2);
        xmm7 = _mm_insert_epi16(xmm7, src0[p_permute_indexes[3]], 3);
        xmm0 = _mm_insert_epi16(xmm0, src0[p_permute_indexes[4]], 4);
        xmm5 = _mm_insert_epi16(xmm5, src0[p_permute_indexes[5]], 5);
        xmm6 = _mm_insert_epi16(xmm6, src0[p_permute_indexes[6]], 6);
        xmm7 = _mm_insert_epi16(xmm7, src0[p_permute_indexes[7]], 7);
        xmm0 = _mm_add_epi16(xmm0, xmm5);
        xmm6 = _mm_add_epi16(xmm6, xmm7);

        p_permute_indexes += 8;

        xmm0 = _mm_add_epi16(xmm0, xmm6);
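        /* AND each control vector with its broadcast scalar, so a lane
           picks up scalars[k] only where the control mask bits are set. */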
        xmm5 = _mm_load_si128(p_cntl0);
        xmm6 = _mm_load_si128(p_cntl1);
        xmm7 = _mm_load_si128(p_cntl2);

        xmm5 = _mm_and_si128(xmm5, xmm1);
        xmm6 = _mm_and_si128(xmm6, xmm2);
        xmm7 = _mm_and_si128(xmm7, xmm3);

        xmm0 = _mm_add_epi16(xmm0, xmm5);
        xmm5 = _mm_load_si128(p_cntl3);
        xmm6 = _mm_add_epi16(xmm6, xmm7);
        p_cntl0++;
        xmm5 = _mm_and_si128(xmm5, xmm4);
        xmm0 = _mm_add_epi16(xmm0, xmm6);
        p_cntl1++;
        p_cntl2++;
        xmm0 = _mm_add_epi16(xmm0, xmm5);
        p_cntl3++;

        _mm_store_si128(p_target, xmm0);
        p_target++;
    }
    /* Scalar tail for the 0-7 samples that don't fill a full vector. */
    for (i = bound * 8; i < (bound * 8) + leftovers; ++i) {
        target[i] = src0[permute_indexes[i]] + (cntl0[i] & scalars[0]) +
                    (cntl1[i] & scalars[1]) + (cntl2[i] & scalars[2]) +
                    (cntl3[i] & scalars[3]);
    }
}

#endif /*LV_HAVE_SSE2*/

#ifdef LV_HAVE_GENERIC
static inline void volk_16i_permute_and_scalar_add_generic(short* target,
                                                           short* src0,
                                                           short* permute_indexes,
                                                           short* cntl0,
                                                           short* cntl1,
                                                           short* cntl2,
                                                           short* cntl3,
                                                           short* scalars,
                                                           unsigned int num_points)
{
    const unsigned int num_bytes = num_points * 2;

    int i = 0;

    int bound = num_bytes >> 1; /* total number of 16-bit samples */
    for (i = 0; i < bound; ++i) {
        target[i] = src0[permute_indexes[i]] + (cntl0[i] & scalars[0]) +
                    (cntl1[i] & scalars[1]) + (cntl2[i] & scalars[2]) +
                    (cntl3[i] & scalars[3]);
    }
}

#endif /*LV_HAVE_GENERIC*/

#endif /*INCLUDED_volk_16i_permute_and_scalar_add_a_H*/