#ifndef INCLUDED_volk_32fc_x2_square_dist_32f_a_H
#define INCLUDED_volk_32fc_x2_square_dist_32f_a_H

#include <volk/volk_complex.h>
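/*
 * Kernel overview: volk_32fc_x2_square_dist_32f writes, for every complex
 * entry of the points vector, the squared Euclidean distance to the single
 * reference point src0[0]:  target[i] = |src0[0] - points[i]|^2.
 *
 * A minimal usage sketch follows; it assumes the generated dispatcher
 * volk_32fc_x2_square_dist_32f() and the volk_malloc()/volk_free()/
 * volk_get_alignment()/lv_cmake() helpers, and the QPSK constellation values
 * are illustrative only.  It shows the typical use case: hard-deciding a
 * received symbol to the closest constellation point.
 *
 *   unsigned int n = 4, i, best = 0;
 *   size_t al = volk_get_alignment();
 *   lv_32fc_t* rx = (lv_32fc_t*)volk_malloc(sizeof(lv_32fc_t), al);
 *   lv_32fc_t* constellation = (lv_32fc_t*)volk_malloc(n * sizeof(lv_32fc_t), al);
 *   float* dists = (float*)volk_malloc(n * sizeof(float), al);
 *
 *   rx[0] = lv_cmake(0.9f, -1.1f);
 *   constellation[0] = lv_cmake(1.f, 1.f);
 *   constellation[1] = lv_cmake(1.f, -1.f);
 *   constellation[2] = lv_cmake(-1.f, 1.f);
 *   constellation[3] = lv_cmake(-1.f, -1.f);
 *
 *   volk_32fc_x2_square_dist_32f(dists, rx, constellation, n);
 *
 *   for (i = 1; i < n; ++i)
 *       if (dists[i] < dists[best])
 *           best = i;
 *   // constellation[best] is the hard decision for rx[0]
 *
 *   volk_free(rx);
 *   volk_free(constellation);
 *   volk_free(dists);
 */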
#ifdef LV_HAVE_AVX2
#include <immintrin.h>

static inline void volk_32fc_x2_square_dist_32f_a_avx2(float* target,
                                                        lv_32fc_t* src0,
                                                        lv_32fc_t* points,
                                                        unsigned int num_points)
{
    const unsigned int num_bytes = num_points * 8;

    __m128 xmm0, xmm9, xmm10;
    __m256 xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7;

    lv_32fc_t diff;
    float sq_dist;
    int bound = num_bytes >> 6;
    int leftovers0 = (num_bytes >> 5) & 1;
    int leftovers1 = (num_bytes >> 4) & 1;
    int leftovers2 = (num_bytes >> 3) & 1;
    int i = 0;

    __m256i idx = _mm256_set_epi32(7, 6, 3, 2, 5, 4, 1, 0);
    xmm1 = _mm256_setzero_ps();
    xmm0 = _mm_loadl_pi(_mm_setzero_ps(), (__m64*)src0); /* low half = the reference point */
    xmm0 = _mm_permute_ps(xmm0, 0b01000100);             /* duplicate it: re, im, re, im */
    xmm1 = _mm256_insertf128_ps(xmm1, xmm0, 0);
    xmm1 = _mm256_insertf128_ps(xmm1, xmm0, 1);
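    /*
     * xmm1 now holds the reference point replicated four times:
     * [re, im, re, im, re, im, re, im].  The main loop below consumes eight
     * complex points (64 bytes) per iteration, hence bound = num_bytes >> 6;
     * leftovers0/1/2 are the binary decomposition of the remainder (4, 2 and
     * 1 points respectively).
     *
     * _mm256_hadd_ps sums each (re^2, im^2) pair but interleaves the results
     * of its two operands within each 128-bit lane, so the permute with
     * idx = {0, 1, 4, 5, 2, 3, 6, 7} (element order) restores the original
     * point order before the store.
     */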
    for (; i < bound; ++i) {
        xmm2 = _mm256_load_ps((float*)&points[0]);
        xmm3 = _mm256_load_ps((float*)&points[4]);

        points += 8;

        xmm4 = _mm256_sub_ps(xmm1, xmm2);
        xmm5 = _mm256_sub_ps(xmm1, xmm3);
        xmm6 = _mm256_mul_ps(xmm4, xmm4);
        xmm7 = _mm256_mul_ps(xmm5, xmm5);

        xmm4 = _mm256_hadd_ps(xmm6, xmm7);
        xmm4 = _mm256_permutevar8x32_ps(xmm4, idx);

        _mm256_store_ps(target, xmm4);

        target += 8;
    }
    for (i = 0; i < leftovers0; ++i) {
        /* four remaining points */
        xmm2 = _mm256_load_ps((float*)&points[0]);

        xmm4 = _mm256_sub_ps(xmm1, xmm2);

        points += 4;

        xmm6 = _mm256_mul_ps(xmm4, xmm4);

        xmm4 = _mm256_hadd_ps(xmm6, xmm6);
        xmm4 = _mm256_permutevar8x32_ps(xmm4, idx);

        xmm9 = _mm256_extractf128_ps(xmm4, 1);
        _mm_store_ps(target, xmm9);

        target += 4;
    }
    for (i = 0; i < leftovers1; ++i) {
        /* two remaining points, handled with 128-bit operations */
        xmm9 = _mm_load_ps((float*)&points[0]);
        xmm10 = _mm_sub_ps(xmm0, xmm9);
        points += 2;
        xmm9 = _mm_mul_ps(xmm10, xmm10);
        xmm10 = _mm_hadd_ps(xmm9, xmm9);
        _mm_storeh_pi((__m64*)target, xmm10);
        target += 2;
    }

    for (i = 0; i < leftovers2; ++i) {
        /* final odd point, handled in scalar code */
        diff = src0[0] - points[0];
        sq_dist = lv_creal(diff) * lv_creal(diff) + lv_cimag(diff) * lv_cimag(diff);
        target[0] = sq_dist;
    }
}
#endif /*LV_HAVE_AVX2*/
#ifdef LV_HAVE_SSE3
#include <pmmintrin.h>
#include <xmmintrin.h>

static inline void volk_32fc_x2_square_dist_32f_a_sse3(float* target,
                                                        lv_32fc_t* src0,
                                                        lv_32fc_t* points,
                                                        unsigned int num_points)
{
    const unsigned int num_bytes = num_points * 8;

    __m128 xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7;

    lv_32fc_t diff;
    float sq_dist;
    int bound = num_bytes >> 5;
    int i = 0;

    /* replicate the reference point: [re, im, re, im] */
    xmm1 = _mm_setzero_ps();
    xmm1 = _mm_loadl_pi(xmm1, (__m64*)src0);
    xmm1 = _mm_movelh_ps(xmm1, xmm1);

    for (; i < bound; ++i) {
        /* four points (two per register) per iteration */
        xmm2 = _mm_load_ps((float*)&points[0]);
        xmm3 = _mm_load_ps((float*)&points[2]);
        xmm4 = _mm_sub_ps(xmm1, xmm2);
        xmm5 = _mm_sub_ps(xmm1, xmm3);
        points += 4;
        xmm6 = _mm_mul_ps(xmm4, xmm4);
        xmm7 = _mm_mul_ps(xmm5, xmm5);
        xmm4 = _mm_hadd_ps(xmm6, xmm7);
        _mm_store_ps(target, xmm4);
        target += 4;
    }

    if (num_bytes >> 4 & 1) {
        /* two remaining points */
        xmm2 = _mm_load_ps((float*)&points[0]);
        xmm4 = _mm_sub_ps(xmm1, xmm2);
        points += 2;
        xmm6 = _mm_mul_ps(xmm4, xmm4);
        xmm4 = _mm_hadd_ps(xmm6, xmm6);
        _mm_storeh_pi((__m64*)target, xmm4);
        target += 2;
    }

    if (num_bytes >> 3 & 1) {
        /* final odd point */
        diff = src0[0] - points[0];
        sq_dist = lv_creal(diff) * lv_creal(diff) + lv_cimag(diff) * lv_cimag(diff);
        target[0] = sq_dist;
    }
}
#endif /*LV_HAVE_SSE3*/
#ifdef LV_HAVE_NEON
#include <arm_neon.h>

static inline void volk_32fc_x2_square_dist_32f_neon(float* target,
                                                     lv_32fc_t* src0,
                                                     lv_32fc_t* points,
                                                     unsigned int num_points)
{
    const unsigned int quarter_points = num_points / 4;
    unsigned int number;
    lv_32fc_t diff;

    float32x4x2_t a_vec, b_vec;
    float32x4x2_t diff_vec;
    float32x4_t tmp, tmp1, dist_sq;
    /* broadcast the real and imaginary parts of the reference point */
    a_vec.val[0] = vdupq_n_f32(lv_creal(src0[0]));
    a_vec.val[1] = vdupq_n_f32(lv_cimag(src0[0]));
    for (number = 0; number < quarter_points; ++number) {
        /* de-interleaving load: val[0] = four real parts, val[1] = four imaginary parts */
        b_vec = vld2q_f32((float*)points);
        diff_vec.val[0] = vsubq_f32(a_vec.val[0], b_vec.val[0]);
        diff_vec.val[1] = vsubq_f32(a_vec.val[1], b_vec.val[1]);
        tmp = vmulq_f32(diff_vec.val[0], diff_vec.val[0]);
        tmp1 = vmulq_f32(diff_vec.val[1], diff_vec.val[1]);

        dist_sq = vaddq_f32(tmp, tmp1);
        vst1q_f32(target, dist_sq);
        points += 4;
        target += 4;
    }
    for (number = quarter_points * 4; number < num_points; ++number) {
        diff = src0[0] - *points++;
        *target++ = lv_creal(diff) * lv_creal(diff) + lv_cimag(diff) * lv_cimag(diff);
    }
}
#endif /*LV_HAVE_NEON*/
#ifdef LV_HAVE_GENERIC
static inline void volk_32fc_x2_square_dist_32f_generic(float* target,
                                                        lv_32fc_t* src0,
                                                        lv_32fc_t* points,
                                                        unsigned int num_points)
{
    const unsigned int num_bytes = num_points * 8;

    lv_32fc_t diff;
    float sq_dist;
    unsigned int i = 0;

    for (; i < num_bytes >> 3; ++i) {
        diff = src0[0] - points[i];
        sq_dist = lv_creal(diff) * lv_creal(diff) + lv_cimag(diff) * lv_cimag(diff);
        target[i] = sq_dist;
    }
}
#endif /*LV_HAVE_GENERIC*/

#endif /*INCLUDED_volk_32fc_x2_square_dist_32f_a_H*/
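/*
 * The second include guard below holds the unaligned variants of the same
 * kernel: the arithmetic is identical, but the loads and stores use
 * _mm256_loadu_ps / _mm256_storeu_ps so the input and output buffers do not
 * have to be 32-byte aligned.
 */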
#ifndef INCLUDED_volk_32fc_x2_square_dist_32f_u_H
#define INCLUDED_volk_32fc_x2_square_dist_32f_u_H

#include <volk/volk_complex.h>

#ifdef LV_HAVE_AVX2
#include <immintrin.h>

static inline void volk_32fc_x2_square_dist_32f_u_avx2(float* target,
                                                        lv_32fc_t* src0,
                                                        lv_32fc_t* points,
                                                        unsigned int num_points)
{
    const unsigned int num_bytes = num_points * 8;

    __m128 xmm0, xmm9;
    __m256 xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7;

    lv_32fc_t diff;
    float sq_dist;
    int bound = num_bytes >> 6;
    int leftovers1 = (num_bytes >> 3) & 0b11;
    int i = 0;

    __m256i idx = _mm256_set_epi32(7, 6, 3, 2, 5, 4, 1, 0);
    xmm1 = _mm256_setzero_ps();
    xmm0 = _mm_loadl_pi(_mm_setzero_ps(), (__m64*)src0); /* low half = the reference point */
    xmm0 = _mm_permute_ps(xmm0, 0b01000100);             /* duplicate it: re, im, re, im */
    xmm1 = _mm256_insertf128_ps(xmm1, xmm0, 0);
    xmm1 = _mm256_insertf128_ps(xmm1, xmm0, 1);
    for (; i < bound; ++i) {
        xmm2 = _mm256_loadu_ps((float*)&points[0]);
        xmm3 = _mm256_loadu_ps((float*)&points[4]);

        points += 8;

        xmm4 = _mm256_sub_ps(xmm1, xmm2);
        xmm5 = _mm256_sub_ps(xmm1, xmm3);
        xmm6 = _mm256_mul_ps(xmm4, xmm4);
        xmm7 = _mm256_mul_ps(xmm5, xmm5);

        xmm4 = _mm256_hadd_ps(xmm6, xmm7);
        xmm4 = _mm256_permutevar8x32_ps(xmm4, idx);

        _mm256_storeu_ps(target, xmm4);

        target += 8;
    }
    if (num_bytes >> 5 & 1) {
        /* four remaining points */
        xmm2 = _mm256_loadu_ps((float*)&points[0]);
        xmm4 = _mm256_sub_ps(xmm1, xmm2);
        points += 4;
        xmm6 = _mm256_mul_ps(xmm4, xmm4);
        xmm4 = _mm256_hadd_ps(xmm6, xmm6);
        xmm4 = _mm256_permutevar8x32_ps(xmm4, idx);
        xmm9 = _mm256_extractf128_ps(xmm4, 1);
        _mm_storeu_ps(target, xmm9);
        target += 4;
    }
    for (i = 0; i < leftovers1; ++i) {
        /* up to three remaining points, handled in scalar code */
        diff = src0[0] - points[0];
        sq_dist = lv_creal(diff) * lv_creal(diff) + lv_cimag(diff) * lv_cimag(diff);
        target[0] = sq_dist;
        points += 1;
        target += 1;
    }
}
#endif /*LV_HAVE_AVX2*/

#endif /*INCLUDED_volk_32fc_x2_square_dist_32f_u_H*/