#ifndef INCLUDED_volk_16i_x4_quad_max_star_16i_a_H
#define INCLUDED_volk_16i_x4_quad_max_star_16i_a_H
#ifdef LV_HAVE_SSE2
#include <emmintrin.h>

static inline void volk_16i_x4_quad_max_star_16i_a_sse2(short* target,
                                                         short* src0,
                                                         short* src1,
                                                         short* src2,
                                                         short* src3,
                                                         unsigned int num_points)
{
    const unsigned int num_bytes = num_points * 2;

    int i = 0;

    int bound = (num_bytes >> 4);          /* full 16-byte (8-sample) SSE2 blocks */
    int bound_copy = bound;
    int leftovers = (num_bytes >> 1) & 7;  /* samples left for the scalar tail */

    __m128i *p_target, *p_src0, *p_src1, *p_src2, *p_src3;

    p_target = (__m128i*)target;
    p_src0 = (__m128i*)src0;
    p_src1 = (__m128i*)src1;
    p_src2 = (__m128i*)src2;
    p_src3 = (__m128i*)src3;

    __m128i xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8;

    while (bound_copy > 0) {
        /* ... SSE2 compare/select body, 8 samples per iteration ... */
        bound_copy -= 1;
    }

    short temp0 = 0;
    short temp1 = 0;

    /* scalar tail for the samples that do not fill a complete SSE2 block */
    for (i = bound * 8; i < (bound * 8) + leftovers; ++i) {
        temp0 = ((short)(src0[i] - src1[i]) > 0) ? src0[i] : src1[i];
        temp1 = ((short)(src2[i] - src3[i]) > 0) ? src2[i] : src3[i];
        target[i] = ((short)(temp0 - temp1) > 0) ? temp0 : temp1;
    }
}

#endif /*LV_HAVE_SSE2*/
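Since the vectorized body is elided in this listing, the split between the SIMD blocks and the scalar tail is easy to lose track of. The following self-check is only an illustration of that arithmetic (the helper name and the assert are not part of VOLK): for 16-bit samples, `bound * 8` entries are handled by full SSE2 blocks and `leftovers` entries fall through to the tail loop.

#include <assert.h>

/* Illustration only (not in VOLK): the SSE2 path's block/tail split
 * covers every input sample exactly once. */
static void check_sse2_partition(unsigned int num_points)
{
    const unsigned int num_bytes = num_points * 2;  /* 16-bit samples */
    int bound = num_bytes >> 4;                     /* full 16-byte blocks */
    int leftovers = (num_bytes >> 1) & 7;           /* tail sample count */
    assert((unsigned int)(bound * 8 + leftovers) == num_points);
}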
#ifdef LV_HAVE_NEON
#include <arm_neon.h>

static inline void volk_16i_x4_quad_max_star_16i_neon(short* target,
                                                       short* src0,
                                                       short* src1,
                                                       short* src2,
                                                       short* src3,
                                                       unsigned int num_points)
{
    const unsigned int eighth_points = num_points / 8;
    unsigned int i;

    int16x8_t src0_vec, src1_vec, src2_vec, src3_vec;
    int16x8_t diff12, diff34;
    int16x8_t comp0, comp1, comp2, comp3;
    int16x8_t result1_vec, result2_vec;
    int16x8_t zeros;

    zeros = vdupq_n_s16(0);
    for (i = 0; i < eighth_points; ++i) {
        src0_vec = vld1q_s16(src0);
        src1_vec = vld1q_s16(src1);
        src2_vec = vld1q_s16(src2);
        src3_vec = vld1q_s16(src3);

        /* sign of each pairwise difference selects the larger operand */
        diff12 = vsubq_s16(src0_vec, src1_vec);
        diff34 = vsubq_s16(src2_vec, src3_vec);
        comp0 = (int16x8_t)vcgeq_s16(diff12, zeros);
        comp1 = (int16x8_t)vcltq_s16(diff12, zeros);
        comp2 = (int16x8_t)vcgeq_s16(diff34, zeros);
        comp3 = (int16x8_t)vcltq_s16(diff34, zeros);

        /* branchless select: mask each operand, then merge lanes by adding */
        comp0 = vandq_s16(src0_vec, comp0);
        comp1 = vandq_s16(src1_vec, comp1);
        comp2 = vandq_s16(src2_vec, comp2);
        comp3 = vandq_s16(src3_vec, comp3);

        result1_vec = vaddq_s16(comp0, comp1);
        result2_vec = vaddq_s16(comp2, comp3);

        /* second stage: pick the larger of the two pairwise winners */
        diff12 = vsubq_s16(result1_vec, result2_vec);
        comp0 = (int16x8_t)vcgeq_s16(diff12, zeros);
        comp1 = (int16x8_t)vcltq_s16(diff12, zeros);
        comp0 = vandq_s16(result1_vec, comp0);
        comp1 = vandq_s16(result2_vec, comp1);
        result1_vec = vaddq_s16(comp0, comp1);

        vst1q_s16(target, result1_vec);

        src0 += 8;
        src1 += 8;
        src2 += 8;
        src3 += 8;
        target += 8;
    }

    /* scalar tail for the remaining (num_points % 8) samples */
    short temp0 = 0;
    short temp1 = 0;
    for (i = eighth_points * 8; i < num_points; ++i) {
        temp0 = ((short)(*src0 - *src1) > 0) ? *src0 : *src1;
        temp1 = ((short)(*src2 - *src3) > 0) ? *src2 : *src3;
        *target++ = ((short)(temp0 - temp1) > 0) ? temp0 : temp1;
        src0++;
        src1++;
        src2++;
        src3++;
    }
}

#endif /*LV_HAVE_NEON*/
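The NEON loop never branches on the comparison results: vcgeq_s16/vcltq_s16 build all-ones or all-zeros lane masks, vandq_s16 keeps exactly one operand per lane, and vaddq_s16 merges the masked values. A minimal scalar sketch of that per-lane select, using a hypothetical helper name and the same wrap-around 16-bit subtraction, could look like this:

#include <stdint.h>

/* Hypothetical illustration (not part of this header): one lane of the
 * mask-and-add select performed by the NEON loop above. */
static inline int16_t quad_max_star_select_lane(int16_t a, int16_t b)
{
    /* all-ones mask when (a - b) >= 0 under wrap-around arithmetic, else zero */
    int16_t ge_mask = ((int16_t)(a - b) >= 0) ? (int16_t)-1 : 0;
    int16_t lt_mask = (int16_t)~ge_mask;
    /* exactly one operand survives its mask; adding the two merges them */
    return (int16_t)((a & ge_mask) + (b & lt_mask));
}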
#ifdef LV_HAVE_GENERIC

static inline void volk_16i_x4_quad_max_star_16i_generic(short* target,
                                                          short* src0,
                                                          short* src1,
                                                          short* src2,
                                                          short* src3,
                                                          unsigned int num_points)
{
    const unsigned int num_bytes = num_points * 2;

    int i = 0;
    int bound = num_bytes >> 1;  /* number of 16-bit samples */

    short temp0 = 0;
    short temp1 = 0;

    for (i = 0; i < bound; ++i) {
        temp0 = ((short)(src0[i] - src1[i]) > 0) ? src0[i] : src1[i];
        temp1 = ((short)(src2[i] - src3[i]) > 0) ? src2[i] : src3[i];
        target[i] = ((short)(temp0 - temp1) > 0) ? temp0 : temp1;
    }
}

#endif /*LV_HAVE_GENERIC*/

#endif /*INCLUDED_volk_16i_x4_quad_max_star_16i_a_H*/
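For orientation, here is a minimal usage sketch, not part of the header itself: it calls the generic kernel defined above on small stack buffers (assuming LV_HAVE_GENERIC is defined); applications would normally go through the volk_16i_x4_quad_max_star_16i dispatcher from <volk/volk.h> instead.

#include <stdio.h>

int main(void)
{
    short src0[8] = { 1, 5, -3, 7, 0, 2, 9, -1 };
    short src1[8] = { 4, 2, -8, 6, 1, 3, 8, -2 };
    short src2[8] = { 0, 9, -1, 5, 2, 1, 7, -4 };
    short src3[8] = { 3, 1, -2, 8, 4, 0, 6, -3 };
    short target[8];

    /* per index: pick the larger of each pair, then the larger of the two winners */
    volk_16i_x4_quad_max_star_16i_generic(target, src0, src1, src2, src3, 8);

    for (int i = 0; i < 8; ++i) {
        printf("%d ", target[i]);
    }
    printf("\n");
    return 0;
}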