author    Martin Braun <martin.braun@ettus.com>  2019-07-18 15:36:11 -0700
committer Martin Braun <martin.braun@ettus.com>  2019-11-26 11:49:10 -0800
commit    9df26a9d89ef8fb50a667428066f3ef1732245c9 (patch)
tree      aa8aa5adf1c40e0aecb3e45a527511af96e05ca1 /host/lib/convert/sse2_sc8_to_fc64.cpp
parent    fed32af0806a730e0f4202003dc49cb736c832fb (diff)
convert: sse2: Apply clang-format
Diffstat (limited to 'host/lib/convert/sse2_sc8_to_fc64.cpp')
-rw-r--r--  host/lib/convert/sse2_sc8_to_fc64.cpp | 168
1 file changed, 86 insertions(+), 82 deletions(-)
diff --git a/host/lib/convert/sse2_sc8_to_fc64.cpp b/host/lib/convert/sse2_sc8_to_fc64.cpp
index f5b406152..3cc2fefd0 100644
--- a/host/lib/convert/sse2_sc8_to_fc64.cpp
+++ b/host/lib/convert/sse2_sc8_to_fc64.cpp
@@ -13,129 +13,133 @@ using namespace uhd::convert;
static const __m128i zeroi = _mm_setzero_si128();
-UHD_INLINE void unpack_sc32_8x(
- const __m128i &in,
- __m128d &out0, __m128d &out1,
- __m128d &out2, __m128d &out3,
- __m128d &out4, __m128d &out5,
- __m128d &out6, __m128d &out7,
- const __m128d &scalar
-){
+UHD_INLINE void unpack_sc32_8x(const __m128i& in,
+ __m128d& out0,
+ __m128d& out1,
+ __m128d& out2,
+ __m128d& out3,
+ __m128d& out4,
+ __m128d& out5,
+ __m128d& out6,
+ __m128d& out7,
+ const __m128d& scalar)
+{
const int shuf = _MM_SHUFFLE(1, 0, 3, 2);
__m128i tmp;
const __m128i tmplo = _mm_unpacklo_epi8(zeroi, in); /* value in upper 8 bits */
- tmp = _mm_unpacklo_epi16(zeroi, tmplo); /* value in upper 16 bits */
- out0 = _mm_mul_pd(_mm_cvtepi32_pd(tmp), scalar);
- tmp = _mm_shuffle_epi32(tmp, shuf);
- out1 = _mm_mul_pd(_mm_cvtepi32_pd(tmp), scalar);
- tmp = _mm_unpackhi_epi16(zeroi, tmplo);
- out2 = _mm_mul_pd(_mm_cvtepi32_pd(tmp), scalar);
- tmp = _mm_shuffle_epi32(tmp, shuf);
- out3 = _mm_mul_pd(_mm_cvtepi32_pd(tmp), scalar);
+ tmp = _mm_unpacklo_epi16(zeroi, tmplo); /* value in upper 16 bits */
+ out0 = _mm_mul_pd(_mm_cvtepi32_pd(tmp), scalar);
+ tmp = _mm_shuffle_epi32(tmp, shuf);
+ out1 = _mm_mul_pd(_mm_cvtepi32_pd(tmp), scalar);
+ tmp = _mm_unpackhi_epi16(zeroi, tmplo);
+ out2 = _mm_mul_pd(_mm_cvtepi32_pd(tmp), scalar);
+ tmp = _mm_shuffle_epi32(tmp, shuf);
+ out3 = _mm_mul_pd(_mm_cvtepi32_pd(tmp), scalar);
const __m128i tmphi = _mm_unpackhi_epi8(zeroi, in);
- tmp = _mm_unpacklo_epi16(zeroi, tmphi);
- out4 = _mm_mul_pd(_mm_cvtepi32_pd(tmp), scalar);
- tmp = _mm_shuffle_epi32(tmp, shuf);
- out5 = _mm_mul_pd(_mm_cvtepi32_pd(tmp), scalar);
- tmp = _mm_unpackhi_epi16(zeroi, tmphi);
- out6 = _mm_mul_pd(_mm_cvtepi32_pd(tmp), scalar);
- tmp = _mm_shuffle_epi32(tmp, shuf);
- out7 = _mm_mul_pd(_mm_cvtepi32_pd(tmp), scalar);
+ tmp = _mm_unpacklo_epi16(zeroi, tmphi);
+ out4 = _mm_mul_pd(_mm_cvtepi32_pd(tmp), scalar);
+ tmp = _mm_shuffle_epi32(tmp, shuf);
+ out5 = _mm_mul_pd(_mm_cvtepi32_pd(tmp), scalar);
+ tmp = _mm_unpackhi_epi16(zeroi, tmphi);
+ out6 = _mm_mul_pd(_mm_cvtepi32_pd(tmp), scalar);
+ tmp = _mm_shuffle_epi32(tmp, shuf);
+ out7 = _mm_mul_pd(_mm_cvtepi32_pd(tmp), scalar);
}
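
The unpack sequence above relies on a sign-extension trick: interleaving the input bytes with zeros (note that zeroi is the *first* operand of each unpack) places every signed 8-bit value in the upper byte of a 16-bit lane, and the second zero-interleave moves it into the upper byte of a 32-bit lane. The resulting int32 equals the original int8 times 2^24, which the pre-divided scalar (scale_factor / (1 << 24), computed in the converters below) cancels. A minimal scalar sketch of the same arithmetic, one lane at a time (the helper name unpack_one_sc8 is illustrative, not part of UHD):

#include <cstdint>
#include <cstdio>

// Scalar model of one SIMD lane: put the signed 8-bit value in the upper
// byte of a 32-bit integer (i.e. multiply by 2^24), convert to double, then
// cancel the 2^24 with the pre-divided scale factor -- the same math the
// SSE2 path performs eight lanes at a time.
static double unpack_one_sc8(int8_t v, double scale_factor)
{
    const int32_t in_upper_byte = static_cast<int32_t>(v) * (1 << 24);
    return static_cast<double>(in_upper_byte) * (scale_factor / (1 << 24));
}

int main()
{
    std::printf("%f %f %f\n",
        unpack_one_sc8(127, 1.0),  // 127.0
        unpack_one_sc8(-128, 1.0), // -128.0
        unpack_one_sc8(1, 1.0));   // 1.0
    return 0;
}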
-DECLARE_CONVERTER(sc8_item32_be, 1, fc64, 1, PRIORITY_SIMD){
- const item32_t *input = reinterpret_cast<const item32_t *>(size_t(inputs[0]) & ~0x3);
- fc64_t *output = reinterpret_cast<fc64_t *>(outputs[0]);
+DECLARE_CONVERTER(sc8_item32_be, 1, fc64, 1, PRIORITY_SIMD)
+{
+ const item32_t* input = reinterpret_cast<const item32_t*>(size_t(inputs[0]) & ~0x3);
+ fc64_t* output = reinterpret_cast<fc64_t*>(outputs[0]);
- const __m128d scalar = _mm_set1_pd(scale_factor/(1 << 24));
+ const __m128d scalar = _mm_set1_pd(scale_factor / (1 << 24));
size_t i = 0, j = 0;
fc32_t dummy;
size_t num_samps = nsamps;
- if ((size_t(inputs[0]) & 0x3) != 0){
+ if ((size_t(inputs[0]) & 0x3) != 0) {
item32_sc8_to_xx<uhd::ntohx>(input++, output++, 1, scale_factor);
num_samps--;
}
- #define convert_sc8_item32_1_to_fc64_1_bswap_guts(_al_) \
- for (; j+7 < num_samps; j+=8, i+=4){ \
- /* load from input */ \
- __m128i tmpi = _mm_loadu_si128(reinterpret_cast<const __m128i *>(input+i)); \
- \
- /* unpack */ \
- __m128d tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; \
+#define convert_sc8_item32_1_to_fc64_1_bswap_guts(_al_) \
+ for (; j + 7 < num_samps; j += 8, i += 4) { \
+ /* load from input */ \
+ __m128i tmpi = _mm_loadu_si128(reinterpret_cast<const __m128i*>(input + i)); \
+ \
+ /* unpack */ \
+ __m128d tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; \
unpack_sc32_8x(tmpi, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, scalar); \
- \
- /* store to output */ \
- _mm_store ## _al_ ## pd(reinterpret_cast<double *>(output+j+0), tmp0); \
- _mm_store ## _al_ ## pd(reinterpret_cast<double *>(output+j+1), tmp1); \
- _mm_store ## _al_ ## pd(reinterpret_cast<double *>(output+j+2), tmp2); \
- _mm_store ## _al_ ## pd(reinterpret_cast<double *>(output+j+3), tmp3); \
- _mm_store ## _al_ ## pd(reinterpret_cast<double *>(output+j+4), tmp4); \
- _mm_store ## _al_ ## pd(reinterpret_cast<double *>(output+j+5), tmp5); \
- _mm_store ## _al_ ## pd(reinterpret_cast<double *>(output+j+6), tmp6); \
- _mm_store ## _al_ ## pd(reinterpret_cast<double *>(output+j+7), tmp7); \
+ \
+ /* store to output */ \
+ _mm_store##_al_##pd(reinterpret_cast<double*>(output + j + 0), tmp0); \
+ _mm_store##_al_##pd(reinterpret_cast<double*>(output + j + 1), tmp1); \
+ _mm_store##_al_##pd(reinterpret_cast<double*>(output + j + 2), tmp2); \
+ _mm_store##_al_##pd(reinterpret_cast<double*>(output + j + 3), tmp3); \
+ _mm_store##_al_##pd(reinterpret_cast<double*>(output + j + 4), tmp4); \
+ _mm_store##_al_##pd(reinterpret_cast<double*>(output + j + 5), tmp5); \
+ _mm_store##_al_##pd(reinterpret_cast<double*>(output + j + 6), tmp6); \
+ _mm_store##_al_##pd(reinterpret_cast<double*>(output + j + 7), tmp7); \
}
- //dispatch according to alignment
- if ((size_t(output) & 0xf) == 0){
+ // dispatch according to alignment
+ if ((size_t(output) & 0xf) == 0) {
convert_sc8_item32_1_to_fc64_1_bswap_guts(_)
- }
- else{
+ } else {
convert_sc8_item32_1_to_fc64_1_bswap_guts(u_)
}
- //convert remainder
- item32_sc8_to_xx<uhd::ntohx>(input+i, output+j, num_samps-j, scale_factor);
+ // convert remainder
+ item32_sc8_to_xx<uhd::ntohx>(input + i, output + j, num_samps - j, scale_factor);
}
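
Both converters share a head/body/tail structure: the input pointer is rounded down to a 4-byte item32 boundary, at most one misaligned leading sample goes through the generic item32_sc8_to_xx routine, the SSE2 body then processes eight samples (four item32 words) per iteration, and the remainder falls back to the generic routine. The aligned/unaligned dispatch only selects between _mm_store_pd and _mm_storeu_pd for the output side. A scalar sketch of that control flow under simplifying assumptions (convert_scalar, convert_group8, and convert_head_body_tail are illustrative stand-ins, not UHD API, and the head logic is reduced to converting the first sample in place):

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Trivial stand-in for the generic converter: one complex sample is two
// int8 components in, two doubles out.
static void convert_scalar(const int8_t* in, double* out, size_t nsamps)
{
    for (size_t n = 0; n < 2 * nsamps; n++)
        out[n] = in[n] * (1.0 / 128);
}

// Stand-in for the SSE2 body: eight samples per call.
static void convert_group8(const int8_t* in, double* out)
{
    convert_scalar(in, out, 8);
}

static void convert_head_body_tail(const int8_t* in, double* out, size_t nsamps)
{
    size_t n = nsamps;
    // Head: peel one sample when the input is not 4-byte aligned, mirroring
    // the (size_t(inputs[0]) & 0x3) prologue above.
    if ((reinterpret_cast<std::size_t>(in) & 0x3) != 0) {
        convert_scalar(in, out, 1);
        in += 2; // one complex sample = two int8 components
        out += 2;
        n--;
    }
    // Body: whole groups of eight samples, as in the _guts macros.
    size_t j = 0;
    for (; j + 7 < n; j += 8)
        convert_group8(in + 2 * j, out + 2 * j);
    // Tail: the remainder goes back through the scalar path.
    convert_scalar(in + 2 * j, out + 2 * j, n - j);
}

int main()
{
    int8_t in[2 * 13];
    double out[2 * 13];
    for (int k = 0; k < 2 * 13; k++)
        in[k] = static_cast<int8_t>(k - 13);
    convert_head_body_tail(in, out, 13);
    std::printf("%f %f\n", out[0], out[25]);
    return 0;
}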
-DECLARE_CONVERTER(sc8_item32_le, 1, fc64, 1, PRIORITY_SIMD){
- const item32_t *input = reinterpret_cast<const item32_t *>(size_t(inputs[0]) & ~0x3);
- fc64_t *output = reinterpret_cast<fc64_t *>(outputs[0]);
+DECLARE_CONVERTER(sc8_item32_le, 1, fc64, 1, PRIORITY_SIMD)
+{
+ const item32_t* input = reinterpret_cast<const item32_t*>(size_t(inputs[0]) & ~0x3);
+ fc64_t* output = reinterpret_cast<fc64_t*>(outputs[0]);
- const __m128d scalar = _mm_set1_pd(scale_factor/(1 << 24));
+ const __m128d scalar = _mm_set1_pd(scale_factor / (1 << 24));
size_t i = 0, j = 0;
fc32_t dummy;
size_t num_samps = nsamps;
- if ((size_t(inputs[0]) & 0x3) != 0){
+ if ((size_t(inputs[0]) & 0x3) != 0) {
item32_sc8_to_xx<uhd::wtohx>(input++, output++, 1, scale_factor);
num_samps--;
}
- #define convert_sc8_item32_1_to_fc64_1_nswap_guts(_al_) \
- for (; j+7 < num_samps; j+=8, i+=4){ \
- /* load from input */ \
- __m128i tmpi = _mm_loadu_si128(reinterpret_cast<const __m128i *>(input+i)); \
- \
- /* unpack */ \
- __m128d tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; \
- tmpi = _mm_or_si128(_mm_srli_epi16(tmpi, 8), _mm_slli_epi16(tmpi, 8)); /*byteswap*/\
- unpack_sc32_8x(tmpi, tmp1, tmp0, tmp3, tmp2, tmp5, tmp4, tmp7, tmp6, scalar); \
- \
- /* store to output */ \
- _mm_store ## _al_ ## pd(reinterpret_cast<double *>(output+j+0), tmp0); \
- _mm_store ## _al_ ## pd(reinterpret_cast<double *>(output+j+1), tmp1); \
- _mm_store ## _al_ ## pd(reinterpret_cast<double *>(output+j+2), tmp2); \
- _mm_store ## _al_ ## pd(reinterpret_cast<double *>(output+j+3), tmp3); \
- _mm_store ## _al_ ## pd(reinterpret_cast<double *>(output+j+4), tmp4); \
- _mm_store ## _al_ ## pd(reinterpret_cast<double *>(output+j+5), tmp5); \
- _mm_store ## _al_ ## pd(reinterpret_cast<double *>(output+j+6), tmp6); \
- _mm_store ## _al_ ## pd(reinterpret_cast<double *>(output+j+7), tmp7); \
+#define convert_sc8_item32_1_to_fc64_1_nswap_guts(_al_) \
+ for (; j + 7 < num_samps; j += 8, i += 4) { \
+ /* load from input */ \
+ __m128i tmpi = _mm_loadu_si128(reinterpret_cast<const __m128i*>(input + i)); \
+ \
+ /* unpack */ \
+ __m128d tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; \
+ tmpi = \
+ _mm_or_si128(_mm_srli_epi16(tmpi, 8), _mm_slli_epi16(tmpi, 8)); /*byteswap*/ \
+ unpack_sc32_8x(tmpi, tmp1, tmp0, tmp3, tmp2, tmp5, tmp4, tmp7, tmp6, scalar); \
+ \
+ /* store to output */ \
+ _mm_store##_al_##pd(reinterpret_cast<double*>(output + j + 0), tmp0); \
+ _mm_store##_al_##pd(reinterpret_cast<double*>(output + j + 1), tmp1); \
+ _mm_store##_al_##pd(reinterpret_cast<double*>(output + j + 2), tmp2); \
+ _mm_store##_al_##pd(reinterpret_cast<double*>(output + j + 3), tmp3); \
+ _mm_store##_al_##pd(reinterpret_cast<double*>(output + j + 4), tmp4); \
+ _mm_store##_al_##pd(reinterpret_cast<double*>(output + j + 5), tmp5); \
+ _mm_store##_al_##pd(reinterpret_cast<double*>(output + j + 6), tmp6); \
+ _mm_store##_al_##pd(reinterpret_cast<double*>(output + j + 7), tmp7); \
}
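
The little-endian body byteswaps each 16-bit lane in-register rather than swapping on the way through memory: shifting every 16-bit lane right and left by 8 and OR-ing the results exchanges the two bytes of each lane, the standard SSE2 idiom for a 16-bit byteswap. A self-contained check of that idiom (test values are mine):

#include <emmintrin.h>
#include <cstdint>
#include <cstdio>

int main()
{
    // Eight 16-bit lanes with distinguishable high/low bytes.
    alignas(16) uint16_t v[8] = {
        0x0102, 0x0304, 0x0506, 0x0708, 0x090a, 0x0b0c, 0x0d0e, 0x0f10};
    __m128i x = _mm_load_si128(reinterpret_cast<const __m128i*>(v));

    // Same idiom as the converter: per-lane byteswap via shift + OR.
    x = _mm_or_si128(_mm_srli_epi16(x, 8), _mm_slli_epi16(x, 8));

    alignas(16) uint16_t r[8];
    _mm_store_si128(reinterpret_cast<__m128i*>(r), x);
    for (int k = 0; k < 8; k++)
        std::printf("%04x -> %04x\n", v[k], r[k]); // 0x0102 -> 0x0201, ...
    return 0;
}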
- //dispatch according to alignment
- if ((size_t(output) & 0xf) == 0){
+ // dispatch according to alignment
+ if ((size_t(output) & 0xf) == 0) {
convert_sc8_item32_1_to_fc64_1_nswap_guts(_)
- }
- else{
+ } else {
convert_sc8_item32_1_to_fc64_1_nswap_guts(u_)
}
- //convert remainder
- item32_sc8_to_xx<uhd::wtohx>(input+i, output+j, num_samps-j, scale_factor);
+ // convert remainder
+ item32_sc8_to_xx<uhd::wtohx>(input + i, output + j, num_samps - j, scale_factor);
}
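
Note the argument order in the little-endian body: unpack_sc32_8x receives its outputs as tmp1, tmp0, tmp3, tmp2, and so on. The per-lane byteswap fixes byte order within each 16-bit field but leaves the two 16-bit fields of every item32 word in their little-endian order, so passing the output registers in swapped pairs restores sample order without spending an extra shuffle instruction. A small scalar comparison of the two swap flavors (helper names are mine):

#include <cstdint>
#include <cstdio>

// 16-bit-lane byteswap of a 32-bit word (what _mm_srli_epi16/_mm_slli_epi16
// perform per lane) versus a full 32-bit byteswap.
static uint32_t bswap16_lanes(uint32_t w)
{
    return ((w >> 8) & 0x00ff00ffu) | ((w << 8) & 0xff00ff00u);
}

static uint32_t bswap32(uint32_t w)
{
    return (w >> 24) | ((w >> 8) & 0x0000ff00u) | ((w << 8) & 0x00ff0000u)
           | (w << 24);
}

int main()
{
    const uint32_t w = 0x01020304u;
    std::printf("%08x %08x\n", bswap16_lanes(w), bswap32(w)); // 02010403 04030201
    return 0;
}

The remaining difference between the lane swap and the full 32-bit byteswap is exactly a transposition of the word's two halves, which is what the permuted output registers absorb.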