Displaying 7 results from an estimated 7 matches for "_mm_slli_si128".
2020 May 18 · 6 · [PATCH] SSE2/SSSE3 optimized version of get_checksum1() for x86-64
...__
+static inline __m128i sse_interleave_odd_epi16(__m128i a, __m128i b) {
+ return _mm_packs_epi32(
+ _mm_srai_epi32(a, 16),
+ _mm_srai_epi32(b, 16)
+ );
+}
+
+static inline __m128i sse_interleave_even_epi16(__m128i a, __m128i b) {
+ return sse_interleave_odd_epi16(
+ _mm_slli_si128(a, 2),
+ _mm_slli_si128(b, 2)
+ );
+}
+
+static inline __m128i sse_mulu_odd_epi8(__m128i a, __m128i b) {
+ return _mm_mullo_epi16(
+ _mm_srli_epi16(a, 8),
+ _mm_srai_epi16(b, 8)
+ );
+}
+
+static inline __m128i sse_mulu_even_epi8(__m128i a, __m128i b) {
+ return _mm...
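
Aside (not part of the patch): a rough scalar sketch of what these SSE2 helpers compute, for readers skimming the excerpt; the names mirror the patch only for reference.

#include <stdint.h>

/* Odd gather: _mm_srai_epi32(x, 16) keeps the high (odd-indexed) 16-bit
 * lane of each 32-bit pair, sign-extended, and _mm_packs_epi32 re-packs
 * eight such values into one vector; the pack's saturation never clamps
 * because every value already fits in 16 bits. */
static void scalar_interleave_odd_epi16(const int16_t a[8], const int16_t b[8],
                                        int16_t out[8])
{
    for (int i = 0; i < 4; i++) {
        out[i]     = a[2 * i + 1];   /* odd lanes of a -> low half of out  */
        out[i + 4] = b[2 * i + 1];   /* odd lanes of b -> high half of out */
    }
}

/* Even gather: _mm_slli_si128(x, 2) shifts the whole register up by one
 * 16-bit lane, moving the even lanes into the odd positions, after which
 * the odd gather above is reused. */
static void scalar_interleave_even_epi16(const int16_t a[8], const int16_t b[8],
                                         int16_t out[8])
{
    for (int i = 0; i < 4; i++) {
        out[i]     = a[2 * i];
        out[i + 4] = b[2 * i];
    }
}

/* Odd-byte multiply: the high byte of each 16-bit lane of a, zero-extended
 * by _mm_srli_epi16, times the high byte of the same lane of b,
 * sign-extended by _mm_srai_epi16; the product always fits in 16 bits. */
static void scalar_mulu_odd_epi8(const uint8_t a[16], const int8_t b[16],
                                 int16_t out[8])
{
    for (int i = 0; i < 8; i++)
        out[i] = (int16_t)((int)a[2 * i + 1] * (int)b[2 * i + 1]);
}
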
2020 May 18 · 0 · [PATCH] SSE2/SSSE3 optimized version of get_checksum1() for x86-64
...epi16(__m128i a, __m128i b) {
> + return _mm_packs_epi32(
> + _mm_srai_epi32(a, 16),
> + _mm_srai_epi32(b, 16)
> + );
> +}
> +
> +static inline __m128i sse_interleave_even_epi16(__m128i a, __m128i b) {
> + return sse_interleave_odd_epi16(
> + _mm_slli_si128(a, 2),
> + _mm_slli_si128(b, 2)
> + );
> +}
> +
> +static inline __m128i sse_mulu_odd_epi8(__m128i a, __m128i b) {
> + return _mm_mullo_epi16(
> + _mm_srli_epi16(a, 8),
> + _mm_srai_epi16(b, 8)
> + );
> +}
> +
> +static inline __m128...
2020 May 19 · 5 · [PATCHv2] SSE2/SSSE3 optimized version of get_checksum1() for x86-64
..._epi16(__m128i a, __m128i b) {
+ return _mm_packs_epi32(
+ _mm_srai_epi32(a, 16),
+ _mm_srai_epi32(b, 16)
+ );
+}
+
+__attribute__ ((target ("sse2"))) static inline __m128i sse_interleave_even_epi16(__m128i a, __m128i b) {
+ return sse_interleave_odd_epi16(
+ _mm_slli_si128(a, 2),
+ _mm_slli_si128(b, 2)
+ );
+}
+
+__attribute__ ((target ("sse2"))) static inline __m128i sse_mulu_odd_epi8(__m128i a, __m128i b) {
+ return _mm_mullo_epi16(
+ _mm_srli_epi16(a, 8),
+ _mm_srai_epi16(b, 8)
+ );
+}
+
+__attribute__ ((target ("sse2&q...
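
Aside (not from the patch): the v2 revision adds GCC's per-function target attribute to each helper, which lets a single function be compiled for an ISA extension the rest of the file is not built with. A minimal sketch of the mechanism:

#include <immintrin.h>

/* Illustrative only: this function can use SSSE3 intrinsics even when the
 * file is compiled without -mssse3, because the target attribute enables
 * the extension for just this function (GCC and clang). */
__attribute__ ((target ("ssse3")))
static __m128i abs_bytes_ssse3(__m128i v)
{
    return _mm_abs_epi8(v);   /* pabsb, an SSSE3-only instruction */
}

The caller is still responsible for verifying at run time that the CPU supports the extension before entering such a function.
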
2020 May 18 · 2 · [PATCH] SSE2/SSSE3 optimized version of get_checksum1() for x86-64
...t; + return _mm_packs_epi32(
>> + _mm_srai_epi32(a, 16),
>> + _mm_srai_epi32(b, 16)
>> + );
>> +}
>> +
>> +static inline __m128i sse_interleave_even_epi16(__m128i a, __m128i b) {
>> + return sse_interleave_odd_epi16(
>> + _mm_slli_si128(a, 2),
>> + _mm_slli_si128(b, 2)
>> + );
>> +}
>> +
>> +static inline __m128i sse_mulu_odd_epi8(__m128i a, __m128i b) {
>> + return _mm_mullo_epi16(
>> + _mm_srli_epi16(a, 8),
>> + _mm_srai_epi16(b, 8)
>> + );
>>...
2020 May 20 · 0 · [PATCHv2] SSE2/SSSE3 optimized version of get_checksum1() for x86-64
...m_packs_epi32(
> + _mm_srai_epi32(a, 16),
> + _mm_srai_epi32(b, 16)
> + );
> +}
> +
> +__attribute__ ((target ("sse2"))) static inline __m128i sse_interleave_even_epi16(__m128i a, __m128i b) {
> + return sse_interleave_odd_epi16(
> + _mm_slli_si128(a, 2),
> + _mm_slli_si128(b, 2)
> + );
> +}
> +
> +__attribute__ ((target ("sse2"))) static inline __m128i sse_mulu_odd_epi8(__m128i a, __m128i b) {
> + return _mm_mullo_epi16(
> + _mm_srli_epi16(a, 8),
> + _mm_srai_epi16(b, 8)
>...
2020 May 18 · 3 · [PATCH] SSE2/SSSE3 optimized version of get_checksum1() for x86-64
What do you base this on?
Per https://gcc.gnu.org/onlinedocs/gcc/x86-Options.html :
"For the x86-32 compiler, you must use -march=cpu-type, -msse or
-msse2 switches to enable SSE extensions and make this option
effective. For the x86-64 compiler, these extensions are enabled by
default."
That reads to me like we're fine for SSE2. As stated in my comments,
SSSE3 support must be
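
Aside (mine, not from the thread): the point being made is that the x86-64 baseline guarantees SSE2 but not SSSE3, so an SSSE3 code path needs its own gate. A minimal runtime-check sketch using GCC/clang's __builtin_cpu_supports:

#include <stdio.h>

int main(void)
{
    __builtin_cpu_init();   /* populate the CPU feature data (GCC/clang) */
    if (__builtin_cpu_supports("ssse3"))
        puts("SSSE3 present: the SSSE3 path can be taken");
    else
        puts("baseline x86-64: stay on the SSE2-only path");
    return 0;
}
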
2014 Sep 23 · 2 · [LLVMdev] Please benchmark new x86 vector shuffle lowering, planning to make it the default very soon!
On Sun, Sep 21, 2014 at 1:15 PM, Simon Pilgrim <llvm-dev at redking.me.uk> wrote:
> On 20 Sep 2014, at 19:44, Chandler Carruth <chandlerc at google.com> wrote:
>
> > If AVX is available I would expect the vpermilps/vpermilpd instruction to be used for all float/double single vector shuffles, especially as it can deal with the folded load case as well - this would
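
Aside (not from the thread): a hedged example of a single-vector float shuffle that a compiler targeting AVX may lower to vpermilps; _mm_permute_ps maps directly to that instruction, and because vpermilps accepts a memory source operand, a load feeding the permute can be folded into it, which is the "folded load case" mentioned above.

#include <immintrin.h>

/* Reverse the four float lanes of v.  Built for AVX (via the target
 * attribute here, or -mavx), this is a single in-lane permute. */
__attribute__ ((target ("avx")))
static __m128 reverse_lanes(__m128 v)
{
    return _mm_permute_ps(v, _MM_SHUFFLE(0, 1, 2, 3));   /* vpermilps */
}
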