; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+ssse3 | FileCheck %s --check-prefixes=SSE,SSSE3
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE,SSE41
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+avx512vl,+fast-variable-shuffle | FileCheck %s --check-prefixes=AVX,AVX512
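; Each test below calls a llvm.umul.with.overflow.* vector intrinsic,
; stores the product vector to memory, and returns the overflow bits
; sign-extended to full-width vector elements.
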
declare {<1 x i32>, <1 x i1>} @llvm.umul.with.overflow.v1i32(<1 x i32>, <1 x i32>)
declare {<2 x i32>, <2 x i1>} @llvm.umul.with.overflow.v2i32(<2 x i32>, <2 x i32>)
declare {<3 x i32>, <3 x i1>} @llvm.umul.with.overflow.v3i32(<3 x i32>, <3 x i32>)
declare {<4 x i32>, <4 x i1>} @llvm.umul.with.overflow.v4i32(<4 x i32>, <4 x i32>)
declare {<6 x i32>, <6 x i1>} @llvm.umul.with.overflow.v6i32(<6 x i32>, <6 x i32>)
declare {<8 x i32>, <8 x i1>} @llvm.umul.with.overflow.v8i32(<8 x i32>, <8 x i32>)
declare {<16 x i32>, <16 x i1>} @llvm.umul.with.overflow.v16i32(<16 x i32>, <16 x i32>)

declare {<16 x i8>, <16 x i1>} @llvm.umul.with.overflow.v16i8(<16 x i8>, <16 x i8>)
declare {<32 x i8>, <32 x i1>} @llvm.umul.with.overflow.v32i8(<32 x i8>, <32 x i8>)
declare {<64 x i8>, <64 x i1>} @llvm.umul.with.overflow.v64i8(<64 x i8>, <64 x i8>)
declare {<8 x i16>, <8 x i1>} @llvm.umul.with.overflow.v8i16(<8 x i16>, <8 x i16>)
declare {<2 x i64>, <2 x i1>} @llvm.umul.with.overflow.v2i64(<2 x i64>, <2 x i64>)

declare {<4 x i24>, <4 x i1>} @llvm.umul.with.overflow.v4i24(<4 x i24>, <4 x i24>)
declare {<4 x i1>, <4 x i1>} @llvm.umul.with.overflow.v4i1(<4 x i1>, <4 x i1>)
declare {<2 x i128>, <2 x i1>} @llvm.umul.with.overflow.v2i128(<2 x i128>, <2 x i128>)

define <1 x i32> @umulo_v1i32(<1 x i32> %a0, <1 x i32> %a1, <1 x i32>* %p2) nounwind {
; SSE-LABEL: umulo_v1i32:
; SSE: # %bb.0:
; SSE-NEXT: movq %rdx, %rcx
; SSE-NEXT: movl %edi, %eax
; SSE-NEXT: xorl %edi, %edi
; SSE-NEXT: mull %esi
; SSE-NEXT: seto %dil
; SSE-NEXT: negl %edi
; SSE-NEXT: movl %eax, (%rcx)
; SSE-NEXT: movl %edi, %eax
; SSE-NEXT: retq
;
; AVX-LABEL: umulo_v1i32:
; AVX: # %bb.0:
; AVX-NEXT: movq %rdx, %rcx
; AVX-NEXT: movl %edi, %eax
; AVX-NEXT: xorl %edi, %edi
; AVX-NEXT: mull %esi
; AVX-NEXT: seto %dil
; AVX-NEXT: negl %edi
; AVX-NEXT: movl %eax, (%rcx)
; AVX-NEXT: movl %edi, %eax
; AVX-NEXT: retq
  %t = call {<1 x i32>, <1 x i1>} @llvm.umul.with.overflow.v1i32(<1 x i32> %a0, <1 x i32> %a1)
  %val = extractvalue {<1 x i32>, <1 x i1>} %t, 0
  %obit = extractvalue {<1 x i32>, <1 x i1>} %t, 1
  %res = sext <1 x i1> %obit to <1 x i32>
  store <1 x i32> %val, <1 x i32>* %p2
  ret <1 x i32> %res
}

define <2 x i32> @umulo_v2i32(<2 x i32> %a0, <2 x i32> %a1, <2 x i32>* %p2) nounwind {
; SSE2-LABEL: umulo_v2i32:
; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,3,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm1[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm2, %xmm4
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm4[1,3,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
; SSE2-NEXT: pxor %xmm2, %xmm2
; SSE2-NEXT: pcmpeqd %xmm3, %xmm2
; SSE2-NEXT: pcmpeqd %xmm1, %xmm1
; SSE2-NEXT: pxor %xmm2, %xmm1
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
; SSE2-NEXT: movq %xmm0, (%rdi)
; SSE2-NEXT: movdqa %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: umulo_v2i32:
; SSSE3: # %bb.0:
; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSSE3-NEXT: pmuludq %xmm1, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,3,2,3]
; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm1[1,1,3,3]
; SSSE3-NEXT: pmuludq %xmm2, %xmm4
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm4[1,3,2,3]
; SSSE3-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
; SSSE3-NEXT: pxor %xmm2, %xmm2
; SSSE3-NEXT: pcmpeqd %xmm3, %xmm2
; SSSE3-NEXT: pcmpeqd %xmm1, %xmm1
; SSSE3-NEXT: pxor %xmm2, %xmm1
; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
; SSSE3-NEXT: movq %xmm0, (%rdi)
; SSSE3-NEXT: movdqa %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: umulo_v2i32:
; SSE41: # %bb.0:
; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; SSE41-NEXT: pmuludq %xmm2, %xmm3
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: pmuludq %xmm1, %xmm2
; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3],xmm2[4,5],xmm3[6,7]
; SSE41-NEXT: pxor %xmm3, %xmm3
; SSE41-NEXT: pcmpeqd %xmm2, %xmm3
; SSE41-NEXT: pcmpeqd %xmm2, %xmm2
; SSE41-NEXT: pxor %xmm3, %xmm2
; SSE41-NEXT: pmulld %xmm1, %xmm0
; SSE41-NEXT: movq %xmm0, (%rdi)
; SSE41-NEXT: movdqa %xmm2, %xmm0
; SSE41-NEXT: retq
;
; AVX1-LABEL: umulo_v2i32:
; AVX1: # %bb.0:
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; AVX1-NEXT: vpmuludq %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpmuludq %xmm1, %xmm0, %xmm3
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1],xmm2[2,3],xmm3[4,5],xmm2[6,7]
; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX1-NEXT: vpcmpeqd %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
; AVX1-NEXT: vpxor %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vmovq %xmm0, (%rdi)
; AVX1-NEXT: vmovdqa %xmm2, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: umulo_v2i32:
; AVX2: # %bb.0:
; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; AVX2-NEXT: vpmuludq %xmm2, %xmm3, %xmm2
; AVX2-NEXT: vpmuludq %xmm1, %xmm0, %xmm3
; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
; AVX2-NEXT: vpblendd {{.*#+}} xmm2 = xmm3[0],xmm2[1],xmm3[2],xmm2[3]
; AVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX2-NEXT: vpcmpeqd %xmm3, %xmm2, %xmm2
; AVX2-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
; AVX2-NEXT: vpxor %xmm3, %xmm2, %xmm2
; AVX2-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vmovq %xmm0, (%rdi)
; AVX2-NEXT: vmovdqa %xmm2, %xmm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: umulo_v2i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
; AVX512-NEXT: vpshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
; AVX512-NEXT: vpshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
; AVX512-NEXT: vpmuludq %xmm3, %xmm4, %xmm3
; AVX512-NEXT: vmovdqa {{.*#+}} xmm4 = [1,5,3,7]
; AVX512-NEXT: vpermi2d %xmm3, %xmm2, %xmm4
; AVX512-NEXT: vptestmd %xmm4, %xmm4, %k1
; AVX512-NEXT: vpmulld %xmm1, %xmm0, %xmm1
; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
; AVX512-NEXT: vmovdqa32 %xmm0, %xmm0 {%k1} {z}
; AVX512-NEXT: vmovq %xmm1, (%rdi)
; AVX512-NEXT: retq
  %t = call {<2 x i32>, <2 x i1>} @llvm.umul.with.overflow.v2i32(<2 x i32> %a0, <2 x i32> %a1)
  %val = extractvalue {<2 x i32>, <2 x i1>} %t, 0
  %obit = extractvalue {<2 x i32>, <2 x i1>} %t, 1
  %res = sext <2 x i1> %obit to <2 x i32>
  store <2 x i32> %val, <2 x i32>* %p2
  ret <2 x i32> %res
}

define <3 x i32> @umulo_v3i32(<3 x i32> %a0, <3 x i32> %a1, <3 x i32>* %p2) nounwind {
; SSE2-LABEL: umulo_v3i32:
; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,3,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm1[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm2, %xmm4
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm4[1,3,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
; SSE2-NEXT: pxor %xmm2, %xmm2
; SSE2-NEXT: pcmpeqd %xmm3, %xmm2
; SSE2-NEXT: pcmpeqd %xmm1, %xmm1
; SSE2-NEXT: pxor %xmm2, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
; SSE2-NEXT: movd %xmm2, 8(%rdi)
; SSE2-NEXT: movq %xmm0, (%rdi)
; SSE2-NEXT: movdqa %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: umulo_v3i32:
; SSSE3: # %bb.0:
; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSSE3-NEXT: pmuludq %xmm1, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,3,2,3]
; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm1[1,1,3,3]
; SSSE3-NEXT: pmuludq %xmm2, %xmm4
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm4[1,3,2,3]
; SSSE3-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
; SSSE3-NEXT: pxor %xmm2, %xmm2
; SSSE3-NEXT: pcmpeqd %xmm3, %xmm2
; SSSE3-NEXT: pcmpeqd %xmm1, %xmm1
; SSSE3-NEXT: pxor %xmm2, %xmm1
; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
; SSSE3-NEXT: movd %xmm2, 8(%rdi)
; SSSE3-NEXT: movq %xmm0, (%rdi)
; SSSE3-NEXT: movdqa %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: umulo_v3i32:
; SSE41: # %bb.0:
; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; SSE41-NEXT: pmuludq %xmm2, %xmm3
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: pmuludq %xmm1, %xmm2
; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3],xmm2[4,5],xmm3[6,7]
; SSE41-NEXT: pxor %xmm3, %xmm3
; SSE41-NEXT: pcmpeqd %xmm2, %xmm3
; SSE41-NEXT: pcmpeqd %xmm2, %xmm2
; SSE41-NEXT: pxor %xmm3, %xmm2
; SSE41-NEXT: pmulld %xmm1, %xmm0
; SSE41-NEXT: pextrd $2, %xmm0, 8(%rdi)
; SSE41-NEXT: movq %xmm0, (%rdi)
; SSE41-NEXT: movdqa %xmm2, %xmm0
; SSE41-NEXT: retq
;
; AVX1-LABEL: umulo_v3i32:
; AVX1: # %bb.0:
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; AVX1-NEXT: vpmuludq %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpmuludq %xmm1, %xmm0, %xmm3
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1],xmm2[2,3],xmm3[4,5],xmm2[6,7]
; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX1-NEXT: vpcmpeqd %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
; AVX1-NEXT: vpxor %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpextrd $2, %xmm0, 8(%rdi)
; AVX1-NEXT: vmovq %xmm0, (%rdi)
; AVX1-NEXT: vmovdqa %xmm2, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: umulo_v3i32:
; AVX2: # %bb.0:
; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; AVX2-NEXT: vpmuludq %xmm2, %xmm3, %xmm2
; AVX2-NEXT: vpmuludq %xmm1, %xmm0, %xmm3
; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
; AVX2-NEXT: vpblendd {{.*#+}} xmm2 = xmm3[0],xmm2[1],xmm3[2],xmm2[3]
; AVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX2-NEXT: vpcmpeqd %xmm3, %xmm2, %xmm2
; AVX2-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
; AVX2-NEXT: vpxor %xmm3, %xmm2, %xmm2
; AVX2-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpextrd $2, %xmm0, 8(%rdi)
; AVX2-NEXT: vmovq %xmm0, (%rdi)
; AVX2-NEXT: vmovdqa %xmm2, %xmm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: umulo_v3i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
; AVX512-NEXT: vpshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
; AVX512-NEXT: vpshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
; AVX512-NEXT: vpmuludq %xmm3, %xmm4, %xmm3
; AVX512-NEXT: vmovdqa {{.*#+}} xmm4 = [1,5,3,7]
; AVX512-NEXT: vpermi2d %xmm3, %xmm2, %xmm4
; AVX512-NEXT: vptestmd %xmm4, %xmm4, %k1
; AVX512-NEXT: vpmulld %xmm1, %xmm0, %xmm1
; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
; AVX512-NEXT: vmovdqa32 %xmm0, %xmm0 {%k1} {z}
; AVX512-NEXT: vpextrd $2, %xmm1, 8(%rdi)
; AVX512-NEXT: vmovq %xmm1, (%rdi)
; AVX512-NEXT: retq
  %t = call {<3 x i32>, <3 x i1>} @llvm.umul.with.overflow.v3i32(<3 x i32> %a0, <3 x i32> %a1)
  %val = extractvalue {<3 x i32>, <3 x i1>} %t, 0
  %obit = extractvalue {<3 x i32>, <3 x i1>} %t, 1
  %res = sext <3 x i1> %obit to <3 x i32>
  store <3 x i32> %val, <3 x i32>* %p2
  ret <3 x i32> %res
}

define <4 x i32> @umulo_v4i32(<4 x i32> %a0, <4 x i32> %a1, <4 x i32>* %p2) nounwind {
; SSE2-LABEL: umulo_v4i32:
; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,3,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm1[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm2, %xmm4
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm4[1,3,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
; SSE2-NEXT: pxor %xmm2, %xmm2
; SSE2-NEXT: pcmpeqd %xmm3, %xmm2
; SSE2-NEXT: pcmpeqd %xmm1, %xmm1
; SSE2-NEXT: pxor %xmm2, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm4[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSE2-NEXT: movdqa %xmm0, (%rdi)
; SSE2-NEXT: movdqa %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: umulo_v4i32:
; SSSE3: # %bb.0:
; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSSE3-NEXT: pmuludq %xmm1, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,3,2,3]
; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm1[1,1,3,3]
; SSSE3-NEXT: pmuludq %xmm2, %xmm4
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm4[1,3,2,3]
; SSSE3-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
; SSSE3-NEXT: pxor %xmm2, %xmm2
; SSSE3-NEXT: pcmpeqd %xmm3, %xmm2
; SSSE3-NEXT: pcmpeqd %xmm1, %xmm1
; SSSE3-NEXT: pxor %xmm2, %xmm1
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm4[0,2,2,3]
; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSSE3-NEXT: movdqa %xmm0, (%rdi)
; SSSE3-NEXT: movdqa %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: umulo_v4i32:
; SSE41: # %bb.0:
; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; SSE41-NEXT: pmuludq %xmm2, %xmm3
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: pmuludq %xmm1, %xmm2
; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3],xmm2[4,5],xmm3[6,7]
; SSE41-NEXT: pxor %xmm3, %xmm3
; SSE41-NEXT: pcmpeqd %xmm2, %xmm3
; SSE41-NEXT: pcmpeqd %xmm2, %xmm2
; SSE41-NEXT: pxor %xmm3, %xmm2
; SSE41-NEXT: pmulld %xmm1, %xmm0
; SSE41-NEXT: movdqa %xmm0, (%rdi)
; SSE41-NEXT: movdqa %xmm2, %xmm0
; SSE41-NEXT: retq
;
; AVX1-LABEL: umulo_v4i32:
; AVX1: # %bb.0:
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; AVX1-NEXT: vpmuludq %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpmuludq %xmm1, %xmm0, %xmm3
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1],xmm2[2,3],xmm3[4,5],xmm2[6,7]
; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX1-NEXT: vpcmpeqd %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
; AVX1-NEXT: vpxor %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vmovdqa %xmm0, (%rdi)
; AVX1-NEXT: vmovdqa %xmm2, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: umulo_v4i32:
; AVX2: # %bb.0:
; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; AVX2-NEXT: vpmuludq %xmm2, %xmm3, %xmm2
; AVX2-NEXT: vpmuludq %xmm1, %xmm0, %xmm3
; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
; AVX2-NEXT: vpblendd {{.*#+}} xmm2 = xmm3[0],xmm2[1],xmm3[2],xmm2[3]
; AVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX2-NEXT: vpcmpeqd %xmm3, %xmm2, %xmm2
; AVX2-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
; AVX2-NEXT: vpxor %xmm3, %xmm2, %xmm2
; AVX2-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vmovdqa %xmm0, (%rdi)
; AVX2-NEXT: vmovdqa %xmm2, %xmm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: umulo_v4i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
; AVX512-NEXT: vpshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
; AVX512-NEXT: vpshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
; AVX512-NEXT: vpmuludq %xmm3, %xmm4, %xmm3
; AVX512-NEXT: vmovdqa {{.*#+}} xmm4 = [1,5,3,7]
; AVX512-NEXT: vpermi2d %xmm3, %xmm2, %xmm4
; AVX512-NEXT: vptestmd %xmm4, %xmm4, %k1
; AVX512-NEXT: vpmulld %xmm1, %xmm0, %xmm1
; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
; AVX512-NEXT: vmovdqa32 %xmm0, %xmm0 {%k1} {z}
; AVX512-NEXT: vmovdqa %xmm1, (%rdi)
; AVX512-NEXT: retq
  %t = call {<4 x i32>, <4 x i1>} @llvm.umul.with.overflow.v4i32(<4 x i32> %a0, <4 x i32> %a1)
  %val = extractvalue {<4 x i32>, <4 x i1>} %t, 0
  %obit = extractvalue {<4 x i32>, <4 x i1>} %t, 1
  %res = sext <4 x i1> %obit to <4 x i32>
  store <4 x i32> %val, <4 x i32>* %p2
  ret <4 x i32> %res
}

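; <6 x i32> is a non-power-of-two type that is passed in scalar registers
; and on the stack, so the pre-AVX checks below show the operand vectors
; being assembled with movd/pinsrd before the multiply.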
define <6 x i32> @umulo_v6i32(<6 x i32> %a0, <6 x i32> %a1, <6 x i32>* %p2) nounwind {
; SSE2-LABEL: umulo_v6i32:
; SSE2: # %bb.0:
; SSE2-NEXT: movq %rdi, %rax
; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0]
; SSE2-NEXT: movd %r8d, %xmm0
; SSE2-NEXT: movd %ecx, %xmm1
; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSE2-NEXT: movd %edx, %xmm0
; SSE2-NEXT: movd %esi, %xmm3
; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm1[0]
; SSE2-NEXT: movd %r9d, %xmm1
; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rcx
; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: pmuludq %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm3[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm2, %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm3[1,3,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm4, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm2[1,3,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1]
; SSE2-NEXT: pxor %xmm4, %xmm4
; SSE2-NEXT: pcmpeqd %xmm4, %xmm1
; SSE2-NEXT: pcmpeqd %xmm5, %xmm5
; SSE2-NEXT: pxor %xmm5, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
; SSE2-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
; SSE2-NEXT: movd {{.*#+}} xmm6 = mem[0],zero,zero,zero
; SSE2-NEXT: pmuludq %xmm2, %xmm6
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm6[1,3,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm0[1,3,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm2[0],xmm7[1],xmm2[1]
; SSE2-NEXT: pcmpeqd %xmm4, %xmm7
; SSE2-NEXT: pxor %xmm5, %xmm7
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1]
; SSE2-NEXT: movq %xmm0, 16(%rcx)
; SSE2-NEXT: movdqa %xmm3, (%rcx)
; SSE2-NEXT: movq %xmm7, 16(%rdi)
; SSE2-NEXT: movdqa %xmm1, (%rdi)
; SSE2-NEXT: retq
;
; SSSE3-LABEL: umulo_v6i32:
; SSSE3: # %bb.0:
; SSSE3-NEXT: movq %rdi, %rax
; SSSE3-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSSE3-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSSE3-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSSE3-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
; SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0]
; SSSE3-NEXT: movd %r8d, %xmm0
; SSSE3-NEXT: movd %ecx, %xmm1
; SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSSE3-NEXT: movd %edx, %xmm0
; SSSE3-NEXT: movd %esi, %xmm3
; SSSE3-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm1[0]
; SSSE3-NEXT: movd %r9d, %xmm1
; SSSE3-NEXT: movq {{[0-9]+}}(%rsp), %rcx
; SSSE3-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSSE3-NEXT: pmuludq %xmm1, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm3[1,1,3,3]
; SSSE3-NEXT: pmuludq %xmm2, %xmm3
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm3[1,3,2,3]
; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; SSSE3-NEXT: pmuludq %xmm4, %xmm2
; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm2[1,3,2,3]
; SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1]
; SSSE3-NEXT: pxor %xmm4, %xmm4
; SSSE3-NEXT: pcmpeqd %xmm4, %xmm1
; SSSE3-NEXT: pcmpeqd %xmm5, %xmm5
; SSSE3-NEXT: pxor %xmm5, %xmm1
; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
; SSSE3-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
; SSSE3-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
; SSSE3-NEXT: movd {{.*#+}} xmm6 = mem[0],zero,zero,zero
; SSSE3-NEXT: pmuludq %xmm2, %xmm6
; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm6[1,3,2,3]
; SSSE3-NEXT: pshufd {{.*#+}} xmm7 = xmm0[1,3,2,3]
; SSSE3-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm2[0],xmm7[1],xmm2[1]
; SSSE3-NEXT: pcmpeqd %xmm4, %xmm7
; SSSE3-NEXT: pxor %xmm5, %xmm7
; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1]
; SSSE3-NEXT: movq %xmm0, 16(%rcx)
; SSSE3-NEXT: movdqa %xmm3, (%rcx)
; SSSE3-NEXT: movq %xmm7, 16(%rdi)
; SSSE3-NEXT: movdqa %xmm1, (%rdi)
; SSSE3-NEXT: retq
;
; SSE41-LABEL: umulo_v6i32:
; SSE41: # %bb.0:
; SSE41-NEXT: movq %rdi, %rax
; SSE41-NEXT: movd %esi, %xmm2
; SSE41-NEXT: pinsrd $1, %edx, %xmm2
; SSE41-NEXT: pinsrd $2, %ecx, %xmm2
; SSE41-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE41-NEXT: pinsrd $1, {{[0-9]+}}(%rsp), %xmm1
; SSE41-NEXT: pinsrd $2, {{[0-9]+}}(%rsp), %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: pmuludq %xmm2, %xmm1
; SSE41-NEXT: pinsrd $3, %r8d, %xmm2
; SSE41-NEXT: movd {{.*#+}} xmm3 = mem[0],zero,zero,zero
; SSE41-NEXT: movd %r9d, %xmm4
; SSE41-NEXT: movdqa %xmm4, %xmm5
; SSE41-NEXT: pmuludq %xmm3, %xmm4
; SSE41-NEXT: pinsrd $1, {{[0-9]+}}(%rsp), %xmm3
; SSE41-NEXT: pinsrd $1, {{[0-9]+}}(%rsp), %xmm5
; SSE41-NEXT: pmulld %xmm3, %xmm5
; SSE41-NEXT: pinsrd $3, {{[0-9]+}}(%rsp), %xmm0
; SSE41-NEXT: movq {{[0-9]+}}(%rsp), %rcx
; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
; SSE41-NEXT: pmuludq %xmm3, %xmm6
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm6[2,3],xmm1[4,5],xmm6[6,7]
; SSE41-NEXT: pxor %xmm8, %xmm8
; SSE41-NEXT: pcmpeqd %xmm8, %xmm1
; SSE41-NEXT: pcmpeqd %xmm6, %xmm6
; SSE41-NEXT: pxor %xmm6, %xmm1
; SSE41-NEXT: movd {{.*#+}} xmm7 = mem[0],zero,zero,zero
; SSE41-NEXT: movd {{.*#+}} xmm3 = mem[0],zero,zero,zero
; SSE41-NEXT: pmuludq %xmm7, %xmm3
; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
; SSE41-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1],xmm3[2,3],xmm4[4,5],xmm3[6,7]
; SSE41-NEXT: pcmpeqd %xmm8, %xmm4
; SSE41-NEXT: pxor %xmm6, %xmm4
; SSE41-NEXT: pmulld %xmm2, %xmm0
; SSE41-NEXT: movq %xmm5, 16(%rcx)
; SSE41-NEXT: movdqa %xmm0, (%rcx)
; SSE41-NEXT: movq %xmm4, 16(%rdi)
; SSE41-NEXT: movdqa %xmm1, (%rdi)
; SSE41-NEXT: retq
;
; AVX1-LABEL: umulo_v6i32:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm3[1,1,3,3]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm4[1,1,3,3]
; AVX1-NEXT: vpmuludq %xmm2, %xmm5, %xmm2
; AVX1-NEXT: vpmuludq %xmm3, %xmm4, %xmm5
; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm5[0,1],xmm2[2,3],xmm5[4,5],xmm2[6,7]
; AVX1-NEXT: vpxor %xmm8, %xmm8, %xmm8
; AVX1-NEXT: vpcmpeqd %xmm2, %xmm8, %xmm2
; AVX1-NEXT: vpcmpeqd %xmm6, %xmm6, %xmm6
; AVX1-NEXT: vpxor %xmm6, %xmm2, %xmm2
; AVX1-NEXT: vpshufd {{.*#+}} xmm7 = xmm1[1,1,3,3]
; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
; AVX1-NEXT: vpmuludq %xmm7, %xmm5, %xmm5
; AVX1-NEXT: vpmuludq %xmm1, %xmm0, %xmm7
; AVX1-NEXT: vpshufd {{.*#+}} xmm7 = xmm7[1,1,3,3]
; AVX1-NEXT: vpblendw {{.*#+}} xmm5 = xmm7[0,1],xmm5[2,3],xmm7[4,5],xmm5[6,7]
; AVX1-NEXT: vpcmpeqd %xmm5, %xmm8, %xmm5
; AVX1-NEXT: vpxor %xmm6, %xmm5, %xmm5
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm5, %ymm2
; AVX1-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpmulld %xmm3, %xmm4, %xmm1
; AVX1-NEXT: vmovq %xmm1, 16(%rdi)
; AVX1-NEXT: vmovdqa %xmm0, (%rdi)
; AVX1-NEXT: vmovaps %ymm2, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: umulo_v6i32:
; AVX2: # %bb.0:
; AVX2-NEXT: vpshufd {{.*#+}} ymm2 = ymm1[1,1,3,3,5,5,7,7]
; AVX2-NEXT: vpshufd {{.*#+}} ymm3 = ymm0[1,1,3,3,5,5,7,7]
; AVX2-NEXT: vpmuludq %ymm2, %ymm3, %ymm2
; AVX2-NEXT: vpmuludq %ymm1, %ymm0, %ymm3
; AVX2-NEXT: vpshufd {{.*#+}} ymm3 = ymm3[1,1,3,3,5,5,7,7]
; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0],ymm2[1],ymm3[2],ymm2[3],ymm3[4],ymm2[5],ymm3[6],ymm2[7]
; AVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX2-NEXT: vpcmpeqd %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpcmpeqd %ymm3, %ymm3, %ymm3
; AVX2-NEXT: vpxor %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpmulld %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vmovq %xmm1, 16(%rdi)
; AVX2-NEXT: vmovdqa %xmm0, (%rdi)
; AVX2-NEXT: vmovdqa %ymm2, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: umulo_v6i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vpmuludq %ymm1, %ymm0, %ymm2
; AVX512-NEXT: vpshufd {{.*#+}} ymm3 = ymm1[1,1,3,3,5,5,7,7]
; AVX512-NEXT: vpshufd {{.*#+}} ymm4 = ymm0[1,1,3,3,5,5,7,7]
; AVX512-NEXT: vpmuludq %ymm3, %ymm4, %ymm3
; AVX512-NEXT: vmovdqa {{.*#+}} ymm4 = [1,9,3,11,5,13,7,15]
; AVX512-NEXT: vpermi2d %ymm3, %ymm2, %ymm4
; AVX512-NEXT: vptestmd %ymm4, %ymm4, %k1
; AVX512-NEXT: vpmulld %ymm1, %ymm0, %ymm1
; AVX512-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
; AVX512-NEXT: vmovdqa32 %ymm0, %ymm0 {%k1} {z}
; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm2
; AVX512-NEXT: vmovq %xmm2, 16(%rdi)
; AVX512-NEXT: vmovdqa %xmm1, (%rdi)
; AVX512-NEXT: retq
  %t = call {<6 x i32>, <6 x i1>} @llvm.umul.with.overflow.v6i32(<6 x i32> %a0, <6 x i32> %a1)
  %val = extractvalue {<6 x i32>, <6 x i1>} %t, 0
  %obit = extractvalue {<6 x i32>, <6 x i1>} %t, 1
  %res = sext <6 x i1> %obit to <6 x i32>
  store <6 x i32> %val, <6 x i32>* %p2
  ret <6 x i32> %res
}

define <8 x i32> @umulo_v8i32(<8 x i32> %a0, <8 x i32> %a1, <8 x i32>* %p2) nounwind {
; SSE2-LABEL: umulo_v8i32:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm4
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm2, %xmm4
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm4[1,3,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm2[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm5, %xmm6
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm6[1,3,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSE2-NEXT: pxor %xmm8, %xmm8
; SSE2-NEXT: pcmpeqd %xmm8, %xmm0
; SSE2-NEXT: pcmpeqd %xmm7, %xmm7
; SSE2-NEXT: pxor %xmm7, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm1[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm3, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,3,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm5, %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm3[1,3,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1]
; SSE2-NEXT: pcmpeqd %xmm8, %xmm2
; SSE2-NEXT: pxor %xmm7, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm6[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
; SSE2-NEXT: movdqa %xmm1, 16(%rdi)
; SSE2-NEXT: movdqa %xmm4, (%rdi)
; SSE2-NEXT: movdqa %xmm2, %xmm1
; SSE2-NEXT: retq
;
; SSSE3-LABEL: umulo_v8i32:
; SSSE3: # %bb.0:
; SSSE3-NEXT: movdqa %xmm0, %xmm4
; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
; SSSE3-NEXT: pmuludq %xmm2, %xmm4
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm4[1,3,2,3]
; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm2[1,1,3,3]
; SSSE3-NEXT: pmuludq %xmm5, %xmm6
; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm6[1,3,2,3]
; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSSE3-NEXT: pxor %xmm8, %xmm8
; SSSE3-NEXT: pcmpeqd %xmm8, %xmm0
; SSSE3-NEXT: pcmpeqd %xmm7, %xmm7
; SSSE3-NEXT: pxor %xmm7, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm1[1,1,3,3]
; SSSE3-NEXT: pmuludq %xmm3, %xmm1
; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,3,2,3]
; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
; SSSE3-NEXT: pmuludq %xmm5, %xmm3
; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm3[1,3,2,3]
; SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1]
; SSSE3-NEXT: pcmpeqd %xmm8, %xmm2
; SSSE3-NEXT: pxor %xmm7, %xmm2
; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3]
; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm6[0,2,2,3]
; SSSE3-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
; SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
; SSSE3-NEXT: movdqa %xmm1, 16(%rdi)
; SSSE3-NEXT: movdqa %xmm4, (%rdi)
; SSSE3-NEXT: movdqa %xmm2, %xmm1
; SSSE3-NEXT: retq
;
; SSE41-LABEL: umulo_v8i32:
; SSE41: # %bb.0:
; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm2[1,1,3,3]
; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
; SSE41-NEXT: pmuludq %xmm4, %xmm5
; SSE41-NEXT: movdqa %xmm0, %xmm4
; SSE41-NEXT: pmuludq %xmm2, %xmm4
; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
; SSE41-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1],xmm5[2,3],xmm4[4,5],xmm5[6,7]
; SSE41-NEXT: pxor %xmm8, %xmm8
; SSE41-NEXT: pcmpeqd %xmm8, %xmm4
; SSE41-NEXT: pcmpeqd %xmm7, %xmm7
; SSE41-NEXT: pxor %xmm7, %xmm4
; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm3[1,1,3,3]
; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm1[1,1,3,3]
; SSE41-NEXT: pmuludq %xmm5, %xmm6
; SSE41-NEXT: movdqa %xmm1, %xmm5
; SSE41-NEXT: pmuludq %xmm3, %xmm5
; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
; SSE41-NEXT: pblendw {{.*#+}} xmm5 = xmm5[0,1],xmm6[2,3],xmm5[4,5],xmm6[6,7]
; SSE41-NEXT: pcmpeqd %xmm8, %xmm5
; SSE41-NEXT: pxor %xmm7, %xmm5
; SSE41-NEXT: pmulld %xmm2, %xmm0
; SSE41-NEXT: pmulld %xmm3, %xmm1
; SSE41-NEXT: movdqa %xmm1, 16(%rdi)
; SSE41-NEXT: movdqa %xmm0, (%rdi)
; SSE41-NEXT: movdqa %xmm4, %xmm0
; SSE41-NEXT: movdqa %xmm5, %xmm1
; SSE41-NEXT: retq
;
; AVX1-LABEL: umulo_v8i32:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm3[1,1,3,3]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm4[1,1,3,3]
; AVX1-NEXT: vpmuludq %xmm2, %xmm5, %xmm2
; AVX1-NEXT: vpmuludq %xmm3, %xmm4, %xmm5
; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm5[0,1],xmm2[2,3],xmm5[4,5],xmm2[6,7]
; AVX1-NEXT: vpxor %xmm8, %xmm8, %xmm8
; AVX1-NEXT: vpcmpeqd %xmm2, %xmm8, %xmm2
; AVX1-NEXT: vpcmpeqd %xmm6, %xmm6, %xmm6
; AVX1-NEXT: vpxor %xmm6, %xmm2, %xmm2
; AVX1-NEXT: vpshufd {{.*#+}} xmm7 = xmm1[1,1,3,3]
; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
; AVX1-NEXT: vpmuludq %xmm7, %xmm5, %xmm5
; AVX1-NEXT: vpmuludq %xmm1, %xmm0, %xmm7
; AVX1-NEXT: vpshufd {{.*#+}} xmm7 = xmm7[1,1,3,3]
; AVX1-NEXT: vpblendw {{.*#+}} xmm5 = xmm7[0,1],xmm5[2,3],xmm7[4,5],xmm5[6,7]
; AVX1-NEXT: vpcmpeqd %xmm5, %xmm8, %xmm5
; AVX1-NEXT: vpxor %xmm6, %xmm5, %xmm5
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm5, %ymm2
; AVX1-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpmulld %xmm3, %xmm4, %xmm1
; AVX1-NEXT: vmovdqa %xmm1, 16(%rdi)
; AVX1-NEXT: vmovdqa %xmm0, (%rdi)
; AVX1-NEXT: vmovaps %ymm2, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: umulo_v8i32:
; AVX2: # %bb.0:
; AVX2-NEXT: vpshufd {{.*#+}} ymm2 = ymm1[1,1,3,3,5,5,7,7]
; AVX2-NEXT: vpshufd {{.*#+}} ymm3 = ymm0[1,1,3,3,5,5,7,7]
; AVX2-NEXT: vpmuludq %ymm2, %ymm3, %ymm2
; AVX2-NEXT: vpmuludq %ymm1, %ymm0, %ymm3
; AVX2-NEXT: vpshufd {{.*#+}} ymm3 = ymm3[1,1,3,3,5,5,7,7]
; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0],ymm2[1],ymm3[2],ymm2[3],ymm3[4],ymm2[5],ymm3[6],ymm2[7]
; AVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX2-NEXT: vpcmpeqd %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpcmpeqd %ymm3, %ymm3, %ymm3
; AVX2-NEXT: vpxor %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpmulld %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vmovdqa %ymm0, (%rdi)
; AVX2-NEXT: vmovdqa %ymm2, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: umulo_v8i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vpmuludq %ymm1, %ymm0, %ymm2
; AVX512-NEXT: vpshufd {{.*#+}} ymm3 = ymm1[1,1,3,3,5,5,7,7]
; AVX512-NEXT: vpshufd {{.*#+}} ymm4 = ymm0[1,1,3,3,5,5,7,7]
; AVX512-NEXT: vpmuludq %ymm3, %ymm4, %ymm3
; AVX512-NEXT: vmovdqa {{.*#+}} ymm4 = [1,9,3,11,5,13,7,15]
; AVX512-NEXT: vpermi2d %ymm3, %ymm2, %ymm4
; AVX512-NEXT: vptestmd %ymm4, %ymm4, %k1
; AVX512-NEXT: vpmulld %ymm1, %ymm0, %ymm1
; AVX512-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
; AVX512-NEXT: vmovdqa32 %ymm0, %ymm0 {%k1} {z}
; AVX512-NEXT: vmovdqa %ymm1, (%rdi)
; AVX512-NEXT: retq
  %t = call {<8 x i32>, <8 x i1>} @llvm.umul.with.overflow.v8i32(<8 x i32> %a0, <8 x i32> %a1)
  %val = extractvalue {<8 x i32>, <8 x i1>} %t, 0
  %obit = extractvalue {<8 x i32>, <8 x i1>} %t, 1
  %res = sext <8 x i1> %obit to <8 x i32>
  store <8 x i32> %val, <8 x i32>* %p2
  ret <8 x i32> %res
}

define <16 x i32> @umulo_v16i32(<16 x i32> %a0, <16 x i32> %a1, <16 x i32>* %p2) nounwind {
; SSE2-LABEL: umulo_v16i32:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm8
; SSE2-NEXT: pshufd {{.*#+}} xmm10 = xmm0[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm4, %xmm8
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm8[1,3,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm9 = xmm4[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm10, %xmm9
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm9[1,3,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
; SSE2-NEXT: pxor %xmm10, %xmm10
; SSE2-NEXT: pcmpeqd %xmm10, %xmm0
; SSE2-NEXT: pcmpeqd %xmm11, %xmm11
; SSE2-NEXT: pxor %xmm11, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm13 = xmm1[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm5, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm15 = xmm1[1,3,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm12 = xmm5[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm13, %xmm12
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm12[1,3,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm15 = xmm15[0],xmm5[0],xmm15[1],xmm5[1]
; SSE2-NEXT: pcmpeqd %xmm10, %xmm15
; SSE2-NEXT: pxor %xmm11, %xmm15
; SSE2-NEXT: pshufd {{.*#+}} xmm14 = xmm2[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm6, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm2[1,3,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm13 = xmm6[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm14, %xmm13
; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm13[1,3,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1]
; SSE2-NEXT: pcmpeqd %xmm10, %xmm5
; SSE2-NEXT: pxor %xmm11, %xmm5
; SSE2-NEXT: pshufd {{.*#+}} xmm14 = xmm3[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm7, %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm3[1,3,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm7[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm14, %xmm7
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm7[1,3,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm4[0],xmm6[1],xmm4[1]
; SSE2-NEXT: pcmpeqd %xmm10, %xmm6
; SSE2-NEXT: pxor %xmm11, %xmm6
; SSE2-NEXT: pshufd {{.*#+}} xmm8 = xmm8[0,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm9[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm8 = xmm8[0],xmm4[0],xmm8[1],xmm4[1]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm12[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm13[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm7[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
; SSE2-NEXT: movdqa %xmm3, 48(%rdi)
; SSE2-NEXT: movdqa %xmm2, 32(%rdi)
; SSE2-NEXT: movdqa %xmm1, 16(%rdi)
; SSE2-NEXT: movdqa %xmm8, (%rdi)
; SSE2-NEXT: movdqa %xmm15, %xmm1
; SSE2-NEXT: movdqa %xmm5, %xmm2
; SSE2-NEXT: movdqa %xmm6, %xmm3
; SSE2-NEXT: retq
;
; SSSE3-LABEL: umulo_v16i32:
; SSSE3: # %bb.0:
; SSSE3-NEXT: movdqa %xmm0, %xmm8
; SSSE3-NEXT: pshufd {{.*#+}} xmm10 = xmm0[1,1,3,3]
; SSSE3-NEXT: pmuludq %xmm4, %xmm8
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm8[1,3,2,3]
; SSSE3-NEXT: pshufd {{.*#+}} xmm9 = xmm4[1,1,3,3]
; SSSE3-NEXT: pmuludq %xmm10, %xmm9
; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm9[1,3,2,3]
; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
; SSSE3-NEXT: pxor %xmm10, %xmm10
; SSSE3-NEXT: pcmpeqd %xmm10, %xmm0
; SSSE3-NEXT: pcmpeqd %xmm11, %xmm11
; SSSE3-NEXT: pxor %xmm11, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm13 = xmm1[1,1,3,3]
; SSSE3-NEXT: pmuludq %xmm5, %xmm1
; SSSE3-NEXT: pshufd {{.*#+}} xmm15 = xmm1[1,3,2,3]
; SSSE3-NEXT: pshufd {{.*#+}} xmm12 = xmm5[1,1,3,3]
; SSSE3-NEXT: pmuludq %xmm13, %xmm12
; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm12[1,3,2,3]
; SSSE3-NEXT: punpckldq {{.*#+}} xmm15 = xmm15[0],xmm5[0],xmm15[1],xmm5[1]
; SSSE3-NEXT: pcmpeqd %xmm10, %xmm15
; SSSE3-NEXT: pxor %xmm11, %xmm15
; SSSE3-NEXT: pshufd {{.*#+}} xmm14 = xmm2[1,1,3,3]
; SSSE3-NEXT: pmuludq %xmm6, %xmm2
; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm2[1,3,2,3]
; SSSE3-NEXT: pshufd {{.*#+}} xmm13 = xmm6[1,1,3,3]
; SSSE3-NEXT: pmuludq %xmm14, %xmm13
; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm13[1,3,2,3]
; SSSE3-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1]
; SSSE3-NEXT: pcmpeqd %xmm10, %xmm5
; SSSE3-NEXT: pxor %xmm11, %xmm5
; SSSE3-NEXT: pshufd {{.*#+}} xmm14 = xmm3[1,1,3,3]
; SSSE3-NEXT: pmuludq %xmm7, %xmm3
; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm3[1,3,2,3]
; SSSE3-NEXT: pshufd {{.*#+}} xmm7 = xmm7[1,1,3,3]
; SSSE3-NEXT: pmuludq %xmm14, %xmm7
; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm7[1,3,2,3]
; SSSE3-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm4[0],xmm6[1],xmm4[1]
; SSSE3-NEXT: pcmpeqd %xmm10, %xmm6
; SSSE3-NEXT: pxor %xmm11, %xmm6
; SSSE3-NEXT: pshufd {{.*#+}} xmm8 = xmm8[0,2,2,3]
; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm9[0,2,2,3]
; SSSE3-NEXT: punpckldq {{.*#+}} xmm8 = xmm8[0],xmm4[0],xmm8[1],xmm4[1]
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm12[0,2,2,3]
; SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1]
; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm13[0,2,2,3]
; SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm7[0,2,2,3]
; SSSE3-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
; SSSE3-NEXT: movdqa %xmm3, 48(%rdi)
; SSSE3-NEXT: movdqa %xmm2, 32(%rdi)
; SSSE3-NEXT: movdqa %xmm1, 16(%rdi)
; SSSE3-NEXT: movdqa %xmm8, (%rdi)
; SSSE3-NEXT: movdqa %xmm15, %xmm1
; SSSE3-NEXT: movdqa %xmm5, %xmm2
; SSSE3-NEXT: movdqa %xmm6, %xmm3
; SSSE3-NEXT: retq
;
; SSE41-LABEL: umulo_v16i32:
; SSE41: # %bb.0:
; SSE41-NEXT: pshufd {{.*#+}} xmm8 = xmm4[1,1,3,3]
; SSE41-NEXT: pshufd {{.*#+}} xmm9 = xmm0[1,1,3,3]
; SSE41-NEXT: pmuludq %xmm8, %xmm9
; SSE41-NEXT: movdqa %xmm0, %xmm8
; SSE41-NEXT: pmuludq %xmm4, %xmm8
; SSE41-NEXT: pshufd {{.*#+}} xmm8 = xmm8[1,1,3,3]
; SSE41-NEXT: pblendw {{.*#+}} xmm8 = xmm8[0,1],xmm9[2,3],xmm8[4,5],xmm9[6,7]
; SSE41-NEXT: pxor %xmm12, %xmm12
; SSE41-NEXT: pcmpeqd %xmm12, %xmm8
; SSE41-NEXT: pcmpeqd %xmm13, %xmm13
; SSE41-NEXT: pxor %xmm13, %xmm8
; SSE41-NEXT: pshufd {{.*#+}} xmm9 = xmm5[1,1,3,3]
; SSE41-NEXT: pshufd {{.*#+}} xmm10 = xmm1[1,1,3,3]
; SSE41-NEXT: pmuludq %xmm9, %xmm10
; SSE41-NEXT: movdqa %xmm1, %xmm9
; SSE41-NEXT: pmuludq %xmm5, %xmm9
; SSE41-NEXT: pshufd {{.*#+}} xmm9 = xmm9[1,1,3,3]
; SSE41-NEXT: pblendw {{.*#+}} xmm9 = xmm9[0,1],xmm10[2,3],xmm9[4,5],xmm10[6,7]
; SSE41-NEXT: pcmpeqd %xmm12, %xmm9
; SSE41-NEXT: pxor %xmm13, %xmm9
; SSE41-NEXT: pshufd {{.*#+}} xmm10 = xmm6[1,1,3,3]
; SSE41-NEXT: pshufd {{.*#+}} xmm11 = xmm2[1,1,3,3]
; SSE41-NEXT: pmuludq %xmm10, %xmm11
; SSE41-NEXT: movdqa %xmm2, %xmm10
; SSE41-NEXT: pmuludq %xmm6, %xmm10
; SSE41-NEXT: pshufd {{.*#+}} xmm10 = xmm10[1,1,3,3]
; SSE41-NEXT: pblendw {{.*#+}} xmm10 = xmm10[0,1],xmm11[2,3],xmm10[4,5],xmm11[6,7]
; SSE41-NEXT: pcmpeqd %xmm12, %xmm10
; SSE41-NEXT: pxor %xmm13, %xmm10
; SSE41-NEXT: pshufd {{.*#+}} xmm11 = xmm7[1,1,3,3]
; SSE41-NEXT: pshufd {{.*#+}} xmm14 = xmm3[1,1,3,3]
; SSE41-NEXT: pmuludq %xmm11, %xmm14
; SSE41-NEXT: movdqa %xmm3, %xmm11
; SSE41-NEXT: pmuludq %xmm7, %xmm11
; SSE41-NEXT: pshufd {{.*#+}} xmm11 = xmm11[1,1,3,3]
; SSE41-NEXT: pblendw {{.*#+}} xmm11 = xmm11[0,1],xmm14[2,3],xmm11[4,5],xmm14[6,7]
; SSE41-NEXT: pcmpeqd %xmm12, %xmm11
; SSE41-NEXT: pxor %xmm13, %xmm11
; SSE41-NEXT: pmulld %xmm4, %xmm0
; SSE41-NEXT: pmulld %xmm5, %xmm1
; SSE41-NEXT: pmulld %xmm6, %xmm2
; SSE41-NEXT: pmulld %xmm7, %xmm3
; SSE41-NEXT: movdqa %xmm3, 48(%rdi)
; SSE41-NEXT: movdqa %xmm2, 32(%rdi)
; SSE41-NEXT: movdqa %xmm1, 16(%rdi)
; SSE41-NEXT: movdqa %xmm0, (%rdi)
; SSE41-NEXT: movdqa %xmm8, %xmm0
; SSE41-NEXT: movdqa %xmm9, %xmm1
; SSE41-NEXT: movdqa %xmm10, %xmm2
; SSE41-NEXT: movdqa %xmm11, %xmm3
; SSE41-NEXT: retq
;
; AVX1-LABEL: umulo_v16i32:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm10
; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm10[1,1,3,3]
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm12
; AVX1-NEXT: vpshufd {{.*#+}} xmm7 = xmm12[1,1,3,3]
; AVX1-NEXT: vpmuludq %xmm6, %xmm7, %xmm6
; AVX1-NEXT: vpmuludq %xmm10, %xmm12, %xmm7
; AVX1-NEXT: vpshufd {{.*#+}} xmm7 = xmm7[1,1,3,3]
; AVX1-NEXT: vpblendw {{.*#+}} xmm7 = xmm7[0,1],xmm6[2,3],xmm7[4,5],xmm6[6,7]
; AVX1-NEXT: vpxor %xmm8, %xmm8, %xmm8
; AVX1-NEXT: vpcmpeqd %xmm7, %xmm8, %xmm7
; AVX1-NEXT: vpcmpeqd %xmm9, %xmm9, %xmm9
; AVX1-NEXT: vpxor %xmm7, %xmm9, %xmm7
; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm3[1,1,3,3]
; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm1[1,1,3,3]
; AVX1-NEXT: vpmuludq %xmm6, %xmm4, %xmm4
; AVX1-NEXT: vpmuludq %xmm3, %xmm1, %xmm6
; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm6[0,1],xmm4[2,3],xmm6[4,5],xmm4[6,7]
; AVX1-NEXT: vpcmpeqd %xmm4, %xmm8, %xmm4
; AVX1-NEXT: vpxor %xmm4, %xmm9, %xmm4
; AVX1-NEXT: vpackssdw %xmm7, %xmm4, %xmm11
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm6
; AVX1-NEXT: vpshufd {{.*#+}} xmm7 = xmm6[1,1,3,3]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm4[1,1,3,3]
; AVX1-NEXT: vpmuludq %xmm7, %xmm5, %xmm5
; AVX1-NEXT: vpmuludq %xmm6, %xmm4, %xmm7
; AVX1-NEXT: vpshufd {{.*#+}} xmm7 = xmm7[1,1,3,3]
; AVX1-NEXT: vpblendw {{.*#+}} xmm5 = xmm7[0,1],xmm5[2,3],xmm7[4,5],xmm5[6,7]
; AVX1-NEXT: vpcmpeqd %xmm5, %xmm8, %xmm5
; AVX1-NEXT: vpxor %xmm5, %xmm9, %xmm13
; AVX1-NEXT: vpshufd {{.*#+}} xmm7 = xmm2[1,1,3,3]
; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
; AVX1-NEXT: vpmuludq %xmm7, %xmm5, %xmm5
; AVX1-NEXT: vpmuludq %xmm2, %xmm0, %xmm7
; AVX1-NEXT: vpshufd {{.*#+}} xmm7 = xmm7[1,1,3,3]
; AVX1-NEXT: vpblendw {{.*#+}} xmm5 = xmm7[0,1],xmm5[2,3],xmm7[4,5],xmm5[6,7]
; AVX1-NEXT: vpcmpeqd %xmm5, %xmm8, %xmm5
; AVX1-NEXT: vpxor %xmm5, %xmm9, %xmm5
; AVX1-NEXT: vpackssdw %xmm13, %xmm5, %xmm5
; AVX1-NEXT: vpacksswb %xmm11, %xmm5, %xmm5
; AVX1-NEXT: vpmulld %xmm2, %xmm0, %xmm2
; AVX1-NEXT: vpmulld %xmm6, %xmm4, %xmm4
; AVX1-NEXT: vpmulld %xmm3, %xmm1, %xmm3
; AVX1-NEXT: vpmulld %xmm10, %xmm12, %xmm6
; AVX1-NEXT: vpmovsxbd %xmm5, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm5[1,1,1,1]
; AVX1-NEXT: vpmovsxbd %xmm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: vpacksswb %xmm11, %xmm11, %xmm1
; AVX1-NEXT: vpmovsxbd %xmm1, %xmm5
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,1,1]
; AVX1-NEXT: vpmovsxbd %xmm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm5, %ymm1
; AVX1-NEXT: vmovdqa %xmm6, 48(%rdi)
; AVX1-NEXT: vmovdqa %xmm3, 32(%rdi)
; AVX1-NEXT: vmovdqa %xmm4, 16(%rdi)
; AVX1-NEXT: vmovdqa %xmm2, (%rdi)
; AVX1-NEXT: retq
;
; AVX2-LABEL: umulo_v16i32:
; AVX2: # %bb.0:
; AVX2-NEXT: vpshufd {{.*#+}} ymm4 = ymm3[1,1,3,3,5,5,7,7]
; AVX2-NEXT: vpshufd {{.*#+}} ymm5 = ymm1[1,1,3,3,5,5,7,7]
; AVX2-NEXT: vpmuludq %ymm4, %ymm5, %ymm4
; AVX2-NEXT: vpmuludq %ymm3, %ymm1, %ymm5
; AVX2-NEXT: vpshufd {{.*#+}} ymm5 = ymm5[1,1,3,3,5,5,7,7]
; AVX2-NEXT: vpblendd {{.*#+}} ymm4 = ymm5[0],ymm4[1],ymm5[2],ymm4[3],ymm5[4],ymm4[5],ymm5[6],ymm4[7]
; AVX2-NEXT: vpxor %xmm5, %xmm5, %xmm5
; AVX2-NEXT: vpcmpeqd %ymm5, %ymm4, %ymm4
; AVX2-NEXT: vpcmpeqd %ymm6, %ymm6, %ymm6
; AVX2-NEXT: vpxor %ymm6, %ymm4, %ymm4
; AVX2-NEXT: vextracti128 $1, %ymm4, %xmm7
; AVX2-NEXT: vpackssdw %xmm7, %xmm4, %xmm4
; AVX2-NEXT: vpshufd {{.*#+}} ymm7 = ymm2[1,1,3,3,5,5,7,7]
; AVX2-NEXT: vpshufd {{.*#+}} ymm8 = ymm0[1,1,3,3,5,5,7,7]
; AVX2-NEXT: vpmuludq %ymm7, %ymm8, %ymm7
; AVX2-NEXT: vpmuludq %ymm2, %ymm0, %ymm8
; AVX2-NEXT: vpshufd {{.*#+}} ymm8 = ymm8[1,1,3,3,5,5,7,7]
; AVX2-NEXT: vpblendd {{.*#+}} ymm7 = ymm8[0],ymm7[1],ymm8[2],ymm7[3],ymm8[4],ymm7[5],ymm8[6],ymm7[7]
; AVX2-NEXT: vpcmpeqd %ymm5, %ymm7, %ymm5
; AVX2-NEXT: vpxor %ymm6, %ymm5, %ymm5
; AVX2-NEXT: vextracti128 $1, %ymm5, %xmm6
; AVX2-NEXT: vpackssdw %xmm6, %xmm5, %xmm5
; AVX2-NEXT: vpacksswb %xmm5, %xmm5, %xmm5
; AVX2-NEXT: vpmulld %ymm2, %ymm0, %ymm2
; AVX2-NEXT: vpmulld %ymm3, %ymm1, %ymm3
; AVX2-NEXT: vpmovsxbd %xmm5, %ymm0
; AVX2-NEXT: vpacksswb %xmm4, %xmm4, %xmm1
; AVX2-NEXT: vpmovsxbd %xmm1, %ymm1
; AVX2-NEXT: vmovdqa %ymm3, 32(%rdi)
; AVX2-NEXT: vmovdqa %ymm2, (%rdi)
; AVX2-NEXT: retq
;
; AVX512-LABEL: umulo_v16i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vpmuludq %zmm1, %zmm0, %zmm2
; AVX512-NEXT: vpshufd {{.*#+}} zmm3 = zmm1[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
; AVX512-NEXT: vpshufd {{.*#+}} zmm4 = zmm0[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
; AVX512-NEXT: vpmuludq %zmm3, %zmm4, %zmm3
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm4 = [1,17,3,19,5,21,7,23,9,25,11,27,13,29,15,31]
; AVX512-NEXT: vpermi2d %zmm3, %zmm2, %zmm4
; AVX512-NEXT: vptestmd %zmm4, %zmm4, %k1
; AVX512-NEXT: vpmulld %zmm1, %zmm0, %zmm1
; AVX512-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512-NEXT: vmovdqa64 %zmm1, (%rdi)
; AVX512-NEXT: retq
  %t = call {<16 x i32>, <16 x i1>} @llvm.umul.with.overflow.v16i32(<16 x i32> %a0, <16 x i32> %a1)
  %val = extractvalue {<16 x i32>, <16 x i1>} %t, 0
  %obit = extractvalue {<16 x i32>, <16 x i1>} %t, 1
  %res = sext <16 x i1> %obit to <16 x i32>
  store <16 x i32> %val, <16 x i32>* %p2
  ret <16 x i32> %res
}

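; v16i8: x86 has no vector byte multiply, so the checks below show the
; elements widened to i16 (punpck/pmovzx + pmullw); the high halves of
; the products (psrlw $8) are compared against zero to detect overflow.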
define <16 x i32> @umulo_v16i8(<16 x i8> %a0, <16 x i8> %a1, <16 x i8>* %p2) nounwind {
|
|
; SSE2-LABEL: umulo_v16i8:
|
|
; SSE2: # %bb.0:
|
|
; SSE2-NEXT: movdqa %xmm1, %xmm2
|
|
; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
|
|
; SSE2-NEXT: movdqa %xmm0, %xmm3
|
|
; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
|
|
; SSE2-NEXT: pmullw %xmm2, %xmm3
|
|
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
|
|
; SSE2-NEXT: pand %xmm2, %xmm3
|
|
; SSE2-NEXT: movdqa %xmm1, %xmm5
|
|
; SSE2-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
|
|
; SSE2-NEXT: movdqa %xmm0, %xmm4
|
|
; SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
|
|
; SSE2-NEXT: pmullw %xmm5, %xmm4
|
|
; SSE2-NEXT: pand %xmm2, %xmm4
|
|
; SSE2-NEXT: packuswb %xmm3, %xmm4
|
|
; SSE2-NEXT: pxor %xmm2, %xmm2
|
|
; SSE2-NEXT: movdqa %xmm1, %xmm3
|
|
; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm2[8],xmm3[9],xmm2[9],xmm3[10],xmm2[10],xmm3[11],xmm2[11],xmm3[12],xmm2[12],xmm3[13],xmm2[13],xmm3[14],xmm2[14],xmm3[15],xmm2[15]
|
|
; SSE2-NEXT: movdqa %xmm0, %xmm5
|
|
; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm2[8],xmm5[9],xmm2[9],xmm5[10],xmm2[10],xmm5[11],xmm2[11],xmm5[12],xmm2[12],xmm5[13],xmm2[13],xmm5[14],xmm2[14],xmm5[15],xmm2[15]
|
|
; SSE2-NEXT: pmullw %xmm3, %xmm5
|
|
; SSE2-NEXT: psrlw $8, %xmm5
|
|
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
|
|
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
|
|
; SSE2-NEXT: pmullw %xmm1, %xmm0
|
|
; SSE2-NEXT: psrlw $8, %xmm0
|
|
; SSE2-NEXT: packuswb %xmm5, %xmm0
|
|
; SSE2-NEXT: pcmpeqb %xmm2, %xmm0
|
|
; SSE2-NEXT: pcmpeqd %xmm3, %xmm3
|
|
; SSE2-NEXT: pxor %xmm0, %xmm3
|
|
; SSE2-NEXT: movdqa %xmm3, %xmm1
|
|
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
|
|
; SSE2-NEXT: movdqa %xmm1, %xmm0
|
|
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
|
|
; SSE2-NEXT: pslld $31, %xmm0
|
|
; SSE2-NEXT: psrad $31, %xmm0
|
|
; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4,4,5,5,6,6,7,7]
|
|
; SSE2-NEXT: pslld $31, %xmm1
|
|
; SSE2-NEXT: psrad $31, %xmm1
|
|
; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
|
|
; SSE2-NEXT: movdqa %xmm3, %xmm2
|
|
; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3]
|
|
; SSE2-NEXT: pslld $31, %xmm2
|
|
; SSE2-NEXT: psrad $31, %xmm2
|
|
; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4,4,5,5,6,6,7,7]
|
|
; SSE2-NEXT: pslld $31, %xmm3
|
|
; SSE2-NEXT: psrad $31, %xmm3
|
|
; SSE2-NEXT: movdqa %xmm4, (%rdi)
|
|
; SSE2-NEXT: retq
|
|
;
|
|
; SSSE3-LABEL: umulo_v16i8:
|
|
; SSSE3: # %bb.0:
|
|
; SSSE3-NEXT: movdqa %xmm1, %xmm2
|
|
; SSSE3-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
|
|
; SSSE3-NEXT: movdqa %xmm0, %xmm3
|
|
; SSSE3-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
|
|
; SSSE3-NEXT: pmullw %xmm2, %xmm3
|
|
; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
|
|
; SSSE3-NEXT: pand %xmm2, %xmm3
|
|
; SSSE3-NEXT: movdqa %xmm1, %xmm5
|
|
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
|
|
; SSSE3-NEXT: movdqa %xmm0, %xmm4
|
|
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
|
|
; SSSE3-NEXT: pmullw %xmm5, %xmm4
|
|
; SSSE3-NEXT: pand %xmm2, %xmm4
|
|
; SSSE3-NEXT: packuswb %xmm3, %xmm4
|
|
; SSSE3-NEXT: pxor %xmm2, %xmm2
|
|
; SSSE3-NEXT: movdqa %xmm1, %xmm3
|
|
; SSSE3-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm2[8],xmm3[9],xmm2[9],xmm3[10],xmm2[10],xmm3[11],xmm2[11],xmm3[12],xmm2[12],xmm3[13],xmm2[13],xmm3[14],xmm2[14],xmm3[15],xmm2[15]
|
|
; SSSE3-NEXT: movdqa %xmm0, %xmm5
|
|
; SSSE3-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm2[8],xmm5[9],xmm2[9],xmm5[10],xmm2[10],xmm5[11],xmm2[11],xmm5[12],xmm2[12],xmm5[13],xmm2[13],xmm5[14],xmm2[14],xmm5[15],xmm2[15]
|
|
; SSSE3-NEXT: pmullw %xmm3, %xmm5
|
|
; SSSE3-NEXT: psrlw $8, %xmm5
|
|
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
|
|
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
|
|
; SSSE3-NEXT: pmullw %xmm1, %xmm0
|
|
; SSSE3-NEXT: psrlw $8, %xmm0
|
|
; SSSE3-NEXT: packuswb %xmm5, %xmm0
|
|
; SSSE3-NEXT: pcmpeqb %xmm2, %xmm0
|
|
; SSSE3-NEXT: pcmpeqd %xmm3, %xmm3
|
|
; SSSE3-NEXT: pxor %xmm0, %xmm3
|
|
; SSSE3-NEXT: movdqa %xmm3, %xmm1
|
|
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
|
|
; SSSE3-NEXT: movdqa %xmm1, %xmm0
|
|
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
|
|
; SSSE3-NEXT: pslld $31, %xmm0
|
|
; SSSE3-NEXT: psrad $31, %xmm0
|
|
; SSSE3-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4,4,5,5,6,6,7,7]
|
|
; SSSE3-NEXT: pslld $31, %xmm1
|
|
; SSSE3-NEXT: psrad $31, %xmm1
|
|
; SSSE3-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
|
|
; SSSE3-NEXT: movdqa %xmm3, %xmm2
|
|
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3]
|
|
; SSSE3-NEXT: pslld $31, %xmm2
|
|
; SSSE3-NEXT: psrad $31, %xmm2
|
|
; SSSE3-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4,4,5,5,6,6,7,7]
|
|
; SSSE3-NEXT: pslld $31, %xmm3
|
|
; SSSE3-NEXT: psrad $31, %xmm3
|
|
; SSSE3-NEXT: movdqa %xmm4, (%rdi)
|
|
; SSSE3-NEXT: retq
|
|
;
|
|
; SSE41-LABEL: umulo_v16i8:
|
|
; SSE41: # %bb.0:
|
|
; SSE41-NEXT: movdqa %xmm1, %xmm2
|
|
; SSE41-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
|
|
; SSE41-NEXT: movdqa %xmm0, %xmm3
|
|
; SSE41-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
|
|
; SSE41-NEXT: pmullw %xmm2, %xmm3
|
|
; SSE41-NEXT: movdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
|
|
; SSE41-NEXT: pand %xmm4, %xmm3
|
|
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
|
|
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm5 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
|
|
; SSE41-NEXT: pmullw %xmm2, %xmm5
|
|
; SSE41-NEXT: pand %xmm5, %xmm4
|
|
; SSE41-NEXT: packuswb %xmm3, %xmm4
|
|
; SSE41-NEXT: pxor %xmm2, %xmm2
|
|
; SSE41-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm2[8],xmm1[9],xmm2[9],xmm1[10],xmm2[10],xmm1[11],xmm2[11],xmm1[12],xmm2[12],xmm1[13],xmm2[13],xmm1[14],xmm2[14],xmm1[15],xmm2[15]
|
|
; SSE41-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15]
|
|
; SSE41-NEXT: pmullw %xmm1, %xmm0
|
|
; SSE41-NEXT: psrlw $8, %xmm0
; SSE41-NEXT: psrlw $8, %xmm5
; SSE41-NEXT: packuswb %xmm0, %xmm5
; SSE41-NEXT: pcmpeqb %xmm2, %xmm5
; SSE41-NEXT: pcmpeqd %xmm3, %xmm3
; SSE41-NEXT: pxor %xmm5, %xmm3
; SSE41-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero
; SSE41-NEXT: pslld $31, %xmm0
; SSE41-NEXT: psrad $31, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm3[1,1,1,1]
; SSE41-NEXT: pmovzxbd {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
; SSE41-NEXT: pslld $31, %xmm1
; SSE41-NEXT: psrad $31, %xmm1
; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm3[2,3,2,3]
; SSE41-NEXT: pmovzxbd {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero
; SSE41-NEXT: pslld $31, %xmm2
; SSE41-NEXT: psrad $31, %xmm2
; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm3[3,3,3,3]
; SSE41-NEXT: pmovzxbd {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero
; SSE41-NEXT: pslld $31, %xmm3
; SSE41-NEXT: psrad $31, %xmm3
; SSE41-NEXT: movdqa %xmm4, (%rdi)
; SSE41-NEXT: retq
;
; AVX1-LABEL: umulo_v16i8:
; AVX1: # %bb.0:
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vpmullw %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm4 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm5 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX1-NEXT: vpmullw %xmm4, %xmm5, %xmm4
; AVX1-NEXT: vpand %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vpackuswb %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm1[8],xmm3[8],xmm1[9],xmm3[9],xmm1[10],xmm3[10],xmm1[11],xmm3[11],xmm1[12],xmm3[12],xmm1[13],xmm3[13],xmm1[14],xmm3[14],xmm1[15],xmm3[15]
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm3[8],xmm0[9],xmm3[9],xmm0[10],xmm3[10],xmm0[11],xmm3[11],xmm0[12],xmm3[12],xmm0[13],xmm3[13],xmm0[14],xmm3[14],xmm0[15],xmm3[15]
; AVX1-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm0
; AVX1-NEXT: vpsrlw $8, %xmm4, %xmm1
; AVX1-NEXT: vpackuswb %xmm0, %xmm1, %xmm0
; AVX1-NEXT: vpcmpeqb %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm1
; AVX1-NEXT: vpmovsxbd %xmm1, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm1[1,1,1,1]
; AVX1-NEXT: vpmovsxbd %xmm3, %xmm3
; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm1[2,3,2,3]
; AVX1-NEXT: vpmovsxbd %xmm3, %xmm3
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[3,3,3,3]
; AVX1-NEXT: vpmovsxbd %xmm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm3, %ymm1
; AVX1-NEXT: vmovdqa %xmm2, (%rdi)
; AVX1-NEXT: retq
;
; AVX2-LABEL: umulo_v16i8:
; AVX2: # %bb.0:
; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; AVX2-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm1
; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
; AVX2-NEXT: vpackuswb %xmm2, %xmm1, %xmm2
; AVX2-NEXT: vpsrlw $8, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm1
; AVX2-NEXT: vpmovsxbd %xmm1, %ymm0
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
; AVX2-NEXT: vpmovsxbd %xmm1, %ymm1
; AVX2-NEXT: vmovdqa %xmm2, (%rdi)
; AVX2-NEXT: retq
;
; AVX512-LABEL: umulo_v16i8:
; AVX512: # %bb.0:
; AVX512-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
; AVX512-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; AVX512-NEXT: vpmullw %ymm1, %ymm0, %ymm1
; AVX512-NEXT: vpsrlw $8, %ymm1, %ymm0
; AVX512-NEXT: vpmovwb %ymm0, %xmm0
; AVX512-NEXT: vptestmb %xmm0, %xmm0, %k1
; AVX512-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512-NEXT: vpmovwb %ymm1, (%rdi)
; AVX512-NEXT: retq
%t = call {<16 x i8>, <16 x i1>} @llvm.umul.with.overflow.v16i8(<16 x i8> %a0, <16 x i8> %a1)
%val = extractvalue {<16 x i8>, <16 x i1>} %t, 0
%obit = extractvalue {<16 x i8>, <16 x i1>} %t, 1
%res = sext <16 x i1> %obit to <16 x i32>
store <16 x i8> %val, <16 x i8>* %p2
ret <16 x i32> %res
}
define <32 x i32> @umulo_v32i8(<32 x i8> %a0, <32 x i8> %a1, <32 x i8>* %p2) nounwind {
; SSE2-LABEL: umulo_v32i8:
; SSE2: # %bb.0:
; SSE2-NEXT: movq %rdi, %rax
; SSE2-NEXT: movdqa %xmm2, %xmm4
; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: movdqa %xmm0, %xmm5
; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: pmullw %xmm4, %xmm5
; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [255,255,255,255,255,255,255,255]
; SSE2-NEXT: pand %xmm8, %xmm5
; SSE2-NEXT: movdqa %xmm2, %xmm7
; SSE2-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: movdqa %xmm0, %xmm9
; SSE2-NEXT: punpcklbw {{.*#+}} xmm9 = xmm9[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: pmullw %xmm7, %xmm9
; SSE2-NEXT: pand %xmm8, %xmm9
; SSE2-NEXT: packuswb %xmm5, %xmm9
; SSE2-NEXT: movdqa %xmm3, %xmm5
; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: movdqa %xmm1, %xmm7
; SSE2-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: pmullw %xmm5, %xmm7
; SSE2-NEXT: pand %xmm8, %xmm7
; SSE2-NEXT: movdqa %xmm3, %xmm6
; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: movdqa %xmm1, %xmm10
; SSE2-NEXT: punpcklbw {{.*#+}} xmm10 = xmm10[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: pmullw %xmm6, %xmm10
; SSE2-NEXT: pand %xmm8, %xmm10
; SSE2-NEXT: packuswb %xmm7, %xmm10
; SSE2-NEXT: pxor %xmm6, %xmm6
; SSE2-NEXT: movdqa %xmm3, %xmm7
; SSE2-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm6[8],xmm7[9],xmm6[9],xmm7[10],xmm6[10],xmm7[11],xmm6[11],xmm7[12],xmm6[12],xmm7[13],xmm6[13],xmm7[14],xmm6[14],xmm7[15],xmm6[15]
; SSE2-NEXT: movdqa %xmm1, %xmm4
; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm6[8],xmm4[9],xmm6[9],xmm4[10],xmm6[10],xmm4[11],xmm6[11],xmm4[12],xmm6[12],xmm4[13],xmm6[13],xmm4[14],xmm6[14],xmm4[15],xmm6[15]
; SSE2-NEXT: pmullw %xmm7, %xmm4
; SSE2-NEXT: psrlw $8, %xmm4
; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm6[0],xmm3[1],xmm6[1],xmm3[2],xmm6[2],xmm3[3],xmm6[3],xmm3[4],xmm6[4],xmm3[5],xmm6[5],xmm3[6],xmm6[6],xmm3[7],xmm6[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1],xmm1[2],xmm6[2],xmm1[3],xmm6[3],xmm1[4],xmm6[4],xmm1[5],xmm6[5],xmm1[6],xmm6[6],xmm1[7],xmm6[7]
; SSE2-NEXT: pmullw %xmm3, %xmm1
; SSE2-NEXT: psrlw $8, %xmm1
; SSE2-NEXT: packuswb %xmm4, %xmm1
; SSE2-NEXT: pcmpeqb %xmm6, %xmm1
; SSE2-NEXT: pcmpeqd %xmm3, %xmm3
; SSE2-NEXT: pxor %xmm3, %xmm1
; SSE2-NEXT: movdqa %xmm2, %xmm4
; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm6[8],xmm4[9],xmm6[9],xmm4[10],xmm6[10],xmm4[11],xmm6[11],xmm4[12],xmm6[12],xmm4[13],xmm6[13],xmm4[14],xmm6[14],xmm4[15],xmm6[15]
; SSE2-NEXT: movdqa %xmm0, %xmm7
; SSE2-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm6[8],xmm7[9],xmm6[9],xmm7[10],xmm6[10],xmm7[11],xmm6[11],xmm7[12],xmm6[12],xmm7[13],xmm6[13],xmm7[14],xmm6[14],xmm7[15],xmm6[15]
; SSE2-NEXT: pmullw %xmm4, %xmm7
; SSE2-NEXT: psrlw $8, %xmm7
; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm6[0],xmm2[1],xmm6[1],xmm2[2],xmm6[2],xmm2[3],xmm6[3],xmm2[4],xmm6[4],xmm2[5],xmm6[5],xmm2[6],xmm6[6],xmm2[7],xmm6[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1],xmm0[2],xmm6[2],xmm0[3],xmm6[3],xmm0[4],xmm6[4],xmm0[5],xmm6[5],xmm0[6],xmm6[6],xmm0[7],xmm6[7]
; SSE2-NEXT: pmullw %xmm2, %xmm0
; SSE2-NEXT: psrlw $8, %xmm0
; SSE2-NEXT: packuswb %xmm7, %xmm0
; SSE2-NEXT: pcmpeqb %xmm6, %xmm0
; SSE2-NEXT: pxor %xmm3, %xmm0
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: movdqa %xmm2, %xmm3
; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0,0,1,1,2,2,3,3]
; SSE2-NEXT: pslld $31, %xmm3
; SSE2-NEXT: psrad $31, %xmm3
; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4,4,5,5,6,6,7,7]
; SSE2-NEXT: pslld $31, %xmm2
; SSE2-NEXT: psrad $31, %xmm2
; SSE2-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: movdqa %xmm0, %xmm6
; SSE2-NEXT: punpcklwd {{.*#+}} xmm6 = xmm6[0,0,1,1,2,2,3,3]
; SSE2-NEXT: pslld $31, %xmm6
; SSE2-NEXT: psrad $31, %xmm6
; SSE2-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4,4,5,5,6,6,7,7]
; SSE2-NEXT: pslld $31, %xmm0
; SSE2-NEXT: psrad $31, %xmm0
; SSE2-NEXT: movdqa %xmm1, %xmm4
; SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: movdqa %xmm4, %xmm7
; SSE2-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0,0,1,1,2,2,3,3]
; SSE2-NEXT: pslld $31, %xmm7
; SSE2-NEXT: psrad $31, %xmm7
; SSE2-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4,4,5,5,6,6,7,7]
; SSE2-NEXT: pslld $31, %xmm4
; SSE2-NEXT: psrad $31, %xmm4
; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: movdqa %xmm1, %xmm5
; SSE2-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0,0,1,1,2,2,3,3]
; SSE2-NEXT: pslld $31, %xmm5
; SSE2-NEXT: psrad $31, %xmm5
; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4,4,5,5,6,6,7,7]
; SSE2-NEXT: pslld $31, %xmm1
; SSE2-NEXT: psrad $31, %xmm1
; SSE2-NEXT: movdqa %xmm10, 16(%rsi)
; SSE2-NEXT: movdqa %xmm9, (%rsi)
; SSE2-NEXT: movdqa %xmm1, 112(%rdi)
; SSE2-NEXT: movdqa %xmm5, 96(%rdi)
; SSE2-NEXT: movdqa %xmm4, 80(%rdi)
; SSE2-NEXT: movdqa %xmm7, 64(%rdi)
; SSE2-NEXT: movdqa %xmm0, 48(%rdi)
; SSE2-NEXT: movdqa %xmm6, 32(%rdi)
; SSE2-NEXT: movdqa %xmm2, 16(%rdi)
; SSE2-NEXT: movdqa %xmm3, (%rdi)
; SSE2-NEXT: retq
;
; SSSE3-LABEL: umulo_v32i8:
; SSSE3: # %bb.0:
; SSSE3-NEXT: movq %rdi, %rax
; SSSE3-NEXT: movdqa %xmm2, %xmm4
; SSSE3-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSSE3-NEXT: movdqa %xmm0, %xmm5
; SSSE3-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSSE3-NEXT: pmullw %xmm4, %xmm5
; SSSE3-NEXT: movdqa {{.*#+}} xmm8 = [255,255,255,255,255,255,255,255]
; SSSE3-NEXT: pand %xmm8, %xmm5
; SSSE3-NEXT: movdqa %xmm2, %xmm7
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSSE3-NEXT: movdqa %xmm0, %xmm9
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm9 = xmm9[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSSE3-NEXT: pmullw %xmm7, %xmm9
; SSSE3-NEXT: pand %xmm8, %xmm9
; SSSE3-NEXT: packuswb %xmm5, %xmm9
; SSSE3-NEXT: movdqa %xmm3, %xmm5
; SSSE3-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSSE3-NEXT: movdqa %xmm1, %xmm7
; SSSE3-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSSE3-NEXT: pmullw %xmm5, %xmm7
; SSSE3-NEXT: pand %xmm8, %xmm7
; SSSE3-NEXT: movdqa %xmm3, %xmm6
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSSE3-NEXT: movdqa %xmm1, %xmm10
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm10 = xmm10[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSSE3-NEXT: pmullw %xmm6, %xmm10
; SSSE3-NEXT: pand %xmm8, %xmm10
; SSSE3-NEXT: packuswb %xmm7, %xmm10
; SSSE3-NEXT: pxor %xmm6, %xmm6
; SSSE3-NEXT: movdqa %xmm3, %xmm7
; SSSE3-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm6[8],xmm7[9],xmm6[9],xmm7[10],xmm6[10],xmm7[11],xmm6[11],xmm7[12],xmm6[12],xmm7[13],xmm6[13],xmm7[14],xmm6[14],xmm7[15],xmm6[15]
; SSSE3-NEXT: movdqa %xmm1, %xmm4
; SSSE3-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm6[8],xmm4[9],xmm6[9],xmm4[10],xmm6[10],xmm4[11],xmm6[11],xmm4[12],xmm6[12],xmm4[13],xmm6[13],xmm4[14],xmm6[14],xmm4[15],xmm6[15]
; SSSE3-NEXT: pmullw %xmm7, %xmm4
; SSSE3-NEXT: psrlw $8, %xmm4
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm6[0],xmm3[1],xmm6[1],xmm3[2],xmm6[2],xmm3[3],xmm6[3],xmm3[4],xmm6[4],xmm3[5],xmm6[5],xmm3[6],xmm6[6],xmm3[7],xmm6[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1],xmm1[2],xmm6[2],xmm1[3],xmm6[3],xmm1[4],xmm6[4],xmm1[5],xmm6[5],xmm1[6],xmm6[6],xmm1[7],xmm6[7]
; SSSE3-NEXT: pmullw %xmm3, %xmm1
; SSSE3-NEXT: psrlw $8, %xmm1
; SSSE3-NEXT: packuswb %xmm4, %xmm1
; SSSE3-NEXT: pcmpeqb %xmm6, %xmm1
; SSSE3-NEXT: pcmpeqd %xmm3, %xmm3
; SSSE3-NEXT: pxor %xmm3, %xmm1
; SSSE3-NEXT: movdqa %xmm2, %xmm4
; SSSE3-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm6[8],xmm4[9],xmm6[9],xmm4[10],xmm6[10],xmm4[11],xmm6[11],xmm4[12],xmm6[12],xmm4[13],xmm6[13],xmm4[14],xmm6[14],xmm4[15],xmm6[15]
; SSSE3-NEXT: movdqa %xmm0, %xmm7
; SSSE3-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm6[8],xmm7[9],xmm6[9],xmm7[10],xmm6[10],xmm7[11],xmm6[11],xmm7[12],xmm6[12],xmm7[13],xmm6[13],xmm7[14],xmm6[14],xmm7[15],xmm6[15]
; SSSE3-NEXT: pmullw %xmm4, %xmm7
; SSSE3-NEXT: psrlw $8, %xmm7
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm6[0],xmm2[1],xmm6[1],xmm2[2],xmm6[2],xmm2[3],xmm6[3],xmm2[4],xmm6[4],xmm2[5],xmm6[5],xmm2[6],xmm6[6],xmm2[7],xmm6[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1],xmm0[2],xmm6[2],xmm0[3],xmm6[3],xmm0[4],xmm6[4],xmm0[5],xmm6[5],xmm0[6],xmm6[6],xmm0[7],xmm6[7]
; SSSE3-NEXT: pmullw %xmm2, %xmm0
; SSSE3-NEXT: psrlw $8, %xmm0
; SSSE3-NEXT: packuswb %xmm7, %xmm0
; SSSE3-NEXT: pcmpeqb %xmm6, %xmm0
; SSSE3-NEXT: pxor %xmm3, %xmm0
; SSSE3-NEXT: movdqa %xmm0, %xmm2
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSSE3-NEXT: movdqa %xmm2, %xmm3
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0,0,1,1,2,2,3,3]
; SSSE3-NEXT: pslld $31, %xmm3
; SSSE3-NEXT: psrad $31, %xmm3
; SSSE3-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4,4,5,5,6,6,7,7]
; SSSE3-NEXT: pslld $31, %xmm2
; SSSE3-NEXT: psrad $31, %xmm2
; SSSE3-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSSE3-NEXT: movdqa %xmm0, %xmm6
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm6 = xmm6[0,0,1,1,2,2,3,3]
; SSSE3-NEXT: pslld $31, %xmm6
; SSSE3-NEXT: psrad $31, %xmm6
; SSSE3-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4,4,5,5,6,6,7,7]
; SSSE3-NEXT: pslld $31, %xmm0
; SSSE3-NEXT: psrad $31, %xmm0
; SSSE3-NEXT: movdqa %xmm1, %xmm4
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSSE3-NEXT: movdqa %xmm4, %xmm7
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0,0,1,1,2,2,3,3]
; SSSE3-NEXT: pslld $31, %xmm7
; SSSE3-NEXT: psrad $31, %xmm7
; SSSE3-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4,4,5,5,6,6,7,7]
; SSSE3-NEXT: pslld $31, %xmm4
; SSSE3-NEXT: psrad $31, %xmm4
; SSSE3-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSSE3-NEXT: movdqa %xmm1, %xmm5
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0,0,1,1,2,2,3,3]
; SSSE3-NEXT: pslld $31, %xmm5
; SSSE3-NEXT: psrad $31, %xmm5
; SSSE3-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4,4,5,5,6,6,7,7]
; SSSE3-NEXT: pslld $31, %xmm1
; SSSE3-NEXT: psrad $31, %xmm1
; SSSE3-NEXT: movdqa %xmm10, 16(%rsi)
; SSSE3-NEXT: movdqa %xmm9, (%rsi)
; SSSE3-NEXT: movdqa %xmm1, 112(%rdi)
; SSSE3-NEXT: movdqa %xmm5, 96(%rdi)
; SSSE3-NEXT: movdqa %xmm4, 80(%rdi)
; SSSE3-NEXT: movdqa %xmm7, 64(%rdi)
; SSSE3-NEXT: movdqa %xmm0, 48(%rdi)
; SSSE3-NEXT: movdqa %xmm6, 32(%rdi)
; SSSE3-NEXT: movdqa %xmm2, 16(%rdi)
; SSSE3-NEXT: movdqa %xmm3, (%rdi)
; SSSE3-NEXT: retq
;
; SSE41-LABEL: umulo_v32i8:
; SSE41: # %bb.0:
; SSE41-NEXT: movq %rdi, %rax
; SSE41-NEXT: movdqa %xmm2, %xmm4
; SSE41-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE41-NEXT: movdqa %xmm0, %xmm7
; SSE41-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE41-NEXT: pmullw %xmm4, %xmm7
; SSE41-NEXT: movdqa {{.*#+}} xmm9 = [255,255,255,255,255,255,255,255]
; SSE41-NEXT: pand %xmm9, %xmm7
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm5 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm6 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; SSE41-NEXT: pmullw %xmm5, %xmm6
; SSE41-NEXT: movdqa %xmm6, %xmm8
; SSE41-NEXT: pand %xmm9, %xmm8
; SSE41-NEXT: packuswb %xmm7, %xmm8
; SSE41-NEXT: movdqa %xmm3, %xmm7
; SSE41-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE41-NEXT: movdqa %xmm1, %xmm5
; SSE41-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE41-NEXT: pmullw %xmm7, %xmm5
; SSE41-NEXT: pand %xmm9, %xmm5
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm4 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm7 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; SSE41-NEXT: pmullw %xmm4, %xmm7
; SSE41-NEXT: pand %xmm7, %xmm9
; SSE41-NEXT: packuswb %xmm5, %xmm9
; SSE41-NEXT: pxor %xmm4, %xmm4
; SSE41-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm4[8],xmm3[9],xmm4[9],xmm3[10],xmm4[10],xmm3[11],xmm4[11],xmm3[12],xmm4[12],xmm3[13],xmm4[13],xmm3[14],xmm4[14],xmm3[15],xmm4[15]
; SSE41-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm4[8],xmm1[9],xmm4[9],xmm1[10],xmm4[10],xmm1[11],xmm4[11],xmm1[12],xmm4[12],xmm1[13],xmm4[13],xmm1[14],xmm4[14],xmm1[15],xmm4[15]
; SSE41-NEXT: pmullw %xmm3, %xmm1
; SSE41-NEXT: psrlw $8, %xmm1
; SSE41-NEXT: psrlw $8, %xmm7
; SSE41-NEXT: packuswb %xmm1, %xmm7
; SSE41-NEXT: pcmpeqb %xmm4, %xmm7
; SSE41-NEXT: pcmpeqd %xmm1, %xmm1
; SSE41-NEXT: pxor %xmm1, %xmm7
; SSE41-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm4[8],xmm2[9],xmm4[9],xmm2[10],xmm4[10],xmm2[11],xmm4[11],xmm2[12],xmm4[12],xmm2[13],xmm4[13],xmm2[14],xmm4[14],xmm2[15],xmm4[15]
; SSE41-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm4[8],xmm0[9],xmm4[9],xmm0[10],xmm4[10],xmm0[11],xmm4[11],xmm0[12],xmm4[12],xmm0[13],xmm4[13],xmm0[14],xmm4[14],xmm0[15],xmm4[15]
; SSE41-NEXT: pmullw %xmm2, %xmm0
; SSE41-NEXT: psrlw $8, %xmm0
; SSE41-NEXT: psrlw $8, %xmm6
; SSE41-NEXT: packuswb %xmm0, %xmm6
; SSE41-NEXT: pcmpeqb %xmm4, %xmm6
; SSE41-NEXT: pxor %xmm1, %xmm6
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm6[1,1,1,1]
; SSE41-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; SSE41-NEXT: pslld $31, %xmm0
; SSE41-NEXT: psrad $31, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm6[2,3,2,3]
; SSE41-NEXT: pmovzxbd {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
; SSE41-NEXT: pslld $31, %xmm1
; SSE41-NEXT: psrad $31, %xmm1
; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm6[3,3,3,3]
; SSE41-NEXT: pmovzxbd {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero
; SSE41-NEXT: pslld $31, %xmm2
; SSE41-NEXT: psrad $31, %xmm2
; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm7[1,1,1,1]
; SSE41-NEXT: pmovzxbd {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero
; SSE41-NEXT: pslld $31, %xmm3
; SSE41-NEXT: psrad $31, %xmm3
; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm7[2,3,2,3]
; SSE41-NEXT: pmovzxbd {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero,xmm4[2],zero,zero,zero,xmm4[3],zero,zero,zero
; SSE41-NEXT: pslld $31, %xmm4
; SSE41-NEXT: psrad $31, %xmm4
; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm7[3,3,3,3]
; SSE41-NEXT: pmovzxbd {{.*#+}} xmm5 = xmm5[0],zero,zero,zero,xmm5[1],zero,zero,zero,xmm5[2],zero,zero,zero,xmm5[3],zero,zero,zero
; SSE41-NEXT: pslld $31, %xmm5
; SSE41-NEXT: psrad $31, %xmm5
; SSE41-NEXT: pmovzxbd {{.*#+}} xmm6 = xmm6[0],zero,zero,zero,xmm6[1],zero,zero,zero,xmm6[2],zero,zero,zero,xmm6[3],zero,zero,zero
; SSE41-NEXT: pslld $31, %xmm6
; SSE41-NEXT: psrad $31, %xmm6
; SSE41-NEXT: pmovzxbd {{.*#+}} xmm7 = xmm7[0],zero,zero,zero,xmm7[1],zero,zero,zero,xmm7[2],zero,zero,zero,xmm7[3],zero,zero,zero
; SSE41-NEXT: pslld $31, %xmm7
; SSE41-NEXT: psrad $31, %xmm7
; SSE41-NEXT: movdqa %xmm9, 16(%rsi)
; SSE41-NEXT: movdqa %xmm8, (%rsi)
; SSE41-NEXT: movdqa %xmm7, 64(%rdi)
; SSE41-NEXT: movdqa %xmm6, (%rdi)
; SSE41-NEXT: movdqa %xmm5, 112(%rdi)
; SSE41-NEXT: movdqa %xmm4, 96(%rdi)
; SSE41-NEXT: movdqa %xmm3, 80(%rdi)
; SSE41-NEXT: movdqa %xmm2, 48(%rdi)
; SSE41-NEXT: movdqa %xmm1, 32(%rdi)
; SSE41-NEXT: movdqa %xmm0, 16(%rdi)
; SSE41-NEXT: retq
;
; AVX1-LABEL: umulo_v32i8:
; AVX1: # %bb.0:
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vpmullw %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm4 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm5 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX1-NEXT: vpmullw %xmm4, %xmm5, %xmm9
; AVX1-NEXT: vpand %xmm3, %xmm9, %xmm4
; AVX1-NEXT: vpackuswb %xmm2, %xmm4, %xmm8
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm7
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm7[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vpmullw %xmm5, %xmm4, %xmm4
; AVX1-NEXT: vpand %xmm3, %xmm4, %xmm4
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm5 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm6 = xmm7[0],zero,xmm7[1],zero,xmm7[2],zero,xmm7[3],zero,xmm7[4],zero,xmm7[5],zero,xmm7[6],zero,xmm7[7],zero
; AVX1-NEXT: vpmullw %xmm5, %xmm6, %xmm6
; AVX1-NEXT: vpand %xmm3, %xmm6, %xmm3
; AVX1-NEXT: vpackuswb %xmm4, %xmm3, %xmm5
; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm2[8],xmm3[8],xmm2[9],xmm3[9],xmm2[10],xmm3[10],xmm2[11],xmm3[11],xmm2[12],xmm3[12],xmm2[13],xmm3[13],xmm2[14],xmm3[14],xmm2[15],xmm3[15]
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm7[8],xmm3[8],xmm7[9],xmm3[9],xmm7[10],xmm3[10],xmm7[11],xmm3[11],xmm7[12],xmm3[12],xmm7[13],xmm3[13],xmm7[14],xmm3[14],xmm7[15],xmm3[15]
; AVX1-NEXT: vpmullw %xmm2, %xmm4, %xmm2
; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2
; AVX1-NEXT: vpsrlw $8, %xmm6, %xmm4
; AVX1-NEXT: vpackuswb %xmm2, %xmm4, %xmm2
; AVX1-NEXT: vpcmpeqb %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpcmpeqd %xmm4, %xmm4, %xmm4
; AVX1-NEXT: vpxor %xmm4, %xmm2, %xmm6
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm1[8],xmm3[8],xmm1[9],xmm3[9],xmm1[10],xmm3[10],xmm1[11],xmm3[11],xmm1[12],xmm3[12],xmm1[13],xmm3[13],xmm1[14],xmm3[14],xmm1[15],xmm3[15]
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm3[8],xmm0[9],xmm3[9],xmm0[10],xmm3[10],xmm0[11],xmm3[11],xmm0[12],xmm3[12],xmm0[13],xmm3[13],xmm0[14],xmm3[14],xmm0[15],xmm3[15]
; AVX1-NEXT: vpmullw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm0
; AVX1-NEXT: vpsrlw $8, %xmm9, %xmm1
; AVX1-NEXT: vpackuswb %xmm0, %xmm1, %xmm0
; AVX1-NEXT: vpcmpeqb %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpxor %xmm4, %xmm0, %xmm1
; AVX1-NEXT: vpmovsxbd %xmm1, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,1,1]
; AVX1-NEXT: vpmovsxbd %xmm2, %xmm2
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: vpmovsxbd %xmm6, %xmm2
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm6[1,1,1,1]
; AVX1-NEXT: vpmovsxbd %xmm3, %xmm3
; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm1[2,3,2,3]
; AVX1-NEXT: vpmovsxbd %xmm3, %xmm3
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[3,3,3,3]
; AVX1-NEXT: vpmovsxbd %xmm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm3, %ymm1
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm6[2,3,2,3]
; AVX1-NEXT: vpmovsxbd %xmm3, %xmm3
; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm6[3,3,3,3]
; AVX1-NEXT: vpmovsxbd %xmm4, %xmm4
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT: vmovdqa %xmm5, 16(%rdi)
; AVX1-NEXT: vmovdqa %xmm8, (%rdi)
; AVX1-NEXT: retq
;
; AVX2-LABEL: umulo_v32i8:
; AVX2: # %bb.0:
; AVX2-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX2-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX2-NEXT: vpmullw %ymm2, %ymm3, %ymm2
; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX2-NEXT: vpand %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpunpcklbw {{.*#+}} ymm4 = ymm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; AVX2-NEXT: vpunpcklbw {{.*#+}} ymm5 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; AVX2-NEXT: vpmullw %ymm4, %ymm5, %ymm4
; AVX2-NEXT: vpand %ymm3, %ymm4, %ymm3
; AVX2-NEXT: vpackuswb %ymm2, %ymm3, %ymm4
; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX2-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm1[8],ymm2[8],ymm1[9],ymm2[9],ymm1[10],ymm2[10],ymm1[11],ymm2[11],ymm1[12],ymm2[12],ymm1[13],ymm2[13],ymm1[14],ymm2[14],ymm1[15],ymm2[15],ymm1[24],ymm2[24],ymm1[25],ymm2[25],ymm1[26],ymm2[26],ymm1[27],ymm2[27],ymm1[28],ymm2[28],ymm1[29],ymm2[29],ymm1[30],ymm2[30],ymm1[31],ymm2[31]
; AVX2-NEXT: vpunpckhbw {{.*#+}} ymm5 = ymm0[8],ymm2[8],ymm0[9],ymm2[9],ymm0[10],ymm2[10],ymm0[11],ymm2[11],ymm0[12],ymm2[12],ymm0[13],ymm2[13],ymm0[14],ymm2[14],ymm0[15],ymm2[15],ymm0[24],ymm2[24],ymm0[25],ymm2[25],ymm0[26],ymm2[26],ymm0[27],ymm2[27],ymm0[28],ymm2[28],ymm0[29],ymm2[29],ymm0[30],ymm2[30],ymm0[31],ymm2[31]
; AVX2-NEXT: vpmullw %ymm3, %ymm5, %ymm3
; AVX2-NEXT: vpsrlw $8, %ymm3, %ymm3
; AVX2-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[1],ymm2[1],ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[4],ymm2[4],ymm1[5],ymm2[5],ymm1[6],ymm2[6],ymm1[7],ymm2[7],ymm1[16],ymm2[16],ymm1[17],ymm2[17],ymm1[18],ymm2[18],ymm1[19],ymm2[19],ymm1[20],ymm2[20],ymm1[21],ymm2[21],ymm1[22],ymm2[22],ymm1[23],ymm2[23]
; AVX2-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0],ymm2[0],ymm0[1],ymm2[1],ymm0[2],ymm2[2],ymm0[3],ymm2[3],ymm0[4],ymm2[4],ymm0[5],ymm2[5],ymm0[6],ymm2[6],ymm0[7],ymm2[7],ymm0[16],ymm2[16],ymm0[17],ymm2[17],ymm0[18],ymm2[18],ymm0[19],ymm2[19],ymm0[20],ymm2[20],ymm0[21],ymm2[21],ymm0[22],ymm2[22],ymm0[23],ymm2[23]
; AVX2-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpsrlw $8, %ymm0, %ymm0
; AVX2-NEXT: vpackuswb %ymm3, %ymm0, %ymm0
; AVX2-NEXT: vpcmpeqb %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm1
; AVX2-NEXT: vpmovsxbd %xmm1, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm3
; AVX2-NEXT: vpmovsxbd %xmm3, %ymm2
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
; AVX2-NEXT: vpmovsxbd %xmm1, %ymm1
; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[2,3,2,3]
; AVX2-NEXT: vpmovsxbd %xmm3, %ymm3
; AVX2-NEXT: vmovdqa %ymm4, (%rdi)
; AVX2-NEXT: retq
;
; AVX512-LABEL: umulo_v32i8:
; AVX512: # %bb.0:
; AVX512-NEXT: vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
; AVX512-NEXT: vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
; AVX512-NEXT: vpmullw %zmm1, %zmm0, %zmm2
; AVX512-NEXT: vpsrlw $8, %zmm2, %zmm0
; AVX512-NEXT: vpmovwb %zmm0, %ymm0
; AVX512-NEXT: vptestmb %ymm0, %ymm0, %k1
; AVX512-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512-NEXT: kshiftrd $16, %k1, %k1
; AVX512-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
; AVX512-NEXT: vpmovwb %zmm2, (%rdi)
; AVX512-NEXT: retq
%t = call {<32 x i8>, <32 x i1>} @llvm.umul.with.overflow.v32i8(<32 x i8> %a0, <32 x i8> %a1)
%val = extractvalue {<32 x i8>, <32 x i1>} %t, 0
%obit = extractvalue {<32 x i8>, <32 x i1>} %t, 1
%res = sext <32 x i1> %obit to <32 x i32>
store <32 x i8> %val, <32 x i8>* %p2
ret <32 x i32> %res
}
define <64 x i32> @umulo_v64i8(<64 x i8> %a0, <64 x i8> %a1, <64 x i8>* %p2) nounwind {
; SSE2-LABEL: umulo_v64i8:
; SSE2: # %bb.0:
; SSE2-NEXT: movq %rdi, %rax
; SSE2-NEXT: movdqa %xmm4, %xmm8
; SSE2-NEXT: punpckhbw {{.*#+}} xmm8 = xmm8[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: movdqa %xmm0, %xmm9
; SSE2-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: pmullw %xmm8, %xmm9
; SSE2-NEXT: movdqa {{.*#+}} xmm12 = [255,255,255,255,255,255,255,255]
; SSE2-NEXT: pand %xmm12, %xmm9
; SSE2-NEXT: movdqa %xmm4, %xmm10
; SSE2-NEXT: punpcklbw {{.*#+}} xmm10 = xmm10[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: movdqa %xmm0, %xmm8
; SSE2-NEXT: punpcklbw {{.*#+}} xmm8 = xmm8[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: pmullw %xmm10, %xmm8
; SSE2-NEXT: pand %xmm12, %xmm8
; SSE2-NEXT: packuswb %xmm9, %xmm8
; SSE2-NEXT: movdqa %xmm5, %xmm9
; SSE2-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: movdqa %xmm1, %xmm10
; SSE2-NEXT: punpckhbw {{.*#+}} xmm10 = xmm10[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: pmullw %xmm9, %xmm10
; SSE2-NEXT: pand %xmm12, %xmm10
; SSE2-NEXT: movdqa %xmm5, %xmm11
; SSE2-NEXT: punpcklbw {{.*#+}} xmm11 = xmm11[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: movdqa %xmm1, %xmm9
; SSE2-NEXT: punpcklbw {{.*#+}} xmm9 = xmm9[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: pmullw %xmm11, %xmm9
; SSE2-NEXT: pand %xmm12, %xmm9
; SSE2-NEXT: packuswb %xmm10, %xmm9
; SSE2-NEXT: movdqa %xmm6, %xmm10
; SSE2-NEXT: punpckhbw {{.*#+}} xmm10 = xmm10[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: movdqa %xmm2, %xmm11
; SSE2-NEXT: punpckhbw {{.*#+}} xmm11 = xmm11[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: pmullw %xmm10, %xmm11
; SSE2-NEXT: pand %xmm12, %xmm11
; SSE2-NEXT: movdqa %xmm6, %xmm13
; SSE2-NEXT: punpcklbw {{.*#+}} xmm13 = xmm13[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: movdqa %xmm2, %xmm10
; SSE2-NEXT: punpcklbw {{.*#+}} xmm10 = xmm10[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: pmullw %xmm13, %xmm10
; SSE2-NEXT: pand %xmm12, %xmm10
; SSE2-NEXT: packuswb %xmm11, %xmm10
; SSE2-NEXT: movdqa %xmm7, %xmm11
; SSE2-NEXT: punpckhbw {{.*#+}} xmm11 = xmm11[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: movdqa %xmm3, %xmm13
; SSE2-NEXT: punpckhbw {{.*#+}} xmm13 = xmm13[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: pmullw %xmm11, %xmm13
; SSE2-NEXT: pand %xmm12, %xmm13
; SSE2-NEXT: movdqa %xmm7, %xmm14
; SSE2-NEXT: punpcklbw {{.*#+}} xmm14 = xmm14[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: movdqa %xmm3, %xmm11
; SSE2-NEXT: punpcklbw {{.*#+}} xmm11 = xmm11[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: pmullw %xmm14, %xmm11
; SSE2-NEXT: pand %xmm12, %xmm11
; SSE2-NEXT: packuswb %xmm13, %xmm11
; SSE2-NEXT: pxor %xmm12, %xmm12
; SSE2-NEXT: movdqa %xmm7, %xmm13
; SSE2-NEXT: punpckhbw {{.*#+}} xmm13 = xmm13[8],xmm12[8],xmm13[9],xmm12[9],xmm13[10],xmm12[10],xmm13[11],xmm12[11],xmm13[12],xmm12[12],xmm13[13],xmm12[13],xmm13[14],xmm12[14],xmm13[15],xmm12[15]
; SSE2-NEXT: movdqa %xmm3, %xmm14
; SSE2-NEXT: punpckhbw {{.*#+}} xmm14 = xmm14[8],xmm12[8],xmm14[9],xmm12[9],xmm14[10],xmm12[10],xmm14[11],xmm12[11],xmm14[12],xmm12[12],xmm14[13],xmm12[13],xmm14[14],xmm12[14],xmm14[15],xmm12[15]
; SSE2-NEXT: pmullw %xmm13, %xmm14
; SSE2-NEXT: psrlw $8, %xmm14
; SSE2-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm12[0],xmm7[1],xmm12[1],xmm7[2],xmm12[2],xmm7[3],xmm12[3],xmm7[4],xmm12[4],xmm7[5],xmm12[5],xmm7[6],xmm12[6],xmm7[7],xmm12[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm12[0],xmm3[1],xmm12[1],xmm3[2],xmm12[2],xmm3[3],xmm12[3],xmm3[4],xmm12[4],xmm3[5],xmm12[5],xmm3[6],xmm12[6],xmm3[7],xmm12[7]
; SSE2-NEXT: pmullw %xmm7, %xmm3
; SSE2-NEXT: psrlw $8, %xmm3
; SSE2-NEXT: packuswb %xmm14, %xmm3
; SSE2-NEXT: movdqa %xmm6, %xmm13
; SSE2-NEXT: punpckhbw {{.*#+}} xmm13 = xmm13[8],xmm12[8],xmm13[9],xmm12[9],xmm13[10],xmm12[10],xmm13[11],xmm12[11],xmm13[12],xmm12[12],xmm13[13],xmm12[13],xmm13[14],xmm12[14],xmm13[15],xmm12[15]
; SSE2-NEXT: movdqa %xmm2, %xmm7
; SSE2-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm12[8],xmm7[9],xmm12[9],xmm7[10],xmm12[10],xmm7[11],xmm12[11],xmm7[12],xmm12[12],xmm7[13],xmm12[13],xmm7[14],xmm12[14],xmm7[15],xmm12[15]
; SSE2-NEXT: pmullw %xmm13, %xmm7
; SSE2-NEXT: psrlw $8, %xmm7
; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm12[0],xmm6[1],xmm12[1],xmm6[2],xmm12[2],xmm6[3],xmm12[3],xmm6[4],xmm12[4],xmm6[5],xmm12[5],xmm6[6],xmm12[6],xmm6[7],xmm12[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm12[0],xmm2[1],xmm12[1],xmm2[2],xmm12[2],xmm2[3],xmm12[3],xmm2[4],xmm12[4],xmm2[5],xmm12[5],xmm2[6],xmm12[6],xmm2[7],xmm12[7]
; SSE2-NEXT: pmullw %xmm6, %xmm2
; SSE2-NEXT: psrlw $8, %xmm2
; SSE2-NEXT: packuswb %xmm7, %xmm2
; SSE2-NEXT: movdqa %xmm5, %xmm6
; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm12[8],xmm6[9],xmm12[9],xmm6[10],xmm12[10],xmm6[11],xmm12[11],xmm6[12],xmm12[12],xmm6[13],xmm12[13],xmm6[14],xmm12[14],xmm6[15],xmm12[15]
; SSE2-NEXT: movdqa %xmm1, %xmm7
; SSE2-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm12[8],xmm7[9],xmm12[9],xmm7[10],xmm12[10],xmm7[11],xmm12[11],xmm7[12],xmm12[12],xmm7[13],xmm12[13],xmm7[14],xmm12[14],xmm7[15],xmm12[15]
; SSE2-NEXT: pmullw %xmm6, %xmm7
; SSE2-NEXT: psrlw $8, %xmm7
; SSE2-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm12[0],xmm5[1],xmm12[1],xmm5[2],xmm12[2],xmm5[3],xmm12[3],xmm5[4],xmm12[4],xmm5[5],xmm12[5],xmm5[6],xmm12[6],xmm5[7],xmm12[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm12[0],xmm1[1],xmm12[1],xmm1[2],xmm12[2],xmm1[3],xmm12[3],xmm1[4],xmm12[4],xmm1[5],xmm12[5],xmm1[6],xmm12[6],xmm1[7],xmm12[7]
; SSE2-NEXT: pmullw %xmm5, %xmm1
; SSE2-NEXT: psrlw $8, %xmm1
; SSE2-NEXT: packuswb %xmm7, %xmm1
; SSE2-NEXT: movdqa %xmm4, %xmm5
; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm12[8],xmm5[9],xmm12[9],xmm5[10],xmm12[10],xmm5[11],xmm12[11],xmm5[12],xmm12[12],xmm5[13],xmm12[13],xmm5[14],xmm12[14],xmm5[15],xmm12[15]
; SSE2-NEXT: movdqa %xmm0, %xmm6
; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm12[8],xmm6[9],xmm12[9],xmm6[10],xmm12[10],xmm6[11],xmm12[11],xmm6[12],xmm12[12],xmm6[13],xmm12[13],xmm6[14],xmm12[14],xmm6[15],xmm12[15]
; SSE2-NEXT: pmullw %xmm5, %xmm6
; SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm12[0],xmm4[1],xmm12[1],xmm4[2],xmm12[2],xmm4[3],xmm12[3],xmm4[4],xmm12[4],xmm4[5],xmm12[5],xmm4[6],xmm12[6],xmm4[7],xmm12[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm12[0],xmm0[1],xmm12[1],xmm0[2],xmm12[2],xmm0[3],xmm12[3],xmm0[4],xmm12[4],xmm0[5],xmm12[5],xmm0[6],xmm12[6],xmm0[7],xmm12[7]
; SSE2-NEXT: pmullw %xmm4, %xmm0
; SSE2-NEXT: psrlw $8, %xmm6
; SSE2-NEXT: psrlw $8, %xmm0
; SSE2-NEXT: packuswb %xmm6, %xmm0
; SSE2-NEXT: pcmpeqb %xmm12, %xmm3
; SSE2-NEXT: pcmpeqb %xmm12, %xmm2
; SSE2-NEXT: pcmpeqb %xmm12, %xmm1
; SSE2-NEXT: pcmpeqb %xmm12, %xmm0
; SSE2-NEXT: pcmpeqd %xmm4, %xmm4
; SSE2-NEXT: pxor %xmm4, %xmm3
; SSE2-NEXT: pxor %xmm4, %xmm2
; SSE2-NEXT: pxor %xmm4, %xmm1
; SSE2-NEXT: pxor %xmm4, %xmm0
; SSE2-NEXT: movdqa %xmm11, 48(%rsi)
; SSE2-NEXT: movdqa %xmm10, 32(%rsi)
; SSE2-NEXT: movdqa %xmm9, 16(%rsi)
; SSE2-NEXT: movdqa %xmm3, %xmm4
; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: movdqa %xmm8, (%rsi)
; SSE2-NEXT: movdqa %xmm3, %xmm5
; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0,0,1,1,2,2,3,3]
; SSE2-NEXT: pslld $31, %xmm3
; SSE2-NEXT: psrad $31, %xmm3
; SSE2-NEXT: movdqa %xmm3, 224(%rdi)
; SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4,4,5,5,6,6,7,7]
; SSE2-NEXT: pslld $31, %xmm5
; SSE2-NEXT: psrad $31, %xmm5
; SSE2-NEXT: movdqa %xmm5, 240(%rdi)
; SSE2-NEXT: movdqa %xmm4, %xmm3
; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0,0,1,1,2,2,3,3]
; SSE2-NEXT: pslld $31, %xmm4
; SSE2-NEXT: psrad $31, %xmm4
; SSE2-NEXT: movdqa %xmm4, 192(%rdi)
; SSE2-NEXT: movdqa %xmm2, %xmm4
; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4,4,5,5,6,6,7,7]
; SSE2-NEXT: pslld $31, %xmm3
; SSE2-NEXT: psrad $31, %xmm3
; SSE2-NEXT: movdqa %xmm3, 208(%rdi)
; SSE2-NEXT: movdqa %xmm2, %xmm3
; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3]
; SSE2-NEXT: pslld $31, %xmm2
; SSE2-NEXT: psrad $31, %xmm2
; SSE2-NEXT: movdqa %xmm2, 160(%rdi)
; SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4,4,5,5,6,6,7,7]
; SSE2-NEXT: pslld $31, %xmm3
; SSE2-NEXT: psrad $31, %xmm3
; SSE2-NEXT: movdqa %xmm3, 176(%rdi)
; SSE2-NEXT: movdqa %xmm4, %xmm2
; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0,0,1,1,2,2,3,3]
; SSE2-NEXT: pslld $31, %xmm4
; SSE2-NEXT: psrad $31, %xmm4
; SSE2-NEXT: movdqa %xmm4, 128(%rdi)
; SSE2-NEXT: movdqa %xmm1, %xmm3
; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4,4,5,5,6,6,7,7]
; SSE2-NEXT: pslld $31, %xmm2
; SSE2-NEXT: psrad $31, %xmm2
; SSE2-NEXT: movdqa %xmm2, 144(%rdi)
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3]
; SSE2-NEXT: pslld $31, %xmm1
; SSE2-NEXT: psrad $31, %xmm1
; SSE2-NEXT: movdqa %xmm1, 96(%rdi)
; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4,4,5,5,6,6,7,7]
; SSE2-NEXT: pslld $31, %xmm2
; SSE2-NEXT: psrad $31, %xmm2
; SSE2-NEXT: movdqa %xmm2, 112(%rdi)
; SSE2-NEXT: movdqa %xmm3, %xmm1
; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0,0,1,1,2,2,3,3]
; SSE2-NEXT: pslld $31, %xmm3
; SSE2-NEXT: psrad $31, %xmm3
; SSE2-NEXT: movdqa %xmm3, 64(%rdi)
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4,4,5,5,6,6,7,7]
; SSE2-NEXT: pslld $31, %xmm1
; SSE2-NEXT: psrad $31, %xmm1
; SSE2-NEXT: movdqa %xmm1, 80(%rdi)
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE2-NEXT: pslld $31, %xmm0
; SSE2-NEXT: psrad $31, %xmm0
; SSE2-NEXT: movdqa %xmm0, 32(%rdi)
; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4,4,5,5,6,6,7,7]
; SSE2-NEXT: pslld $31, %xmm1
; SSE2-NEXT: psrad $31, %xmm1
; SSE2-NEXT: movdqa %xmm1, 48(%rdi)
; SSE2-NEXT: movdqa %xmm2, %xmm0
; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3]
; SSE2-NEXT: pslld $31, %xmm2
; SSE2-NEXT: psrad $31, %xmm2
; SSE2-NEXT: movdqa %xmm2, (%rdi)
; SSE2-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4,4,5,5,6,6,7,7]
; SSE2-NEXT: pslld $31, %xmm0
; SSE2-NEXT: psrad $31, %xmm0
; SSE2-NEXT: movdqa %xmm0, 16(%rdi)
; SSE2-NEXT: retq
;
; SSSE3-LABEL: umulo_v64i8:
; SSSE3: # %bb.0:
; SSSE3-NEXT: movq %rdi, %rax
; SSSE3-NEXT: movdqa %xmm4, %xmm8
; SSSE3-NEXT: punpckhbw {{.*#+}} xmm8 = xmm8[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSSE3-NEXT: movdqa %xmm0, %xmm9
; SSSE3-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSSE3-NEXT: pmullw %xmm8, %xmm9
; SSSE3-NEXT: movdqa {{.*#+}} xmm12 = [255,255,255,255,255,255,255,255]
; SSSE3-NEXT: pand %xmm12, %xmm9
; SSSE3-NEXT: movdqa %xmm4, %xmm10
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm10 = xmm10[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSSE3-NEXT: movdqa %xmm0, %xmm8
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm8 = xmm8[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSSE3-NEXT: pmullw %xmm10, %xmm8
; SSSE3-NEXT: pand %xmm12, %xmm8
; SSSE3-NEXT: packuswb %xmm9, %xmm8
; SSSE3-NEXT: movdqa %xmm5, %xmm9
; SSSE3-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSSE3-NEXT: movdqa %xmm1, %xmm10
; SSSE3-NEXT: punpckhbw {{.*#+}} xmm10 = xmm10[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSSE3-NEXT: pmullw %xmm9, %xmm10
; SSSE3-NEXT: pand %xmm12, %xmm10
; SSSE3-NEXT: movdqa %xmm5, %xmm11
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm11 = xmm11[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSSE3-NEXT: movdqa %xmm1, %xmm9
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm9 = xmm9[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSSE3-NEXT: pmullw %xmm11, %xmm9
; SSSE3-NEXT: pand %xmm12, %xmm9
; SSSE3-NEXT: packuswb %xmm10, %xmm9
; SSSE3-NEXT: movdqa %xmm6, %xmm10
; SSSE3-NEXT: punpckhbw {{.*#+}} xmm10 = xmm10[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSSE3-NEXT: movdqa %xmm2, %xmm11
; SSSE3-NEXT: punpckhbw {{.*#+}} xmm11 = xmm11[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSSE3-NEXT: pmullw %xmm10, %xmm11
; SSSE3-NEXT: pand %xmm12, %xmm11
; SSSE3-NEXT: movdqa %xmm6, %xmm13
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm13 = xmm13[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSSE3-NEXT: movdqa %xmm2, %xmm10
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm10 = xmm10[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSSE3-NEXT: pmullw %xmm13, %xmm10
; SSSE3-NEXT: pand %xmm12, %xmm10
; SSSE3-NEXT: packuswb %xmm11, %xmm10
; SSSE3-NEXT: movdqa %xmm7, %xmm11
; SSSE3-NEXT: punpckhbw {{.*#+}} xmm11 = xmm11[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSSE3-NEXT: movdqa %xmm3, %xmm13
; SSSE3-NEXT: punpckhbw {{.*#+}} xmm13 = xmm13[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSSE3-NEXT: pmullw %xmm11, %xmm13
; SSSE3-NEXT: pand %xmm12, %xmm13
; SSSE3-NEXT: movdqa %xmm7, %xmm14
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm14 = xmm14[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSSE3-NEXT: movdqa %xmm3, %xmm11
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm11 = xmm11[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSSE3-NEXT: pmullw %xmm14, %xmm11
; SSSE3-NEXT: pand %xmm12, %xmm11
; SSSE3-NEXT: packuswb %xmm13, %xmm11
; SSSE3-NEXT: pxor %xmm12, %xmm12
; SSSE3-NEXT: movdqa %xmm7, %xmm13
; SSSE3-NEXT: punpckhbw {{.*#+}} xmm13 = xmm13[8],xmm12[8],xmm13[9],xmm12[9],xmm13[10],xmm12[10],xmm13[11],xmm12[11],xmm13[12],xmm12[12],xmm13[13],xmm12[13],xmm13[14],xmm12[14],xmm13[15],xmm12[15]
; SSSE3-NEXT: movdqa %xmm3, %xmm14
; SSSE3-NEXT: punpckhbw {{.*#+}} xmm14 = xmm14[8],xmm12[8],xmm14[9],xmm12[9],xmm14[10],xmm12[10],xmm14[11],xmm12[11],xmm14[12],xmm12[12],xmm14[13],xmm12[13],xmm14[14],xmm12[14],xmm14[15],xmm12[15]
; SSSE3-NEXT: pmullw %xmm13, %xmm14
; SSSE3-NEXT: psrlw $8, %xmm14
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm12[0],xmm7[1],xmm12[1],xmm7[2],xmm12[2],xmm7[3],xmm12[3],xmm7[4],xmm12[4],xmm7[5],xmm12[5],xmm7[6],xmm12[6],xmm7[7],xmm12[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm12[0],xmm3[1],xmm12[1],xmm3[2],xmm12[2],xmm3[3],xmm12[3],xmm3[4],xmm12[4],xmm3[5],xmm12[5],xmm3[6],xmm12[6],xmm3[7],xmm12[7]
; SSSE3-NEXT: pmullw %xmm7, %xmm3
; SSSE3-NEXT: psrlw $8, %xmm3
; SSSE3-NEXT: packuswb %xmm14, %xmm3
; SSSE3-NEXT: movdqa %xmm6, %xmm13
; SSSE3-NEXT: punpckhbw {{.*#+}} xmm13 = xmm13[8],xmm12[8],xmm13[9],xmm12[9],xmm13[10],xmm12[10],xmm13[11],xmm12[11],xmm13[12],xmm12[12],xmm13[13],xmm12[13],xmm13[14],xmm12[14],xmm13[15],xmm12[15]
; SSSE3-NEXT: movdqa %xmm2, %xmm7
; SSSE3-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm12[8],xmm7[9],xmm12[9],xmm7[10],xmm12[10],xmm7[11],xmm12[11],xmm7[12],xmm12[12],xmm7[13],xmm12[13],xmm7[14],xmm12[14],xmm7[15],xmm12[15]
; SSSE3-NEXT: pmullw %xmm13, %xmm7
; SSSE3-NEXT: psrlw $8, %xmm7
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm12[0],xmm6[1],xmm12[1],xmm6[2],xmm12[2],xmm6[3],xmm12[3],xmm6[4],xmm12[4],xmm6[5],xmm12[5],xmm6[6],xmm12[6],xmm6[7],xmm12[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm12[0],xmm2[1],xmm12[1],xmm2[2],xmm12[2],xmm2[3],xmm12[3],xmm2[4],xmm12[4],xmm2[5],xmm12[5],xmm2[6],xmm12[6],xmm2[7],xmm12[7]
; SSSE3-NEXT: pmullw %xmm6, %xmm2
; SSSE3-NEXT: psrlw $8, %xmm2
; SSSE3-NEXT: packuswb %xmm7, %xmm2
; SSSE3-NEXT: movdqa %xmm5, %xmm6
; SSSE3-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm12[8],xmm6[9],xmm12[9],xmm6[10],xmm12[10],xmm6[11],xmm12[11],xmm6[12],xmm12[12],xmm6[13],xmm12[13],xmm6[14],xmm12[14],xmm6[15],xmm12[15]
; SSSE3-NEXT: movdqa %xmm1, %xmm7
; SSSE3-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm12[8],xmm7[9],xmm12[9],xmm7[10],xmm12[10],xmm7[11],xmm12[11],xmm7[12],xmm12[12],xmm7[13],xmm12[13],xmm7[14],xmm12[14],xmm7[15],xmm12[15]
; SSSE3-NEXT: pmullw %xmm6, %xmm7
; SSSE3-NEXT: psrlw $8, %xmm7
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm12[0],xmm5[1],xmm12[1],xmm5[2],xmm12[2],xmm5[3],xmm12[3],xmm5[4],xmm12[4],xmm5[5],xmm12[5],xmm5[6],xmm12[6],xmm5[7],xmm12[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm12[0],xmm1[1],xmm12[1],xmm1[2],xmm12[2],xmm1[3],xmm12[3],xmm1[4],xmm12[4],xmm1[5],xmm12[5],xmm1[6],xmm12[6],xmm1[7],xmm12[7]
; SSSE3-NEXT: pmullw %xmm5, %xmm1
; SSSE3-NEXT: psrlw $8, %xmm1
; SSSE3-NEXT: packuswb %xmm7, %xmm1
; SSSE3-NEXT: movdqa %xmm4, %xmm5
; SSSE3-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm12[8],xmm5[9],xmm12[9],xmm5[10],xmm12[10],xmm5[11],xmm12[11],xmm5[12],xmm12[12],xmm5[13],xmm12[13],xmm5[14],xmm12[14],xmm5[15],xmm12[15]
; SSSE3-NEXT: movdqa %xmm0, %xmm6
; SSSE3-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm12[8],xmm6[9],xmm12[9],xmm6[10],xmm12[10],xmm6[11],xmm12[11],xmm6[12],xmm12[12],xmm6[13],xmm12[13],xmm6[14],xmm12[14],xmm6[15],xmm12[15]
; SSSE3-NEXT: pmullw %xmm5, %xmm6
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm12[0],xmm4[1],xmm12[1],xmm4[2],xmm12[2],xmm4[3],xmm12[3],xmm4[4],xmm12[4],xmm4[5],xmm12[5],xmm4[6],xmm12[6],xmm4[7],xmm12[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm12[0],xmm0[1],xmm12[1],xmm0[2],xmm12[2],xmm0[3],xmm12[3],xmm0[4],xmm12[4],xmm0[5],xmm12[5],xmm0[6],xmm12[6],xmm0[7],xmm12[7]
; SSSE3-NEXT: pmullw %xmm4, %xmm0
; SSSE3-NEXT: psrlw $8, %xmm6
; SSSE3-NEXT: psrlw $8, %xmm0
; SSSE3-NEXT: packuswb %xmm6, %xmm0
; SSSE3-NEXT: pcmpeqb %xmm12, %xmm3
; SSSE3-NEXT: pcmpeqb %xmm12, %xmm2
; SSSE3-NEXT: pcmpeqb %xmm12, %xmm1
; SSSE3-NEXT: pcmpeqb %xmm12, %xmm0
; SSSE3-NEXT: pcmpeqd %xmm4, %xmm4
; SSSE3-NEXT: pxor %xmm4, %xmm3
; SSSE3-NEXT: pxor %xmm4, %xmm2
; SSSE3-NEXT: pxor %xmm4, %xmm1
; SSSE3-NEXT: pxor %xmm4, %xmm0
; SSSE3-NEXT: movdqa %xmm11, 48(%rsi)
; SSSE3-NEXT: movdqa %xmm10, 32(%rsi)
; SSSE3-NEXT: movdqa %xmm9, 16(%rsi)
; SSSE3-NEXT: movdqa %xmm3, %xmm4
; SSSE3-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSSE3-NEXT: movdqa %xmm8, (%rsi)
; SSSE3-NEXT: movdqa %xmm3, %xmm5
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0,0,1,1,2,2,3,3]
; SSSE3-NEXT: pslld $31, %xmm3
; SSSE3-NEXT: psrad $31, %xmm3
; SSSE3-NEXT: movdqa %xmm3, 224(%rdi)
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSSE3-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4,4,5,5,6,6,7,7]
; SSSE3-NEXT: pslld $31, %xmm5
; SSSE3-NEXT: psrad $31, %xmm5
; SSSE3-NEXT: movdqa %xmm5, 240(%rdi)
; SSSE3-NEXT: movdqa %xmm4, %xmm3
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0,0,1,1,2,2,3,3]
; SSSE3-NEXT: pslld $31, %xmm4
; SSSE3-NEXT: psrad $31, %xmm4
; SSSE3-NEXT: movdqa %xmm4, 192(%rdi)
; SSSE3-NEXT: movdqa %xmm2, %xmm4
; SSSE3-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSSE3-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4,4,5,5,6,6,7,7]
; SSSE3-NEXT: pslld $31, %xmm3
; SSSE3-NEXT: psrad $31, %xmm3
; SSSE3-NEXT: movdqa %xmm3, 208(%rdi)
; SSSE3-NEXT: movdqa %xmm2, %xmm3
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3]
; SSSE3-NEXT: pslld $31, %xmm2
; SSSE3-NEXT: psrad $31, %xmm2
; SSSE3-NEXT: movdqa %xmm2, 160(%rdi)
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSSE3-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4,4,5,5,6,6,7,7]
; SSSE3-NEXT: pslld $31, %xmm3
; SSSE3-NEXT: psrad $31, %xmm3
; SSSE3-NEXT: movdqa %xmm3, 176(%rdi)
; SSSE3-NEXT: movdqa %xmm4, %xmm2
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0,0,1,1,2,2,3,3]
; SSSE3-NEXT: pslld $31, %xmm4
; SSSE3-NEXT: psrad $31, %xmm4
; SSSE3-NEXT: movdqa %xmm4, 128(%rdi)
; SSSE3-NEXT: movdqa %xmm1, %xmm3
; SSSE3-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSSE3-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4,4,5,5,6,6,7,7]
; SSSE3-NEXT: pslld $31, %xmm2
; SSSE3-NEXT: psrad $31, %xmm2
; SSSE3-NEXT: movdqa %xmm2, 144(%rdi)
; SSSE3-NEXT: movdqa %xmm1, %xmm2
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3]
; SSSE3-NEXT: pslld $31, %xmm1
; SSSE3-NEXT: psrad $31, %xmm1
; SSSE3-NEXT: movdqa %xmm1, 96(%rdi)
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSSE3-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4,4,5,5,6,6,7,7]
; SSSE3-NEXT: pslld $31, %xmm2
; SSSE3-NEXT: psrad $31, %xmm2
; SSSE3-NEXT: movdqa %xmm2, 112(%rdi)
; SSSE3-NEXT: movdqa %xmm3, %xmm1
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0,0,1,1,2,2,3,3]
; SSSE3-NEXT: pslld $31, %xmm3
; SSSE3-NEXT: psrad $31, %xmm3
; SSSE3-NEXT: movdqa %xmm3, 64(%rdi)
; SSSE3-NEXT: movdqa %xmm0, %xmm2
; SSSE3-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSSE3-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4,4,5,5,6,6,7,7]
; SSSE3-NEXT: pslld $31, %xmm1
; SSSE3-NEXT: psrad $31, %xmm1
; SSSE3-NEXT: movdqa %xmm1, 80(%rdi)
; SSSE3-NEXT: movdqa %xmm0, %xmm1
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSSE3-NEXT: pslld $31, %xmm0
; SSSE3-NEXT: psrad $31, %xmm0
; SSSE3-NEXT: movdqa %xmm0, 32(%rdi)
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSSE3-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4,4,5,5,6,6,7,7]
; SSSE3-NEXT: pslld $31, %xmm1
; SSSE3-NEXT: psrad $31, %xmm1
; SSSE3-NEXT: movdqa %xmm1, 48(%rdi)
; SSSE3-NEXT: movdqa %xmm2, %xmm0
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3]
; SSSE3-NEXT: pslld $31, %xmm2
; SSSE3-NEXT: psrad $31, %xmm2
; SSSE3-NEXT: movdqa %xmm2, (%rdi)
; SSSE3-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4,4,5,5,6,6,7,7]
; SSSE3-NEXT: pslld $31, %xmm0
; SSSE3-NEXT: psrad $31, %xmm0
; SSSE3-NEXT: movdqa %xmm0, 16(%rdi)
; SSSE3-NEXT: retq
;
; SSE41-LABEL: umulo_v64i8:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm5, %xmm11
; SSE41-NEXT: movdqa %xmm4, %xmm13
; SSE41-NEXT: movdqa %xmm3, %xmm4
; SSE41-NEXT: movdqa %xmm0, %xmm5
; SSE41-NEXT: movq %rdi, %rax
; SSE41-NEXT: movdqa %xmm13, %xmm8
; SSE41-NEXT: punpckhbw {{.*#+}} xmm8 = xmm8[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE41-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE41-NEXT: pmullw %xmm8, %xmm0
; SSE41-NEXT: movdqa {{.*#+}} xmm10 = [255,255,255,255,255,255,255,255]
; SSE41-NEXT: pand %xmm10, %xmm0
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm9 = xmm13[0],zero,xmm13[1],zero,xmm13[2],zero,xmm13[3],zero,xmm13[4],zero,xmm13[5],zero,xmm13[6],zero,xmm13[7],zero
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm8 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero
; SSE41-NEXT: pmullw %xmm9, %xmm8
; SSE41-NEXT: movdqa %xmm8, %xmm3
; SSE41-NEXT: pand %xmm10, %xmm3
; SSE41-NEXT: packuswb %xmm0, %xmm3
; SSE41-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE41-NEXT: movdqa %xmm11, %xmm9
; SSE41-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE41-NEXT: pmullw %xmm9, %xmm0
; SSE41-NEXT: pand %xmm10, %xmm0
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm12 = xmm11[0],zero,xmm11[1],zero,xmm11[2],zero,xmm11[3],zero,xmm11[4],zero,xmm11[5],zero,xmm11[6],zero,xmm11[7],zero
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm9 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; SSE41-NEXT: pmullw %xmm12, %xmm9
; SSE41-NEXT: movdqa %xmm9, %xmm3
; SSE41-NEXT: pand %xmm10, %xmm3
; SSE41-NEXT: packuswb %xmm0, %xmm3
; SSE41-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE41-NEXT: movdqa %xmm6, %xmm12
; SSE41-NEXT: punpckhbw {{.*#+}} xmm12 = xmm12[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE41-NEXT: movdqa %xmm2, %xmm0
; SSE41-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE41-NEXT: pmullw %xmm12, %xmm0
|
|
; SSE41-NEXT: pand %xmm10, %xmm0
|
|
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm14 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero,xmm6[4],zero,xmm6[5],zero,xmm6[6],zero,xmm6[7],zero
|
|
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm12 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
|
|
; SSE41-NEXT: pmullw %xmm14, %xmm12
|
|
; SSE41-NEXT: movdqa %xmm12, %xmm15
|
|
; SSE41-NEXT: pand %xmm10, %xmm15
|
|
; SSE41-NEXT: packuswb %xmm0, %xmm15
|
|
; SSE41-NEXT: movdqa %xmm7, %xmm0
|
|
; SSE41-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
|
|
; SSE41-NEXT: movdqa %xmm4, %xmm3
|
|
; SSE41-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
|
|
; SSE41-NEXT: pmullw %xmm0, %xmm3
|
|
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm0 = xmm7[0],zero,xmm7[1],zero,xmm7[2],zero,xmm7[3],zero,xmm7[4],zero,xmm7[5],zero,xmm7[6],zero,xmm7[7],zero
|
|
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm14 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero
|
|
; SSE41-NEXT: pmullw %xmm0, %xmm14
|
|
; SSE41-NEXT: pand %xmm10, %xmm3
|
|
; SSE41-NEXT: pand %xmm14, %xmm10
|
|
; SSE41-NEXT: packuswb %xmm3, %xmm10
|
|
; SSE41-NEXT: pxor %xmm0, %xmm0
|
|
; SSE41-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm0[8],xmm7[9],xmm0[9],xmm7[10],xmm0[10],xmm7[11],xmm0[11],xmm7[12],xmm0[12],xmm7[13],xmm0[13],xmm7[14],xmm0[14],xmm7[15],xmm0[15]
|
|
; SSE41-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm0[8],xmm4[9],xmm0[9],xmm4[10],xmm0[10],xmm4[11],xmm0[11],xmm4[12],xmm0[12],xmm4[13],xmm0[13],xmm4[14],xmm0[14],xmm4[15],xmm0[15]
|
|
; SSE41-NEXT: pmullw %xmm7, %xmm4
|
|
; SSE41-NEXT: psrlw $8, %xmm4
|
|
; SSE41-NEXT: psrlw $8, %xmm14
|
|
; SSE41-NEXT: packuswb %xmm4, %xmm14
|
|
; SSE41-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm0[8],xmm6[9],xmm0[9],xmm6[10],xmm0[10],xmm6[11],xmm0[11],xmm6[12],xmm0[12],xmm6[13],xmm0[13],xmm6[14],xmm0[14],xmm6[15],xmm0[15]
|
|
; SSE41-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
|
|
; SSE41-NEXT: pmullw %xmm6, %xmm2
|
|
; SSE41-NEXT: psrlw $8, %xmm2
|
|
; SSE41-NEXT: psrlw $8, %xmm12
|
|
; SSE41-NEXT: packuswb %xmm2, %xmm12
|
|
; SSE41-NEXT: punpckhbw {{.*#+}} xmm11 = xmm11[8],xmm0[8],xmm11[9],xmm0[9],xmm11[10],xmm0[10],xmm11[11],xmm0[11],xmm11[12],xmm0[12],xmm11[13],xmm0[13],xmm11[14],xmm0[14],xmm11[15],xmm0[15]
|
|
; SSE41-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
|
|
; SSE41-NEXT: pmullw %xmm11, %xmm1
|
|
; SSE41-NEXT: psrlw $8, %xmm1
|
|
; SSE41-NEXT: psrlw $8, %xmm9
|
|
; SSE41-NEXT: packuswb %xmm1, %xmm9
|
|
; SSE41-NEXT: punpckhbw {{.*#+}} xmm13 = xmm13[8],xmm0[8],xmm13[9],xmm0[9],xmm13[10],xmm0[10],xmm13[11],xmm0[11],xmm13[12],xmm0[12],xmm13[13],xmm0[13],xmm13[14],xmm0[14],xmm13[15],xmm0[15]
|
|
; SSE41-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm0[8],xmm5[9],xmm0[9],xmm5[10],xmm0[10],xmm5[11],xmm0[11],xmm5[12],xmm0[12],xmm5[13],xmm0[13],xmm5[14],xmm0[14],xmm5[15],xmm0[15]
|
|
; SSE41-NEXT: pmullw %xmm13, %xmm5
|
|
; SSE41-NEXT: psrlw $8, %xmm5
|
|
; SSE41-NEXT: psrlw $8, %xmm8
|
|
; SSE41-NEXT: packuswb %xmm5, %xmm8
|
|
; SSE41-NEXT: pcmpeqb %xmm0, %xmm14
|
|
; SSE41-NEXT: pcmpeqb %xmm0, %xmm12
|
|
; SSE41-NEXT: pcmpeqb %xmm0, %xmm9
|
|
; SSE41-NEXT: pcmpeqb %xmm0, %xmm8
|
|
; SSE41-NEXT: pcmpeqd %xmm0, %xmm0
|
|
; SSE41-NEXT: pxor %xmm0, %xmm14
|
|
; SSE41-NEXT: pxor %xmm0, %xmm12
|
|
; SSE41-NEXT: pxor %xmm0, %xmm9
|
|
; SSE41-NEXT: pxor %xmm0, %xmm8
|
|
; SSE41-NEXT: movdqa %xmm10, 48(%rsi)
|
|
; SSE41-NEXT: movdqa %xmm15, 32(%rsi)
|
|
; SSE41-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
|
|
; SSE41-NEXT: movaps %xmm0, 16(%rsi)
|
|
; SSE41-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
|
|
; SSE41-NEXT: movaps %xmm0, (%rsi)
|
|
; SSE41-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm14[0],zero,zero,zero,xmm14[1],zero,zero,zero,xmm14[2],zero,zero,zero,xmm14[3],zero,zero,zero
|
|
; SSE41-NEXT: pslld $31, %xmm0
|
|
; SSE41-NEXT: psrad $31, %xmm0
|
|
; SSE41-NEXT: movdqa %xmm0, 192(%rdi)
|
|
; SSE41-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm12[0],zero,zero,zero,xmm12[1],zero,zero,zero,xmm12[2],zero,zero,zero,xmm12[3],zero,zero,zero
|
|
; SSE41-NEXT: pslld $31, %xmm0
|
|
; SSE41-NEXT: psrad $31, %xmm0
|
|
; SSE41-NEXT: movdqa %xmm0, 128(%rdi)
|
|
; SSE41-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm9[0],zero,zero,zero,xmm9[1],zero,zero,zero,xmm9[2],zero,zero,zero,xmm9[3],zero,zero,zero
|
|
; SSE41-NEXT: pslld $31, %xmm0
|
|
; SSE41-NEXT: psrad $31, %xmm0
|
|
; SSE41-NEXT: movdqa %xmm0, 64(%rdi)
|
|
; SSE41-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm8[0],zero,zero,zero,xmm8[1],zero,zero,zero,xmm8[2],zero,zero,zero,xmm8[3],zero,zero,zero
|
|
; SSE41-NEXT: pslld $31, %xmm0
|
|
; SSE41-NEXT: psrad $31, %xmm0
|
|
; SSE41-NEXT: movdqa %xmm0, (%rdi)
|
|
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm14[2,3,2,3]
|
|
; SSE41-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
|
|
; SSE41-NEXT: pslld $31, %xmm0
|
|
; SSE41-NEXT: psrad $31, %xmm0
|
|
; SSE41-NEXT: movdqa %xmm0, 224(%rdi)
|
|
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm14[3,3,3,3]
|
|
; SSE41-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
|
|
; SSE41-NEXT: pslld $31, %xmm0
|
|
; SSE41-NEXT: psrad $31, %xmm0
|
|
; SSE41-NEXT: movdqa %xmm0, 240(%rdi)
|
|
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm14[1,1,1,1]
|
|
; SSE41-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
|
|
; SSE41-NEXT: pslld $31, %xmm0
|
|
; SSE41-NEXT: psrad $31, %xmm0
|
|
; SSE41-NEXT: movdqa %xmm0, 208(%rdi)
|
|
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm12[2,3,2,3]
|
|
; SSE41-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
|
|
; SSE41-NEXT: pslld $31, %xmm0
|
|
; SSE41-NEXT: psrad $31, %xmm0
|
|
; SSE41-NEXT: movdqa %xmm0, 160(%rdi)
|
|
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm12[3,3,3,3]
|
|
; SSE41-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
|
|
; SSE41-NEXT: pslld $31, %xmm0
|
|
; SSE41-NEXT: psrad $31, %xmm0
|
|
; SSE41-NEXT: movdqa %xmm0, 176(%rdi)
|
|
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm12[1,1,1,1]
|
|
; SSE41-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
|
|
; SSE41-NEXT: pslld $31, %xmm0
|
|
; SSE41-NEXT: psrad $31, %xmm0
|
|
; SSE41-NEXT: movdqa %xmm0, 144(%rdi)
|
|
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm9[2,3,2,3]
|
|
; SSE41-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
|
|
; SSE41-NEXT: pslld $31, %xmm0
|
|
; SSE41-NEXT: psrad $31, %xmm0
|
|
; SSE41-NEXT: movdqa %xmm0, 96(%rdi)
|
|
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm9[3,3,3,3]
|
|
; SSE41-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
|
|
; SSE41-NEXT: pslld $31, %xmm0
|
|
; SSE41-NEXT: psrad $31, %xmm0
|
|
; SSE41-NEXT: movdqa %xmm0, 112(%rdi)
|
|
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm9[1,1,1,1]
|
|
; SSE41-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
|
|
; SSE41-NEXT: pslld $31, %xmm0
|
|
; SSE41-NEXT: psrad $31, %xmm0
|
|
; SSE41-NEXT: movdqa %xmm0, 80(%rdi)
|
|
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm8[2,3,2,3]
|
|
; SSE41-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
|
|
; SSE41-NEXT: pslld $31, %xmm0
|
|
; SSE41-NEXT: psrad $31, %xmm0
|
|
; SSE41-NEXT: movdqa %xmm0, 32(%rdi)
|
|
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm8[3,3,3,3]
|
|
; SSE41-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
|
|
; SSE41-NEXT: pslld $31, %xmm0
|
|
; SSE41-NEXT: psrad $31, %xmm0
|
|
; SSE41-NEXT: movdqa %xmm0, 48(%rdi)
|
|
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm8[1,1,1,1]
|
|
; SSE41-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
|
|
; SSE41-NEXT: pslld $31, %xmm0
|
|
; SSE41-NEXT: psrad $31, %xmm0
|
|
; SSE41-NEXT: movdqa %xmm0, 16(%rdi)
|
|
; SSE41-NEXT: retq
|
|
;
|
|
; AVX1-LABEL: umulo_v64i8:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa %ymm2, %ymm10
; AVX1-NEXT: vmovdqa %ymm0, %ymm2
; AVX1-NEXT: movq %rdi, %rax
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm10[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vpmullw %xmm4, %xmm5, %xmm4
; AVX1-NEXT: vmovdqa {{.*#+}} xmm7 = [255,255,255,255,255,255,255,255]
; AVX1-NEXT: vpand %xmm7, %xmm4, %xmm4
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm5 = xmm10[0],zero,xmm10[1],zero,xmm10[2],zero,xmm10[3],zero,xmm10[4],zero,xmm10[5],zero,xmm10[6],zero,xmm10[7],zero
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm6 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
; AVX1-NEXT: vpmullw %xmm5, %xmm6, %xmm12
; AVX1-NEXT: vpand %xmm7, %xmm12, %xmm5
; AVX1-NEXT: vpackuswb %xmm4, %xmm5, %xmm0
; AVX1-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vextractf128 $1, %ymm10, %xmm13
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm13[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm14
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm14[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vpmullw %xmm4, %xmm5, %xmm4
; AVX1-NEXT: vpand %xmm7, %xmm4, %xmm5
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm4 = xmm13[0],zero,xmm13[1],zero,xmm13[2],zero,xmm13[3],zero,xmm13[4],zero,xmm13[5],zero,xmm13[6],zero,xmm13[7],zero
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm6 = xmm14[0],zero,xmm14[1],zero,xmm14[2],zero,xmm14[3],zero,xmm14[4],zero,xmm14[5],zero,xmm14[6],zero,xmm14[7],zero
; AVX1-NEXT: vpmullw %xmm4, %xmm6, %xmm15
; AVX1-NEXT: vpand %xmm7, %xmm15, %xmm6
; AVX1-NEXT: vpackuswb %xmm5, %xmm6, %xmm0
; AVX1-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm6 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vpmullw %xmm5, %xmm6, %xmm5
; AVX1-NEXT: vpand %xmm7, %xmm5, %xmm5
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm6 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm4 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; AVX1-NEXT: vpmullw %xmm6, %xmm4, %xmm0
; AVX1-NEXT: vpand %xmm7, %xmm0, %xmm6
; AVX1-NEXT: vpackuswb %xmm5, %xmm6, %xmm4
; AVX1-NEXT: vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm5
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm6 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm11
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm8 = xmm11[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vpmullw %xmm6, %xmm8, %xmm8
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm6 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm9 = xmm11[0],zero,xmm11[1],zero,xmm11[2],zero,xmm11[3],zero,xmm11[4],zero,xmm11[5],zero,xmm11[6],zero,xmm11[7],zero
; AVX1-NEXT: vpmullw %xmm6, %xmm9, %xmm6
; AVX1-NEXT: vpand %xmm7, %xmm8, %xmm8
; AVX1-NEXT: vpand %xmm7, %xmm6, %xmm7
; AVX1-NEXT: vpackuswb %xmm8, %xmm7, %xmm8
; AVX1-NEXT: vpxor %xmm7, %xmm7, %xmm7
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm5[8],xmm7[8],xmm5[9],xmm7[9],xmm5[10],xmm7[10],xmm5[11],xmm7[11],xmm5[12],xmm7[12],xmm5[13],xmm7[13],xmm5[14],xmm7[14],xmm5[15],xmm7[15]
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm11[8],xmm7[8],xmm11[9],xmm7[9],xmm11[10],xmm7[10],xmm11[11],xmm7[11],xmm11[12],xmm7[12],xmm11[13],xmm7[13],xmm11[14],xmm7[14],xmm11[15],xmm7[15]
; AVX1-NEXT: vpmullw %xmm5, %xmm4, %xmm4
; AVX1-NEXT: vpsrlw $8, %xmm4, %xmm4
; AVX1-NEXT: vpsrlw $8, %xmm6, %xmm5
; AVX1-NEXT: vpackuswb %xmm4, %xmm5, %xmm4
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm3[8],xmm7[8],xmm3[9],xmm7[9],xmm3[10],xmm7[10],xmm3[11],xmm7[11],xmm3[12],xmm7[12],xmm3[13],xmm7[13],xmm3[14],xmm7[14],xmm3[15],xmm7[15]
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm1[8],xmm7[8],xmm1[9],xmm7[9],xmm1[10],xmm7[10],xmm1[11],xmm7[11],xmm1[12],xmm7[12],xmm1[13],xmm7[13],xmm1[14],xmm7[14],xmm1[15],xmm7[15]
; AVX1-NEXT: vpmullw %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vpsrlw $8, %xmm1, %xmm1
; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm13[8],xmm7[8],xmm13[9],xmm7[9],xmm13[10],xmm7[10],xmm13[11],xmm7[11],xmm13[12],xmm7[12],xmm13[13],xmm7[13],xmm13[14],xmm7[14],xmm13[15],xmm7[15]
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm14[8],xmm7[8],xmm14[9],xmm7[9],xmm14[10],xmm7[10],xmm14[11],xmm7[11],xmm14[12],xmm7[12],xmm14[13],xmm7[13],xmm14[14],xmm7[14],xmm14[15],xmm7[15]
; AVX1-NEXT: vpmullw %xmm1, %xmm3, %xmm1
; AVX1-NEXT: vpsrlw $8, %xmm1, %xmm1
; AVX1-NEXT: vpsrlw $8, %xmm15, %xmm3
; AVX1-NEXT: vpackuswb %xmm1, %xmm3, %xmm1
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm10[8],xmm7[8],xmm10[9],xmm7[9],xmm10[10],xmm7[10],xmm10[11],xmm7[11],xmm10[12],xmm7[12],xmm10[13],xmm7[13],xmm10[14],xmm7[14],xmm10[15],xmm7[15]
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm2[8],xmm7[8],xmm2[9],xmm7[9],xmm2[10],xmm7[10],xmm2[11],xmm7[11],xmm2[12],xmm7[12],xmm2[13],xmm7[13],xmm2[14],xmm7[14],xmm2[15],xmm7[15]
; AVX1-NEXT: vpmullw %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2
; AVX1-NEXT: vpsrlw $8, %xmm12, %xmm3
; AVX1-NEXT: vpackuswb %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpcmpeqb %xmm7, %xmm4, %xmm3
; AVX1-NEXT: vpcmpeqb %xmm7, %xmm0, %xmm0
; AVX1-NEXT: vpcmpeqb %xmm7, %xmm1, %xmm1
; AVX1-NEXT: vpcmpeqb %xmm7, %xmm2, %xmm4
; AVX1-NEXT: vpcmpeqd %xmm5, %xmm5, %xmm5
; AVX1-NEXT: vpxor %xmm5, %xmm3, %xmm3
; AVX1-NEXT: vpxor %xmm5, %xmm0, %xmm2
; AVX1-NEXT: vpxor %xmm5, %xmm1, %xmm1
; AVX1-NEXT: vpxor %xmm5, %xmm4, %xmm0
; AVX1-NEXT: vmovdqa %xmm8, 48(%rsi)
; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
; AVX1-NEXT: vmovaps %xmm4, 32(%rsi)
; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
; AVX1-NEXT: vmovaps %xmm4, 16(%rsi)
; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
; AVX1-NEXT: vmovaps %xmm4, (%rsi)
; AVX1-NEXT: vpmovsxbd %xmm3, %xmm4
; AVX1-NEXT: vmovdqa %xmm4, 192(%rdi)
; AVX1-NEXT: vpmovsxbd %xmm2, %xmm4
; AVX1-NEXT: vmovdqa %xmm4, 128(%rdi)
; AVX1-NEXT: vpmovsxbd %xmm1, %xmm4
; AVX1-NEXT: vmovdqa %xmm4, 64(%rdi)
; AVX1-NEXT: vpmovsxbd %xmm0, %xmm4
; AVX1-NEXT: vmovdqa %xmm4, (%rdi)
; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm3[2,3,2,3]
; AVX1-NEXT: vpmovsxbd %xmm4, %xmm4
; AVX1-NEXT: vmovdqa %xmm4, 224(%rdi)
; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm3[3,3,3,3]
; AVX1-NEXT: vpmovsxbd %xmm4, %xmm4
; AVX1-NEXT: vmovdqa %xmm4, 240(%rdi)
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[1,1,1,1]
; AVX1-NEXT: vpmovsxbd %xmm3, %xmm3
; AVX1-NEXT: vmovdqa %xmm3, 208(%rdi)
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[2,3,2,3]
; AVX1-NEXT: vpmovsxbd %xmm3, %xmm3
; AVX1-NEXT: vmovdqa %xmm3, 160(%rdi)
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[3,3,3,3]
; AVX1-NEXT: vpmovsxbd %xmm3, %xmm3
; AVX1-NEXT: vmovdqa %xmm3, 176(%rdi)
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,1,1]
; AVX1-NEXT: vpmovsxbd %xmm2, %xmm2
; AVX1-NEXT: vmovdqa %xmm2, 144(%rdi)
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[2,3,2,3]
; AVX1-NEXT: vpmovsxbd %xmm2, %xmm2
; AVX1-NEXT: vmovdqa %xmm2, 96(%rdi)
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[3,3,3,3]
; AVX1-NEXT: vpmovsxbd %xmm2, %xmm2
; AVX1-NEXT: vmovdqa %xmm2, 112(%rdi)
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,1,1]
; AVX1-NEXT: vpmovsxbd %xmm1, %xmm1
; AVX1-NEXT: vmovdqa %xmm1, 80(%rdi)
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX1-NEXT: vpmovsxbd %xmm1, %xmm1
; AVX1-NEXT: vmovdqa %xmm1, 32(%rdi)
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[3,3,3,3]
; AVX1-NEXT: vpmovsxbd %xmm1, %xmm1
; AVX1-NEXT: vmovdqa %xmm1, 48(%rdi)
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
; AVX1-NEXT: vpmovsxbd %xmm0, %xmm0
; AVX1-NEXT: vmovdqa %xmm0, 16(%rdi)
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: umulo_v64i8:
; AVX2: # %bb.0:
; AVX2-NEXT: movq %rdi, %rax
; AVX2-NEXT: vpunpckhbw {{.*#+}} ymm4 = ymm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX2-NEXT: vpunpckhbw {{.*#+}} ymm5 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX2-NEXT: vpmullw %ymm4, %ymm5, %ymm4
; AVX2-NEXT: vmovdqa {{.*#+}} ymm5 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX2-NEXT: vpand %ymm5, %ymm4, %ymm4
; AVX2-NEXT: vpunpcklbw {{.*#+}} ymm6 = ymm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; AVX2-NEXT: vpunpcklbw {{.*#+}} ymm7 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; AVX2-NEXT: vpmullw %ymm6, %ymm7, %ymm6
; AVX2-NEXT: vpand %ymm5, %ymm6, %ymm6
; AVX2-NEXT: vpackuswb %ymm4, %ymm6, %ymm9
; AVX2-NEXT: vpunpckhbw {{.*#+}} ymm6 = ymm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX2-NEXT: vpunpckhbw {{.*#+}} ymm7 = ymm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX2-NEXT: vpmullw %ymm6, %ymm7, %ymm6
; AVX2-NEXT: vpand %ymm5, %ymm6, %ymm6
; AVX2-NEXT: vpunpcklbw {{.*#+}} ymm7 = ymm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; AVX2-NEXT: vpunpcklbw {{.*#+}} ymm8 = ymm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; AVX2-NEXT: vpmullw %ymm7, %ymm8, %ymm7
; AVX2-NEXT: vpand %ymm5, %ymm7, %ymm5
; AVX2-NEXT: vpackuswb %ymm6, %ymm5, %ymm10
; AVX2-NEXT: vpxor %xmm6, %xmm6, %xmm6
; AVX2-NEXT: vpunpckhbw {{.*#+}} ymm7 = ymm3[8],ymm6[8],ymm3[9],ymm6[9],ymm3[10],ymm6[10],ymm3[11],ymm6[11],ymm3[12],ymm6[12],ymm3[13],ymm6[13],ymm3[14],ymm6[14],ymm3[15],ymm6[15],ymm3[24],ymm6[24],ymm3[25],ymm6[25],ymm3[26],ymm6[26],ymm3[27],ymm6[27],ymm3[28],ymm6[28],ymm3[29],ymm6[29],ymm3[30],ymm6[30],ymm3[31],ymm6[31]
; AVX2-NEXT: vpunpckhbw {{.*#+}} ymm8 = ymm1[8],ymm6[8],ymm1[9],ymm6[9],ymm1[10],ymm6[10],ymm1[11],ymm6[11],ymm1[12],ymm6[12],ymm1[13],ymm6[13],ymm1[14],ymm6[14],ymm1[15],ymm6[15],ymm1[24],ymm6[24],ymm1[25],ymm6[25],ymm1[26],ymm6[26],ymm1[27],ymm6[27],ymm1[28],ymm6[28],ymm1[29],ymm6[29],ymm1[30],ymm6[30],ymm1[31],ymm6[31]
; AVX2-NEXT: vpmullw %ymm7, %ymm8, %ymm7
; AVX2-NEXT: vpsrlw $8, %ymm7, %ymm7
; AVX2-NEXT: vpunpcklbw {{.*#+}} ymm3 = ymm3[0],ymm6[0],ymm3[1],ymm6[1],ymm3[2],ymm6[2],ymm3[3],ymm6[3],ymm3[4],ymm6[4],ymm3[5],ymm6[5],ymm3[6],ymm6[6],ymm3[7],ymm6[7],ymm3[16],ymm6[16],ymm3[17],ymm6[17],ymm3[18],ymm6[18],ymm3[19],ymm6[19],ymm3[20],ymm6[20],ymm3[21],ymm6[21],ymm3[22],ymm6[22],ymm3[23],ymm6[23]
; AVX2-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm1[0],ymm6[0],ymm1[1],ymm6[1],ymm1[2],ymm6[2],ymm1[3],ymm6[3],ymm1[4],ymm6[4],ymm1[5],ymm6[5],ymm1[6],ymm6[6],ymm1[7],ymm6[7],ymm1[16],ymm6[16],ymm1[17],ymm6[17],ymm1[18],ymm6[18],ymm1[19],ymm6[19],ymm1[20],ymm6[20],ymm1[21],ymm6[21],ymm1[22],ymm6[22],ymm1[23],ymm6[23]
; AVX2-NEXT: vpmullw %ymm3, %ymm1, %ymm1
; AVX2-NEXT: vpsrlw $8, %ymm1, %ymm1
; AVX2-NEXT: vpackuswb %ymm7, %ymm1, %ymm1
; AVX2-NEXT: vpcmpeqb %ymm6, %ymm1, %ymm1
; AVX2-NEXT: vpcmpeqd %ymm3, %ymm3, %ymm3
; AVX2-NEXT: vpxor %ymm3, %ymm1, %ymm1
; AVX2-NEXT: vpunpckhbw {{.*#+}} ymm7 = ymm2[8],ymm6[8],ymm2[9],ymm6[9],ymm2[10],ymm6[10],ymm2[11],ymm6[11],ymm2[12],ymm6[12],ymm2[13],ymm6[13],ymm2[14],ymm6[14],ymm2[15],ymm6[15],ymm2[24],ymm6[24],ymm2[25],ymm6[25],ymm2[26],ymm6[26],ymm2[27],ymm6[27],ymm2[28],ymm6[28],ymm2[29],ymm6[29],ymm2[30],ymm6[30],ymm2[31],ymm6[31]
; AVX2-NEXT: vpunpckhbw {{.*#+}} ymm8 = ymm0[8],ymm6[8],ymm0[9],ymm6[9],ymm0[10],ymm6[10],ymm0[11],ymm6[11],ymm0[12],ymm6[12],ymm0[13],ymm6[13],ymm0[14],ymm6[14],ymm0[15],ymm6[15],ymm0[24],ymm6[24],ymm0[25],ymm6[25],ymm0[26],ymm6[26],ymm0[27],ymm6[27],ymm0[28],ymm6[28],ymm0[29],ymm6[29],ymm0[30],ymm6[30],ymm0[31],ymm6[31]
; AVX2-NEXT: vpmullw %ymm7, %ymm8, %ymm7
; AVX2-NEXT: vpsrlw $8, %ymm7, %ymm7
; AVX2-NEXT: vpunpcklbw {{.*#+}} ymm2 = ymm2[0],ymm6[0],ymm2[1],ymm6[1],ymm2[2],ymm6[2],ymm2[3],ymm6[3],ymm2[4],ymm6[4],ymm2[5],ymm6[5],ymm2[6],ymm6[6],ymm2[7],ymm6[7],ymm2[16],ymm6[16],ymm2[17],ymm6[17],ymm2[18],ymm6[18],ymm2[19],ymm6[19],ymm2[20],ymm6[20],ymm2[21],ymm6[21],ymm2[22],ymm6[22],ymm2[23],ymm6[23]
; AVX2-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0],ymm6[0],ymm0[1],ymm6[1],ymm0[2],ymm6[2],ymm0[3],ymm6[3],ymm0[4],ymm6[4],ymm0[5],ymm6[5],ymm0[6],ymm6[6],ymm0[7],ymm6[7],ymm0[16],ymm6[16],ymm0[17],ymm6[17],ymm0[18],ymm6[18],ymm0[19],ymm6[19],ymm0[20],ymm6[20],ymm0[21],ymm6[21],ymm0[22],ymm6[22],ymm0[23],ymm6[23]
; AVX2-NEXT: vpmullw %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpsrlw $8, %ymm0, %ymm0
; AVX2-NEXT: vpackuswb %ymm7, %ymm0, %ymm0
; AVX2-NEXT: vpcmpeqb %ymm6, %ymm0, %ymm0
; AVX2-NEXT: vpxor %ymm3, %ymm0, %ymm0
; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
; AVX2-NEXT: vpmovsxbd %xmm2, %ymm2
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm3
; AVX2-NEXT: vpshufd {{.*#+}} xmm6 = xmm3[2,3,2,3]
; AVX2-NEXT: vpmovsxbd %xmm6, %ymm6
; AVX2-NEXT: vpshufd {{.*#+}} xmm7 = xmm1[2,3,2,3]
; AVX2-NEXT: vpmovsxbd %xmm7, %ymm7
; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm4
; AVX2-NEXT: vpshufd {{.*#+}} xmm5 = xmm4[2,3,2,3]
; AVX2-NEXT: vpmovsxbd %xmm5, %ymm5
; AVX2-NEXT: vpmovsxbd %xmm0, %ymm0
; AVX2-NEXT: vpmovsxbd %xmm3, %ymm3
; AVX2-NEXT: vpmovsxbd %xmm1, %ymm1
; AVX2-NEXT: vpmovsxbd %xmm4, %ymm4
; AVX2-NEXT: vmovdqa %ymm10, 32(%rsi)
; AVX2-NEXT: vmovdqa %ymm9, (%rsi)
; AVX2-NEXT: vmovdqa %ymm4, 192(%rdi)
; AVX2-NEXT: vmovdqa %ymm1, 128(%rdi)
; AVX2-NEXT: vmovdqa %ymm3, 64(%rdi)
; AVX2-NEXT: vmovdqa %ymm0, (%rdi)
; AVX2-NEXT: vmovdqa %ymm5, 224(%rdi)
; AVX2-NEXT: vmovdqa %ymm7, 160(%rdi)
; AVX2-NEXT: vmovdqa %ymm6, 96(%rdi)
; AVX2-NEXT: vmovdqa %ymm2, 32(%rdi)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: umulo_v64i8:
; AVX512: # %bb.0:
; AVX512-NEXT: vpunpckhbw {{.*#+}} zmm2 = zmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
; AVX512-NEXT: vpunpckhbw {{.*#+}} zmm3 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
; AVX512-NEXT: vpmullw %zmm2, %zmm3, %zmm2
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm3 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX512-NEXT: vpandq %zmm3, %zmm2, %zmm2
; AVX512-NEXT: vpunpcklbw {{.*#+}} zmm4 = zmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
; AVX512-NEXT: vpunpcklbw {{.*#+}} zmm5 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
; AVX512-NEXT: vpmullw %zmm4, %zmm5, %zmm4
; AVX512-NEXT: vpandq %zmm3, %zmm4, %zmm3
; AVX512-NEXT: vpackuswb %zmm2, %zmm3, %zmm4
; AVX512-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512-NEXT: vpunpckhbw {{.*#+}} zmm3 = zmm1[8],zmm2[8],zmm1[9],zmm2[9],zmm1[10],zmm2[10],zmm1[11],zmm2[11],zmm1[12],zmm2[12],zmm1[13],zmm2[13],zmm1[14],zmm2[14],zmm1[15],zmm2[15],zmm1[24],zmm2[24],zmm1[25],zmm2[25],zmm1[26],zmm2[26],zmm1[27],zmm2[27],zmm1[28],zmm2[28],zmm1[29],zmm2[29],zmm1[30],zmm2[30],zmm1[31],zmm2[31],zmm1[40],zmm2[40],zmm1[41],zmm2[41],zmm1[42],zmm2[42],zmm1[43],zmm2[43],zmm1[44],zmm2[44],zmm1[45],zmm2[45],zmm1[46],zmm2[46],zmm1[47],zmm2[47],zmm1[56],zmm2[56],zmm1[57],zmm2[57],zmm1[58],zmm2[58],zmm1[59],zmm2[59],zmm1[60],zmm2[60],zmm1[61],zmm2[61],zmm1[62],zmm2[62],zmm1[63],zmm2[63]
; AVX512-NEXT: vpunpckhbw {{.*#+}} zmm5 = zmm0[8],zmm2[8],zmm0[9],zmm2[9],zmm0[10],zmm2[10],zmm0[11],zmm2[11],zmm0[12],zmm2[12],zmm0[13],zmm2[13],zmm0[14],zmm2[14],zmm0[15],zmm2[15],zmm0[24],zmm2[24],zmm0[25],zmm2[25],zmm0[26],zmm2[26],zmm0[27],zmm2[27],zmm0[28],zmm2[28],zmm0[29],zmm2[29],zmm0[30],zmm2[30],zmm0[31],zmm2[31],zmm0[40],zmm2[40],zmm0[41],zmm2[41],zmm0[42],zmm2[42],zmm0[43],zmm2[43],zmm0[44],zmm2[44],zmm0[45],zmm2[45],zmm0[46],zmm2[46],zmm0[47],zmm2[47],zmm0[56],zmm2[56],zmm0[57],zmm2[57],zmm0[58],zmm2[58],zmm0[59],zmm2[59],zmm0[60],zmm2[60],zmm0[61],zmm2[61],zmm0[62],zmm2[62],zmm0[63],zmm2[63]
; AVX512-NEXT: vpmullw %zmm3, %zmm5, %zmm3
; AVX512-NEXT: vpsrlw $8, %zmm3, %zmm3
; AVX512-NEXT: vpunpcklbw {{.*#+}} zmm1 = zmm1[0],zmm2[0],zmm1[1],zmm2[1],zmm1[2],zmm2[2],zmm1[3],zmm2[3],zmm1[4],zmm2[4],zmm1[5],zmm2[5],zmm1[6],zmm2[6],zmm1[7],zmm2[7],zmm1[16],zmm2[16],zmm1[17],zmm2[17],zmm1[18],zmm2[18],zmm1[19],zmm2[19],zmm1[20],zmm2[20],zmm1[21],zmm2[21],zmm1[22],zmm2[22],zmm1[23],zmm2[23],zmm1[32],zmm2[32],zmm1[33],zmm2[33],zmm1[34],zmm2[34],zmm1[35],zmm2[35],zmm1[36],zmm2[36],zmm1[37],zmm2[37],zmm1[38],zmm2[38],zmm1[39],zmm2[39],zmm1[48],zmm2[48],zmm1[49],zmm2[49],zmm1[50],zmm2[50],zmm1[51],zmm2[51],zmm1[52],zmm2[52],zmm1[53],zmm2[53],zmm1[54],zmm2[54],zmm1[55],zmm2[55]
; AVX512-NEXT: vpunpcklbw {{.*#+}} zmm0 = zmm0[0],zmm2[0],zmm0[1],zmm2[1],zmm0[2],zmm2[2],zmm0[3],zmm2[3],zmm0[4],zmm2[4],zmm0[5],zmm2[5],zmm0[6],zmm2[6],zmm0[7],zmm2[7],zmm0[16],zmm2[16],zmm0[17],zmm2[17],zmm0[18],zmm2[18],zmm0[19],zmm2[19],zmm0[20],zmm2[20],zmm0[21],zmm2[21],zmm0[22],zmm2[22],zmm0[23],zmm2[23],zmm0[32],zmm2[32],zmm0[33],zmm2[33],zmm0[34],zmm2[34],zmm0[35],zmm2[35],zmm0[36],zmm2[36],zmm0[37],zmm2[37],zmm0[38],zmm2[38],zmm0[39],zmm2[39],zmm0[48],zmm2[48],zmm0[49],zmm2[49],zmm0[50],zmm2[50],zmm0[51],zmm2[51],zmm0[52],zmm2[52],zmm0[53],zmm2[53],zmm0[54],zmm2[54],zmm0[55],zmm2[55]
; AVX512-NEXT: vpmullw %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vpsrlw $8, %zmm0, %zmm0
; AVX512-NEXT: vpackuswb %zmm3, %zmm0, %zmm0
; AVX512-NEXT: vptestmb %zmm0, %zmm0, %k1
; AVX512-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512-NEXT: kshiftrd $16, %k1, %k2
; AVX512-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k2} {z}
; AVX512-NEXT: kshiftrq $32, %k1, %k1
; AVX512-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
; AVX512-NEXT: kshiftrd $16, %k1, %k1
; AVX512-NEXT: vpternlogd $255, %zmm3, %zmm3, %zmm3 {%k1} {z}
; AVX512-NEXT: vmovdqa64 %zmm4, (%rdi)
; AVX512-NEXT: retq
  %t = call {<64 x i8>, <64 x i1>} @llvm.umul.with.overflow.v64i8(<64 x i8> %a0, <64 x i8> %a1)
  %val = extractvalue {<64 x i8>, <64 x i1>} %t, 0
  %obit = extractvalue {<64 x i8>, <64 x i1>} %t, 1
  %res = sext <64 x i1> %obit to <64 x i32>
  store <64 x i8> %val, <64 x i8>* %p2
  ret <64 x i32> %res
}

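; v8i16 is the friendliest case: pmullw computes the low 16 bits of each
; product and pmulhuw the high 16 bits, and an unsigned lane overflows
; exactly when its high half is nonzero. In outline, per lane:
;   hi = (zext(a) * zext(b)) >> 16 ; overflow <=> hi != 0
; so the check blocks below compare the pmulhuw result against zero and
; invert the resulting mask.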
define <8 x i32> @umulo_v8i16(<8 x i16> %a0, <8 x i16> %a1, <8 x i16>* %p2) nounwind {
; SSE2-LABEL: umulo_v8i16:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: pmullw %xmm1, %xmm2
; SSE2-NEXT: pmulhuw %xmm1, %xmm0
; SSE2-NEXT: pxor %xmm3, %xmm3
; SSE2-NEXT: pcmpeqw %xmm0, %xmm3
; SSE2-NEXT: pcmpeqd %xmm1, %xmm1
; SSE2-NEXT: pxor %xmm3, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm0
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE2-NEXT: pslld $31, %xmm0
; SSE2-NEXT: psrad $31, %xmm0
; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4,4,5,5,6,6,7,7]
; SSE2-NEXT: pslld $31, %xmm1
; SSE2-NEXT: psrad $31, %xmm1
; SSE2-NEXT: movdqa %xmm2, (%rdi)
; SSE2-NEXT: retq
;
; SSSE3-LABEL: umulo_v8i16:
; SSSE3: # %bb.0:
; SSSE3-NEXT: movdqa %xmm0, %xmm2
; SSSE3-NEXT: pmullw %xmm1, %xmm2
; SSSE3-NEXT: pmulhuw %xmm1, %xmm0
; SSSE3-NEXT: pxor %xmm3, %xmm3
; SSSE3-NEXT: pcmpeqw %xmm0, %xmm3
; SSSE3-NEXT: pcmpeqd %xmm1, %xmm1
; SSSE3-NEXT: pxor %xmm3, %xmm1
; SSSE3-NEXT: movdqa %xmm1, %xmm0
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSSE3-NEXT: pslld $31, %xmm0
; SSSE3-NEXT: psrad $31, %xmm0
; SSSE3-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4,4,5,5,6,6,7,7]
; SSSE3-NEXT: pslld $31, %xmm1
; SSSE3-NEXT: psrad $31, %xmm1
; SSSE3-NEXT: movdqa %xmm2, (%rdi)
; SSSE3-NEXT: retq
;
; SSE41-LABEL: umulo_v8i16:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: pmullw %xmm1, %xmm2
; SSE41-NEXT: pmulhuw %xmm1, %xmm0
; SSE41-NEXT: pxor %xmm3, %xmm3
; SSE41-NEXT: pcmpeqw %xmm0, %xmm3
; SSE41-NEXT: pcmpeqd %xmm1, %xmm1
; SSE41-NEXT: pxor %xmm3, %xmm1
; SSE41-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
; SSE41-NEXT: pslld $31, %xmm0
; SSE41-NEXT: psrad $31, %xmm0
; SSE41-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4,4,5,5,6,6,7,7]
; SSE41-NEXT: pslld $31, %xmm1
; SSE41-NEXT: psrad $31, %xmm1
; SSE41-NEXT: movdqa %xmm2, (%rdi)
; SSE41-NEXT: retq
;
; AVX1-LABEL: umulo_v8i16:
; AVX1: # %bb.0:
; AVX1-NEXT: vpmullw %xmm1, %xmm0, %xmm2
; AVX1-NEXT: vpmulhuw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpmovsxwd %xmm0, %xmm1
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
; AVX1-NEXT: vpmovsxwd %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: vmovdqa %xmm2, (%rdi)
; AVX1-NEXT: retq
;
; AVX2-LABEL: umulo_v8i16:
; AVX2: # %bb.0:
; AVX2-NEXT: vpmullw %xmm1, %xmm0, %xmm2
; AVX2-NEXT: vpmulhuw %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0
; AVX2-NEXT: vmovdqa %xmm2, (%rdi)
; AVX2-NEXT: retq
;
; AVX512-LABEL: umulo_v8i16:
; AVX512: # %bb.0:
; AVX512-NEXT: vpmullw %xmm1, %xmm0, %xmm2
; AVX512-NEXT: vpmulhuw %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vptestmw %xmm0, %xmm0, %k1
; AVX512-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
; AVX512-NEXT: vmovdqa32 %ymm0, %ymm0 {%k1} {z}
; AVX512-NEXT: vmovdqa %xmm2, (%rdi)
; AVX512-NEXT: retq
  %t = call {<8 x i16>, <8 x i1>} @llvm.umul.with.overflow.v8i16(<8 x i16> %a0, <8 x i16> %a1)
  %val = extractvalue {<8 x i16>, <8 x i1>} %t, 0
  %obit = extractvalue {<8 x i16>, <8 x i1>} %t, 1
  %res = sext <8 x i1> %obit to <8 x i32>
  store <8 x i16> %val, <8 x i16>* %p2
  ret <8 x i32> %res
}

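; There is no vector unsigned 64x64 multiply-with-overflow, so the v2i64
; case goes through scalar code: each element pair is moved to GPRs
; (movq/pextrq), multiplied with mulq (which sets OF when the high 64 bits
; of the product are nonzero), and the per-element overflow is materialized
; with cmovoq or seto before being packed back into a vector mask.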
define <2 x i32> @umulo_v2i64(<2 x i64> %a0, <2 x i64> %a1, <2 x i64>* %p2) nounwind {
; SSE2-LABEL: umulo_v2i64:
; SSE2: # %bb.0:
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
; SSE2-NEXT: movq %xmm2, %r8
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,2,3]
; SSE2-NEXT: movq %xmm2, %r10
; SSE2-NEXT: movq %xmm0, %rax
; SSE2-NEXT: movq %xmm1, %rdx
; SSE2-NEXT: xorl %ecx, %ecx
; SSE2-NEXT: mulq %rdx
; SSE2-NEXT: movq $-1, %r9
; SSE2-NEXT: movl $0, %esi
; SSE2-NEXT: cmovoq %r9, %rsi
; SSE2-NEXT: movq %rax, %xmm1
; SSE2-NEXT: movq %r8, %rax
; SSE2-NEXT: mulq %r10
; SSE2-NEXT: movq %rax, %xmm0
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; SSE2-NEXT: movq %rsi, %xmm0
; SSE2-NEXT: cmovoq %r9, %rcx
; SSE2-NEXT: movq %rcx, %xmm2
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT: movdqa %xmm1, (%rdi)
; SSE2-NEXT: retq
;
; SSSE3-LABEL: umulo_v2i64:
; SSSE3: # %bb.0:
; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
; SSSE3-NEXT: movq %xmm2, %r8
; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,2,3]
; SSSE3-NEXT: movq %xmm2, %r10
; SSSE3-NEXT: movq %xmm0, %rax
; SSSE3-NEXT: movq %xmm1, %rdx
; SSSE3-NEXT: xorl %ecx, %ecx
; SSSE3-NEXT: mulq %rdx
; SSSE3-NEXT: movq $-1, %r9
; SSSE3-NEXT: movl $0, %esi
; SSSE3-NEXT: cmovoq %r9, %rsi
; SSSE3-NEXT: movq %rax, %xmm1
; SSSE3-NEXT: movq %r8, %rax
; SSSE3-NEXT: mulq %r10
; SSSE3-NEXT: movq %rax, %xmm0
; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; SSSE3-NEXT: movq %rsi, %xmm0
; SSSE3-NEXT: cmovoq %r9, %rcx
; SSSE3-NEXT: movq %rcx, %xmm2
; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSSE3-NEXT: movdqa %xmm1, (%rdi)
; SSSE3-NEXT: retq
;
; SSE41-LABEL: umulo_v2i64:
; SSE41: # %bb.0:
; SSE41-NEXT: movq %xmm0, %r10
; SSE41-NEXT: movq %xmm1, %r8
; SSE41-NEXT: pextrq $1, %xmm0, %rax
; SSE41-NEXT: pextrq $1, %xmm1, %rdx
; SSE41-NEXT: xorl %esi, %esi
; SSE41-NEXT: mulq %rdx
; SSE41-NEXT: movq $-1, %r9
; SSE41-NEXT: movl $0, %ecx
; SSE41-NEXT: cmovoq %r9, %rcx
; SSE41-NEXT: movq %rax, %xmm0
; SSE41-NEXT: movq %r10, %rax
; SSE41-NEXT: mulq %r8
; SSE41-NEXT: movq %rax, %xmm1
; SSE41-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; SSE41-NEXT: movq %rcx, %xmm0
; SSE41-NEXT: cmovoq %r9, %rsi
; SSE41-NEXT: movq %rsi, %xmm2
; SSE41-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm0[0]
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
; SSE41-NEXT: movdqa %xmm1, (%rdi)
; SSE41-NEXT: retq
;
; AVX1-LABEL: umulo_v2i64:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovq %xmm0, %r10
; AVX1-NEXT: vmovq %xmm1, %r8
; AVX1-NEXT: vpextrq $1, %xmm0, %rax
; AVX1-NEXT: vpextrq $1, %xmm1, %rdx
; AVX1-NEXT: xorl %esi, %esi
; AVX1-NEXT: mulq %rdx
; AVX1-NEXT: movq $-1, %r9
; AVX1-NEXT: movl $0, %ecx
; AVX1-NEXT: cmovoq %r9, %rcx
; AVX1-NEXT: vmovq %rax, %xmm0
; AVX1-NEXT: movq %r10, %rax
; AVX1-NEXT: mulq %r8
; AVX1-NEXT: vmovq %rax, %xmm1
; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; AVX1-NEXT: vmovq %rcx, %xmm0
; AVX1-NEXT: cmovoq %r9, %rsi
; AVX1-NEXT: vmovq %rsi, %xmm2
; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm2[0],xmm0[0]
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; AVX1-NEXT: vmovdqa %xmm1, (%rdi)
; AVX1-NEXT: retq
;
; AVX2-LABEL: umulo_v2i64:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovq %xmm0, %r10
; AVX2-NEXT: vmovq %xmm1, %r8
; AVX2-NEXT: vpextrq $1, %xmm0, %rax
; AVX2-NEXT: vpextrq $1, %xmm1, %rdx
; AVX2-NEXT: xorl %esi, %esi
; AVX2-NEXT: mulq %rdx
; AVX2-NEXT: movq $-1, %r9
; AVX2-NEXT: movl $0, %ecx
; AVX2-NEXT: cmovoq %r9, %rcx
; AVX2-NEXT: vmovq %rax, %xmm0
; AVX2-NEXT: movq %r10, %rax
; AVX2-NEXT: mulq %r8
; AVX2-NEXT: vmovq %rax, %xmm1
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; AVX2-NEXT: vmovq %rcx, %xmm0
; AVX2-NEXT: cmovoq %r9, %rsi
; AVX2-NEXT: vmovq %rsi, %xmm2
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm2[0],xmm0[0]
; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; AVX2-NEXT: vmovdqa %xmm1, (%rdi)
; AVX2-NEXT: retq
;
; AVX512-LABEL: umulo_v2i64:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovq %xmm0, %rcx
; AVX512-NEXT: vmovq %xmm1, %rsi
; AVX512-NEXT: vpextrq $1, %xmm0, %rax
; AVX512-NEXT: vpextrq $1, %xmm1, %rdx
; AVX512-NEXT: mulq %rdx
; AVX512-NEXT: seto %r8b
; AVX512-NEXT: vmovq %rax, %xmm0
; AVX512-NEXT: movq %rcx, %rax
; AVX512-NEXT: mulq %rsi
; AVX512-NEXT: vmovq %rax, %xmm1
; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; AVX512-NEXT: seto %al
; AVX512-NEXT: movw $-3, %cx
; AVX512-NEXT: kmovd %ecx, %k0
; AVX512-NEXT: kmovd %eax, %k1
; AVX512-NEXT: kandw %k0, %k1, %k0
; AVX512-NEXT: kmovd %r8d, %k1
; AVX512-NEXT: kshiftlw $15, %k1, %k1
; AVX512-NEXT: kshiftrw $14, %k1, %k1
; AVX512-NEXT: korw %k1, %k0, %k1
; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
; AVX512-NEXT: vmovdqa32 %xmm0, %xmm0 {%k1} {z}
; AVX512-NEXT: vmovdqa %xmm1, (%rdi)
; AVX512-NEXT: retq
  %t = call {<2 x i64>, <2 x i1>} @llvm.umul.with.overflow.v2i64(<2 x i64> %a0, <2 x i64> %a1)
  %val = extractvalue {<2 x i64>, <2 x i1>} %t, 0
  %obit = extractvalue {<2 x i64>, <2 x i1>} %t, 1
  %res = sext <2 x i1> %obit to <2 x i32>
  store <2 x i64> %val, <2 x i64>* %p2
  ret <2 x i32> %res
}

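; i24 lanes are promoted to i32 by masking both operands with 0xFFFFFF
; (16777215). A lane then overflows if either the 32x32 product overflows
; 32 bits (the high-half comparison) or the low 32-bit product has any of
; bits 24-31 set (the psrld $24 comparison); the two conditions are OR'd
; together. Each 24-bit result is stored as a 16-bit word plus one byte.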
define <4 x i32> @umulo_v4i24(<4 x i24> %a0, <4 x i24> %a1, <4 x i24>* %p2) nounwind {
; SSE2-LABEL: umulo_v4i24:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [255,255,255,0,255,255,255,0,255,255,255,0,255,255,255,0]
; SSE2-NEXT: pand %xmm0, %xmm1
; SSE2-NEXT: pand %xmm0, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm1, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,3,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm0, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,3,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
; SSE2-NEXT: pxor %xmm4, %xmm4
; SSE2-NEXT: pcmpeqd %xmm4, %xmm3
; SSE2-NEXT: pcmpeqd %xmm5, %xmm5
; SSE2-NEXT: pxor %xmm5, %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm1[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1]
; SSE2-NEXT: psrld $24, %xmm0
; SSE2-NEXT: pcmpeqd %xmm4, %xmm0
; SSE2-NEXT: pxor %xmm5, %xmm0
; SSE2-NEXT: por %xmm3, %xmm0
; SSE2-NEXT: movd %xmm2, %eax
; SSE2-NEXT: movw %ax, (%rdi)
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,2,3]
; SSE2-NEXT: movd %xmm2, %ecx
; SSE2-NEXT: movw %cx, 6(%rdi)
; SSE2-NEXT: movd %xmm1, %edx
; SSE2-NEXT: movw %dx, 3(%rdi)
; SSE2-NEXT: shrl $16, %eax
; SSE2-NEXT: movb %al, 2(%rdi)
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,2,2,2]
; SSE2-NEXT: movd %xmm1, %eax
; SSE2-NEXT: movw %ax, 9(%rdi)
; SSE2-NEXT: shrl $16, %ecx
; SSE2-NEXT: movb %cl, 8(%rdi)
; SSE2-NEXT: shrl $16, %edx
; SSE2-NEXT: movb %dl, 5(%rdi)
; SSE2-NEXT: shrl $16, %eax
; SSE2-NEXT: movb %al, 11(%rdi)
; SSE2-NEXT: retq
;
; SSSE3-LABEL: umulo_v4i24:
; SSSE3: # %bb.0:
; SSSE3-NEXT: movdqa %xmm0, %xmm2
; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [255,255,255,0,255,255,255,0,255,255,255,0,255,255,255,0]
; SSSE3-NEXT: pand %xmm0, %xmm1
; SSSE3-NEXT: pand %xmm0, %xmm2
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
; SSSE3-NEXT: pmuludq %xmm1, %xmm2
; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,3,2,3]
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSSE3-NEXT: pmuludq %xmm0, %xmm1
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,3,2,3]
; SSSE3-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
; SSSE3-NEXT: pxor %xmm4, %xmm4
; SSSE3-NEXT: pcmpeqd %xmm4, %xmm3
; SSSE3-NEXT: pcmpeqd %xmm5, %xmm5
; SSSE3-NEXT: pxor %xmm5, %xmm3
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm1[0,2,2,3]
; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1]
; SSSE3-NEXT: psrld $24, %xmm0
; SSSE3-NEXT: pcmpeqd %xmm4, %xmm0
; SSSE3-NEXT: pxor %xmm5, %xmm0
; SSSE3-NEXT: por %xmm3, %xmm0
; SSSE3-NEXT: movd %xmm2, %eax
; SSSE3-NEXT: movw %ax, (%rdi)
; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,2,3]
; SSSE3-NEXT: movd %xmm2, %ecx
; SSSE3-NEXT: movw %cx, 6(%rdi)
; SSSE3-NEXT: movd %xmm1, %edx
; SSSE3-NEXT: movw %dx, 3(%rdi)
; SSSE3-NEXT: shrl $16, %eax
; SSSE3-NEXT: movb %al, 2(%rdi)
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,2,2,2]
; SSSE3-NEXT: movd %xmm1, %eax
; SSSE3-NEXT: movw %ax, 9(%rdi)
; SSSE3-NEXT: shrl $16, %ecx
; SSSE3-NEXT: movb %cl, 8(%rdi)
; SSSE3-NEXT: shrl $16, %edx
; SSSE3-NEXT: movb %dl, 5(%rdi)
; SSSE3-NEXT: shrl $16, %eax
; SSSE3-NEXT: movb %al, 11(%rdi)
; SSSE3-NEXT: retq
;
; SSE41-LABEL: umulo_v4i24:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [255,255,255,0,255,255,255,0,255,255,255,0,255,255,255,0]
; SSE41-NEXT: pand %xmm0, %xmm2
; SSE41-NEXT: pand %xmm0, %xmm1
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
; SSE41-NEXT: pmuludq %xmm0, %xmm3
; SSE41-NEXT: movdqa %xmm2, %xmm0
; SSE41-NEXT: pmuludq %xmm1, %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
; SSE41-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1],xmm3[2,3],xmm4[4,5],xmm3[6,7]
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: pcmpeqd %xmm0, %xmm4
; SSE41-NEXT: pcmpeqd %xmm3, %xmm3
; SSE41-NEXT: pxor %xmm3, %xmm4
; SSE41-NEXT: pmulld %xmm2, %xmm1
; SSE41-NEXT: pextrd $3, %xmm1, %eax
; SSE41-NEXT: pextrd $2, %xmm1, %ecx
; SSE41-NEXT: pextrd $1, %xmm1, %edx
; SSE41-NEXT: movd %xmm1, %esi
; SSE41-NEXT: psrld $24, %xmm1
; SSE41-NEXT: pcmpeqd %xmm1, %xmm0
; SSE41-NEXT: pxor %xmm3, %xmm0
; SSE41-NEXT: por %xmm4, %xmm0
; SSE41-NEXT: movw %ax, 9(%rdi)
; SSE41-NEXT: movw %cx, 6(%rdi)
; SSE41-NEXT: movw %dx, 3(%rdi)
; SSE41-NEXT: movw %si, (%rdi)
; SSE41-NEXT: shrl $16, %eax
; SSE41-NEXT: movb %al, 11(%rdi)
; SSE41-NEXT: shrl $16, %ecx
; SSE41-NEXT: movb %cl, 8(%rdi)
; SSE41-NEXT: shrl $16, %edx
; SSE41-NEXT: movb %dl, 5(%rdi)
; SSE41-NEXT: shrl $16, %esi
; SSE41-NEXT: movb %sil, 2(%rdi)
; SSE41-NEXT: retq
;
; AVX1-LABEL: umulo_v4i24:
; AVX1: # %bb.0:
; AVX1-NEXT: vbroadcastss {{.*#+}} xmm2 = [16777215,16777215,16777215,16777215]
; AVX1-NEXT: vandps %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vandps %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpermilps {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX1-NEXT: vpermilps {{.*#+}} xmm3 = xmm0[1,1,3,3]
; AVX1-NEXT: vpmuludq %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpmuludq %xmm1, %xmm0, %xmm3
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1],xmm2[2,3],xmm3[4,5],xmm2[6,7]
; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX1-NEXT: vpcmpeqd %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpcmpeqd %xmm4, %xmm4, %xmm4
; AVX1-NEXT: vpxor %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vpmulld %xmm1, %xmm0, %xmm1
; AVX1-NEXT: vpsrld $24, %xmm1, %xmm0
; AVX1-NEXT: vpcmpeqd %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpxor %xmm4, %xmm0, %xmm0
; AVX1-NEXT: vpor %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpextrd $3, %xmm1, %eax
; AVX1-NEXT: movw %ax, 9(%rdi)
; AVX1-NEXT: vpextrd $2, %xmm1, %ecx
; AVX1-NEXT: movw %cx, 6(%rdi)
; AVX1-NEXT: vpextrd $1, %xmm1, %edx
; AVX1-NEXT: movw %dx, 3(%rdi)
; AVX1-NEXT: vmovd %xmm1, %esi
; AVX1-NEXT: movw %si, (%rdi)
; AVX1-NEXT: shrl $16, %eax
; AVX1-NEXT: movb %al, 11(%rdi)
; AVX1-NEXT: shrl $16, %ecx
; AVX1-NEXT: movb %cl, 8(%rdi)
; AVX1-NEXT: shrl $16, %edx
; AVX1-NEXT: movb %dl, 5(%rdi)
; AVX1-NEXT: shrl $16, %esi
; AVX1-NEXT: movb %sil, 2(%rdi)
; AVX1-NEXT: retq
;
; AVX2-LABEL: umulo_v4i24:
; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [16777215,16777215,16777215,16777215]
; AVX2-NEXT: vpand %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vpand %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; AVX2-NEXT: vpmuludq %xmm2, %xmm3, %xmm2
; AVX2-NEXT: vpmuludq %xmm1, %xmm0, %xmm3
; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
; AVX2-NEXT: vpblendd {{.*#+}} xmm2 = xmm3[0],xmm2[1],xmm3[2],xmm2[3]
; AVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX2-NEXT: vpcmpeqd %xmm3, %xmm2, %xmm2
; AVX2-NEXT: vpcmpeqd %xmm4, %xmm4, %xmm4
; AVX2-NEXT: vpxor %xmm4, %xmm2, %xmm2
; AVX2-NEXT: vpmulld %xmm1, %xmm0, %xmm1
; AVX2-NEXT: vpsrld $24, %xmm1, %xmm0
; AVX2-NEXT: vpcmpeqd %xmm3, %xmm0, %xmm0
; AVX2-NEXT: vpxor %xmm4, %xmm0, %xmm0
; AVX2-NEXT: vpor %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vpextrd $3, %xmm1, %eax
; AVX2-NEXT: movw %ax, 9(%rdi)
; AVX2-NEXT: vpextrd $2, %xmm1, %ecx
; AVX2-NEXT: movw %cx, 6(%rdi)
; AVX2-NEXT: vpextrd $1, %xmm1, %edx
; AVX2-NEXT: movw %dx, 3(%rdi)
; AVX2-NEXT: vmovd %xmm1, %esi
; AVX2-NEXT: movw %si, (%rdi)
; AVX2-NEXT: shrl $16, %eax
; AVX2-NEXT: movb %al, 11(%rdi)
; AVX2-NEXT: shrl $16, %ecx
; AVX2-NEXT: movb %cl, 8(%rdi)
; AVX2-NEXT: shrl $16, %edx
; AVX2-NEXT: movb %dl, 5(%rdi)
; AVX2-NEXT: shrl $16, %esi
; AVX2-NEXT: movb %sil, 2(%rdi)
; AVX2-NEXT: retq
;
; AVX512-LABEL: umulo_v4i24:
; AVX512: # %bb.0:
; AVX512-NEXT: vpbroadcastd {{.*#+}} xmm2 = [16777215,16777215,16777215,16777215]
; AVX512-NEXT: vpand %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vpand %xmm2, %xmm0, %xmm0
; AVX512-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
; AVX512-NEXT: vpshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
; AVX512-NEXT: vpshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
; AVX512-NEXT: vpmuludq %xmm3, %xmm4, %xmm3
; AVX512-NEXT: vmovdqa {{.*#+}} xmm4 = [1,5,3,7]
; AVX512-NEXT: vpermi2d %xmm3, %xmm2, %xmm4
; AVX512-NEXT: vpmulld %xmm1, %xmm0, %xmm1
; AVX512-NEXT: vpsrld $24, %xmm1, %xmm0
; AVX512-NEXT: vpor %xmm4, %xmm0, %xmm0
; AVX512-NEXT: vptestmd %xmm0, %xmm0, %k1
; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
; AVX512-NEXT: vmovdqa32 %xmm0, %xmm0 {%k1} {z}
; AVX512-NEXT: vpextrd $3, %xmm1, %eax
; AVX512-NEXT: movw %ax, 9(%rdi)
; AVX512-NEXT: vpextrd $2, %xmm1, %ecx
; AVX512-NEXT: movw %cx, 6(%rdi)
; AVX512-NEXT: vpextrd $1, %xmm1, %edx
; AVX512-NEXT: movw %dx, 3(%rdi)
; AVX512-NEXT: vmovd %xmm1, %esi
; AVX512-NEXT: movw %si, (%rdi)
; AVX512-NEXT: shrl $16, %eax
; AVX512-NEXT: movb %al, 11(%rdi)
; AVX512-NEXT: shrl $16, %ecx
; AVX512-NEXT: movb %cl, 8(%rdi)
; AVX512-NEXT: shrl $16, %edx
; AVX512-NEXT: movb %dl, 5(%rdi)
; AVX512-NEXT: shrl $16, %esi
; AVX512-NEXT: movb %sil, 2(%rdi)
; AVX512-NEXT: retq
  %t = call {<4 x i24>, <4 x i1>} @llvm.umul.with.overflow.v4i24(<4 x i24> %a0, <4 x i24> %a1)
  %val = extractvalue {<4 x i24>, <4 x i1>} %t, 0
  %obit = extractvalue {<4 x i24>, <4 x i1>} %t, 1
  %res = sext <4 x i1> %obit to <4 x i32>
  store <4 x i24> %val, <4 x i24>* %p2
  ret <4 x i32> %res
}

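; The v4i1 case promotes the lanes to i32 first: the inputs are masked to
; bit 0, multiplied, and a lane is flagged if any bit above bit 0 of the
; promoted product is set (the psrld $1 / testb $-2 checks). Since a product
; of two one-bit values fits in one bit, those checks can never fire, but the
; generic lowering still emits them. The AVX512 path instead round-trips
; each lane through k-registers and a scalar mulb.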
define <4 x i32> @umulo_v4i1(<4 x i1> %a0, <4 x i1> %a1, <4 x i1>* %p2) nounwind {
|
|
; SSE2-LABEL: umulo_v4i1:
|
|
; SSE2: # %bb.0:
|
|
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [1,1,1,1]
|
|
; SSE2-NEXT: pand %xmm2, %xmm1
|
|
; SSE2-NEXT: pand %xmm2, %xmm0
|
|
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
|
|
; SSE2-NEXT: pmuludq %xmm1, %xmm0
|
|
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,3,2,3]
|
|
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
|
|
; SSE2-NEXT: pmuludq %xmm2, %xmm1
|
|
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,3,2,3]
|
|
; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
|
|
; SSE2-NEXT: pxor %xmm2, %xmm2
|
|
; SSE2-NEXT: pcmpeqd %xmm2, %xmm3
|
|
; SSE2-NEXT: pcmpeqd %xmm4, %xmm4
|
|
; SSE2-NEXT: pxor %xmm4, %xmm3
|
|
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm0[0,2,2,3]
|
|
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
|
|
; SSE2-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1]
|
|
; SSE2-NEXT: movdqa %xmm5, %xmm0
|
|
; SSE2-NEXT: psrld $1, %xmm0
|
|
; SSE2-NEXT: pcmpeqd %xmm2, %xmm0
|
|
; SSE2-NEXT: pxor %xmm4, %xmm0
|
|
; SSE2-NEXT: por %xmm3, %xmm0
|
|
; SSE2-NEXT: pslld $31, %xmm5
|
|
; SSE2-NEXT: movmskps %xmm5, %eax
|
|
; SSE2-NEXT: movb %al, (%rdi)
|
|
; SSE2-NEXT: retq
|
|
;
|
|
; SSSE3-LABEL: umulo_v4i1:
|
|
; SSSE3: # %bb.0:
|
|
; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [1,1,1,1]
|
|
; SSSE3-NEXT: pand %xmm2, %xmm1
|
|
; SSSE3-NEXT: pand %xmm2, %xmm0
|
|
; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
|
|
; SSSE3-NEXT: pmuludq %xmm1, %xmm0
|
|
; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,3,2,3]
|
|
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
|
|
; SSSE3-NEXT: pmuludq %xmm2, %xmm1
|
|
; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,3,2,3]
|
|
; SSSE3-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
|
|
; SSSE3-NEXT: pxor %xmm2, %xmm2
|
|
; SSSE3-NEXT: pcmpeqd %xmm2, %xmm3
|
|
; SSSE3-NEXT: pcmpeqd %xmm4, %xmm4
|
|
; SSSE3-NEXT: pxor %xmm4, %xmm3
|
|
; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm0[0,2,2,3]
|
|
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
|
|
; SSSE3-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1]
|
|
; SSSE3-NEXT: movdqa %xmm5, %xmm0
|
|
; SSSE3-NEXT: psrld $1, %xmm0
|
|
; SSSE3-NEXT: pcmpeqd %xmm2, %xmm0
|
|
; SSSE3-NEXT: pxor %xmm4, %xmm0
|
|
; SSSE3-NEXT: por %xmm3, %xmm0
|
|
; SSSE3-NEXT: pslld $31, %xmm5
|
|
; SSSE3-NEXT: movmskps %xmm5, %eax
|
|
; SSSE3-NEXT: movb %al, (%rdi)
|
|
; SSSE3-NEXT: retq
|
|
;
|
|
; SSE41-LABEL: umulo_v4i1:
|
|
; SSE41: # %bb.0:
|
|
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [1,1,1,1]
|
|
; SSE41-NEXT: pand %xmm2, %xmm0
|
|
; SSE41-NEXT: pand %xmm2, %xmm1
|
|
; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
|
|
; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
|
|
; SSE41-NEXT: pmuludq %xmm2, %xmm3
|
|
; SSE41-NEXT: movdqa %xmm0, %xmm2
|
|
; SSE41-NEXT: pmuludq %xmm1, %xmm2
|
|
; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm2[1,1,3,3]
|
|
; SSE41-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1],xmm3[2,3],xmm4[4,5],xmm3[6,7]
|
|
; SSE41-NEXT: pxor %xmm2, %xmm2
|
|
; SSE41-NEXT: pcmpeqd %xmm2, %xmm4
|
|
; SSE41-NEXT: pcmpeqd %xmm3, %xmm3
|
|
; SSE41-NEXT: pxor %xmm3, %xmm4
|
|
; SSE41-NEXT: pmaddwd %xmm0, %xmm1
|
|
; SSE41-NEXT: movdqa %xmm1, %xmm0
|
|
; SSE41-NEXT: psrld $1, %xmm0
|
|
; SSE41-NEXT: pcmpeqd %xmm0, %xmm2
|
|
; SSE41-NEXT: pxor %xmm3, %xmm2
|
|
; SSE41-NEXT: por %xmm4, %xmm2
|
|
; SSE41-NEXT: pslld $31, %xmm1
|
|
; SSE41-NEXT: movmskps %xmm1, %eax
|
|
; SSE41-NEXT: movb %al, (%rdi)
|
|
; SSE41-NEXT: movdqa %xmm2, %xmm0
|
|
; SSE41-NEXT: retq
|
|
;
|
|
; AVX1-LABEL: umulo_v4i1:
|
|
; AVX1: # %bb.0:
|
|
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [1,1,1,1]
|
|
; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
|
|
; AVX1-NEXT: vpand %xmm2, %xmm1, %xmm1
|
|
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
|
|
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
|
|
; AVX1-NEXT: vpmuludq %xmm2, %xmm3, %xmm2
|
|
; AVX1-NEXT: vpmuludq %xmm1, %xmm0, %xmm3
|
|
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
|
|
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1],xmm2[2,3],xmm3[4,5],xmm2[6,7]
|
|
; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
|
|
; AVX1-NEXT: vpcmpeqd %xmm3, %xmm2, %xmm2
|
|
; AVX1-NEXT: vpcmpeqd %xmm4, %xmm4, %xmm4
|
|
; AVX1-NEXT: vpxor %xmm4, %xmm2, %xmm2
|
|
; AVX1-NEXT: vpmaddwd %xmm1, %xmm0, %xmm1
|
|
; AVX1-NEXT: vpsrld $1, %xmm1, %xmm0
|
|
; AVX1-NEXT: vpcmpeqd %xmm3, %xmm0, %xmm0
|
|
; AVX1-NEXT: vpxor %xmm4, %xmm0, %xmm0
|
|
; AVX1-NEXT: vpor %xmm2, %xmm0, %xmm0
|
|
; AVX1-NEXT: vpslld $31, %xmm1, %xmm1
|
|
; AVX1-NEXT: vmovmskps %xmm1, %eax
|
|
; AVX1-NEXT: movb %al, (%rdi)
|
|
; AVX1-NEXT: retq
|
|
;
|
|
; AVX2-LABEL: umulo_v4i1:
|
|
; AVX2: # %bb.0:
|
|
; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [1,1,1,1]
|
|
; AVX2-NEXT: vpand %xmm2, %xmm0, %xmm0
|
|
; AVX2-NEXT: vpand %xmm2, %xmm1, %xmm1
|
|
; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
|
|
; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
|
|
; AVX2-NEXT: vpmuludq %xmm2, %xmm3, %xmm2
|
|
; AVX2-NEXT: vpmuludq %xmm1, %xmm0, %xmm3
|
|
; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
|
|
; AVX2-NEXT: vpblendd {{.*#+}} xmm2 = xmm3[0],xmm2[1],xmm3[2],xmm2[3]
|
|
; AVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
|
|
; AVX2-NEXT: vpcmpeqd %xmm3, %xmm2, %xmm2
|
|
; AVX2-NEXT: vpcmpeqd %xmm4, %xmm4, %xmm4
|
|
; AVX2-NEXT: vpxor %xmm4, %xmm2, %xmm2
; AVX2-NEXT: vpmaddwd %xmm1, %xmm0, %xmm1
; AVX2-NEXT: vpsrld $1, %xmm1, %xmm0
; AVX2-NEXT: vpcmpeqd %xmm3, %xmm0, %xmm0
; AVX2-NEXT: vpxor %xmm4, %xmm0, %xmm0
; AVX2-NEXT: vpor %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vpslld $31, %xmm1, %xmm1
; AVX2-NEXT: vmovmskps %xmm1, %eax
; AVX2-NEXT: movb %al, (%rdi)
; AVX2-NEXT: retq
;
; AVX512-LABEL: umulo_v4i1:
; AVX512: # %bb.0:
; AVX512-NEXT: pushq %rbp
; AVX512-NEXT: pushq %rbx
; AVX512-NEXT: vpslld $31, %xmm0, %xmm0
; AVX512-NEXT: vptestmd %xmm0, %xmm0, %k0
; AVX512-NEXT: kshiftrw $3, %k0, %k1
; AVX512-NEXT: kmovd %k1, %r8d
; AVX512-NEXT: andb $1, %r8b
; AVX512-NEXT: vpslld $31, %xmm1, %xmm0
; AVX512-NEXT: vptestmd %xmm0, %xmm0, %k1
; AVX512-NEXT: kshiftrw $3, %k1, %k2
; AVX512-NEXT: kmovd %k2, %r9d
; AVX512-NEXT: andb $1, %r9b
; AVX512-NEXT: kshiftrw $2, %k0, %k2
; AVX512-NEXT: kmovd %k2, %r10d
; AVX512-NEXT: andb $1, %r10b
; AVX512-NEXT: kshiftrw $2, %k1, %k2
; AVX512-NEXT: kmovd %k2, %r11d
; AVX512-NEXT: andb $1, %r11b
; AVX512-NEXT: kshiftrw $1, %k0, %k2
; AVX512-NEXT: kmovd %k2, %ecx
; AVX512-NEXT: andb $1, %cl
; AVX512-NEXT: kshiftrw $1, %k1, %k2
; AVX512-NEXT: kmovd %k2, %esi
; AVX512-NEXT: andb $1, %sil
; AVX512-NEXT: kmovd %k0, %eax
; AVX512-NEXT: andb $1, %al
; AVX512-NEXT: kmovd %k1, %edx
; AVX512-NEXT: andb $1, %dl
; AVX512-NEXT: # kill: def $al killed $al killed $eax
; AVX512-NEXT: mulb %dl
; AVX512-NEXT: movl %eax, %edx
; AVX512-NEXT: seto %al
; AVX512-NEXT: testb $-2, %dl
; AVX512-NEXT: setne %bl
; AVX512-NEXT: orb %al, %bl
; AVX512-NEXT: setne %al
; AVX512-NEXT: kmovd %eax, %k1
; AVX512-NEXT: movw $-3, %ax
; AVX512-NEXT: kmovd %eax, %k0
; AVX512-NEXT: kandw %k0, %k1, %k1
; AVX512-NEXT: movl %ecx, %eax
; AVX512-NEXT: mulb %sil
; AVX512-NEXT: movl %eax, %ebp
; AVX512-NEXT: seto %al
; AVX512-NEXT: testb $-2, %bpl
; AVX512-NEXT: setne %bl
; AVX512-NEXT: orb %al, %bl
; AVX512-NEXT: setne %al
; AVX512-NEXT: kmovd %eax, %k2
; AVX512-NEXT: kshiftlw $15, %k2, %k2
; AVX512-NEXT: kshiftrw $14, %k2, %k2
; AVX512-NEXT: korw %k2, %k1, %k2
; AVX512-NEXT: movw $-5, %ax
; AVX512-NEXT: kmovd %eax, %k1
; AVX512-NEXT: kandw %k1, %k2, %k2
; AVX512-NEXT: movl %r10d, %eax
; AVX512-NEXT: mulb %r11b
; AVX512-NEXT: movl %eax, %esi
; AVX512-NEXT: seto %al
; AVX512-NEXT: testb $-2, %sil
; AVX512-NEXT: setne %bl
; AVX512-NEXT: orb %al, %bl
; AVX512-NEXT: setne %al
; AVX512-NEXT: kmovd %eax, %k3
; AVX512-NEXT: kshiftlw $2, %k3, %k3
; AVX512-NEXT: korw %k3, %k2, %k2
; AVX512-NEXT: kshiftlw $13, %k2, %k2
; AVX512-NEXT: kshiftrw $13, %k2, %k2
; AVX512-NEXT: movl %r8d, %eax
; AVX512-NEXT: mulb %r9b
; AVX512-NEXT: # kill: def $al killed $al def $eax
; AVX512-NEXT: seto %bl
; AVX512-NEXT: testb $-2, %al
; AVX512-NEXT: setne %cl
; AVX512-NEXT: orb %bl, %cl
; AVX512-NEXT: setne %cl
; AVX512-NEXT: kmovd %ecx, %k3
; AVX512-NEXT: kshiftlw $3, %k3, %k3
; AVX512-NEXT: korw %k3, %k2, %k2
; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
; AVX512-NEXT: vmovdqa32 %xmm0, %xmm0 {%k2} {z}
; AVX512-NEXT: andl $1, %edx
; AVX512-NEXT: kmovw %edx, %k2
; AVX512-NEXT: kandw %k0, %k2, %k0
; AVX512-NEXT: kmovd %ebp, %k2
; AVX512-NEXT: kshiftlw $15, %k2, %k2
; AVX512-NEXT: kshiftrw $14, %k2, %k2
; AVX512-NEXT: korw %k2, %k0, %k0
; AVX512-NEXT: kandw %k1, %k0, %k0
; AVX512-NEXT: kmovd %esi, %k1
; AVX512-NEXT: kshiftlw $15, %k1, %k1
; AVX512-NEXT: kshiftrw $13, %k1, %k1
; AVX512-NEXT: korw %k1, %k0, %k0
; AVX512-NEXT: movw $-9, %cx
; AVX512-NEXT: kmovd %ecx, %k1
; AVX512-NEXT: kandw %k1, %k0, %k0
; AVX512-NEXT: kmovd %eax, %k1
; AVX512-NEXT: kshiftlw $15, %k1, %k1
; AVX512-NEXT: kshiftrw $12, %k1, %k1
; AVX512-NEXT: korw %k1, %k0, %k0
; AVX512-NEXT: kmovd %k0, %eax
; AVX512-NEXT: movb %al, (%rdi)
; AVX512-NEXT: popq %rbx
; AVX512-NEXT: popq %rbp
; AVX512-NEXT: retq
%t = call {<4 x i1>, <4 x i1>} @llvm.umul.with.overflow.v4i1(<4 x i1> %a0, <4 x i1> %a1)
%val = extractvalue {<4 x i1>, <4 x i1>} %t, 0
%obit = extractvalue {<4 x i1>, <4 x i1>} %t, 1
%res = sext <4 x i1> %obit to <4 x i32>
store <4 x i1> %val, <4 x i1>* %p2
ret <4 x i32> %res
}
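
; umulo of <2 x i128> has no vector lowering and is scalarized; as the checks
; below show, each 128-bit multiply is expanded into three 64-bit mulq ops,
; and the per-lane overflow bit is the OR of (a) both operands' high halves
; being nonzero, (b) either cross-product mulq setting OF, and (c) the carry
; out of adding the cross products into the high half of the low product.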
define <2 x i32> @umulo_v2i128(<2 x i128> %a0, <2 x i128> %a1, <2 x i128>* %p2) nounwind {
; SSE2-LABEL: umulo_v2i128:
; SSE2: # %bb.0:
; SSE2-NEXT: pushq %rbp
; SSE2-NEXT: pushq %r15
; SSE2-NEXT: pushq %r14
; SSE2-NEXT: pushq %r13
; SSE2-NEXT: pushq %r12
; SSE2-NEXT: pushq %rbx
; SSE2-NEXT: movq %r9, %r10
; SSE2-NEXT: movq %rcx, %r12
; SSE2-NEXT: movq %rdx, %r11
; SSE2-NEXT: movq %rsi, %rax
; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %r14
; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %r15
; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %r9
; SSE2-NEXT: testq %r10, %r10
; SSE2-NEXT: setne %cl
; SSE2-NEXT: testq %rsi, %rsi
; SSE2-NEXT: setne %r13b
; SSE2-NEXT: andb %cl, %r13b
; SSE2-NEXT: mulq %r8
; SSE2-NEXT: movq %rax, %rsi
; SSE2-NEXT: seto %bpl
; SSE2-NEXT: movq %r10, %rax
; SSE2-NEXT: mulq %rdi
; SSE2-NEXT: movq %rax, %rcx
; SSE2-NEXT: seto %bl
; SSE2-NEXT: orb %bpl, %bl
; SSE2-NEXT: addq %rsi, %rcx
; SSE2-NEXT: movq %rdi, %rax
; SSE2-NEXT: mulq %r8
; SSE2-NEXT: movq %rax, %rdi
; SSE2-NEXT: movq %rdx, %rsi
; SSE2-NEXT: addq %rcx, %rsi
; SSE2-NEXT: setb %cl
; SSE2-NEXT: orb %bl, %cl
; SSE2-NEXT: orb %r13b, %cl
; SSE2-NEXT: testq %r9, %r9
; SSE2-NEXT: setne %al
; SSE2-NEXT: testq %r12, %r12
; SSE2-NEXT: setne %r8b
; SSE2-NEXT: andb %al, %r8b
; SSE2-NEXT: movq %r12, %rax
; SSE2-NEXT: mulq %r15
; SSE2-NEXT: movq %rax, %rbp
; SSE2-NEXT: seto %r10b
; SSE2-NEXT: movq %r9, %rax
; SSE2-NEXT: mulq %r11
; SSE2-NEXT: movq %rax, %rbx
; SSE2-NEXT: seto %r9b
; SSE2-NEXT: orb %r10b, %r9b
; SSE2-NEXT: addq %rbp, %rbx
; SSE2-NEXT: movq %r11, %rax
; SSE2-NEXT: mulq %r15
; SSE2-NEXT: addq %rbx, %rdx
; SSE2-NEXT: setb %bl
; SSE2-NEXT: orb %r9b, %bl
; SSE2-NEXT: orb %r8b, %bl
; SSE2-NEXT: movzbl %bl, %ebp
; SSE2-NEXT: negl %ebp
; SSE2-NEXT: movd %ebp, %xmm1
; SSE2-NEXT: movzbl %cl, %ecx
; SSE2-NEXT: negl %ecx
; SSE2-NEXT: movd %ecx, %xmm0
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: movq %rax, 16(%r14)
; SSE2-NEXT: movq %rdi, (%r14)
; SSE2-NEXT: movq %rdx, 24(%r14)
; SSE2-NEXT: movq %rsi, 8(%r14)
; SSE2-NEXT: popq %rbx
; SSE2-NEXT: popq %r12
; SSE2-NEXT: popq %r13
; SSE2-NEXT: popq %r14
; SSE2-NEXT: popq %r15
; SSE2-NEXT: popq %rbp
; SSE2-NEXT: retq
;
; SSSE3-LABEL: umulo_v2i128:
; SSSE3: # %bb.0:
; SSSE3-NEXT: pushq %rbp
; SSSE3-NEXT: pushq %r15
; SSSE3-NEXT: pushq %r14
; SSSE3-NEXT: pushq %r13
; SSSE3-NEXT: pushq %r12
; SSSE3-NEXT: pushq %rbx
; SSSE3-NEXT: movq %r9, %r10
; SSSE3-NEXT: movq %rcx, %r12
; SSSE3-NEXT: movq %rdx, %r11
; SSSE3-NEXT: movq %rsi, %rax
; SSSE3-NEXT: movq {{[0-9]+}}(%rsp), %r14
; SSSE3-NEXT: movq {{[0-9]+}}(%rsp), %r15
; SSSE3-NEXT: movq {{[0-9]+}}(%rsp), %r9
; SSSE3-NEXT: testq %r10, %r10
; SSSE3-NEXT: setne %cl
; SSSE3-NEXT: testq %rsi, %rsi
; SSSE3-NEXT: setne %r13b
; SSSE3-NEXT: andb %cl, %r13b
; SSSE3-NEXT: mulq %r8
; SSSE3-NEXT: movq %rax, %rsi
; SSSE3-NEXT: seto %bpl
; SSSE3-NEXT: movq %r10, %rax
; SSSE3-NEXT: mulq %rdi
; SSSE3-NEXT: movq %rax, %rcx
; SSSE3-NEXT: seto %bl
; SSSE3-NEXT: orb %bpl, %bl
; SSSE3-NEXT: addq %rsi, %rcx
; SSSE3-NEXT: movq %rdi, %rax
; SSSE3-NEXT: mulq %r8
; SSSE3-NEXT: movq %rax, %rdi
; SSSE3-NEXT: movq %rdx, %rsi
; SSSE3-NEXT: addq %rcx, %rsi
; SSSE3-NEXT: setb %cl
; SSSE3-NEXT: orb %bl, %cl
; SSSE3-NEXT: orb %r13b, %cl
; SSSE3-NEXT: testq %r9, %r9
; SSSE3-NEXT: setne %al
; SSSE3-NEXT: testq %r12, %r12
; SSSE3-NEXT: setne %r8b
; SSSE3-NEXT: andb %al, %r8b
; SSSE3-NEXT: movq %r12, %rax
; SSSE3-NEXT: mulq %r15
; SSSE3-NEXT: movq %rax, %rbp
; SSSE3-NEXT: seto %r10b
; SSSE3-NEXT: movq %r9, %rax
; SSSE3-NEXT: mulq %r11
; SSSE3-NEXT: movq %rax, %rbx
; SSSE3-NEXT: seto %r9b
; SSSE3-NEXT: orb %r10b, %r9b
; SSSE3-NEXT: addq %rbp, %rbx
; SSSE3-NEXT: movq %r11, %rax
; SSSE3-NEXT: mulq %r15
; SSSE3-NEXT: addq %rbx, %rdx
; SSSE3-NEXT: setb %bl
; SSSE3-NEXT: orb %r9b, %bl
; SSSE3-NEXT: orb %r8b, %bl
; SSSE3-NEXT: movzbl %bl, %ebp
; SSSE3-NEXT: negl %ebp
; SSSE3-NEXT: movd %ebp, %xmm1
; SSSE3-NEXT: movzbl %cl, %ecx
; SSSE3-NEXT: negl %ecx
; SSSE3-NEXT: movd %ecx, %xmm0
; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSSE3-NEXT: movq %rax, 16(%r14)
; SSSE3-NEXT: movq %rdi, (%r14)
; SSSE3-NEXT: movq %rdx, 24(%r14)
; SSSE3-NEXT: movq %rsi, 8(%r14)
; SSSE3-NEXT: popq %rbx
; SSSE3-NEXT: popq %r12
; SSSE3-NEXT: popq %r13
; SSSE3-NEXT: popq %r14
; SSSE3-NEXT: popq %r15
; SSSE3-NEXT: popq %rbp
; SSSE3-NEXT: retq
;
; SSE41-LABEL: umulo_v2i128:
; SSE41: # %bb.0:
; SSE41-NEXT: pushq %rbp
; SSE41-NEXT: pushq %r15
; SSE41-NEXT: pushq %r14
; SSE41-NEXT: pushq %r13
; SSE41-NEXT: pushq %r12
; SSE41-NEXT: pushq %rbx
; SSE41-NEXT: movq %r9, %r10
; SSE41-NEXT: movq %rcx, %r12
; SSE41-NEXT: movq %rdx, %r11
; SSE41-NEXT: movq %rsi, %rax
; SSE41-NEXT: movq {{[0-9]+}}(%rsp), %r14
; SSE41-NEXT: movq {{[0-9]+}}(%rsp), %r15
; SSE41-NEXT: movq {{[0-9]+}}(%rsp), %r9
; SSE41-NEXT: testq %r10, %r10
; SSE41-NEXT: setne %cl
; SSE41-NEXT: testq %rsi, %rsi
; SSE41-NEXT: setne %r13b
; SSE41-NEXT: andb %cl, %r13b
; SSE41-NEXT: mulq %r8
; SSE41-NEXT: movq %rax, %rsi
; SSE41-NEXT: seto %bpl
; SSE41-NEXT: movq %r10, %rax
; SSE41-NEXT: mulq %rdi
; SSE41-NEXT: movq %rax, %rcx
; SSE41-NEXT: seto %bl
; SSE41-NEXT: orb %bpl, %bl
; SSE41-NEXT: addq %rsi, %rcx
; SSE41-NEXT: movq %rdi, %rax
; SSE41-NEXT: mulq %r8
; SSE41-NEXT: movq %rax, %rdi
; SSE41-NEXT: movq %rdx, %rsi
; SSE41-NEXT: addq %rcx, %rsi
; SSE41-NEXT: setb %cl
; SSE41-NEXT: orb %bl, %cl
; SSE41-NEXT: orb %r13b, %cl
; SSE41-NEXT: testq %r9, %r9
; SSE41-NEXT: setne %al
; SSE41-NEXT: testq %r12, %r12
; SSE41-NEXT: setne %r8b
; SSE41-NEXT: andb %al, %r8b
; SSE41-NEXT: movq %r12, %rax
; SSE41-NEXT: mulq %r15
; SSE41-NEXT: movq %rax, %rbp
; SSE41-NEXT: seto %r10b
; SSE41-NEXT: movq %r9, %rax
; SSE41-NEXT: mulq %r11
; SSE41-NEXT: movq %rax, %rbx
; SSE41-NEXT: seto %r9b
; SSE41-NEXT: orb %r10b, %r9b
; SSE41-NEXT: addq %rbp, %rbx
; SSE41-NEXT: movq %r11, %rax
; SSE41-NEXT: mulq %r15
; SSE41-NEXT: addq %rbx, %rdx
; SSE41-NEXT: setb %bl
; SSE41-NEXT: orb %r9b, %bl
; SSE41-NEXT: orb %r8b, %bl
; SSE41-NEXT: movzbl %bl, %ebp
; SSE41-NEXT: negl %ebp
; SSE41-NEXT: movzbl %cl, %ecx
; SSE41-NEXT: negl %ecx
; SSE41-NEXT: movd %ecx, %xmm0
; SSE41-NEXT: pinsrd $1, %ebp, %xmm0
; SSE41-NEXT: movq %rax, 16(%r14)
; SSE41-NEXT: movq %rdi, (%r14)
; SSE41-NEXT: movq %rdx, 24(%r14)
; SSE41-NEXT: movq %rsi, 8(%r14)
; SSE41-NEXT: popq %rbx
; SSE41-NEXT: popq %r12
; SSE41-NEXT: popq %r13
; SSE41-NEXT: popq %r14
; SSE41-NEXT: popq %r15
; SSE41-NEXT: popq %rbp
; SSE41-NEXT: retq
;
; AVX1-LABEL: umulo_v2i128:
; AVX1: # %bb.0:
; AVX1-NEXT: pushq %rbp
; AVX1-NEXT: pushq %r15
; AVX1-NEXT: pushq %r14
; AVX1-NEXT: pushq %r13
; AVX1-NEXT: pushq %r12
; AVX1-NEXT: pushq %rbx
; AVX1-NEXT: movq %r9, %r10
; AVX1-NEXT: movq %rcx, %r12
; AVX1-NEXT: movq %rdx, %r11
; AVX1-NEXT: movq %rsi, %rax
; AVX1-NEXT: movq {{[0-9]+}}(%rsp), %r14
; AVX1-NEXT: movq {{[0-9]+}}(%rsp), %r15
; AVX1-NEXT: movq {{[0-9]+}}(%rsp), %r9
; AVX1-NEXT: testq %r10, %r10
; AVX1-NEXT: setne %cl
; AVX1-NEXT: testq %rsi, %rsi
; AVX1-NEXT: setne %r13b
; AVX1-NEXT: andb %cl, %r13b
; AVX1-NEXT: mulq %r8
; AVX1-NEXT: movq %rax, %rsi
; AVX1-NEXT: seto %bpl
; AVX1-NEXT: movq %r10, %rax
; AVX1-NEXT: mulq %rdi
; AVX1-NEXT: movq %rax, %rcx
; AVX1-NEXT: seto %bl
; AVX1-NEXT: orb %bpl, %bl
; AVX1-NEXT: addq %rsi, %rcx
; AVX1-NEXT: movq %rdi, %rax
; AVX1-NEXT: mulq %r8
; AVX1-NEXT: movq %rax, %rdi
; AVX1-NEXT: movq %rdx, %rsi
; AVX1-NEXT: addq %rcx, %rsi
; AVX1-NEXT: setb %cl
; AVX1-NEXT: orb %bl, %cl
; AVX1-NEXT: orb %r13b, %cl
; AVX1-NEXT: testq %r9, %r9
; AVX1-NEXT: setne %al
; AVX1-NEXT: testq %r12, %r12
; AVX1-NEXT: setne %r8b
; AVX1-NEXT: andb %al, %r8b
; AVX1-NEXT: movq %r12, %rax
; AVX1-NEXT: mulq %r15
; AVX1-NEXT: movq %rax, %rbp
; AVX1-NEXT: seto %r10b
; AVX1-NEXT: movq %r9, %rax
; AVX1-NEXT: mulq %r11
; AVX1-NEXT: movq %rax, %rbx
; AVX1-NEXT: seto %r9b
; AVX1-NEXT: orb %r10b, %r9b
; AVX1-NEXT: addq %rbp, %rbx
; AVX1-NEXT: movq %r11, %rax
; AVX1-NEXT: mulq %r15
; AVX1-NEXT: addq %rbx, %rdx
; AVX1-NEXT: setb %bl
; AVX1-NEXT: orb %r9b, %bl
; AVX1-NEXT: orb %r8b, %bl
; AVX1-NEXT: movzbl %bl, %ebp
; AVX1-NEXT: negl %ebp
; AVX1-NEXT: movzbl %cl, %ecx
; AVX1-NEXT: negl %ecx
; AVX1-NEXT: vmovd %ecx, %xmm0
; AVX1-NEXT: vpinsrd $1, %ebp, %xmm0, %xmm0
; AVX1-NEXT: movq %rax, 16(%r14)
; AVX1-NEXT: movq %rdi, (%r14)
; AVX1-NEXT: movq %rdx, 24(%r14)
; AVX1-NEXT: movq %rsi, 8(%r14)
; AVX1-NEXT: popq %rbx
; AVX1-NEXT: popq %r12
; AVX1-NEXT: popq %r13
; AVX1-NEXT: popq %r14
; AVX1-NEXT: popq %r15
; AVX1-NEXT: popq %rbp
; AVX1-NEXT: retq
;
; AVX2-LABEL: umulo_v2i128:
; AVX2: # %bb.0:
; AVX2-NEXT: pushq %rbp
; AVX2-NEXT: pushq %r15
; AVX2-NEXT: pushq %r14
; AVX2-NEXT: pushq %r13
; AVX2-NEXT: pushq %r12
; AVX2-NEXT: pushq %rbx
; AVX2-NEXT: movq %r9, %r10
; AVX2-NEXT: movq %rcx, %r12
; AVX2-NEXT: movq %rdx, %r11
; AVX2-NEXT: movq %rsi, %rax
; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r14
; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r15
; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r9
; AVX2-NEXT: testq %r10, %r10
; AVX2-NEXT: setne %cl
; AVX2-NEXT: testq %rsi, %rsi
; AVX2-NEXT: setne %r13b
; AVX2-NEXT: andb %cl, %r13b
; AVX2-NEXT: mulq %r8
; AVX2-NEXT: movq %rax, %rsi
; AVX2-NEXT: seto %bpl
; AVX2-NEXT: movq %r10, %rax
; AVX2-NEXT: mulq %rdi
; AVX2-NEXT: movq %rax, %rcx
; AVX2-NEXT: seto %bl
; AVX2-NEXT: orb %bpl, %bl
; AVX2-NEXT: addq %rsi, %rcx
; AVX2-NEXT: movq %rdi, %rax
; AVX2-NEXT: mulq %r8
; AVX2-NEXT: movq %rax, %rdi
; AVX2-NEXT: movq %rdx, %rsi
; AVX2-NEXT: addq %rcx, %rsi
; AVX2-NEXT: setb %cl
; AVX2-NEXT: orb %bl, %cl
; AVX2-NEXT: orb %r13b, %cl
; AVX2-NEXT: testq %r9, %r9
; AVX2-NEXT: setne %al
; AVX2-NEXT: testq %r12, %r12
; AVX2-NEXT: setne %r8b
; AVX2-NEXT: andb %al, %r8b
; AVX2-NEXT: movq %r12, %rax
; AVX2-NEXT: mulq %r15
; AVX2-NEXT: movq %rax, %rbp
; AVX2-NEXT: seto %r10b
; AVX2-NEXT: movq %r9, %rax
; AVX2-NEXT: mulq %r11
; AVX2-NEXT: movq %rax, %rbx
; AVX2-NEXT: seto %r9b
; AVX2-NEXT: orb %r10b, %r9b
; AVX2-NEXT: addq %rbp, %rbx
; AVX2-NEXT: movq %r11, %rax
; AVX2-NEXT: mulq %r15
; AVX2-NEXT: addq %rbx, %rdx
; AVX2-NEXT: setb %bl
; AVX2-NEXT: orb %r9b, %bl
; AVX2-NEXT: orb %r8b, %bl
; AVX2-NEXT: movzbl %bl, %ebp
; AVX2-NEXT: negl %ebp
; AVX2-NEXT: movzbl %cl, %ecx
; AVX2-NEXT: negl %ecx
; AVX2-NEXT: vmovd %ecx, %xmm0
; AVX2-NEXT: vpinsrd $1, %ebp, %xmm0, %xmm0
; AVX2-NEXT: movq %rax, 16(%r14)
; AVX2-NEXT: movq %rdi, (%r14)
; AVX2-NEXT: movq %rdx, 24(%r14)
; AVX2-NEXT: movq %rsi, 8(%r14)
; AVX2-NEXT: popq %rbx
; AVX2-NEXT: popq %r12
; AVX2-NEXT: popq %r13
; AVX2-NEXT: popq %r14
; AVX2-NEXT: popq %r15
; AVX2-NEXT: popq %rbp
; AVX2-NEXT: retq
;
; AVX512-LABEL: umulo_v2i128:
; AVX512: # %bb.0:
; AVX512-NEXT: pushq %rbp
; AVX512-NEXT: pushq %r15
; AVX512-NEXT: pushq %r14
; AVX512-NEXT: pushq %r13
; AVX512-NEXT: pushq %r12
; AVX512-NEXT: pushq %rbx
; AVX512-NEXT: movq %rcx, %rax
; AVX512-NEXT: movq %rdx, %r12
; AVX512-NEXT: movq %rdi, %r11
; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r14
; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r15
; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r10
; AVX512-NEXT: testq %r10, %r10
; AVX512-NEXT: setne %dl
; AVX512-NEXT: testq %rcx, %rcx
; AVX512-NEXT: setne %r13b
; AVX512-NEXT: andb %dl, %r13b
; AVX512-NEXT: mulq %r15
; AVX512-NEXT: movq %rax, %rdi
; AVX512-NEXT: seto %bpl
; AVX512-NEXT: movq %r10, %rax
; AVX512-NEXT: mulq %r12
; AVX512-NEXT: movq %rax, %rbx
; AVX512-NEXT: seto %cl
; AVX512-NEXT: orb %bpl, %cl
; AVX512-NEXT: addq %rdi, %rbx
; AVX512-NEXT: movq %r12, %rax
; AVX512-NEXT: mulq %r15
; AVX512-NEXT: movq %rax, %r10
; AVX512-NEXT: movq %rdx, %r15
; AVX512-NEXT: addq %rbx, %r15
; AVX512-NEXT: setb %al
; AVX512-NEXT: orb %cl, %al
; AVX512-NEXT: orb %r13b, %al
; AVX512-NEXT: kmovd %eax, %k0
; AVX512-NEXT: testq %r9, %r9
; AVX512-NEXT: setne %al
; AVX512-NEXT: testq %rsi, %rsi
; AVX512-NEXT: setne %cl
; AVX512-NEXT: andb %al, %cl
; AVX512-NEXT: movq %rsi, %rax
; AVX512-NEXT: mulq %r8
; AVX512-NEXT: movq %rax, %rsi
; AVX512-NEXT: seto %bpl
; AVX512-NEXT: movq %r9, %rax
; AVX512-NEXT: mulq %r11
; AVX512-NEXT: movq %rax, %rdi
; AVX512-NEXT: seto %bl
; AVX512-NEXT: orb %bpl, %bl
; AVX512-NEXT: addq %rsi, %rdi
; AVX512-NEXT: movq %r11, %rax
; AVX512-NEXT: mulq %r8
; AVX512-NEXT: addq %rdi, %rdx
; AVX512-NEXT: setb %sil
; AVX512-NEXT: orb %bl, %sil
; AVX512-NEXT: orb %cl, %sil
; AVX512-NEXT: andl $1, %esi
; AVX512-NEXT: kmovw %esi, %k1
; AVX512-NEXT: kshiftlw $1, %k0, %k0
; AVX512-NEXT: korw %k0, %k1, %k1
; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
; AVX512-NEXT: vmovdqa32 %xmm0, %xmm0 {%k1} {z}
; AVX512-NEXT: movq %r10, 16(%r14)
; AVX512-NEXT: movq %rax, (%r14)
; AVX512-NEXT: movq %r15, 24(%r14)
; AVX512-NEXT: movq %rdx, 8(%r14)
; AVX512-NEXT: popq %rbx
; AVX512-NEXT: popq %r12
; AVX512-NEXT: popq %r13
; AVX512-NEXT: popq %r14
; AVX512-NEXT: popq %r15
; AVX512-NEXT: popq %rbp
; AVX512-NEXT: retq
%t = call {<2 x i128>, <2 x i1>} @llvm.umul.with.overflow.v2i128(<2 x i128> %a0, <2 x i128> %a1)
%val = extractvalue {<2 x i128>, <2 x i1>} %t, 0
%obit = extractvalue {<2 x i128>, <2 x i1>} %t, 1
%res = sext <2 x i1> %obit to <2 x i32>
store <2 x i128> %val, <2 x i128>* %p2
ret <2 x i32> %res
}