; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=SSE2 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX --check-prefix=AVX1 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX2 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=AVX --check-prefix=AVX512 --check-prefix=AVX512F ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw | FileCheck %s --check-prefix=AVX --check-prefix=AVX512 --check-prefix=AVX512BW define void @avg_v4i8(<4 x i8>* %a, <4 x i8>* %b) nounwind { ; SSE2-LABEL: avg_v4i8: ; SSE2: # %bb.0: ; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero ; SSE2-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero ; SSE2-NEXT: pavgb %xmm0, %xmm1 ; SSE2-NEXT: movd %xmm1, (%rax) ; SSE2-NEXT: retq ; ; AVX-LABEL: avg_v4i8: ; AVX: # %bb.0: ; AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero ; AVX-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero ; AVX-NEXT: vpavgb %xmm0, %xmm1, %xmm0 ; AVX-NEXT: vmovd %xmm0, (%rax) ; AVX-NEXT: retq %1 = load <4 x i8>, <4 x i8>* %a %2 = load <4 x i8>, <4 x i8>* %b %3 = zext <4 x i8> %1 to <4 x i32> %4 = zext <4 x i8> %2 to <4 x i32> %5 = add nuw nsw <4 x i32> %3, %6 = add nuw nsw <4 x i32> %5, %4 %7 = lshr <4 x i32> %6, %8 = trunc <4 x i32> %7 to <4 x i8> store <4 x i8> %8, <4 x i8>* undef, align 4 ret void } define void @avg_v8i8(<8 x i8>* %a, <8 x i8>* %b) nounwind { ; SSE2-LABEL: avg_v8i8: ; SSE2: # %bb.0: ; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero ; SSE2-NEXT: movq {{.*#+}} xmm1 = mem[0],zero ; SSE2-NEXT: pavgb %xmm0, %xmm1 ; SSE2-NEXT: movq %xmm1, (%rax) ; SSE2-NEXT: retq ; ; AVX-LABEL: avg_v8i8: ; AVX: # %bb.0: ; AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero ; AVX-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero ; AVX-NEXT: vpavgb %xmm0, %xmm1, %xmm0 ; AVX-NEXT: vmovq %xmm0, (%rax) ; AVX-NEXT: retq %1 = load <8 x i8>, <8 x i8>* %a %2 = load <8 x i8>, <8 x i8>* %b %3 = zext <8 x i8> %1 to <8 x i32> %4 = zext <8 x i8> %2 to <8 x i32> %5 = add nuw nsw <8 x i32> %3, %6 = add nuw nsw <8 x i32> %5, %4 %7 = lshr <8 x i32> %6, %8 = trunc <8 x i32> %7 to <8 x i8> store <8 x i8> %8, <8 x i8>* undef, align 4 ret void } define void @avg_v16i8(<16 x i8>* %a, <16 x i8>* %b) nounwind { ; SSE2-LABEL: avg_v16i8: ; SSE2: # %bb.0: ; SSE2-NEXT: movdqa (%rsi), %xmm0 ; SSE2-NEXT: pavgb (%rdi), %xmm0 ; SSE2-NEXT: movdqu %xmm0, (%rax) ; SSE2-NEXT: retq ; ; AVX-LABEL: avg_v16i8: ; AVX: # %bb.0: ; AVX-NEXT: vmovdqa (%rsi), %xmm0 ; AVX-NEXT: vpavgb (%rdi), %xmm0, %xmm0 ; AVX-NEXT: vmovdqu %xmm0, (%rax) ; AVX-NEXT: retq %1 = load <16 x i8>, <16 x i8>* %a %2 = load <16 x i8>, <16 x i8>* %b %3 = zext <16 x i8> %1 to <16 x i32> %4 = zext <16 x i8> %2 to <16 x i32> %5 = add nuw nsw <16 x i32> %3, %6 = add nuw nsw <16 x i32> %5, %4 %7 = lshr <16 x i32> %6, %8 = trunc <16 x i32> %7 to <16 x i8> store <16 x i8> %8, <16 x i8>* undef, align 4 ret void } define void @avg_v24i8(<24 x i8>* %a, <24 x i8>* %b) nounwind { ; SSE2-LABEL: avg_v24i8: ; SSE2: # %bb.0: ; SSE2-NEXT: movdqa (%rsi), %xmm0 ; SSE2-NEXT: movdqa 16(%rsi), %xmm1 ; SSE2-NEXT: pavgb (%rdi), %xmm0 ; SSE2-NEXT: pavgb 16(%rdi), %xmm1 ; SSE2-NEXT: movq %xmm1, (%rax) ; SSE2-NEXT: movdqu %xmm0, (%rax) ; SSE2-NEXT: retq ; ; AVX1-LABEL: avg_v24i8: ; AVX1: # %bb.0: ; AVX1-NEXT: vmovdqa (%rsi), %xmm0 ; AVX1-NEXT: vmovdqa 16(%rsi), %xmm1 ; AVX1-NEXT: vpavgb (%rdi), 
%xmm0, %xmm0 ; AVX1-NEXT: vpavgb 16(%rdi), %xmm1, %xmm1 ; AVX1-NEXT: vmovq %xmm1, (%rax) ; AVX1-NEXT: vmovdqu %xmm0, (%rax) ; AVX1-NEXT: retq ; ; AVX2-LABEL: avg_v24i8: ; AVX2: # %bb.0: ; AVX2-NEXT: vmovdqa (%rsi), %ymm0 ; AVX2-NEXT: vpavgb (%rdi), %ymm0, %ymm0 ; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1 ; AVX2-NEXT: vmovq %xmm1, (%rax) ; AVX2-NEXT: vmovdqu %xmm0, (%rax) ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; ; AVX512-LABEL: avg_v24i8: ; AVX512: # %bb.0: ; AVX512-NEXT: vmovdqa (%rsi), %ymm0 ; AVX512-NEXT: vpavgb (%rdi), %ymm0, %ymm0 ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 ; AVX512-NEXT: vmovq %xmm1, (%rax) ; AVX512-NEXT: vmovdqu %xmm0, (%rax) ; AVX512-NEXT: vzeroupper ; AVX512-NEXT: retq %1 = load <24 x i8>, <24 x i8>* %a %2 = load <24 x i8>, <24 x i8>* %b %3 = zext <24 x i8> %1 to <24 x i32> %4 = zext <24 x i8> %2 to <24 x i32> %5 = add nuw nsw <24 x i32> %3, %6 = add nuw nsw <24 x i32> %5, %4 %7 = lshr <24 x i32> %6, %8 = trunc <24 x i32> %7 to <24 x i8> store <24 x i8> %8, <24 x i8>* undef, align 4 ret void } define void @avg_v32i8(<32 x i8>* %a, <32 x i8>* %b) nounwind { ; SSE2-LABEL: avg_v32i8: ; SSE2: # %bb.0: ; SSE2-NEXT: movdqa (%rsi), %xmm0 ; SSE2-NEXT: movdqa 16(%rsi), %xmm1 ; SSE2-NEXT: pavgb (%rdi), %xmm0 ; SSE2-NEXT: pavgb 16(%rdi), %xmm1 ; SSE2-NEXT: movdqu %xmm1, (%rax) ; SSE2-NEXT: movdqu %xmm0, (%rax) ; SSE2-NEXT: retq ; ; AVX1-LABEL: avg_v32i8: ; AVX1: # %bb.0: ; AVX1-NEXT: vmovdqa (%rsi), %xmm0 ; AVX1-NEXT: vmovdqa 16(%rsi), %xmm1 ; AVX1-NEXT: vpavgb (%rdi), %xmm0, %xmm0 ; AVX1-NEXT: vpavgb 16(%rdi), %xmm1, %xmm1 ; AVX1-NEXT: vmovdqu %xmm1, (%rax) ; AVX1-NEXT: vmovdqu %xmm0, (%rax) ; AVX1-NEXT: retq ; ; AVX2-LABEL: avg_v32i8: ; AVX2: # %bb.0: ; AVX2-NEXT: vmovdqa (%rsi), %ymm0 ; AVX2-NEXT: vpavgb (%rdi), %ymm0, %ymm0 ; AVX2-NEXT: vmovdqu %ymm0, (%rax) ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; ; AVX512-LABEL: avg_v32i8: ; AVX512: # %bb.0: ; AVX512-NEXT: vmovdqa (%rsi), %ymm0 ; AVX512-NEXT: vpavgb (%rdi), %ymm0, %ymm0 ; AVX512-NEXT: vmovdqu %ymm0, (%rax) ; AVX512-NEXT: vzeroupper ; AVX512-NEXT: retq %1 = load <32 x i8>, <32 x i8>* %a %2 = load <32 x i8>, <32 x i8>* %b %3 = zext <32 x i8> %1 to <32 x i32> %4 = zext <32 x i8> %2 to <32 x i32> %5 = add nuw nsw <32 x i32> %3, %6 = add nuw nsw <32 x i32> %5, %4 %7 = lshr <32 x i32> %6, %8 = trunc <32 x i32> %7 to <32 x i8> store <32 x i8> %8, <32 x i8>* undef, align 4 ret void } define void @avg_v48i8(<48 x i8>* %a, <48 x i8>* %b) nounwind { ; SSE2-LABEL: avg_v48i8: ; SSE2: # %bb.0: ; SSE2-NEXT: movdqa (%rsi), %xmm0 ; SSE2-NEXT: movdqa 16(%rsi), %xmm1 ; SSE2-NEXT: movdqa 32(%rsi), %xmm2 ; SSE2-NEXT: pavgb (%rdi), %xmm0 ; SSE2-NEXT: pavgb 16(%rdi), %xmm1 ; SSE2-NEXT: pavgb 32(%rdi), %xmm2 ; SSE2-NEXT: movdqu %xmm2, (%rax) ; SSE2-NEXT: movdqu %xmm1, (%rax) ; SSE2-NEXT: movdqu %xmm0, (%rax) ; SSE2-NEXT: retq ; ; AVX1-LABEL: avg_v48i8: ; AVX1: # %bb.0: ; AVX1-NEXT: vmovdqa (%rsi), %xmm0 ; AVX1-NEXT: vmovdqa 16(%rsi), %xmm1 ; AVX1-NEXT: vmovdqa 32(%rsi), %xmm2 ; AVX1-NEXT: vpavgb 32(%rdi), %xmm2, %xmm2 ; AVX1-NEXT: vpavgb (%rdi), %xmm0, %xmm0 ; AVX1-NEXT: vpavgb 16(%rdi), %xmm1, %xmm1 ; AVX1-NEXT: vmovdqu %xmm1, (%rax) ; AVX1-NEXT: vmovdqu %xmm0, (%rax) ; AVX1-NEXT: vmovdqu %xmm2, (%rax) ; AVX1-NEXT: retq ; ; AVX2-LABEL: avg_v48i8: ; AVX2: # %bb.0: ; AVX2-NEXT: vmovdqa (%rsi), %ymm0 ; AVX2-NEXT: vpavgb (%rdi), %ymm0, %ymm0 ; AVX2-NEXT: vmovdqa 32(%rsi), %xmm1 ; AVX2-NEXT: vpavgb 32(%rdi), %xmm1, %xmm1 ; AVX2-NEXT: vmovdqu %xmm1, (%rax) ; AVX2-NEXT: vmovdqu %ymm0, (%rax) ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; ; 
AVX512F-LABEL: avg_v48i8: ; AVX512F: # %bb.0: ; AVX512F-NEXT: vmovdqa (%rsi), %ymm0 ; AVX512F-NEXT: vpavgb (%rdi), %ymm0, %ymm0 ; AVX512F-NEXT: vmovdqa 32(%rsi), %xmm1 ; AVX512F-NEXT: vpavgb 32(%rdi), %xmm1, %xmm1 ; AVX512F-NEXT: vmovdqu %xmm1, (%rax) ; AVX512F-NEXT: vmovdqu %ymm0, (%rax) ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; ; AVX512BW-LABEL: avg_v48i8: ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: vmovdqa64 (%rsi), %zmm0 ; AVX512BW-NEXT: vpavgb (%rdi), %zmm0, %zmm0 ; AVX512BW-NEXT: vextracti32x4 $2, %zmm0, (%rax) ; AVX512BW-NEXT: vmovdqu %ymm0, (%rax) ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: retq %1 = load <48 x i8>, <48 x i8>* %a %2 = load <48 x i8>, <48 x i8>* %b %3 = zext <48 x i8> %1 to <48 x i32> %4 = zext <48 x i8> %2 to <48 x i32> %5 = add nuw nsw <48 x i32> %3, %6 = add nuw nsw <48 x i32> %5, %4 %7 = lshr <48 x i32> %6, %8 = trunc <48 x i32> %7 to <48 x i8> store <48 x i8> %8, <48 x i8>* undef, align 4 ret void } define void @avg_v64i8(<64 x i8>* %a, <64 x i8>* %b) nounwind { ; SSE2-LABEL: avg_v64i8: ; SSE2: # %bb.0: ; SSE2-NEXT: movdqa (%rsi), %xmm0 ; SSE2-NEXT: movdqa 16(%rsi), %xmm1 ; SSE2-NEXT: movdqa 32(%rsi), %xmm2 ; SSE2-NEXT: movdqa 48(%rsi), %xmm3 ; SSE2-NEXT: pavgb (%rdi), %xmm0 ; SSE2-NEXT: pavgb 16(%rdi), %xmm1 ; SSE2-NEXT: pavgb 32(%rdi), %xmm2 ; SSE2-NEXT: pavgb 48(%rdi), %xmm3 ; SSE2-NEXT: movdqu %xmm3, (%rax) ; SSE2-NEXT: movdqu %xmm2, (%rax) ; SSE2-NEXT: movdqu %xmm1, (%rax) ; SSE2-NEXT: movdqu %xmm0, (%rax) ; SSE2-NEXT: retq ; ; AVX1-LABEL: avg_v64i8: ; AVX1: # %bb.0: ; AVX1-NEXT: vmovdqa (%rsi), %xmm0 ; AVX1-NEXT: vmovdqa 16(%rsi), %xmm1 ; AVX1-NEXT: vmovdqa 32(%rsi), %xmm2 ; AVX1-NEXT: vmovdqa 48(%rsi), %xmm3 ; AVX1-NEXT: vpavgb (%rdi), %xmm0, %xmm0 ; AVX1-NEXT: vpavgb 16(%rdi), %xmm1, %xmm1 ; AVX1-NEXT: vpavgb 32(%rdi), %xmm2, %xmm2 ; AVX1-NEXT: vpavgb 48(%rdi), %xmm3, %xmm3 ; AVX1-NEXT: vmovdqu %xmm3, (%rax) ; AVX1-NEXT: vmovdqu %xmm2, (%rax) ; AVX1-NEXT: vmovdqu %xmm1, (%rax) ; AVX1-NEXT: vmovdqu %xmm0, (%rax) ; AVX1-NEXT: retq ; ; AVX2-LABEL: avg_v64i8: ; AVX2: # %bb.0: ; AVX2-NEXT: vmovdqa (%rsi), %ymm0 ; AVX2-NEXT: vmovdqa 32(%rsi), %ymm1 ; AVX2-NEXT: vpavgb (%rdi), %ymm0, %ymm0 ; AVX2-NEXT: vpavgb 32(%rdi), %ymm1, %ymm1 ; AVX2-NEXT: vmovdqu %ymm1, (%rax) ; AVX2-NEXT: vmovdqu %ymm0, (%rax) ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; ; AVX512F-LABEL: avg_v64i8: ; AVX512F: # %bb.0: ; AVX512F-NEXT: vmovdqa (%rsi), %ymm0 ; AVX512F-NEXT: vmovdqa 32(%rsi), %ymm1 ; AVX512F-NEXT: vpavgb (%rdi), %ymm0, %ymm0 ; AVX512F-NEXT: vpavgb 32(%rdi), %ymm1, %ymm1 ; AVX512F-NEXT: vmovdqu %ymm1, (%rax) ; AVX512F-NEXT: vmovdqu %ymm0, (%rax) ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; ; AVX512BW-LABEL: avg_v64i8: ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: vmovdqa64 (%rsi), %zmm0 ; AVX512BW-NEXT: vpavgb (%rdi), %zmm0, %zmm0 ; AVX512BW-NEXT: vmovdqu64 %zmm0, (%rax) ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: retq %1 = load <64 x i8>, <64 x i8>* %a %2 = load <64 x i8>, <64 x i8>* %b %3 = zext <64 x i8> %1 to <64 x i32> %4 = zext <64 x i8> %2 to <64 x i32> %5 = add nuw nsw <64 x i32> %3, %6 = add nuw nsw <64 x i32> %5, %4 %7 = lshr <64 x i32> %6, %8 = trunc <64 x i32> %7 to <64 x i8> store <64 x i8> %8, <64 x i8>* undef, align 4 ret void } define void @avg_v4i16(<4 x i16>* %a, <4 x i16>* %b) nounwind { ; SSE2-LABEL: avg_v4i16: ; SSE2: # %bb.0: ; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero ; SSE2-NEXT: movq {{.*#+}} xmm1 = mem[0],zero ; SSE2-NEXT: pavgw %xmm0, %xmm1 ; SSE2-NEXT: movq %xmm1, (%rax) ; SSE2-NEXT: retq ; ; AVX-LABEL: avg_v4i16: ; AVX: # %bb.0: ; AVX-NEXT: 
vmovq {{.*#+}} xmm0 = mem[0],zero ; AVX-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero ; AVX-NEXT: vpavgw %xmm0, %xmm1, %xmm0 ; AVX-NEXT: vmovq %xmm0, (%rax) ; AVX-NEXT: retq %1 = load <4 x i16>, <4 x i16>* %a %2 = load <4 x i16>, <4 x i16>* %b %3 = zext <4 x i16> %1 to <4 x i32> %4 = zext <4 x i16> %2 to <4 x i32> %5 = add nuw nsw <4 x i32> %3, %6 = add nuw nsw <4 x i32> %5, %4 %7 = lshr <4 x i32> %6, %8 = trunc <4 x i32> %7 to <4 x i16> store <4 x i16> %8, <4 x i16>* undef, align 4 ret void } define void @avg_v8i16(<8 x i16>* %a, <8 x i16>* %b) nounwind { ; SSE2-LABEL: avg_v8i16: ; SSE2: # %bb.0: ; SSE2-NEXT: movdqa (%rsi), %xmm0 ; SSE2-NEXT: pavgw (%rdi), %xmm0 ; SSE2-NEXT: movdqu %xmm0, (%rax) ; SSE2-NEXT: retq ; ; AVX-LABEL: avg_v8i16: ; AVX: # %bb.0: ; AVX-NEXT: vmovdqa (%rsi), %xmm0 ; AVX-NEXT: vpavgw (%rdi), %xmm0, %xmm0 ; AVX-NEXT: vmovdqu %xmm0, (%rax) ; AVX-NEXT: retq %1 = load <8 x i16>, <8 x i16>* %a %2 = load <8 x i16>, <8 x i16>* %b %3 = zext <8 x i16> %1 to <8 x i32> %4 = zext <8 x i16> %2 to <8 x i32> %5 = add nuw nsw <8 x i32> %3, %6 = add nuw nsw <8 x i32> %5, %4 %7 = lshr <8 x i32> %6, %8 = trunc <8 x i32> %7 to <8 x i16> store <8 x i16> %8, <8 x i16>* undef, align 4 ret void } define void @avg_v16i16(<16 x i16>* %a, <16 x i16>* %b) nounwind { ; SSE2-LABEL: avg_v16i16: ; SSE2: # %bb.0: ; SSE2-NEXT: movdqa (%rsi), %xmm0 ; SSE2-NEXT: movdqa 16(%rsi), %xmm1 ; SSE2-NEXT: pavgw (%rdi), %xmm0 ; SSE2-NEXT: pavgw 16(%rdi), %xmm1 ; SSE2-NEXT: movdqu %xmm1, (%rax) ; SSE2-NEXT: movdqu %xmm0, (%rax) ; SSE2-NEXT: retq ; ; AVX1-LABEL: avg_v16i16: ; AVX1: # %bb.0: ; AVX1-NEXT: vmovdqa (%rsi), %xmm0 ; AVX1-NEXT: vmovdqa 16(%rsi), %xmm1 ; AVX1-NEXT: vpavgw (%rdi), %xmm0, %xmm0 ; AVX1-NEXT: vpavgw 16(%rdi), %xmm1, %xmm1 ; AVX1-NEXT: vmovdqu %xmm1, (%rax) ; AVX1-NEXT: vmovdqu %xmm0, (%rax) ; AVX1-NEXT: retq ; ; AVX2-LABEL: avg_v16i16: ; AVX2: # %bb.0: ; AVX2-NEXT: vmovdqa (%rsi), %ymm0 ; AVX2-NEXT: vpavgw (%rdi), %ymm0, %ymm0 ; AVX2-NEXT: vmovdqu %ymm0, (%rax) ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; ; AVX512-LABEL: avg_v16i16: ; AVX512: # %bb.0: ; AVX512-NEXT: vmovdqa (%rsi), %ymm0 ; AVX512-NEXT: vpavgw (%rdi), %ymm0, %ymm0 ; AVX512-NEXT: vmovdqu %ymm0, (%rax) ; AVX512-NEXT: vzeroupper ; AVX512-NEXT: retq %1 = load <16 x i16>, <16 x i16>* %a %2 = load <16 x i16>, <16 x i16>* %b %3 = zext <16 x i16> %1 to <16 x i32> %4 = zext <16 x i16> %2 to <16 x i32> %5 = add nuw nsw <16 x i32> %3, %6 = add nuw nsw <16 x i32> %5, %4 %7 = lshr <16 x i32> %6, %8 = trunc <16 x i32> %7 to <16 x i16> store <16 x i16> %8, <16 x i16>* undef, align 4 ret void } define void @avg_v32i16(<32 x i16>* %a, <32 x i16>* %b) nounwind { ; SSE2-LABEL: avg_v32i16: ; SSE2: # %bb.0: ; SSE2-NEXT: movdqa (%rsi), %xmm0 ; SSE2-NEXT: movdqa 16(%rsi), %xmm1 ; SSE2-NEXT: movdqa 32(%rsi), %xmm2 ; SSE2-NEXT: movdqa 48(%rsi), %xmm3 ; SSE2-NEXT: pavgw (%rdi), %xmm0 ; SSE2-NEXT: pavgw 16(%rdi), %xmm1 ; SSE2-NEXT: pavgw 32(%rdi), %xmm2 ; SSE2-NEXT: pavgw 48(%rdi), %xmm3 ; SSE2-NEXT: movdqu %xmm3, (%rax) ; SSE2-NEXT: movdqu %xmm2, (%rax) ; SSE2-NEXT: movdqu %xmm1, (%rax) ; SSE2-NEXT: movdqu %xmm0, (%rax) ; SSE2-NEXT: retq ; ; AVX1-LABEL: avg_v32i16: ; AVX1: # %bb.0: ; AVX1-NEXT: vmovdqa (%rsi), %xmm0 ; AVX1-NEXT: vmovdqa 16(%rsi), %xmm1 ; AVX1-NEXT: vmovdqa 32(%rsi), %xmm2 ; AVX1-NEXT: vmovdqa 48(%rsi), %xmm3 ; AVX1-NEXT: vpavgw (%rdi), %xmm0, %xmm0 ; AVX1-NEXT: vpavgw 16(%rdi), %xmm1, %xmm1 ; AVX1-NEXT: vpavgw 32(%rdi), %xmm2, %xmm2 ; AVX1-NEXT: vpavgw 48(%rdi), %xmm3, %xmm3 ; AVX1-NEXT: vmovdqu %xmm3, (%rax) ; AVX1-NEXT: vmovdqu %xmm2, 
(%rax) ; AVX1-NEXT: vmovdqu %xmm1, (%rax) ; AVX1-NEXT: vmovdqu %xmm0, (%rax) ; AVX1-NEXT: retq ; ; AVX2-LABEL: avg_v32i16: ; AVX2: # %bb.0: ; AVX2-NEXT: vmovdqa (%rsi), %ymm0 ; AVX2-NEXT: vmovdqa 32(%rsi), %ymm1 ; AVX2-NEXT: vpavgw (%rdi), %ymm0, %ymm0 ; AVX2-NEXT: vpavgw 32(%rdi), %ymm1, %ymm1 ; AVX2-NEXT: vmovdqu %ymm1, (%rax) ; AVX2-NEXT: vmovdqu %ymm0, (%rax) ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; ; AVX512F-LABEL: avg_v32i16: ; AVX512F: # %bb.0: ; AVX512F-NEXT: vmovdqa (%rsi), %ymm0 ; AVX512F-NEXT: vmovdqa 32(%rsi), %ymm1 ; AVX512F-NEXT: vpavgw (%rdi), %ymm0, %ymm0 ; AVX512F-NEXT: vpavgw 32(%rdi), %ymm1, %ymm1 ; AVX512F-NEXT: vmovdqu %ymm1, (%rax) ; AVX512F-NEXT: vmovdqu %ymm0, (%rax) ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; ; AVX512BW-LABEL: avg_v32i16: ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: vmovdqa64 (%rsi), %zmm0 ; AVX512BW-NEXT: vpavgw (%rdi), %zmm0, %zmm0 ; AVX512BW-NEXT: vmovdqu64 %zmm0, (%rax) ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: retq %1 = load <32 x i16>, <32 x i16>* %a %2 = load <32 x i16>, <32 x i16>* %b %3 = zext <32 x i16> %1 to <32 x i32> %4 = zext <32 x i16> %2 to <32 x i32> %5 = add nuw nsw <32 x i32> %3, %6 = add nuw nsw <32 x i32> %5, %4 %7 = lshr <32 x i32> %6, %8 = trunc <32 x i32> %7 to <32 x i16> store <32 x i16> %8, <32 x i16>* undef, align 4 ret void } define void @avg_v40i16(<40 x i16>* %a, <40 x i16>* %b) nounwind { ; SSE2-LABEL: avg_v40i16: ; SSE2: # %bb.0: ; SSE2-NEXT: movdqa (%rsi), %xmm0 ; SSE2-NEXT: movdqa 16(%rsi), %xmm1 ; SSE2-NEXT: movdqa 32(%rsi), %xmm2 ; SSE2-NEXT: movdqa 48(%rsi), %xmm3 ; SSE2-NEXT: pavgw (%rdi), %xmm0 ; SSE2-NEXT: pavgw 16(%rdi), %xmm1 ; SSE2-NEXT: pavgw 32(%rdi), %xmm2 ; SSE2-NEXT: pavgw 48(%rdi), %xmm3 ; SSE2-NEXT: movdqa 64(%rsi), %xmm4 ; SSE2-NEXT: pavgw 64(%rdi), %xmm4 ; SSE2-NEXT: movdqu %xmm4, (%rax) ; SSE2-NEXT: movdqu %xmm3, (%rax) ; SSE2-NEXT: movdqu %xmm2, (%rax) ; SSE2-NEXT: movdqu %xmm1, (%rax) ; SSE2-NEXT: movdqu %xmm0, (%rax) ; SSE2-NEXT: retq ; ; AVX1-LABEL: avg_v40i16: ; AVX1: # %bb.0: ; AVX1-NEXT: vmovdqa 64(%rsi), %xmm0 ; AVX1-NEXT: vpavgw 64(%rdi), %xmm0, %xmm0 ; AVX1-NEXT: vmovdqa (%rsi), %xmm1 ; AVX1-NEXT: vmovdqa 16(%rsi), %xmm2 ; AVX1-NEXT: vmovdqa 32(%rsi), %xmm3 ; AVX1-NEXT: vmovdqa 48(%rsi), %xmm4 ; AVX1-NEXT: vpavgw (%rdi), %xmm1, %xmm1 ; AVX1-NEXT: vpavgw 16(%rdi), %xmm2, %xmm2 ; AVX1-NEXT: vpavgw 32(%rdi), %xmm3, %xmm3 ; AVX1-NEXT: vpavgw 48(%rdi), %xmm4, %xmm4 ; AVX1-NEXT: vmovdqu %xmm4, (%rax) ; AVX1-NEXT: vmovdqu %xmm3, (%rax) ; AVX1-NEXT: vmovdqu %xmm2, (%rax) ; AVX1-NEXT: vmovdqu %xmm1, (%rax) ; AVX1-NEXT: vmovdqu %xmm0, (%rax) ; AVX1-NEXT: retq ; ; AVX2-LABEL: avg_v40i16: ; AVX2: # %bb.0: ; AVX2-NEXT: vmovdqa (%rsi), %ymm0 ; AVX2-NEXT: vmovdqa 32(%rsi), %ymm1 ; AVX2-NEXT: vpavgw (%rdi), %ymm0, %ymm0 ; AVX2-NEXT: vpavgw 32(%rdi), %ymm1, %ymm1 ; AVX2-NEXT: vmovdqa 64(%rsi), %xmm2 ; AVX2-NEXT: vpavgw 64(%rdi), %xmm2, %xmm2 ; AVX2-NEXT: vmovdqu %xmm2, (%rax) ; AVX2-NEXT: vmovdqu %ymm1, (%rax) ; AVX2-NEXT: vmovdqu %ymm0, (%rax) ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; ; AVX512F-LABEL: avg_v40i16: ; AVX512F: # %bb.0: ; AVX512F-NEXT: vmovdqa 64(%rsi), %xmm0 ; AVX512F-NEXT: vpavgw 64(%rdi), %xmm0, %xmm0 ; AVX512F-NEXT: vmovdqa (%rsi), %ymm1 ; AVX512F-NEXT: vmovdqa 32(%rsi), %ymm2 ; AVX512F-NEXT: vpavgw (%rdi), %ymm1, %ymm1 ; AVX512F-NEXT: vpavgw 32(%rdi), %ymm2, %ymm2 ; AVX512F-NEXT: vmovdqu %ymm2, (%rax) ; AVX512F-NEXT: vmovdqu %ymm1, (%rax) ; AVX512F-NEXT: vmovdqu %xmm0, (%rax) ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; ; AVX512BW-LABEL: avg_v40i16: ; AVX512BW: # %bb.0: ; 
AVX512BW-NEXT: vmovdqa64 (%rsi), %zmm0 ; AVX512BW-NEXT: vpavgw (%rdi), %zmm0, %zmm0 ; AVX512BW-NEXT: vmovdqa 64(%rsi), %xmm1 ; AVX512BW-NEXT: vpavgw 64(%rdi), %xmm1, %xmm1 ; AVX512BW-NEXT: vmovdqu %xmm1, (%rax) ; AVX512BW-NEXT: vmovdqu64 %zmm0, (%rax) ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: retq %1 = load <40 x i16>, <40 x i16>* %a %2 = load <40 x i16>, <40 x i16>* %b %3 = zext <40 x i16> %1 to <40 x i32> %4 = zext <40 x i16> %2 to <40 x i32> %5 = add nuw nsw <40 x i32> %3, %6 = add nuw nsw <40 x i32> %5, %4 %7 = lshr <40 x i32> %6, %8 = trunc <40 x i32> %7 to <40 x i16> store <40 x i16> %8, <40 x i16>* undef, align 4 ret void } define void @avg_v4i8_2(<4 x i8>* %a, <4 x i8>* %b) nounwind { ; SSE2-LABEL: avg_v4i8_2: ; SSE2: # %bb.0: ; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero ; SSE2-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero ; SSE2-NEXT: pavgb %xmm0, %xmm1 ; SSE2-NEXT: movd %xmm1, (%rax) ; SSE2-NEXT: retq ; ; AVX-LABEL: avg_v4i8_2: ; AVX: # %bb.0: ; AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero ; AVX-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero ; AVX-NEXT: vpavgb %xmm1, %xmm0, %xmm0 ; AVX-NEXT: vmovd %xmm0, (%rax) ; AVX-NEXT: retq %1 = load <4 x i8>, <4 x i8>* %a %2 = load <4 x i8>, <4 x i8>* %b %3 = zext <4 x i8> %1 to <4 x i32> %4 = zext <4 x i8> %2 to <4 x i32> %5 = add nuw nsw <4 x i32> %3, %4 %6 = add nuw nsw <4 x i32> %5, %7 = lshr <4 x i32> %6, %8 = trunc <4 x i32> %7 to <4 x i8> store <4 x i8> %8, <4 x i8>* undef, align 4 ret void } define void @avg_v8i8_2(<8 x i8>* %a, <8 x i8>* %b) nounwind { ; SSE2-LABEL: avg_v8i8_2: ; SSE2: # %bb.0: ; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero ; SSE2-NEXT: movq {{.*#+}} xmm1 = mem[0],zero ; SSE2-NEXT: pavgb %xmm0, %xmm1 ; SSE2-NEXT: movq %xmm1, (%rax) ; SSE2-NEXT: retq ; ; AVX-LABEL: avg_v8i8_2: ; AVX: # %bb.0: ; AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero ; AVX-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero ; AVX-NEXT: vpavgb %xmm1, %xmm0, %xmm0 ; AVX-NEXT: vmovq %xmm0, (%rax) ; AVX-NEXT: retq %1 = load <8 x i8>, <8 x i8>* %a %2 = load <8 x i8>, <8 x i8>* %b %3 = zext <8 x i8> %1 to <8 x i32> %4 = zext <8 x i8> %2 to <8 x i32> %5 = add nuw nsw <8 x i32> %3, %4 %6 = add nuw nsw <8 x i32> %5, %7 = lshr <8 x i32> %6, %8 = trunc <8 x i32> %7 to <8 x i8> store <8 x i8> %8, <8 x i8>* undef, align 4 ret void } define void @avg_v16i8_2(<16 x i8>* %a, <16 x i8>* %b) nounwind { ; SSE2-LABEL: avg_v16i8_2: ; SSE2: # %bb.0: ; SSE2-NEXT: movdqa (%rdi), %xmm0 ; SSE2-NEXT: pavgb (%rsi), %xmm0 ; SSE2-NEXT: movdqu %xmm0, (%rax) ; SSE2-NEXT: retq ; ; AVX-LABEL: avg_v16i8_2: ; AVX: # %bb.0: ; AVX-NEXT: vmovdqa (%rdi), %xmm0 ; AVX-NEXT: vpavgb (%rsi), %xmm0, %xmm0 ; AVX-NEXT: vmovdqu %xmm0, (%rax) ; AVX-NEXT: retq %1 = load <16 x i8>, <16 x i8>* %a %2 = load <16 x i8>, <16 x i8>* %b %3 = zext <16 x i8> %1 to <16 x i32> %4 = zext <16 x i8> %2 to <16 x i32> %5 = add nuw nsw <16 x i32> %3, %4 %6 = add nuw nsw <16 x i32> %5, %7 = lshr <16 x i32> %6, %8 = trunc <16 x i32> %7 to <16 x i8> store <16 x i8> %8, <16 x i8>* undef, align 4 ret void } define void @avg_v32i8_2(<32 x i8>* %a, <32 x i8>* %b) nounwind { ; SSE2-LABEL: avg_v32i8_2: ; SSE2: # %bb.0: ; SSE2-NEXT: movdqa (%rdi), %xmm0 ; SSE2-NEXT: movdqa 16(%rdi), %xmm1 ; SSE2-NEXT: pavgb (%rsi), %xmm0 ; SSE2-NEXT: pavgb 16(%rsi), %xmm1 ; SSE2-NEXT: movdqu %xmm1, (%rax) ; SSE2-NEXT: movdqu %xmm0, (%rax) ; SSE2-NEXT: retq ; ; AVX1-LABEL: avg_v32i8_2: ; AVX1: # %bb.0: ; AVX1-NEXT: vmovdqa (%rdi), %xmm0 ; AVX1-NEXT: vmovdqa 16(%rdi), %xmm1 ; AVX1-NEXT: vpavgb (%rsi), %xmm0, %xmm0 ; AVX1-NEXT: 
vpavgb 16(%rsi), %xmm1, %xmm1 ; AVX1-NEXT: vmovdqu %xmm1, (%rax) ; AVX1-NEXT: vmovdqu %xmm0, (%rax) ; AVX1-NEXT: retq ; ; AVX2-LABEL: avg_v32i8_2: ; AVX2: # %bb.0: ; AVX2-NEXT: vmovdqa (%rdi), %ymm0 ; AVX2-NEXT: vpavgb (%rsi), %ymm0, %ymm0 ; AVX2-NEXT: vmovdqu %ymm0, (%rax) ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; ; AVX512-LABEL: avg_v32i8_2: ; AVX512: # %bb.0: ; AVX512-NEXT: vmovdqa (%rdi), %ymm0 ; AVX512-NEXT: vpavgb (%rsi), %ymm0, %ymm0 ; AVX512-NEXT: vmovdqu %ymm0, (%rax) ; AVX512-NEXT: vzeroupper ; AVX512-NEXT: retq %1 = load <32 x i8>, <32 x i8>* %a %2 = load <32 x i8>, <32 x i8>* %b %3 = zext <32 x i8> %1 to <32 x i32> %4 = zext <32 x i8> %2 to <32 x i32> %5 = add nuw nsw <32 x i32> %3, %4 %6 = add nuw nsw <32 x i32> %5, %7 = lshr <32 x i32> %6, %8 = trunc <32 x i32> %7 to <32 x i8> store <32 x i8> %8, <32 x i8>* undef, align 4 ret void } define void @avg_v64i8_2(<64 x i8>* %a, <64 x i8>* %b) nounwind { ; SSE2-LABEL: avg_v64i8_2: ; SSE2: # %bb.0: ; SSE2-NEXT: movdqa (%rsi), %xmm0 ; SSE2-NEXT: movdqa 16(%rsi), %xmm1 ; SSE2-NEXT: movdqa 32(%rsi), %xmm2 ; SSE2-NEXT: movdqa 48(%rsi), %xmm3 ; SSE2-NEXT: pavgb %xmm0, %xmm0 ; SSE2-NEXT: pavgb %xmm1, %xmm1 ; SSE2-NEXT: pavgb %xmm2, %xmm2 ; SSE2-NEXT: pavgb %xmm3, %xmm3 ; SSE2-NEXT: movdqu %xmm3, (%rax) ; SSE2-NEXT: movdqu %xmm2, (%rax) ; SSE2-NEXT: movdqu %xmm1, (%rax) ; SSE2-NEXT: movdqu %xmm0, (%rax) ; SSE2-NEXT: retq ; ; AVX1-LABEL: avg_v64i8_2: ; AVX1: # %bb.0: ; AVX1-NEXT: vmovdqa (%rsi), %xmm0 ; AVX1-NEXT: vmovdqa 16(%rsi), %xmm1 ; AVX1-NEXT: vmovdqa 32(%rsi), %xmm2 ; AVX1-NEXT: vmovdqa 48(%rsi), %xmm3 ; AVX1-NEXT: vpavgb %xmm0, %xmm0, %xmm0 ; AVX1-NEXT: vpavgb %xmm1, %xmm1, %xmm1 ; AVX1-NEXT: vpavgb %xmm2, %xmm2, %xmm2 ; AVX1-NEXT: vpavgb %xmm3, %xmm3, %xmm3 ; AVX1-NEXT: vmovdqu %xmm3, (%rax) ; AVX1-NEXT: vmovdqu %xmm2, (%rax) ; AVX1-NEXT: vmovdqu %xmm1, (%rax) ; AVX1-NEXT: vmovdqu %xmm0, (%rax) ; AVX1-NEXT: retq ; ; AVX2-LABEL: avg_v64i8_2: ; AVX2: # %bb.0: ; AVX2-NEXT: vmovdqa (%rsi), %ymm0 ; AVX2-NEXT: vmovdqa 32(%rsi), %ymm1 ; AVX2-NEXT: vpavgb %ymm0, %ymm0, %ymm0 ; AVX2-NEXT: vpavgb %ymm1, %ymm1, %ymm1 ; AVX2-NEXT: vmovdqu %ymm1, (%rax) ; AVX2-NEXT: vmovdqu %ymm0, (%rax) ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; ; AVX512F-LABEL: avg_v64i8_2: ; AVX512F: # %bb.0: ; AVX512F-NEXT: vmovdqa (%rsi), %ymm0 ; AVX512F-NEXT: vmovdqa 32(%rsi), %ymm1 ; AVX512F-NEXT: vpavgb %ymm0, %ymm0, %ymm0 ; AVX512F-NEXT: vpavgb %ymm1, %ymm1, %ymm1 ; AVX512F-NEXT: vmovdqu %ymm1, (%rax) ; AVX512F-NEXT: vmovdqu %ymm0, (%rax) ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; ; AVX512BW-LABEL: avg_v64i8_2: ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: vmovdqa64 (%rsi), %zmm0 ; AVX512BW-NEXT: vpavgb %zmm0, %zmm0, %zmm0 ; AVX512BW-NEXT: vmovdqu64 %zmm0, (%rax) ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: retq %1 = load <64 x i8>, <64 x i8>* %a %2 = load <64 x i8>, <64 x i8>* %b %3 = zext <64 x i8> %1 to <64 x i32> %4 = zext <64 x i8> %2 to <64 x i32> %5 = add nuw nsw <64 x i32> %4, %4 %6 = add nuw nsw <64 x i32> %5, %7 = lshr <64 x i32> %6, %8 = trunc <64 x i32> %7 to <64 x i8> store <64 x i8> %8, <64 x i8>* undef, align 4 ret void } define void @avg_v4i16_2(<4 x i16>* %a, <4 x i16>* %b) nounwind { ; SSE2-LABEL: avg_v4i16_2: ; SSE2: # %bb.0: ; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero ; SSE2-NEXT: movq {{.*#+}} xmm1 = mem[0],zero ; SSE2-NEXT: pavgw %xmm0, %xmm1 ; SSE2-NEXT: movq %xmm1, (%rax) ; SSE2-NEXT: retq ; ; AVX-LABEL: avg_v4i16_2: ; AVX: # %bb.0: ; AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero ; AVX-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero ; AVX-NEXT: vpavgw 
%xmm1, %xmm0, %xmm0 ; AVX-NEXT: vmovq %xmm0, (%rax) ; AVX-NEXT: retq %1 = load <4 x i16>, <4 x i16>* %a %2 = load <4 x i16>, <4 x i16>* %b %3 = zext <4 x i16> %1 to <4 x i32> %4 = zext <4 x i16> %2 to <4 x i32> %5 = add nuw nsw <4 x i32> %3, %4 %6 = add nuw nsw <4 x i32> %5, %7 = lshr <4 x i32> %6, %8 = trunc <4 x i32> %7 to <4 x i16> store <4 x i16> %8, <4 x i16>* undef, align 4 ret void } define void @avg_v8i16_2(<8 x i16>* %a, <8 x i16>* %b) nounwind { ; SSE2-LABEL: avg_v8i16_2: ; SSE2: # %bb.0: ; SSE2-NEXT: movdqa (%rdi), %xmm0 ; SSE2-NEXT: pavgw (%rsi), %xmm0 ; SSE2-NEXT: movdqu %xmm0, (%rax) ; SSE2-NEXT: retq ; ; AVX-LABEL: avg_v8i16_2: ; AVX: # %bb.0: ; AVX-NEXT: vmovdqa (%rdi), %xmm0 ; AVX-NEXT: vpavgw (%rsi), %xmm0, %xmm0 ; AVX-NEXT: vmovdqu %xmm0, (%rax) ; AVX-NEXT: retq %1 = load <8 x i16>, <8 x i16>* %a %2 = load <8 x i16>, <8 x i16>* %b %3 = zext <8 x i16> %1 to <8 x i32> %4 = zext <8 x i16> %2 to <8 x i32> %5 = add nuw nsw <8 x i32> %3, %4 %6 = add nuw nsw <8 x i32> %5, %7 = lshr <8 x i32> %6, %8 = trunc <8 x i32> %7 to <8 x i16> store <8 x i16> %8, <8 x i16>* undef, align 4 ret void } define void @avg_v16i16_2(<16 x i16>* %a, <16 x i16>* %b) nounwind { ; SSE2-LABEL: avg_v16i16_2: ; SSE2: # %bb.0: ; SSE2-NEXT: movdqa (%rdi), %xmm0 ; SSE2-NEXT: movdqa 16(%rdi), %xmm1 ; SSE2-NEXT: pavgw (%rsi), %xmm0 ; SSE2-NEXT: pavgw 16(%rsi), %xmm1 ; SSE2-NEXT: movdqu %xmm1, (%rax) ; SSE2-NEXT: movdqu %xmm0, (%rax) ; SSE2-NEXT: retq ; ; AVX1-LABEL: avg_v16i16_2: ; AVX1: # %bb.0: ; AVX1-NEXT: vmovdqa (%rdi), %xmm0 ; AVX1-NEXT: vmovdqa 16(%rdi), %xmm1 ; AVX1-NEXT: vpavgw (%rsi), %xmm0, %xmm0 ; AVX1-NEXT: vpavgw 16(%rsi), %xmm1, %xmm1 ; AVX1-NEXT: vmovdqu %xmm1, (%rax) ; AVX1-NEXT: vmovdqu %xmm0, (%rax) ; AVX1-NEXT: retq ; ; AVX2-LABEL: avg_v16i16_2: ; AVX2: # %bb.0: ; AVX2-NEXT: vmovdqa (%rdi), %ymm0 ; AVX2-NEXT: vpavgw (%rsi), %ymm0, %ymm0 ; AVX2-NEXT: vmovdqu %ymm0, (%rax) ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; ; AVX512-LABEL: avg_v16i16_2: ; AVX512: # %bb.0: ; AVX512-NEXT: vmovdqa (%rdi), %ymm0 ; AVX512-NEXT: vpavgw (%rsi), %ymm0, %ymm0 ; AVX512-NEXT: vmovdqu %ymm0, (%rax) ; AVX512-NEXT: vzeroupper ; AVX512-NEXT: retq %1 = load <16 x i16>, <16 x i16>* %a %2 = load <16 x i16>, <16 x i16>* %b %3 = zext <16 x i16> %1 to <16 x i32> %4 = zext <16 x i16> %2 to <16 x i32> %5 = add nuw nsw <16 x i32> %3, %4 %6 = add nuw nsw <16 x i32> %5, %7 = lshr <16 x i32> %6, %8 = trunc <16 x i32> %7 to <16 x i16> store <16 x i16> %8, <16 x i16>* undef, align 4 ret void } define void @avg_v32i16_2(<32 x i16>* %a, <32 x i16>* %b) nounwind { ; SSE2-LABEL: avg_v32i16_2: ; SSE2: # %bb.0: ; SSE2-NEXT: movdqa (%rdi), %xmm0 ; SSE2-NEXT: movdqa 16(%rdi), %xmm1 ; SSE2-NEXT: movdqa 32(%rdi), %xmm2 ; SSE2-NEXT: movdqa 48(%rdi), %xmm3 ; SSE2-NEXT: pavgw (%rsi), %xmm0 ; SSE2-NEXT: pavgw 16(%rsi), %xmm1 ; SSE2-NEXT: pavgw 32(%rsi), %xmm2 ; SSE2-NEXT: pavgw 48(%rsi), %xmm3 ; SSE2-NEXT: movdqu %xmm3, (%rax) ; SSE2-NEXT: movdqu %xmm2, (%rax) ; SSE2-NEXT: movdqu %xmm1, (%rax) ; SSE2-NEXT: movdqu %xmm0, (%rax) ; SSE2-NEXT: retq ; ; AVX1-LABEL: avg_v32i16_2: ; AVX1: # %bb.0: ; AVX1-NEXT: vmovdqa (%rdi), %xmm0 ; AVX1-NEXT: vmovdqa 16(%rdi), %xmm1 ; AVX1-NEXT: vmovdqa 32(%rdi), %xmm2 ; AVX1-NEXT: vmovdqa 48(%rdi), %xmm3 ; AVX1-NEXT: vpavgw (%rsi), %xmm0, %xmm0 ; AVX1-NEXT: vpavgw 16(%rsi), %xmm1, %xmm1 ; AVX1-NEXT: vpavgw 32(%rsi), %xmm2, %xmm2 ; AVX1-NEXT: vpavgw 48(%rsi), %xmm3, %xmm3 ; AVX1-NEXT: vmovdqu %xmm3, (%rax) ; AVX1-NEXT: vmovdqu %xmm2, (%rax) ; AVX1-NEXT: vmovdqu %xmm1, (%rax) ; AVX1-NEXT: vmovdqu %xmm0, (%rax) ; 
AVX1-NEXT: retq ; ; AVX2-LABEL: avg_v32i16_2: ; AVX2: # %bb.0: ; AVX2-NEXT: vmovdqa (%rdi), %ymm0 ; AVX2-NEXT: vmovdqa 32(%rdi), %ymm1 ; AVX2-NEXT: vpavgw (%rsi), %ymm0, %ymm0 ; AVX2-NEXT: vpavgw 32(%rsi), %ymm1, %ymm1 ; AVX2-NEXT: vmovdqu %ymm1, (%rax) ; AVX2-NEXT: vmovdqu %ymm0, (%rax) ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; ; AVX512F-LABEL: avg_v32i16_2: ; AVX512F: # %bb.0: ; AVX512F-NEXT: vmovdqa (%rdi), %ymm0 ; AVX512F-NEXT: vmovdqa 32(%rdi), %ymm1 ; AVX512F-NEXT: vpavgw (%rsi), %ymm0, %ymm0 ; AVX512F-NEXT: vpavgw 32(%rsi), %ymm1, %ymm1 ; AVX512F-NEXT: vmovdqu %ymm1, (%rax) ; AVX512F-NEXT: vmovdqu %ymm0, (%rax) ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; ; AVX512BW-LABEL: avg_v32i16_2: ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0 ; AVX512BW-NEXT: vpavgw (%rsi), %zmm0, %zmm0 ; AVX512BW-NEXT: vmovdqu64 %zmm0, (%rax) ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: retq %1 = load <32 x i16>, <32 x i16>* %a %2 = load <32 x i16>, <32 x i16>* %b %3 = zext <32 x i16> %1 to <32 x i32> %4 = zext <32 x i16> %2 to <32 x i32> %5 = add nuw nsw <32 x i32> %3, %4 %6 = add nuw nsw <32 x i32> %5, %7 = lshr <32 x i32> %6, %8 = trunc <32 x i32> %7 to <32 x i16> store <32 x i16> %8, <32 x i16>* undef, align 4 ret void } define void @avg_v4i8_const(<4 x i8>* %a) nounwind { ; SSE2-LABEL: avg_v4i8_const: ; SSE2: # %bb.0: ; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero ; SSE2-NEXT: pavgb {{.*}}(%rip), %xmm0 ; SSE2-NEXT: movd %xmm0, (%rax) ; SSE2-NEXT: retq ; ; AVX-LABEL: avg_v4i8_const: ; AVX: # %bb.0: ; AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero ; AVX-NEXT: vpavgb {{.*}}(%rip), %xmm0, %xmm0 ; AVX-NEXT: vmovd %xmm0, (%rax) ; AVX-NEXT: retq %1 = load <4 x i8>, <4 x i8>* %a %2 = zext <4 x i8> %1 to <4 x i32> %3 = add nuw nsw <4 x i32> %2, %4 = lshr <4 x i32> %3, %5 = trunc <4 x i32> %4 to <4 x i8> store <4 x i8> %5, <4 x i8>* undef, align 4 ret void } define void @avg_v8i8_const(<8 x i8>* %a) nounwind { ; SSE2-LABEL: avg_v8i8_const: ; SSE2: # %bb.0: ; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero ; SSE2-NEXT: pavgb {{.*}}(%rip), %xmm0 ; SSE2-NEXT: movq %xmm0, (%rax) ; SSE2-NEXT: retq ; ; AVX-LABEL: avg_v8i8_const: ; AVX: # %bb.0: ; AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero ; AVX-NEXT: vpavgb {{.*}}(%rip), %xmm0, %xmm0 ; AVX-NEXT: vmovq %xmm0, (%rax) ; AVX-NEXT: retq %1 = load <8 x i8>, <8 x i8>* %a %2 = zext <8 x i8> %1 to <8 x i32> %3 = add nuw nsw <8 x i32> %2, %4 = lshr <8 x i32> %3, %5 = trunc <8 x i32> %4 to <8 x i8> store <8 x i8> %5, <8 x i8>* undef, align 4 ret void } define void @avg_v16i8_const(<16 x i8>* %a) nounwind { ; SSE2-LABEL: avg_v16i8_const: ; SSE2: # %bb.0: ; SSE2-NEXT: movdqa (%rdi), %xmm0 ; SSE2-NEXT: pavgb {{.*}}(%rip), %xmm0 ; SSE2-NEXT: movdqu %xmm0, (%rax) ; SSE2-NEXT: retq ; ; AVX-LABEL: avg_v16i8_const: ; AVX: # %bb.0: ; AVX-NEXT: vmovdqa (%rdi), %xmm0 ; AVX-NEXT: vpavgb {{.*}}(%rip), %xmm0, %xmm0 ; AVX-NEXT: vmovdqu %xmm0, (%rax) ; AVX-NEXT: retq %1 = load <16 x i8>, <16 x i8>* %a %2 = zext <16 x i8> %1 to <16 x i32> %3 = add nuw nsw <16 x i32> %2, %4 = lshr <16 x i32> %3, %5 = trunc <16 x i32> %4 to <16 x i8> store <16 x i8> %5, <16 x i8>* undef, align 4 ret void } define void @avg_v32i8_const(<32 x i8>* %a) nounwind { ; SSE2-LABEL: avg_v32i8_const: ; SSE2: # %bb.0: ; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7] ; SSE2-NEXT: movdqa (%rdi), %xmm1 ; SSE2-NEXT: pavgb %xmm0, %xmm1 ; SSE2-NEXT: pavgb 16(%rdi), %xmm0 ; SSE2-NEXT: movdqu %xmm0, (%rax) ; SSE2-NEXT: movdqu %xmm1, (%rax) ; SSE2-NEXT: retq ; ; AVX1-LABEL: 
avg_v32i8_const: ; AVX1: # %bb.0: ; AVX1-NEXT: vmovddup {{.*#+}} xmm0 = [506097522914230528,506097522914230528] ; AVX1-NEXT: # xmm0 = mem[0,0] ; AVX1-NEXT: vpavgb (%rdi), %xmm0, %xmm1 ; AVX1-NEXT: vpavgb 16(%rdi), %xmm0, %xmm0 ; AVX1-NEXT: vmovdqu %xmm0, (%rax) ; AVX1-NEXT: vmovdqu %xmm1, (%rax) ; AVX1-NEXT: retq ; ; AVX2-LABEL: avg_v32i8_const: ; AVX2: # %bb.0: ; AVX2-NEXT: vmovdqa (%rdi), %ymm0 ; AVX2-NEXT: vpavgb {{.*}}(%rip), %ymm0, %ymm0 ; AVX2-NEXT: vmovdqu %ymm0, (%rax) ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; ; AVX512-LABEL: avg_v32i8_const: ; AVX512: # %bb.0: ; AVX512-NEXT: vmovdqa (%rdi), %ymm0 ; AVX512-NEXT: vpavgb {{.*}}(%rip), %ymm0, %ymm0 ; AVX512-NEXT: vmovdqu %ymm0, (%rax) ; AVX512-NEXT: vzeroupper ; AVX512-NEXT: retq %1 = load <32 x i8>, <32 x i8>* %a %2 = zext <32 x i8> %1 to <32 x i32> %3 = add nuw nsw <32 x i32> %2, %4 = lshr <32 x i32> %3, %5 = trunc <32 x i32> %4 to <32 x i8> store <32 x i8> %5, <32 x i8>* undef, align 4 ret void } define void @avg_v64i8_const(<64 x i8>* %a) nounwind { ; SSE2-LABEL: avg_v64i8_const: ; SSE2: # %bb.0: ; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7] ; SSE2-NEXT: movdqa (%rdi), %xmm1 ; SSE2-NEXT: pavgb %xmm0, %xmm1 ; SSE2-NEXT: movdqa 16(%rdi), %xmm2 ; SSE2-NEXT: pavgb %xmm0, %xmm2 ; SSE2-NEXT: movdqa 32(%rdi), %xmm3 ; SSE2-NEXT: pavgb %xmm0, %xmm3 ; SSE2-NEXT: pavgb 48(%rdi), %xmm0 ; SSE2-NEXT: movdqu %xmm0, (%rax) ; SSE2-NEXT: movdqu %xmm3, (%rax) ; SSE2-NEXT: movdqu %xmm2, (%rax) ; SSE2-NEXT: movdqu %xmm1, (%rax) ; SSE2-NEXT: retq ; ; AVX1-LABEL: avg_v64i8_const: ; AVX1: # %bb.0: ; AVX1-NEXT: vmovddup {{.*#+}} xmm0 = [506097522914230528,506097522914230528] ; AVX1-NEXT: # xmm0 = mem[0,0] ; AVX1-NEXT: vpavgb (%rdi), %xmm0, %xmm1 ; AVX1-NEXT: vpavgb 16(%rdi), %xmm0, %xmm2 ; AVX1-NEXT: vpavgb 32(%rdi), %xmm0, %xmm3 ; AVX1-NEXT: vpavgb 48(%rdi), %xmm0, %xmm0 ; AVX1-NEXT: vmovdqu %xmm0, (%rax) ; AVX1-NEXT: vmovdqu %xmm3, (%rax) ; AVX1-NEXT: vmovdqu %xmm2, (%rax) ; AVX1-NEXT: vmovdqu %xmm1, (%rax) ; AVX1-NEXT: retq ; ; AVX2-LABEL: avg_v64i8_const: ; AVX2: # %bb.0: ; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm0 = [506097522914230528,506097522914230528,506097522914230528,506097522914230528] ; AVX2-NEXT: vpavgb (%rdi), %ymm0, %ymm1 ; AVX2-NEXT: vpavgb 32(%rdi), %ymm0, %ymm0 ; AVX2-NEXT: vmovdqu %ymm0, (%rax) ; AVX2-NEXT: vmovdqu %ymm1, (%rax) ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; ; AVX512F-LABEL: avg_v64i8_const: ; AVX512F: # %bb.0: ; AVX512F-NEXT: vpbroadcastq {{.*#+}} ymm0 = [506097522914230528,506097522914230528,506097522914230528,506097522914230528] ; AVX512F-NEXT: vpavgb (%rdi), %ymm0, %ymm1 ; AVX512F-NEXT: vpavgb 32(%rdi), %ymm0, %ymm0 ; AVX512F-NEXT: vmovdqu %ymm0, (%rax) ; AVX512F-NEXT: vmovdqu %ymm1, (%rax) ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; ; AVX512BW-LABEL: avg_v64i8_const: ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0 ; AVX512BW-NEXT: vpavgb {{.*}}(%rip), %zmm0, %zmm0 ; AVX512BW-NEXT: vmovdqu64 %zmm0, (%rax) ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: retq %1 = load <64 x i8>, <64 x i8>* %a %2 = zext <64 x i8> %1 to <64 x i32> %3 = add nuw nsw <64 x i32> %2, %4 = lshr <64 x i32> %3, %5 = trunc <64 x i32> %4 to <64 x i8> store <64 x i8> %5, <64 x i8>* undef, align 4 ret void } define void @avg_v4i16_const(<4 x i16>* %a) nounwind { ; SSE2-LABEL: avg_v4i16_const: ; SSE2: # %bb.0: ; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero ; SSE2-NEXT: pavgw {{.*}}(%rip), %xmm0 ; SSE2-NEXT: movq %xmm0, (%rax) ; SSE2-NEXT: retq ; ; AVX-LABEL: avg_v4i16_const: ; AVX: # %bb.0: ; AVX-NEXT: vmovq 
{{.*#+}} xmm0 = mem[0],zero ; AVX-NEXT: vpavgw {{.*}}(%rip), %xmm0, %xmm0 ; AVX-NEXT: vmovq %xmm0, (%rax) ; AVX-NEXT: retq %1 = load <4 x i16>, <4 x i16>* %a %2 = zext <4 x i16> %1 to <4 x i32> %3 = add nuw nsw <4 x i32> %2, %4 = lshr <4 x i32> %3, %5 = trunc <4 x i32> %4 to <4 x i16> store <4 x i16> %5, <4 x i16>* undef, align 4 ret void } define void @avg_v8i16_const(<8 x i16>* %a) nounwind { ; SSE2-LABEL: avg_v8i16_const: ; SSE2: # %bb.0: ; SSE2-NEXT: movdqa (%rdi), %xmm0 ; SSE2-NEXT: pavgw {{.*}}(%rip), %xmm0 ; SSE2-NEXT: movdqu %xmm0, (%rax) ; SSE2-NEXT: retq ; ; AVX-LABEL: avg_v8i16_const: ; AVX: # %bb.0: ; AVX-NEXT: vmovdqa (%rdi), %xmm0 ; AVX-NEXT: vpavgw {{.*}}(%rip), %xmm0, %xmm0 ; AVX-NEXT: vmovdqu %xmm0, (%rax) ; AVX-NEXT: retq %1 = load <8 x i16>, <8 x i16>* %a %2 = zext <8 x i16> %1 to <8 x i32> %3 = add nuw nsw <8 x i32> %2, %4 = lshr <8 x i32> %3, %5 = trunc <8 x i32> %4 to <8 x i16> store <8 x i16> %5, <8 x i16>* undef, align 4 ret void } define void @avg_v16i16_const(<16 x i16>* %a) nounwind { ; SSE2-LABEL: avg_v16i16_const: ; SSE2: # %bb.0: ; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [0,1,2,3,4,5,6,7] ; SSE2-NEXT: movdqa (%rdi), %xmm1 ; SSE2-NEXT: pavgw %xmm0, %xmm1 ; SSE2-NEXT: pavgw 16(%rdi), %xmm0 ; SSE2-NEXT: movdqu %xmm0, (%rax) ; SSE2-NEXT: movdqu %xmm1, (%rax) ; SSE2-NEXT: retq ; ; AVX1-LABEL: avg_v16i16_const: ; AVX1: # %bb.0: ; AVX1-NEXT: vmovdqa {{.*#+}} xmm0 = [0,1,2,3,4,5,6,7] ; AVX1-NEXT: vpavgw (%rdi), %xmm0, %xmm1 ; AVX1-NEXT: vpavgw 16(%rdi), %xmm0, %xmm0 ; AVX1-NEXT: vmovdqu %xmm0, (%rax) ; AVX1-NEXT: vmovdqu %xmm1, (%rax) ; AVX1-NEXT: retq ; ; AVX2-LABEL: avg_v16i16_const: ; AVX2: # %bb.0: ; AVX2-NEXT: vmovdqa (%rdi), %ymm0 ; AVX2-NEXT: vpavgw {{.*}}(%rip), %ymm0, %ymm0 ; AVX2-NEXT: vmovdqu %ymm0, (%rax) ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; ; AVX512-LABEL: avg_v16i16_const: ; AVX512: # %bb.0: ; AVX512-NEXT: vmovdqa (%rdi), %ymm0 ; AVX512-NEXT: vpavgw {{.*}}(%rip), %ymm0, %ymm0 ; AVX512-NEXT: vmovdqu %ymm0, (%rax) ; AVX512-NEXT: vzeroupper ; AVX512-NEXT: retq %1 = load <16 x i16>, <16 x i16>* %a %2 = zext <16 x i16> %1 to <16 x i32> %3 = add nuw nsw <16 x i32> %2, %4 = lshr <16 x i32> %3, %5 = trunc <16 x i32> %4 to <16 x i16> store <16 x i16> %5, <16 x i16>* undef, align 4 ret void } define void @avg_v32i16_const(<32 x i16>* %a) nounwind { ; SSE2-LABEL: avg_v32i16_const: ; SSE2: # %bb.0: ; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [0,1,2,3,4,5,6,7] ; SSE2-NEXT: movdqa (%rdi), %xmm1 ; SSE2-NEXT: pavgw %xmm0, %xmm1 ; SSE2-NEXT: movdqa 16(%rdi), %xmm2 ; SSE2-NEXT: pavgw %xmm0, %xmm2 ; SSE2-NEXT: movdqa 32(%rdi), %xmm3 ; SSE2-NEXT: pavgw %xmm0, %xmm3 ; SSE2-NEXT: pavgw 48(%rdi), %xmm0 ; SSE2-NEXT: movdqu %xmm0, (%rax) ; SSE2-NEXT: movdqu %xmm3, (%rax) ; SSE2-NEXT: movdqu %xmm2, (%rax) ; SSE2-NEXT: movdqu %xmm1, (%rax) ; SSE2-NEXT: retq ; ; AVX1-LABEL: avg_v32i16_const: ; AVX1: # %bb.0: ; AVX1-NEXT: vmovdqa {{.*#+}} xmm0 = [0,1,2,3,4,5,6,7] ; AVX1-NEXT: vpavgw (%rdi), %xmm0, %xmm1 ; AVX1-NEXT: vpavgw 16(%rdi), %xmm0, %xmm2 ; AVX1-NEXT: vpavgw 32(%rdi), %xmm0, %xmm3 ; AVX1-NEXT: vpavgw 48(%rdi), %xmm0, %xmm0 ; AVX1-NEXT: vmovdqu %xmm0, (%rax) ; AVX1-NEXT: vmovdqu %xmm3, (%rax) ; AVX1-NEXT: vmovdqu %xmm2, (%rax) ; AVX1-NEXT: vmovdqu %xmm1, (%rax) ; AVX1-NEXT: retq ; ; AVX2-LABEL: avg_v32i16_const: ; AVX2: # %bb.0: ; AVX2-NEXT: vbroadcasti128 {{.*#+}} ymm0 = [0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7] ; AVX2-NEXT: # ymm0 = mem[0,1,0,1] ; AVX2-NEXT: vpavgw (%rdi), %ymm0, %ymm1 ; AVX2-NEXT: vpavgw 32(%rdi), %ymm0, %ymm0 ; AVX2-NEXT: vmovdqu %ymm0, (%rax) ; AVX2-NEXT: vmovdqu %ymm1, 
(%rax) ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; ; AVX512F-LABEL: avg_v32i16_const: ; AVX512F: # %bb.0: ; AVX512F-NEXT: vbroadcasti128 {{.*#+}} ymm0 = [0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7] ; AVX512F-NEXT: # ymm0 = mem[0,1,0,1] ; AVX512F-NEXT: vpavgw (%rdi), %ymm0, %ymm1 ; AVX512F-NEXT: vpavgw 32(%rdi), %ymm0, %ymm0 ; AVX512F-NEXT: vmovdqu %ymm0, (%rax) ; AVX512F-NEXT: vmovdqu %ymm1, (%rax) ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; ; AVX512BW-LABEL: avg_v32i16_const: ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0 ; AVX512BW-NEXT: vpavgw {{.*}}(%rip), %zmm0, %zmm0 ; AVX512BW-NEXT: vmovdqu64 %zmm0, (%rax) ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: retq %1 = load <32 x i16>, <32 x i16>* %a %2 = zext <32 x i16> %1 to <32 x i32> %3 = add nuw nsw <32 x i32> %2, %4 = lshr <32 x i32> %3, %5 = trunc <32 x i32> %4 to <32 x i16> store <32 x i16> %5, <32 x i16>* undef, align 4 ret void } define <16 x i8> @avg_v16i8_3(<16 x i8> %a, <16 x i8> %b) nounwind { ; SSE2-LABEL: avg_v16i8_3: ; SSE2: # %bb.0: ; SSE2-NEXT: pavgb %xmm1, %xmm0 ; SSE2-NEXT: retq ; ; AVX-LABEL: avg_v16i8_3: ; AVX: # %bb.0: ; AVX-NEXT: vpavgb %xmm1, %xmm0, %xmm0 ; AVX-NEXT: retq %za = zext <16 x i8> %a to <16 x i16> %zb = zext <16 x i8> %b to <16 x i16> %add = add nuw nsw <16 x i16> %za, %zb %add1 = add nuw nsw <16 x i16> %add, %lshr = lshr <16 x i16> %add1, %res = trunc <16 x i16> %lshr to <16 x i8> ret <16 x i8> %res } define <32 x i8> @avg_v32i8_3(<32 x i8> %a, <32 x i8> %b) nounwind { ; SSE2-LABEL: avg_v32i8_3: ; SSE2: # %bb.0: ; SSE2-NEXT: pavgb %xmm2, %xmm0 ; SSE2-NEXT: pavgb %xmm3, %xmm1 ; SSE2-NEXT: retq ; ; AVX1-LABEL: avg_v32i8_3: ; AVX1: # %bb.0: ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3 ; AVX1-NEXT: vpavgb %xmm2, %xmm3, %xmm2 ; AVX1-NEXT: vpavgb %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0 ; AVX1-NEXT: retq ; ; AVX2-LABEL: avg_v32i8_3: ; AVX2: # %bb.0: ; AVX2-NEXT: vpavgb %ymm1, %ymm0, %ymm0 ; AVX2-NEXT: retq ; ; AVX512-LABEL: avg_v32i8_3: ; AVX512: # %bb.0: ; AVX512-NEXT: vpavgb %ymm1, %ymm0, %ymm0 ; AVX512-NEXT: retq %za = zext <32 x i8> %a to <32 x i16> %zb = zext <32 x i8> %b to <32 x i16> %add = add nuw nsw <32 x i16> %za, %zb %add1 = add nuw nsw <32 x i16> %add, %lshr = lshr <32 x i16> %add1, %res = trunc <32 x i16> %lshr to <32 x i8> ret <32 x i8> %res } define <64 x i8> @avg_v64i8_3(<64 x i8> %a, <64 x i8> %b) nounwind { ; SSE2-LABEL: avg_v64i8_3: ; SSE2: # %bb.0: ; SSE2-NEXT: pavgb %xmm4, %xmm0 ; SSE2-NEXT: pavgb %xmm5, %xmm1 ; SSE2-NEXT: pavgb %xmm6, %xmm2 ; SSE2-NEXT: pavgb %xmm7, %xmm3 ; SSE2-NEXT: retq ; ; AVX1-LABEL: avg_v64i8_3: ; AVX1: # %bb.0: ; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5 ; AVX1-NEXT: vpavgb %xmm4, %xmm5, %xmm4 ; AVX1-NEXT: vpavgb %xmm2, %xmm0, %xmm0 ; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0 ; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm2 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4 ; AVX1-NEXT: vpavgb %xmm2, %xmm4, %xmm2 ; AVX1-NEXT: vpavgb %xmm3, %xmm1, %xmm1 ; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1 ; AVX1-NEXT: retq ; ; AVX2-LABEL: avg_v64i8_3: ; AVX2: # %bb.0: ; AVX2-NEXT: vpavgb %ymm2, %ymm0, %ymm0 ; AVX2-NEXT: vpavgb %ymm3, %ymm1, %ymm1 ; AVX2-NEXT: retq ; ; AVX512F-LABEL: avg_v64i8_3: ; AVX512F: # %bb.0: ; AVX512F-NEXT: vextracti64x4 $1, %zmm1, %ymm2 ; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm3 ; AVX512F-NEXT: vpavgb %ymm2, %ymm3, %ymm2 ; AVX512F-NEXT: vpavgb %ymm1, %ymm0, %ymm0 ; AVX512F-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0 ; 
AVX512F-NEXT: retq ; ; AVX512BW-LABEL: avg_v64i8_3: ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: vpavgb %zmm1, %zmm0, %zmm0 ; AVX512BW-NEXT: retq %za = zext <64 x i8> %a to <64 x i16> %zb = zext <64 x i8> %b to <64 x i16> %add = add nuw nsw <64 x i16> %za, %zb %add1 = add nuw nsw <64 x i16> %add, %lshr = lshr <64 x i16> %add1, %res = trunc <64 x i16> %lshr to <64 x i8> ret <64 x i8> %res } define <512 x i8> @avg_v512i8_3(<512 x i8> %a, <512 x i8> %b) nounwind { ; SSE2-LABEL: avg_v512i8_3: ; SSE2: # %bb.0: ; SSE2-NEXT: movq %rdi, %rax ; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8 ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm8 ; SSE2-NEXT: movdqa %xmm8, 496(%rdi) ; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8 ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm8 ; SSE2-NEXT: movdqa %xmm8, 480(%rdi) ; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8 ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm8 ; SSE2-NEXT: movdqa %xmm8, 464(%rdi) ; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8 ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm8 ; SSE2-NEXT: movdqa %xmm8, 448(%rdi) ; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8 ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm8 ; SSE2-NEXT: movdqa %xmm8, 432(%rdi) ; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8 ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm8 ; SSE2-NEXT: movdqa %xmm8, 416(%rdi) ; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8 ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm8 ; SSE2-NEXT: movdqa %xmm8, 400(%rdi) ; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8 ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm8 ; SSE2-NEXT: movdqa %xmm8, 384(%rdi) ; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8 ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm8 ; SSE2-NEXT: movdqa %xmm8, 368(%rdi) ; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8 ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm8 ; SSE2-NEXT: movdqa %xmm8, 352(%rdi) ; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8 ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm8 ; SSE2-NEXT: movdqa %xmm8, 336(%rdi) ; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8 ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm8 ; SSE2-NEXT: movdqa %xmm8, 320(%rdi) ; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8 ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm8 ; SSE2-NEXT: movdqa %xmm8, 304(%rdi) ; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8 ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm8 ; SSE2-NEXT: movdqa %xmm8, 288(%rdi) ; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8 ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm8 ; SSE2-NEXT: movdqa %xmm8, 272(%rdi) ; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8 ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm8 ; SSE2-NEXT: movdqa %xmm8, 256(%rdi) ; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8 ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm8 ; SSE2-NEXT: movdqa %xmm8, 240(%rdi) ; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8 ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm8 ; SSE2-NEXT: movdqa %xmm8, 224(%rdi) ; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8 ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm8 ; SSE2-NEXT: movdqa %xmm8, 208(%rdi) ; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8 ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm8 ; SSE2-NEXT: movdqa %xmm8, 192(%rdi) ; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8 ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm8 ; SSE2-NEXT: movdqa %xmm8, 176(%rdi) ; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8 ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm8 ; SSE2-NEXT: movdqa %xmm8, 160(%rdi) ; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8 ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm8 ; SSE2-NEXT: movdqa %xmm8, 144(%rdi) ; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8 ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm8 ; SSE2-NEXT: movdqa %xmm8, 128(%rdi) ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm7 ; 
SSE2-NEXT: movdqa %xmm7, 112(%rdi) ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm6 ; SSE2-NEXT: movdqa %xmm6, 96(%rdi) ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm5 ; SSE2-NEXT: movdqa %xmm5, 80(%rdi) ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm4 ; SSE2-NEXT: movdqa %xmm4, 64(%rdi) ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm3 ; SSE2-NEXT: movdqa %xmm3, 48(%rdi) ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm2 ; SSE2-NEXT: movdqa %xmm2, 32(%rdi) ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm1 ; SSE2-NEXT: movdqa %xmm1, 16(%rdi) ; SSE2-NEXT: pavgb {{[0-9]+}}(%rsp), %xmm0 ; SSE2-NEXT: movdqa %xmm0, (%rdi) ; SSE2-NEXT: retq ; ; AVX1-LABEL: avg_v512i8_3: ; AVX1: # %bb.0: ; AVX1-NEXT: pushq %rbp ; AVX1-NEXT: movq %rsp, %rbp ; AVX1-NEXT: andq $-32, %rsp ; AVX1-NEXT: subq $32, %rsp ; AVX1-NEXT: movq %rdi, %rax ; AVX1-NEXT: vmovdqa 256(%rbp), %xmm8 ; AVX1-NEXT: vpavgb 768(%rbp), %xmm8, %xmm8 ; AVX1-NEXT: vmovdqa %xmm8, 496(%rdi) ; AVX1-NEXT: vmovdqa 240(%rbp), %xmm8 ; AVX1-NEXT: vpavgb 752(%rbp), %xmm8, %xmm8 ; AVX1-NEXT: vmovdqa %xmm8, 480(%rdi) ; AVX1-NEXT: vmovdqa 224(%rbp), %xmm8 ; AVX1-NEXT: vpavgb 736(%rbp), %xmm8, %xmm8 ; AVX1-NEXT: vmovdqa %xmm8, 464(%rdi) ; AVX1-NEXT: vmovdqa 208(%rbp), %xmm8 ; AVX1-NEXT: vpavgb 720(%rbp), %xmm8, %xmm8 ; AVX1-NEXT: vmovdqa %xmm8, 448(%rdi) ; AVX1-NEXT: vmovdqa 192(%rbp), %xmm8 ; AVX1-NEXT: vpavgb 704(%rbp), %xmm8, %xmm8 ; AVX1-NEXT: vmovdqa %xmm8, 432(%rdi) ; AVX1-NEXT: vmovdqa 176(%rbp), %xmm8 ; AVX1-NEXT: vpavgb 688(%rbp), %xmm8, %xmm8 ; AVX1-NEXT: vmovdqa %xmm8, 416(%rdi) ; AVX1-NEXT: vmovdqa 160(%rbp), %xmm8 ; AVX1-NEXT: vpavgb 672(%rbp), %xmm8, %xmm8 ; AVX1-NEXT: vmovdqa %xmm8, 400(%rdi) ; AVX1-NEXT: vmovdqa 144(%rbp), %xmm8 ; AVX1-NEXT: vpavgb 656(%rbp), %xmm8, %xmm8 ; AVX1-NEXT: vmovdqa %xmm8, 384(%rdi) ; AVX1-NEXT: vmovdqa 128(%rbp), %xmm8 ; AVX1-NEXT: vpavgb 640(%rbp), %xmm8, %xmm8 ; AVX1-NEXT: vmovdqa %xmm8, 368(%rdi) ; AVX1-NEXT: vmovdqa 112(%rbp), %xmm8 ; AVX1-NEXT: vpavgb 624(%rbp), %xmm8, %xmm8 ; AVX1-NEXT: vmovdqa %xmm8, 352(%rdi) ; AVX1-NEXT: vmovdqa 96(%rbp), %xmm8 ; AVX1-NEXT: vpavgb 608(%rbp), %xmm8, %xmm8 ; AVX1-NEXT: vmovdqa %xmm8, 336(%rdi) ; AVX1-NEXT: vmovdqa 80(%rbp), %xmm8 ; AVX1-NEXT: vpavgb 592(%rbp), %xmm8, %xmm8 ; AVX1-NEXT: vmovdqa %xmm8, 320(%rdi) ; AVX1-NEXT: vmovdqa 64(%rbp), %xmm8 ; AVX1-NEXT: vpavgb 576(%rbp), %xmm8, %xmm8 ; AVX1-NEXT: vmovdqa %xmm8, 304(%rdi) ; AVX1-NEXT: vmovdqa 48(%rbp), %xmm8 ; AVX1-NEXT: vpavgb 560(%rbp), %xmm8, %xmm8 ; AVX1-NEXT: vmovdqa %xmm8, 288(%rdi) ; AVX1-NEXT: vmovdqa 32(%rbp), %xmm8 ; AVX1-NEXT: vpavgb 544(%rbp), %xmm8, %xmm8 ; AVX1-NEXT: vmovdqa %xmm8, 272(%rdi) ; AVX1-NEXT: vmovdqa 16(%rbp), %xmm8 ; AVX1-NEXT: vpavgb 528(%rbp), %xmm8, %xmm8 ; AVX1-NEXT: vmovdqa %xmm8, 256(%rdi) ; AVX1-NEXT: vextractf128 $1, %ymm7, %xmm8 ; AVX1-NEXT: vpavgb 512(%rbp), %xmm8, %xmm8 ; AVX1-NEXT: vmovdqa %xmm8, 240(%rdi) ; AVX1-NEXT: vpavgb 496(%rbp), %xmm7, %xmm7 ; AVX1-NEXT: vmovdqa %xmm7, 224(%rdi) ; AVX1-NEXT: vextractf128 $1, %ymm6, %xmm7 ; AVX1-NEXT: vpavgb 480(%rbp), %xmm7, %xmm7 ; AVX1-NEXT: vmovdqa %xmm7, 208(%rdi) ; AVX1-NEXT: vpavgb 464(%rbp), %xmm6, %xmm6 ; AVX1-NEXT: vmovdqa %xmm6, 192(%rdi) ; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm6 ; AVX1-NEXT: vpavgb 448(%rbp), %xmm6, %xmm6 ; AVX1-NEXT: vmovdqa %xmm6, 176(%rdi) ; AVX1-NEXT: vpavgb 432(%rbp), %xmm5, %xmm5 ; AVX1-NEXT: vmovdqa %xmm5, 160(%rdi) ; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm5 ; AVX1-NEXT: vpavgb 416(%rbp), %xmm5, %xmm5 ; AVX1-NEXT: vmovdqa %xmm5, 144(%rdi) ; AVX1-NEXT: vpavgb 400(%rbp), %xmm4, %xmm4 ; AVX1-NEXT: vmovdqa %xmm4, 128(%rdi) ; AVX1-NEXT: 
vextractf128 $1, %ymm3, %xmm4 ; AVX1-NEXT: vpavgb 384(%rbp), %xmm4, %xmm4 ; AVX1-NEXT: vmovdqa %xmm4, 112(%rdi) ; AVX1-NEXT: vpavgb 368(%rbp), %xmm3, %xmm3 ; AVX1-NEXT: vmovdqa %xmm3, 96(%rdi) ; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3 ; AVX1-NEXT: vpavgb 352(%rbp), %xmm3, %xmm3 ; AVX1-NEXT: vmovdqa %xmm3, 80(%rdi) ; AVX1-NEXT: vpavgb 336(%rbp), %xmm2, %xmm2 ; AVX1-NEXT: vmovdqa %xmm2, 64(%rdi) ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2 ; AVX1-NEXT: vpavgb 320(%rbp), %xmm2, %xmm2 ; AVX1-NEXT: vmovdqa %xmm2, 48(%rdi) ; AVX1-NEXT: vpavgb 304(%rbp), %xmm1, %xmm1 ; AVX1-NEXT: vmovdqa %xmm1, 32(%rdi) ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1 ; AVX1-NEXT: vpavgb 288(%rbp), %xmm1, %xmm1 ; AVX1-NEXT: vmovdqa %xmm1, 16(%rdi) ; AVX1-NEXT: vpavgb 272(%rbp), %xmm0, %xmm0 ; AVX1-NEXT: vmovdqa %xmm0, (%rdi) ; AVX1-NEXT: movq %rbp, %rsp ; AVX1-NEXT: popq %rbp ; AVX1-NEXT: vzeroupper ; AVX1-NEXT: retq ; ; AVX2-LABEL: avg_v512i8_3: ; AVX2: # %bb.0: ; AVX2-NEXT: pushq %rbp ; AVX2-NEXT: movq %rsp, %rbp ; AVX2-NEXT: andq $-32, %rsp ; AVX2-NEXT: subq $32, %rsp ; AVX2-NEXT: movq %rdi, %rax ; AVX2-NEXT: vmovdqa 240(%rbp), %ymm8 ; AVX2-NEXT: vmovdqa 208(%rbp), %ymm9 ; AVX2-NEXT: vmovdqa 176(%rbp), %ymm10 ; AVX2-NEXT: vmovdqa 144(%rbp), %ymm11 ; AVX2-NEXT: vmovdqa 112(%rbp), %ymm12 ; AVX2-NEXT: vmovdqa 80(%rbp), %ymm13 ; AVX2-NEXT: vmovdqa 48(%rbp), %ymm14 ; AVX2-NEXT: vmovdqa 16(%rbp), %ymm15 ; AVX2-NEXT: vpavgb 272(%rbp), %ymm0, %ymm0 ; AVX2-NEXT: vpavgb 304(%rbp), %ymm1, %ymm1 ; AVX2-NEXT: vpavgb 336(%rbp), %ymm2, %ymm2 ; AVX2-NEXT: vpavgb 368(%rbp), %ymm3, %ymm3 ; AVX2-NEXT: vpavgb 400(%rbp), %ymm4, %ymm4 ; AVX2-NEXT: vpavgb 432(%rbp), %ymm5, %ymm5 ; AVX2-NEXT: vpavgb 464(%rbp), %ymm6, %ymm6 ; AVX2-NEXT: vpavgb 496(%rbp), %ymm7, %ymm7 ; AVX2-NEXT: vpavgb 528(%rbp), %ymm15, %ymm15 ; AVX2-NEXT: vpavgb 560(%rbp), %ymm14, %ymm14 ; AVX2-NEXT: vpavgb 592(%rbp), %ymm13, %ymm13 ; AVX2-NEXT: vpavgb 624(%rbp), %ymm12, %ymm12 ; AVX2-NEXT: vpavgb 656(%rbp), %ymm11, %ymm11 ; AVX2-NEXT: vpavgb 688(%rbp), %ymm10, %ymm10 ; AVX2-NEXT: vpavgb 720(%rbp), %ymm9, %ymm9 ; AVX2-NEXT: vpavgb 752(%rbp), %ymm8, %ymm8 ; AVX2-NEXT: vmovdqa %ymm8, 480(%rdi) ; AVX2-NEXT: vmovdqa %ymm9, 448(%rdi) ; AVX2-NEXT: vmovdqa %ymm10, 416(%rdi) ; AVX2-NEXT: vmovdqa %ymm11, 384(%rdi) ; AVX2-NEXT: vmovdqa %ymm12, 352(%rdi) ; AVX2-NEXT: vmovdqa %ymm13, 320(%rdi) ; AVX2-NEXT: vmovdqa %ymm14, 288(%rdi) ; AVX2-NEXT: vmovdqa %ymm15, 256(%rdi) ; AVX2-NEXT: vmovdqa %ymm7, 224(%rdi) ; AVX2-NEXT: vmovdqa %ymm6, 192(%rdi) ; AVX2-NEXT: vmovdqa %ymm5, 160(%rdi) ; AVX2-NEXT: vmovdqa %ymm4, 128(%rdi) ; AVX2-NEXT: vmovdqa %ymm3, 96(%rdi) ; AVX2-NEXT: vmovdqa %ymm2, 64(%rdi) ; AVX2-NEXT: vmovdqa %ymm1, 32(%rdi) ; AVX2-NEXT: vmovdqa %ymm0, (%rdi) ; AVX2-NEXT: movq %rbp, %rsp ; AVX2-NEXT: popq %rbp ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; ; AVX512F-LABEL: avg_v512i8_3: ; AVX512F: # %bb.0: ; AVX512F-NEXT: pushq %rbp ; AVX512F-NEXT: movq %rsp, %rbp ; AVX512F-NEXT: andq $-64, %rsp ; AVX512F-NEXT: subq $64, %rsp ; AVX512F-NEXT: movq %rdi, %rax ; AVX512F-NEXT: vpavgb 16(%rbp), %ymm0, %ymm8 ; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm0 ; AVX512F-NEXT: vpavgb 48(%rbp), %ymm0, %ymm0 ; AVX512F-NEXT: vpavgb 80(%rbp), %ymm1, %ymm9 ; AVX512F-NEXT: vextracti64x4 $1, %zmm1, %ymm1 ; AVX512F-NEXT: vpavgb 112(%rbp), %ymm1, %ymm1 ; AVX512F-NEXT: vpavgb 144(%rbp), %ymm2, %ymm10 ; AVX512F-NEXT: vextracti64x4 $1, %zmm2, %ymm2 ; AVX512F-NEXT: vpavgb 176(%rbp), %ymm2, %ymm2 ; AVX512F-NEXT: vpavgb 208(%rbp), %ymm3, %ymm11 ; AVX512F-NEXT: vextracti64x4 $1, %zmm3, %ymm3 ; AVX512F-NEXT: 
vpavgb 240(%rbp), %ymm3, %ymm3 ; AVX512F-NEXT: vpavgb 272(%rbp), %ymm4, %ymm12 ; AVX512F-NEXT: vextracti64x4 $1, %zmm4, %ymm4 ; AVX512F-NEXT: vpavgb 304(%rbp), %ymm4, %ymm4 ; AVX512F-NEXT: vpavgb 336(%rbp), %ymm5, %ymm13 ; AVX512F-NEXT: vextracti64x4 $1, %zmm5, %ymm5 ; AVX512F-NEXT: vpavgb 368(%rbp), %ymm5, %ymm5 ; AVX512F-NEXT: vpavgb 400(%rbp), %ymm6, %ymm14 ; AVX512F-NEXT: vextracti64x4 $1, %zmm6, %ymm6 ; AVX512F-NEXT: vpavgb 432(%rbp), %ymm6, %ymm6 ; AVX512F-NEXT: vpavgb 464(%rbp), %ymm7, %ymm15 ; AVX512F-NEXT: vextracti64x4 $1, %zmm7, %ymm7 ; AVX512F-NEXT: vpavgb 496(%rbp), %ymm7, %ymm7 ; AVX512F-NEXT: vmovdqa %ymm7, 480(%rdi) ; AVX512F-NEXT: vmovdqa %ymm15, 448(%rdi) ; AVX512F-NEXT: vmovdqa %ymm6, 416(%rdi) ; AVX512F-NEXT: vmovdqa %ymm14, 384(%rdi) ; AVX512F-NEXT: vmovdqa %ymm5, 352(%rdi) ; AVX512F-NEXT: vmovdqa %ymm13, 320(%rdi) ; AVX512F-NEXT: vmovdqa %ymm4, 288(%rdi) ; AVX512F-NEXT: vmovdqa %ymm12, 256(%rdi) ; AVX512F-NEXT: vmovdqa %ymm3, 224(%rdi) ; AVX512F-NEXT: vmovdqa %ymm11, 192(%rdi) ; AVX512F-NEXT: vmovdqa %ymm2, 160(%rdi) ; AVX512F-NEXT: vmovdqa %ymm10, 128(%rdi) ; AVX512F-NEXT: vmovdqa %ymm1, 96(%rdi) ; AVX512F-NEXT: vmovdqa %ymm9, 64(%rdi) ; AVX512F-NEXT: vmovdqa %ymm0, 32(%rdi) ; AVX512F-NEXT: vmovdqa %ymm8, (%rdi) ; AVX512F-NEXT: movq %rbp, %rsp ; AVX512F-NEXT: popq %rbp ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; ; AVX512BW-LABEL: avg_v512i8_3: ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: pushq %rbp ; AVX512BW-NEXT: movq %rsp, %rbp ; AVX512BW-NEXT: andq $-64, %rsp ; AVX512BW-NEXT: subq $64, %rsp ; AVX512BW-NEXT: movq %rdi, %rax ; AVX512BW-NEXT: vpavgb 16(%rbp), %zmm0, %zmm0 ; AVX512BW-NEXT: vpavgb 80(%rbp), %zmm1, %zmm1 ; AVX512BW-NEXT: vpavgb 144(%rbp), %zmm2, %zmm2 ; AVX512BW-NEXT: vpavgb 208(%rbp), %zmm3, %zmm3 ; AVX512BW-NEXT: vpavgb 272(%rbp), %zmm4, %zmm4 ; AVX512BW-NEXT: vpavgb 336(%rbp), %zmm5, %zmm5 ; AVX512BW-NEXT: vpavgb 400(%rbp), %zmm6, %zmm6 ; AVX512BW-NEXT: vpavgb 464(%rbp), %zmm7, %zmm7 ; AVX512BW-NEXT: vmovdqa64 %zmm7, 448(%rdi) ; AVX512BW-NEXT: vmovdqa64 %zmm6, 384(%rdi) ; AVX512BW-NEXT: vmovdqa64 %zmm5, 320(%rdi) ; AVX512BW-NEXT: vmovdqa64 %zmm4, 256(%rdi) ; AVX512BW-NEXT: vmovdqa64 %zmm3, 192(%rdi) ; AVX512BW-NEXT: vmovdqa64 %zmm2, 128(%rdi) ; AVX512BW-NEXT: vmovdqa64 %zmm1, 64(%rdi) ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rdi) ; AVX512BW-NEXT: movq %rbp, %rsp ; AVX512BW-NEXT: popq %rbp ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: retq %za = zext <512 x i8> %a to <512 x i16> %zb = zext <512 x i8> %b to <512 x i16> %add = add nuw nsw <512 x i16> %za, %zb %add1 = add nuw nsw <512 x i16> %add, %lshr = lshr <512 x i16> %add1, %res = trunc <512 x i16> %lshr to <512 x i8> ret <512 x i8> %res } ; This is not an avg, but it's structurally similar and previously caused a crash ; because the constants can't be read with APInt::getZExtValue.
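; A minimal illustrative sketch (context only, not one of the checked functions,
; and assuming the usual rounding-average idiom): the avg_* tests above exercise
; the pattern the backend is expected to select to a single pavgb/pavgw, e.g. for
; a <4 x i8> input:
;   %x = zext <4 x i8> %a to <4 x i32>
;   %y = zext <4 x i8> %b to <4 x i32>
;   %s = add nuw nsw <4 x i32> %x, %y
;   %t = add nuw nsw <4 x i32> %s, <i32 1, i32 1, i32 1, i32 1>
;   %r = lshr <4 x i32> %t, <i32 1, i32 1, i32 1, i32 1>
;   %z = trunc <4 x i32> %r to <4 x i8>
; i.e. an element-wise (a + b + 1) >> 1; the function below intentionally does
; not match that shape.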
define void @not_avg_v16i8_wide_constants(<16 x i8>* %a, <16 x i8>* %b) nounwind { ; SSE2-LABEL: not_avg_v16i8_wide_constants: ; SSE2: # %bb.0: ; SSE2-NEXT: pushq %rbp ; SSE2-NEXT: pushq %r15 ; SSE2-NEXT: pushq %r14 ; SSE2-NEXT: pushq %r13 ; SSE2-NEXT: pushq %r12 ; SSE2-NEXT: pushq %rbx ; SSE2-NEXT: movaps (%rdi), %xmm1 ; SSE2-NEXT: movaps (%rsi), %xmm0 ; SSE2-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp) ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax ; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax ; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax ; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax ; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %r15d ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %r14d ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %r12d ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %r13d ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %r10d ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ebx ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %r9d ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %r8d ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %edi ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %esi ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ebp ; SSE2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp) ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx ; SSE2-NEXT: addq %rsi, %rcx ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax ; SSE2-NEXT: addq %rbp, %rax ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %esi ; SSE2-NEXT: leaq -1(%rdx,%rsi), %r11 ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx ; SSE2-NEXT: leaq -1(%rdi,%rdx), %rsi ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx ; SSE2-NEXT: leaq -1(%r8,%rdx), %rdi ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx ; SSE2-NEXT: leaq -1(%r9,%rdx), %r8 ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx ; SSE2-NEXT: leaq -1(%rbx,%rdx), %rbx ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx ; SSE2-NEXT: leaq -1(%r10,%rdx), %r9 ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx ; SSE2-NEXT: leaq -1(%r13,%rdx), %r13 ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx ; SSE2-NEXT: leaq -1(%r12,%rdx), %r12 ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx ; SSE2-NEXT: leaq -1(%r14,%rdx), %r14 ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx ; SSE2-NEXT: leaq -1(%r15,%rdx), %r15 ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx ; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Reload ; SSE2-NEXT: leaq -1(%rbp,%rdx), %rdx ; SSE2-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx ; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Reload ; SSE2-NEXT: leaq -1(%rbp,%rdx), %r10 ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx ; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Reload ; SSE2-NEXT: leaq -1(%rbp,%rdx), %rdx ; SSE2-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx ; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Reload ; SSE2-NEXT: leaq -1(%rbp,%rdx), %rdx ; SSE2-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; SSE2-NEXT: xorl %ebp, %ebp ; SSE2-NEXT: addq $-1, %rcx ; SSE2-NEXT: movl $0, %edx ; SSE2-NEXT: adcq $-1, %rdx ; SSE2-NEXT: addq $-1, %rax ; SSE2-NEXT: adcq $-1, %rbp ; SSE2-NEXT: shldq $63, %rax, %rbp ; SSE2-NEXT: shldq $63, %rcx, %rdx ; SSE2-NEXT: movq %rdx, %xmm8 ; SSE2-NEXT: movq %rbp, %xmm0 ; SSE2-NEXT: shrq %r11 ; SSE2-NEXT: movq %r11, %xmm9 ; SSE2-NEXT: shrq %rsi ; SSE2-NEXT: movq %rsi, 
%xmm2 ; SSE2-NEXT: shrq %rdi ; SSE2-NEXT: movq %rdi, %xmm10 ; SSE2-NEXT: shrq %r8 ; SSE2-NEXT: movq %r8, %xmm4 ; SSE2-NEXT: shrq %rbx ; SSE2-NEXT: movq %rbx, %xmm11 ; SSE2-NEXT: shrq %r9 ; SSE2-NEXT: movq %r9, %xmm7 ; SSE2-NEXT: shrq %r13 ; SSE2-NEXT: movq %r13, %xmm12 ; SSE2-NEXT: shrq %r12 ; SSE2-NEXT: movq %r12, %xmm1 ; SSE2-NEXT: shrq %r14 ; SSE2-NEXT: movq %r14, %xmm13 ; SSE2-NEXT: shrq %r15 ; SSE2-NEXT: movq %r15, %xmm6 ; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload ; SSE2-NEXT: shrq %rax ; SSE2-NEXT: movq %rax, %xmm14 ; SSE2-NEXT: shrq %r10 ; SSE2-NEXT: movq %r10, %xmm5 ; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload ; SSE2-NEXT: shrq %rax ; SSE2-NEXT: movq %rax, %xmm15 ; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload ; SSE2-NEXT: shrq %rax ; SSE2-NEXT: movq %rax, %xmm3 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm8[0],xmm0[1],xmm8[1],xmm0[2],xmm8[2],xmm0[3],xmm8[3],xmm0[4],xmm8[4],xmm0[5],xmm8[5],xmm0[6],xmm8[6],xmm0[7],xmm8[7] ; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm9[0],xmm2[1],xmm9[1],xmm2[2],xmm9[2],xmm2[3],xmm9[3],xmm2[4],xmm9[4],xmm2[5],xmm9[5],xmm2[6],xmm9[6],xmm2[7],xmm9[7] ; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [65535,0,65535,65535,65535,65535,65535,65535] ; SSE2-NEXT: pand %xmm8, %xmm0 ; SSE2-NEXT: pslld $16, %xmm2 ; SSE2-NEXT: pandn %xmm2, %xmm8 ; SSE2-NEXT: por %xmm0, %xmm8 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm10[0],xmm4[1],xmm10[1],xmm4[2],xmm10[2],xmm4[3],xmm10[3],xmm4[4],xmm10[4],xmm4[5],xmm10[5],xmm4[6],xmm10[6],xmm4[7],xmm10[7] ; SSE2-NEXT: psllq $48, %xmm4 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm11[0],xmm7[1],xmm11[1],xmm7[2],xmm11[2],xmm7[3],xmm11[3],xmm7[4],xmm11[4],xmm7[5],xmm11[5],xmm7[6],xmm11[6],xmm7[7],xmm11[7] ; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [65535,65535,65535,0,65535,65535,65535,65535] ; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm7[0,0,1,1] ; SSE2-NEXT: pand %xmm0, %xmm2 ; SSE2-NEXT: pandn %xmm4, %xmm0 ; SSE2-NEXT: por %xmm2, %xmm0 ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1] ; SSE2-NEXT: punpckldq {{.*#+}} xmm8 = xmm8[0],xmm0[0],xmm8[1],xmm0[1] ; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm12[0],xmm1[1],xmm12[1],xmm1[2],xmm12[2],xmm1[3],xmm12[3],xmm1[4],xmm12[4],xmm1[5],xmm12[5],xmm1[6],xmm12[6],xmm1[7],xmm12[7] ; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm13[0],xmm6[1],xmm13[1],xmm6[2],xmm13[2],xmm6[3],xmm13[3],xmm6[4],xmm13[4],xmm6[5],xmm13[5],xmm6[6],xmm13[6],xmm6[7],xmm13[7] ; SSE2-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1] ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm6[0,0,0,0] ; SSE2-NEXT: pand {{.*}}(%rip), %xmm0 ; SSE2-NEXT: por %xmm1, %xmm0 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm14[0],xmm5[1],xmm14[1],xmm5[2],xmm14[2],xmm5[3],xmm14[3],xmm5[4],xmm14[4],xmm5[5],xmm14[5],xmm5[6],xmm14[6],xmm5[7],xmm14[7] ; SSE2-NEXT: pslldq {{.*#+}} xmm5 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm5[0,1,2,3,4,5] ; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [65535,65535,65535,65535,65535,0,65535,65535] ; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm15[0],xmm3[1],xmm15[1],xmm3[2],xmm15[2],xmm3[3],xmm15[3],xmm3[4],xmm15[4],xmm3[5],xmm15[5],xmm3[6],xmm15[6],xmm3[7],xmm15[7] ; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm3[0,1,0,1] ; SSE2-NEXT: pand %xmm1, %xmm2 ; SSE2-NEXT: pandn %xmm5, %xmm1 ; SSE2-NEXT: por %xmm2, %xmm1 ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,2,2,2] ; SSE2-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm0[2],xmm1[3],xmm0[3] ; SSE2-NEXT: movsd {{.*#+}} xmm1 = xmm8[0],xmm1[1] ; SSE2-NEXT: 
movupd %xmm1, (%rax) ; SSE2-NEXT: popq %rbx ; SSE2-NEXT: popq %r12 ; SSE2-NEXT: popq %r13 ; SSE2-NEXT: popq %r14 ; SSE2-NEXT: popq %r15 ; SSE2-NEXT: popq %rbp ; SSE2-NEXT: retq ; ; AVX1-LABEL: not_avg_v16i8_wide_constants: ; AVX1: # %bb.0: ; AVX1-NEXT: pushq %rbp ; AVX1-NEXT: pushq %r15 ; AVX1-NEXT: pushq %r14 ; AVX1-NEXT: pushq %r13 ; AVX1-NEXT: pushq %r12 ; AVX1-NEXT: pushq %rbx ; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm4 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero ; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero ; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero ; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm9 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero ; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm5 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero ; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3 ; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm5 = xmm5[2],xmm3[2],xmm5[3],xmm3[3] ; AVX1-NEXT: vmovq %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill ; AVX1-NEXT: vpextrq $1, %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill ; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm5 = xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7] ; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm6 = xmm5[0],zero,xmm5[1],zero ; AVX1-NEXT: vmovq %xmm6, %rcx ; AVX1-NEXT: vpextrq $1, %xmm6, %r11 ; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm5 = xmm5[2],xmm3[2],xmm5[3],xmm3[3] ; AVX1-NEXT: vmovq %xmm5, %r14 ; AVX1-NEXT: vpextrq $1, %xmm5, %r15 ; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm5 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero ; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm5 = xmm5[2],xmm3[2],xmm5[3],xmm3[3] ; AVX1-NEXT: vmovq %xmm5, %r10 ; AVX1-NEXT: vpextrq $1, %xmm5, %r12 ; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm5 = xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7] ; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm6 = xmm5[0],zero,xmm5[1],zero ; AVX1-NEXT: vmovq %xmm6, %r8 ; AVX1-NEXT: vpextrq $1, %xmm6, %r9 ; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm5 = xmm5[2],xmm3[2],xmm5[3],xmm3[3] ; AVX1-NEXT: vmovq %xmm5, %rsi ; AVX1-NEXT: vpextrq $1, %xmm5, %rdi ; AVX1-NEXT: vpmovzxwq {{.*#+}} xmm8 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero ; AVX1-NEXT: vpmovzxwq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero ; AVX1-NEXT: vpextrq $1, %xmm4, %rax ; AVX1-NEXT: vmovq %xmm4, %rbx ; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm4 = xmm9[0],zero,xmm9[1],zero,xmm9[2],zero,xmm9[3],zero ; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm4 = xmm4[2],xmm3[2],xmm4[3],xmm3[3] ; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm6 = xmm9[4],xmm3[4],xmm9[5],xmm3[5],xmm9[6],xmm3[6],xmm9[7],xmm3[7] ; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm5 = xmm6[0],zero,xmm6[1],zero ; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm6 = xmm6[2],xmm3[2],xmm6[3],xmm3[3] ; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm7 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero ; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm7 = xmm7[2],xmm3[2],xmm7[3],xmm3[3] ; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7] ; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm2[0],zero,xmm2[1],zero ; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm2 = xmm2[2],xmm3[2],xmm2[3],xmm3[3] ; AVX1-NEXT: vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero ; AVX1-NEXT: vpextrq $1, %xmm1, %rdx ; AVX1-NEXT: addq %rax, %rdx ; AVX1-NEXT: vmovq %xmm1, %rax ; AVX1-NEXT: addq %rbx, %rax ; AVX1-NEXT: vpextrq $1, %xmm2, %rbx 
; AVX1-NEXT: leaq -1(%rdi,%rbx), %rdi ; AVX1-NEXT: vmovq %xmm2, %rbx ; AVX1-NEXT: leaq -1(%rsi,%rbx), %rsi ; AVX1-NEXT: vpextrq $1, %xmm0, %rbx ; AVX1-NEXT: leaq -1(%r9,%rbx), %r9 ; AVX1-NEXT: vmovq %xmm0, %rbx ; AVX1-NEXT: leaq -1(%r8,%rbx), %r8 ; AVX1-NEXT: vpextrq $1, %xmm7, %rbx ; AVX1-NEXT: leaq -1(%r12,%rbx), %rbx ; AVX1-NEXT: vmovq %xmm7, %rbp ; AVX1-NEXT: leaq -1(%r10,%rbp), %r10 ; AVX1-NEXT: vpextrq $1, %xmm6, %rbp ; AVX1-NEXT: leaq -1(%r15,%rbp), %r13 ; AVX1-NEXT: vmovq %xmm6, %rbp ; AVX1-NEXT: leaq -1(%r14,%rbp), %r12 ; AVX1-NEXT: vpextrq $1, %xmm5, %rbp ; AVX1-NEXT: leaq -1(%r11,%rbp), %r14 ; AVX1-NEXT: vmovq %xmm5, %rbp ; AVX1-NEXT: leaq -1(%rcx,%rbp), %r15 ; AVX1-NEXT: vpextrq $1, %xmm4, %rcx ; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Reload ; AVX1-NEXT: leaq -1(%rbp,%rcx), %r11 ; AVX1-NEXT: vmovq %xmm4, %rcx ; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Reload ; AVX1-NEXT: leaq -1(%rbp,%rcx), %rcx ; AVX1-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; AVX1-NEXT: vpextrq $1, %xmm8, %rcx ; AVX1-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm9[0],zero,zero,zero,xmm9[1],zero,zero,zero ; AVX1-NEXT: vpextrq $1, %xmm0, %rbp ; AVX1-NEXT: leaq -1(%rcx,%rbp), %rcx ; AVX1-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; AVX1-NEXT: vmovq %xmm8, %rcx ; AVX1-NEXT: vmovq %xmm0, %rbp ; AVX1-NEXT: leaq -1(%rcx,%rbp), %rcx ; AVX1-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; AVX1-NEXT: xorl %ebp, %ebp ; AVX1-NEXT: addq $-1, %rdx ; AVX1-NEXT: movl $0, %ecx ; AVX1-NEXT: adcq $-1, %rcx ; AVX1-NEXT: addq $-1, %rax ; AVX1-NEXT: adcq $-1, %rbp ; AVX1-NEXT: shldq $63, %rax, %rbp ; AVX1-NEXT: shldq $63, %rdx, %rcx ; AVX1-NEXT: shrq %rdi ; AVX1-NEXT: vmovq %rdi, %xmm8 ; AVX1-NEXT: shrq %rsi ; AVX1-NEXT: vmovq %rsi, %xmm9 ; AVX1-NEXT: shrq %r9 ; AVX1-NEXT: vmovq %r9, %xmm0 ; AVX1-NEXT: shrq %r8 ; AVX1-NEXT: vmovq %r8, %xmm1 ; AVX1-NEXT: vmovq %rcx, %xmm12 ; AVX1-NEXT: vmovq %rbp, %xmm13 ; AVX1-NEXT: shrq %rbx ; AVX1-NEXT: vmovq %rbx, %xmm14 ; AVX1-NEXT: shrq %r10 ; AVX1-NEXT: vmovq %r10, %xmm15 ; AVX1-NEXT: shrq %r13 ; AVX1-NEXT: vmovq %r13, %xmm10 ; AVX1-NEXT: shrq %r12 ; AVX1-NEXT: vmovq %r12, %xmm11 ; AVX1-NEXT: shrq %r14 ; AVX1-NEXT: vmovq %r14, %xmm2 ; AVX1-NEXT: shrq %r15 ; AVX1-NEXT: vmovq %r15, %xmm3 ; AVX1-NEXT: shrq %r11 ; AVX1-NEXT: vmovq %r11, %xmm4 ; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload ; AVX1-NEXT: shrq %rax ; AVX1-NEXT: vmovq %rax, %xmm5 ; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload ; AVX1-NEXT: shrq %rax ; AVX1-NEXT: vmovq %rax, %xmm6 ; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload ; AVX1-NEXT: shrq %rax ; AVX1-NEXT: vmovq %rax, %xmm7 ; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm8 = xmm9[0],xmm8[0],xmm9[1],xmm8[1],xmm9[2],xmm8[2],xmm9[3],xmm8[3],xmm9[4],xmm8[4],xmm9[5],xmm8[5],xmm9[6],xmm8[6],xmm9[7],xmm8[7] ; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm9 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] ; AVX1-NEXT: vpsllq $48, %xmm8, %xmm8 ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm9[0,0,1,1] ; AVX1-NEXT: vpblendw {{.*#+}} xmm8 = xmm0[0,1,2],xmm8[3],xmm0[4,5,6,7] ; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm13[0],xmm12[0],xmm13[1],xmm12[1],xmm13[2],xmm12[2],xmm13[3],xmm12[3],xmm13[4],xmm12[4],xmm13[5],xmm12[5],xmm13[6],xmm12[6],xmm13[7],xmm12[7] ; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm1 = 
xmm15[0],xmm14[0],xmm15[1],xmm14[1],xmm15[2],xmm14[2],xmm15[3],xmm14[3],xmm15[4],xmm14[4],xmm15[5],xmm14[5],xmm15[6],xmm14[6],xmm15[7],xmm14[7] ; AVX1-NEXT: vpslld $16, %xmm1, %xmm1 ; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3,4,5,6,7] ; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm8[2,3],xmm0[4,5,6,7] ; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm11[0],xmm10[0],xmm11[1],xmm10[1],xmm11[2],xmm10[2],xmm11[3],xmm10[3],xmm11[4],xmm10[4],xmm11[5],xmm10[5],xmm11[6],xmm10[6],xmm11[7],xmm10[7] ; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7] ; AVX1-NEXT: vpslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1] ; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,0,0,0] ; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3,4,5,6],xmm1[7] ; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3],xmm5[4],xmm4[4],xmm5[5],xmm4[5],xmm5[6],xmm4[6],xmm5[7],xmm4[7] ; AVX1-NEXT: vpslldq {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm2[0,1,2,3,4,5] ; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm3 = xmm7[0],xmm6[0],xmm7[1],xmm6[1],xmm7[2],xmm6[2],xmm7[3],xmm6[3],xmm7[4],xmm6[4],xmm7[5],xmm6[5],xmm7[6],xmm6[6],xmm7[7],xmm6[7] ; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[0,1,0,1] ; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3,4],xmm2[5],xmm3[6,7] ; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3,4,5],xmm1[6,7] ; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7] ; AVX1-NEXT: vmovdqu %xmm0, (%rax) ; AVX1-NEXT: popq %rbx ; AVX1-NEXT: popq %r12 ; AVX1-NEXT: popq %r13 ; AVX1-NEXT: popq %r14 ; AVX1-NEXT: popq %r15 ; AVX1-NEXT: popq %rbp ; AVX1-NEXT: retq ; ; AVX2-LABEL: not_avg_v16i8_wide_constants: ; AVX2: # %bb.0: ; AVX2-NEXT: pushq %rbp ; AVX2-NEXT: pushq %r15 ; AVX2-NEXT: pushq %r14 ; AVX2-NEXT: pushq %r13 ; AVX2-NEXT: pushq %r12 ; AVX2-NEXT: pushq %rbx ; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero ; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm4 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero ; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero ; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm3 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero ; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm9 ; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm1 ; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero ; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm5 ; AVX2-NEXT: vmovq %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill ; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm2 ; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero ; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm6 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero ; AVX2-NEXT: vmovq %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill ; AVX2-NEXT: vextracti128 $1, %ymm6, %xmm7 ; AVX2-NEXT: vmovq %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill ; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm2 ; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm2 = 
xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero ; AVX2-NEXT: vmovq %xmm2, %r11 ; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm0 ; AVX2-NEXT: vmovq %xmm0, %r14 ; AVX2-NEXT: vpextrq $1, %xmm0, %rbx ; AVX2-NEXT: vpextrq $1, %xmm2, %rsi ; AVX2-NEXT: vpextrq $1, %xmm7, %r12 ; AVX2-NEXT: vpextrq $1, %xmm6, %r15 ; AVX2-NEXT: vpextrq $1, %xmm5, %rdx ; AVX2-NEXT: vpextrq $1, %xmm1, %rcx ; AVX2-NEXT: vpextrq $1, %xmm3, %rax ; AVX2-NEXT: vmovq %xmm3, %rbp ; AVX2-NEXT: vpextrq $1, %xmm9, %r9 ; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero ; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm6 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero ; AVX2-NEXT: vextracti128 $1, %ymm6, %xmm2 ; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0 ; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero ; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm5 ; AVX2-NEXT: vextracti128 $1, %ymm4, %xmm0 ; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero ; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm4 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero ; AVX2-NEXT: vextracti128 $1, %ymm4, %xmm7 ; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0 ; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm8 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero ; AVX2-NEXT: vextracti128 $1, %ymm8, %xmm0 ; AVX2-NEXT: vpextrq $1, %xmm0, %rdi ; AVX2-NEXT: addq %rbx, %rdi ; AVX2-NEXT: movq %rdi, %rbx ; AVX2-NEXT: vpextrq $1, %xmm8, %r10 ; AVX2-NEXT: addq %rsi, %r10 ; AVX2-NEXT: vpextrq $1, %xmm7, %rsi ; AVX2-NEXT: addq %r12, %rsi ; AVX2-NEXT: movq %rsi, %r12 ; AVX2-NEXT: vpextrq $1, %xmm4, %r13 ; AVX2-NEXT: addq %r15, %r13 ; AVX2-NEXT: vpextrq $1, %xmm5, %r15 ; AVX2-NEXT: addq %rdx, %r15 ; AVX2-NEXT: vpextrq $1, %xmm3, %r8 ; AVX2-NEXT: addq %rcx, %r8 ; AVX2-NEXT: vpextrq $1, %xmm6, %rsi ; AVX2-NEXT: addq %rax, %rsi ; AVX2-NEXT: vmovq %xmm6, %rdx ; AVX2-NEXT: addq %rbp, %rdx ; AVX2-NEXT: vpextrq $1, %xmm2, %rcx ; AVX2-NEXT: addq %r9, %rcx ; AVX2-NEXT: vmovq %xmm0, %rdi ; AVX2-NEXT: leaq -1(%r14,%rdi), %rax ; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; AVX2-NEXT: vmovq %xmm8, %rdi ; AVX2-NEXT: leaq -1(%r11,%rdi), %rax ; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; AVX2-NEXT: vmovq %xmm7, %rdi ; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload ; AVX2-NEXT: leaq -1(%rax,%rdi), %rax ; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; AVX2-NEXT: vmovq %xmm4, %rdi ; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload ; AVX2-NEXT: leaq -1(%rax,%rdi), %rax ; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; AVX2-NEXT: vmovq %xmm5, %rdi ; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload ; AVX2-NEXT: leaq -1(%rax,%rdi), %rax ; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; AVX2-NEXT: vmovq %xmm1, %rdi ; AVX2-NEXT: vmovq %xmm3, %rbp ; AVX2-NEXT: leaq -1(%rdi,%rbp), %rax ; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; AVX2-NEXT: vmovq %xmm9, %rdi ; AVX2-NEXT: vmovq %xmm2, %rbp ; AVX2-NEXT: leaq -1(%rdi,%rbp), %rdi ; AVX2-NEXT: movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; AVX2-NEXT: addq $-1, %rbx ; AVX2-NEXT: movq %rbx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; AVX2-NEXT: movl $0, %r9d ; AVX2-NEXT: adcq $-1, %r9 ; AVX2-NEXT: addq $-1, %r10 ; AVX2-NEXT: movq %r10, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; AVX2-NEXT: movl $0, %edi ; AVX2-NEXT: adcq $-1, 
%rdi ; AVX2-NEXT: addq $-1, %r12 ; AVX2-NEXT: movq %r12, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; AVX2-NEXT: movl $0, %r11d ; AVX2-NEXT: adcq $-1, %r11 ; AVX2-NEXT: addq $-1, %r13 ; AVX2-NEXT: movl $0, %r10d ; AVX2-NEXT: adcq $-1, %r10 ; AVX2-NEXT: addq $-1, %r15 ; AVX2-NEXT: movl $0, %r14d ; AVX2-NEXT: adcq $-1, %r14 ; AVX2-NEXT: addq $-1, %r8 ; AVX2-NEXT: movl $0, %ebp ; AVX2-NEXT: adcq $-1, %rbp ; AVX2-NEXT: addq $-1, %rsi ; AVX2-NEXT: movl $0, %r12d ; AVX2-NEXT: adcq $-1, %r12 ; AVX2-NEXT: addq $-1, %rdx ; AVX2-NEXT: movl $0, %ebx ; AVX2-NEXT: adcq $-1, %rbx ; AVX2-NEXT: addq $-1, %rcx ; AVX2-NEXT: movl $0, %eax ; AVX2-NEXT: adcq $-1, %rax ; AVX2-NEXT: shldq $63, %rcx, %rax ; AVX2-NEXT: shldq $63, %rdx, %rbx ; AVX2-NEXT: shldq $63, %rsi, %r12 ; AVX2-NEXT: shldq $63, %r8, %rbp ; AVX2-NEXT: shldq $63, %r15, %r14 ; AVX2-NEXT: shldq $63, %r13, %r10 ; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload ; AVX2-NEXT: shldq $63, %rcx, %r11 ; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload ; AVX2-NEXT: shldq $63, %rcx, %rdi ; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload ; AVX2-NEXT: shldq $63, %rcx, %r9 ; AVX2-NEXT: vmovq %r9, %xmm8 ; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload ; AVX2-NEXT: shrq %rcx ; AVX2-NEXT: vmovq %rcx, %xmm9 ; AVX2-NEXT: vmovq %rdi, %xmm0 ; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload ; AVX2-NEXT: shrq %rcx ; AVX2-NEXT: vmovq %rcx, %xmm1 ; AVX2-NEXT: vmovq %r11, %xmm12 ; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload ; AVX2-NEXT: shrq %rcx ; AVX2-NEXT: vmovq %rcx, %xmm13 ; AVX2-NEXT: vmovq %r10, %xmm14 ; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload ; AVX2-NEXT: shrq %rcx ; AVX2-NEXT: vmovq %rcx, %xmm15 ; AVX2-NEXT: vmovq %r14, %xmm10 ; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload ; AVX2-NEXT: shrq %rcx ; AVX2-NEXT: vmovq %rcx, %xmm11 ; AVX2-NEXT: vmovq %rbp, %xmm2 ; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload ; AVX2-NEXT: shrq %rcx ; AVX2-NEXT: vmovq %rcx, %xmm3 ; AVX2-NEXT: vmovq %r12, %xmm4 ; AVX2-NEXT: vmovq %rbx, %xmm5 ; AVX2-NEXT: vmovq %rax, %xmm6 ; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload ; AVX2-NEXT: shrq %rax ; AVX2-NEXT: vmovq %rax, %xmm7 ; AVX2-NEXT: vpunpcklbw {{.*#+}} xmm8 = xmm9[0],xmm8[0],xmm9[1],xmm8[1],xmm9[2],xmm8[2],xmm9[3],xmm8[3],xmm9[4],xmm8[4],xmm9[5],xmm8[5],xmm9[6],xmm8[6],xmm9[7],xmm8[7] ; AVX2-NEXT: vpunpcklbw {{.*#+}} xmm9 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] ; AVX2-NEXT: vpbroadcastw %xmm8, %xmm8 ; AVX2-NEXT: vpbroadcastw %xmm9, %xmm0 ; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm8 = xmm0[0],xmm8[0],xmm0[1],xmm8[1],xmm0[2],xmm8[2],xmm0[3],xmm8[3] ; AVX2-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm13[0],xmm12[0],xmm13[1],xmm12[1],xmm13[2],xmm12[2],xmm13[3],xmm12[3],xmm13[4],xmm12[4],xmm13[5],xmm12[5],xmm13[6],xmm12[6],xmm13[7],xmm12[7] ; AVX2-NEXT: vpunpcklbw {{.*#+}} xmm9 = xmm15[0],xmm14[0],xmm15[1],xmm14[1],xmm15[2],xmm14[2],xmm15[3],xmm14[3],xmm15[4],xmm14[4],xmm15[5],xmm14[5],xmm15[6],xmm14[6],xmm15[7],xmm14[7] ; AVX2-NEXT: vpbroadcastw %xmm0, %xmm0 ; AVX2-NEXT: vpbroadcastw %xmm9, %xmm1 ; AVX2-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4],xmm0[5],xmm1[6,7] ; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1,2],xmm8[3] ; AVX2-NEXT: vpunpcklbw {{.*#+}} xmm1 = 
xmm11[0],xmm10[0],xmm11[1],xmm10[1],xmm11[2],xmm10[2],xmm11[3],xmm10[3],xmm11[4],xmm10[4],xmm11[5],xmm10[5],xmm11[6],xmm10[6],xmm11[7],xmm10[7] ; AVX2-NEXT: vpsllq $48, %xmm1, %xmm1 ; AVX2-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7] ; AVX2-NEXT: vpbroadcastw %xmm2, %xmm2 ; AVX2-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2],xmm1[3],xmm2[4,5,6,7] ; AVX2-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3],xmm5[4],xmm4[4],xmm5[5],xmm4[5],xmm5[6],xmm4[6],xmm5[7],xmm4[7] ; AVX2-NEXT: vpunpcklbw {{.*#+}} xmm3 = xmm7[0],xmm6[0],xmm7[1],xmm6[1],xmm7[2],xmm6[2],xmm7[3],xmm6[3],xmm7[4],xmm6[4],xmm7[5],xmm6[5],xmm7[6],xmm6[6],xmm7[7],xmm6[7] ; AVX2-NEXT: vpslld $16, %xmm3, %xmm3 ; AVX2-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1],xmm2[2,3,4,5,6,7] ; AVX2-NEXT: vpblendd {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2,3] ; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3] ; AVX2-NEXT: vmovdqu %xmm0, (%rax) ; AVX2-NEXT: popq %rbx ; AVX2-NEXT: popq %r12 ; AVX2-NEXT: popq %r13 ; AVX2-NEXT: popq %r14 ; AVX2-NEXT: popq %r15 ; AVX2-NEXT: popq %rbp ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; ; AVX512-LABEL: not_avg_v16i8_wide_constants: ; AVX512: # %bb.0: ; AVX512-NEXT: pushq %rbp ; AVX512-NEXT: pushq %r15 ; AVX512-NEXT: pushq %r14 ; AVX512-NEXT: pushq %r13 ; AVX512-NEXT: pushq %r12 ; AVX512-NEXT: pushq %rbx ; AVX512-NEXT: subq $24, %rsp ; AVX512-NEXT: vpmovzxbw {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero ; AVX512-NEXT: vpmovzxbw {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero ; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero ; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm3 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero ; AVX512-NEXT: vmovq %xmm3, %rbx ; AVX512-NEXT: vpextrq $1, %xmm3, %rbp ; AVX512-NEXT: vextracti128 $1, %ymm3, %xmm3 ; AVX512-NEXT: vmovq %xmm3, %rdi ; AVX512-NEXT: vpextrq $1, %xmm3, %rsi ; AVX512-NEXT: vextracti128 $1, %ymm2, %xmm2 ; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero ; AVX512-NEXT: vmovq %xmm2, %rdx ; AVX512-NEXT: vpextrq $1, %xmm2, %r15 ; AVX512-NEXT: vextracti128 $1, %ymm2, %xmm2 ; AVX512-NEXT: vmovq %xmm2, %r8 ; AVX512-NEXT: vpextrq $1, %xmm2, %r9 ; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm1 ; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero ; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero ; AVX512-NEXT: vmovq %xmm2, %r11 ; AVX512-NEXT: vpextrq $1, %xmm2, %r10 ; AVX512-NEXT: vextracti128 $1, %ymm2, %xmm2 ; AVX512-NEXT: vmovq %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill ; AVX512-NEXT: vpextrq $1, %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill ; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm1 ; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero ; AVX512-NEXT: vmovq %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill ; AVX512-NEXT: vpextrq $1, %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) 
# 8-byte Folded Spill ; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm1 ; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero ; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm3 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero ; AVX512-NEXT: vmovq %xmm3, %rcx ; AVX512-NEXT: addq %rbx, %rcx ; AVX512-NEXT: vpextrq $1, %xmm3, %rax ; AVX512-NEXT: addq %rbp, %rax ; AVX512-NEXT: movq %rax, %rbp ; AVX512-NEXT: vextracti128 $1, %ymm3, %xmm3 ; AVX512-NEXT: vmovq %xmm3, %r14 ; AVX512-NEXT: addq %rdi, %r14 ; AVX512-NEXT: vpextrq $1, %xmm3, %rax ; AVX512-NEXT: addq %rsi, %rax ; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; AVX512-NEXT: vextracti128 $1, %ymm2, %xmm2 ; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero ; AVX512-NEXT: vmovq %xmm2, %rax ; AVX512-NEXT: addq %rdx, %rax ; AVX512-NEXT: movq %rax, %rdx ; AVX512-NEXT: vpextrq $1, %xmm2, %r12 ; AVX512-NEXT: addq %r15, %r12 ; AVX512-NEXT: vextracti128 $1, %ymm2, %xmm2 ; AVX512-NEXT: vmovq %xmm2, %rax ; AVX512-NEXT: addq %r8, %rax ; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; AVX512-NEXT: vpextrq $1, %xmm2, %rax ; AVX512-NEXT: addq %r9, %rax ; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm0 ; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero ; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero ; AVX512-NEXT: vmovq %xmm2, %rax ; AVX512-NEXT: addq %r11, %rax ; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; AVX512-NEXT: vpextrq $1, %xmm2, %rax ; AVX512-NEXT: addq %r10, %rax ; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; AVX512-NEXT: vextracti128 $1, %ymm2, %xmm2 ; AVX512-NEXT: vmovq %xmm2, %r13 ; AVX512-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Folded Reload ; AVX512-NEXT: vpextrq $1, %xmm2, %rbx ; AVX512-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Folded Reload ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm0 ; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero ; AVX512-NEXT: vmovq %xmm0, %r10 ; AVX512-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Folded Reload ; AVX512-NEXT: vpextrq $1, %xmm0, %r9 ; AVX512-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Folded Reload ; AVX512-NEXT: vmovq %xmm1, %rax ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm0 ; AVX512-NEXT: vmovq %xmm0, %r8 ; AVX512-NEXT: addq %rax, %r8 ; AVX512-NEXT: vpextrq $1, %xmm1, %rdi ; AVX512-NEXT: vpextrq $1, %xmm0, %rsi ; AVX512-NEXT: addq %rdi, %rsi ; AVX512-NEXT: addq $-1, %rcx ; AVX512-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; AVX512-NEXT: movl $0, %r11d ; AVX512-NEXT: adcq $-1, %r11 ; AVX512-NEXT: addq $-1, %rbp ; AVX512-NEXT: movq %rbp, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; AVX512-NEXT: movl $0, %edi ; AVX512-NEXT: adcq $-1, %rdi ; AVX512-NEXT: addq $-1, %r14 ; AVX512-NEXT: movq %r14, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; AVX512-NEXT: movl $0, %r15d ; AVX512-NEXT: adcq $-1, %r15 ; AVX512-NEXT: addq $-1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill ; AVX512-NEXT: movl $0, %r14d ; AVX512-NEXT: adcq $-1, %r14 ; AVX512-NEXT: addq $-1, %rdx ; AVX512-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; AVX512-NEXT: movl $0, %eax ; AVX512-NEXT: adcq $-1, %rax ; AVX512-NEXT: movq %rax, (%rsp) # 8-byte Spill ; 
AVX512-NEXT: addq $-1, %r12 ; AVX512-NEXT: movq %r12, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; AVX512-NEXT: movl $0, %r12d ; AVX512-NEXT: adcq $-1, %r12 ; AVX512-NEXT: addq $-1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill ; AVX512-NEXT: movl $0, %eax ; AVX512-NEXT: adcq $-1, %rax ; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; AVX512-NEXT: addq $-1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill ; AVX512-NEXT: movl $0, %eax ; AVX512-NEXT: adcq $-1, %rax ; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; AVX512-NEXT: addq $-1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill ; AVX512-NEXT: movl $0, %eax ; AVX512-NEXT: adcq $-1, %rax ; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; AVX512-NEXT: addq $-1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill ; AVX512-NEXT: movl $0, %eax ; AVX512-NEXT: adcq $-1, %rax ; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; AVX512-NEXT: addq $-1, %r13 ; AVX512-NEXT: movl $0, %eax ; AVX512-NEXT: adcq $-1, %rax ; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; AVX512-NEXT: addq $-1, %rbx ; AVX512-NEXT: movl $0, %eax ; AVX512-NEXT: adcq $-1, %rax ; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; AVX512-NEXT: addq $-1, %r10 ; AVX512-NEXT: movl $0, %edx ; AVX512-NEXT: adcq $-1, %rdx ; AVX512-NEXT: addq $-1, %r9 ; AVX512-NEXT: movl $0, %ecx ; AVX512-NEXT: adcq $-1, %rcx ; AVX512-NEXT: addq $-1, %r8 ; AVX512-NEXT: movl $0, %eax ; AVX512-NEXT: adcq $-1, %rax ; AVX512-NEXT: addq $-1, %rsi ; AVX512-NEXT: movl $0, %ebp ; AVX512-NEXT: adcq $-1, %rbp ; AVX512-NEXT: shldq $63, %rsi, %rbp ; AVX512-NEXT: movq %rbp, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; AVX512-NEXT: shldq $63, %r8, %rax ; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; AVX512-NEXT: shldq $63, %r9, %rcx ; AVX512-NEXT: movq %rcx, %rbp ; AVX512-NEXT: shldq $63, %r10, %rdx ; AVX512-NEXT: movq %rdx, %r9 ; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload ; AVX512-NEXT: shldq $63, %rbx, %r10 ; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload ; AVX512-NEXT: shldq $63, %r13, %r8 ; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload ; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload ; AVX512-NEXT: shldq $63, %rax, %r13 ; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload ; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Reload ; AVX512-NEXT: shldq $63, %rax, %rbx ; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload ; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload ; AVX512-NEXT: shldq $63, %rax, %rsi ; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload ; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload ; AVX512-NEXT: shldq $63, %rax, %rdx ; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload ; AVX512-NEXT: shldq $63, %rax, %r12 ; AVX512-NEXT: movq (%rsp), %rcx # 8-byte Reload ; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload ; AVX512-NEXT: shldq $63, %rax, %rcx ; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload ; AVX512-NEXT: shldq $63, %rax, %r14 ; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload ; AVX512-NEXT: shldq $63, %rax, %r15 ; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload ; AVX512-NEXT: shldq $63, %rax, %rdi ; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload ; AVX512-NEXT: shldq $63, %rax, %r11 ; AVX512-NEXT: vmovq %r11, %xmm0 ; 
AVX512-NEXT: vmovq %rdi, %xmm1 ; AVX512-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0 ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 ; AVX512-NEXT: vmovd %xmm1, %eax ; AVX512-NEXT: vpinsrb $1, %eax, %xmm0, %xmm1 ; AVX512-NEXT: vmovq %r15, %xmm2 ; AVX512-NEXT: vmovq %r14, %xmm3 ; AVX512-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm2 ; AVX512-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0 ; AVX512-NEXT: vextracti32x4 $2, %zmm0, %xmm2 ; AVX512-NEXT: vmovd %xmm2, %eax ; AVX512-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1 ; AVX512-NEXT: vextracti32x4 $3, %zmm0, %xmm0 ; AVX512-NEXT: vmovd %xmm0, %eax ; AVX512-NEXT: vpinsrb $3, %eax, %xmm1, %xmm0 ; AVX512-NEXT: vmovq %rcx, %xmm1 ; AVX512-NEXT: vmovq %r12, %xmm2 ; AVX512-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1 ; AVX512-NEXT: vmovd %xmm1, %eax ; AVX512-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0 ; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm2 ; AVX512-NEXT: vmovd %xmm2, %eax ; AVX512-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0 ; AVX512-NEXT: vmovq %rdx, %xmm2 ; AVX512-NEXT: vmovq %rsi, %xmm3 ; AVX512-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm2 ; AVX512-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm1 ; AVX512-NEXT: vextracti32x4 $2, %zmm1, %xmm2 ; AVX512-NEXT: vmovd %xmm2, %eax ; AVX512-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0 ; AVX512-NEXT: vextracti32x4 $3, %zmm1, %xmm1 ; AVX512-NEXT: vmovd %xmm1, %eax ; AVX512-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0 ; AVX512-NEXT: vmovq %rbx, %xmm1 ; AVX512-NEXT: vmovq %r13, %xmm2 ; AVX512-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1 ; AVX512-NEXT: vmovd %xmm1, %eax ; AVX512-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0 ; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm2 ; AVX512-NEXT: vmovd %xmm2, %eax ; AVX512-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0 ; AVX512-NEXT: vmovq %r8, %xmm2 ; AVX512-NEXT: vmovq %r10, %xmm3 ; AVX512-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm2 ; AVX512-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm1 ; AVX512-NEXT: vextracti32x4 $2, %zmm1, %xmm2 ; AVX512-NEXT: vmovd %xmm2, %eax ; AVX512-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0 ; AVX512-NEXT: vextracti32x4 $3, %zmm1, %xmm1 ; AVX512-NEXT: vmovd %xmm1, %eax ; AVX512-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0 ; AVX512-NEXT: vmovq %r9, %xmm1 ; AVX512-NEXT: vmovq %rbp, %xmm2 ; AVX512-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1 ; AVX512-NEXT: vmovd %xmm1, %eax ; AVX512-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0 ; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm2 ; AVX512-NEXT: vmovd %xmm2, %eax ; AVX512-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0 ; AVX512-NEXT: vmovq {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 8-byte Folded Reload ; AVX512-NEXT: # xmm2 = mem[0],zero ; AVX512-NEXT: vmovq {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 8-byte Folded Reload ; AVX512-NEXT: # xmm3 = mem[0],zero ; AVX512-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm2 ; AVX512-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm1 ; AVX512-NEXT: vextracti32x4 $2, %zmm1, %xmm2 ; AVX512-NEXT: vmovd %xmm2, %eax ; AVX512-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0 ; AVX512-NEXT: vextracti32x4 $3, %zmm1, %xmm1 ; AVX512-NEXT: vmovd %xmm1, %eax ; AVX512-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0 ; AVX512-NEXT: vmovdqu %xmm0, (%rax) ; AVX512-NEXT: addq $24, %rsp ; AVX512-NEXT: popq %rbx ; AVX512-NEXT: popq %r12 ; AVX512-NEXT: popq %r13 ; AVX512-NEXT: popq %r14 ; AVX512-NEXT: popq %r15 ; AVX512-NEXT: popq %rbp ; AVX512-NEXT: vzeroupper ; AVX512-NEXT: retq %1 = load <16 x i8>, <16 x i8>* %a %2 = load <16 x i8>, <16 x i8>* %b %3 = zext <16 x i8> %1 to <16 x i128> %4 = zext <16 x i8> %2 to <16 x i128> %5 = add <16 x i128> %3, %6 = add <16 x i128> %5, %4 %7 = lshr <16 x i128> %6, %8 = trunc <16 x i128> %7 to <16 x i8> store <16 x i8> %8, <16 
x i8>* undef, align 4
  ret void
}

; Make sure we don't fail on single element vectors.
define <1 x i8> @avg_v1i8(<1 x i8> %x, <1 x i8> %y) {
; SSE2-LABEL: avg_v1i8:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movzbl %dil, %eax
; SSE2-NEXT:    movzbl %sil, %ecx
; SSE2-NEXT:    leal 1(%rax,%rcx), %eax
; SSE2-NEXT:    shrl %eax
; SSE2-NEXT:    # kill: def $al killed $al killed $eax
; SSE2-NEXT:    retq
;
; AVX-LABEL: avg_v1i8:
; AVX:       # %bb.0:
; AVX-NEXT:    movzbl %dil, %eax
; AVX-NEXT:    movzbl %sil, %ecx
; AVX-NEXT:    leal 1(%rax,%rcx), %eax
; AVX-NEXT:    shrl %eax
; AVX-NEXT:    # kill: def $al killed $al killed $eax
; AVX-NEXT:    retq
  %a = zext <1 x i8> %x to <1 x i16>
  %b = zext <1 x i8> %y to <1 x i16>
  %c = add <1 x i16> %a, %b
  %d = add <1 x i16> %c, <i16 1>
  %e = lshr <1 x i16> %d, <i16 1>
  %f = trunc <1 x i16> %e to <1 x i8>
  ret <1 x i8> %f
}

; _mm_avg_epu16( _mm_slli_epi16(a, 2), _mm_slli_epi16(b, 2))
define <2 x i64> @PR41316(<2 x i64>, <2 x i64>) {
; SSE2-LABEL: PR41316:
; SSE2:       # %bb.0:
; SSE2-NEXT:    psllw $2, %xmm0
; SSE2-NEXT:    psllw $2, %xmm1
; SSE2-NEXT:    pavgw %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; AVX-LABEL: PR41316:
; AVX:       # %bb.0:
; AVX-NEXT:    vpsllw $2, %xmm0, %xmm0
; AVX-NEXT:    vpsllw $2, %xmm1, %xmm1
; AVX-NEXT:    vpavgw %xmm0, %xmm1, %xmm0
; AVX-NEXT:    retq
  %3 = bitcast <2 x i64> %0 to <8 x i16>
  %4 = shl <8 x i16> %3, <i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2>
  %5 = bitcast <2 x i64> %1 to <8 x i16>
  %6 = shl <8 x i16> %5, <i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2>
  %7 = zext <8 x i16> %6 to <8 x i32>
  %8 = or <8 x i16> %4, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
  %9 = zext <8 x i16> %8 to <8 x i32>
  %10 = add nuw nsw <8 x i32> %9, %7
  %11 = lshr <8 x i32> %10, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
  %12 = trunc <8 x i32> %11 to <8 x i16>
  %13 = bitcast <8 x i16> %12 to <2 x i64>
  ret <2 x i64> %13
}
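; For reference, the PR41316 pattern above corresponds roughly to the intrinsic
; sequence named in the comment before it; a hedged C++ sketch (the function
; name is made up for illustration, not taken from the bug report):
;
;   #include <emmintrin.h>
;   __m128i pr41316(__m128i a, __m128i b) {
;     // After the left shift the low bit of each lane is zero, so the rounding
;     // +1 of the average can be folded into an 'or 1', which is presumably why
;     // the IR above shows an or instead of an add before the lshr.
;     return _mm_avg_epu16(_mm_slli_epi16(a, 2), _mm_slli_epi16(b, 2));
;   }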