; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-- -mattr=+sse2,-avx | FileCheck %s --check-prefix=i686_SSE2
; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse2,-avx | FileCheck %s --check-prefix=x86_64_SSE2
; RUN: llc < %s -mtriple=i686-- -mattr=+avx | FileCheck %s --check-prefix=i686_AVX
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx | FileCheck %s --check-prefix=x86_64_AVX
; rdar://6573467

; Verify that the MASKMOVDQU intrinsic lowers to (v)maskmovdqu with the
; implicit destination pointer placed in EDI/RDI on both 32- and 64-bit
; targets, for both the SSE2 and AVX encodings.
define void @test(<16 x i8> %a, <16 x i8> %b, i32 %dummy, i8* %c) nounwind {
; i686_SSE2-LABEL: test:
; i686_SSE2:       # %bb.0: # %entry
; i686_SSE2-NEXT:    pushl %edi
; i686_SSE2-NEXT:    movl {{[0-9]+}}(%esp), %edi
; i686_SSE2-NEXT:    maskmovdqu %xmm1, %xmm0
; i686_SSE2-NEXT:    popl %edi
; i686_SSE2-NEXT:    retl
;
; x86_64_SSE2-LABEL: test:
; x86_64_SSE2:       # %bb.0: # %entry
; x86_64_SSE2-NEXT:    movq %rsi, %rdi
; x86_64_SSE2-NEXT:    maskmovdqu %xmm1, %xmm0
; x86_64_SSE2-NEXT:    retq
;
; i686_AVX-LABEL: test:
; i686_AVX:       # %bb.0: # %entry
; i686_AVX-NEXT:    pushl %edi
; i686_AVX-NEXT:    movl {{[0-9]+}}(%esp), %edi
; i686_AVX-NEXT:    vmaskmovdqu %xmm1, %xmm0
; i686_AVX-NEXT:    popl %edi
; i686_AVX-NEXT:    retl
;
; x86_64_AVX-LABEL: test:
; x86_64_AVX:       # %bb.0: # %entry
; x86_64_AVX-NEXT:    movq %rsi, %rdi
; x86_64_AVX-NEXT:    vmaskmovdqu %xmm1, %xmm0
; x86_64_AVX-NEXT:    retq
entry:
  tail call void @llvm.x86.sse2.maskmov.dqu( <16 x i8> %a, <16 x i8> %b, i8* %c )
  ret void
}

declare void @llvm.x86.sse2.maskmov.dqu(<16 x i8>, <16 x i8>, i8*) nounwind