; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s 2>%t | FileCheck %s
; RUN: FileCheck --check-prefix=WARN --allow-empty %s <%t

; If this check fails please read test/CodeGen/AArch64/README for instructions on how to resolve it.
; WARN-NOT: warning
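
;
; SADDV
;
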
define i64 @saddv_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
; CHECK-LABEL: saddv_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    saddv d0, p0, z0.b
; CHECK-NEXT:    fmov x0, d0
; CHECK-NEXT:    ret
  %out = call i64 @llvm.aarch64.sve.saddv.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a)
  ret i64 %out
}

define i64 @saddv_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
; CHECK-LABEL: saddv_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    saddv d0, p0, z0.h
; CHECK-NEXT:    fmov x0, d0
; CHECK-NEXT:    ret
  %out = call i64 @llvm.aarch64.sve.saddv.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a)
  ret i64 %out
}

define i64 @saddv_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
; CHECK-LABEL: saddv_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    saddv d0, p0, z0.s
; CHECK-NEXT:    fmov x0, d0
; CHECK-NEXT:    ret
  %out = call i64 @llvm.aarch64.sve.saddv.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a)
  ret i64 %out
}

; SADDV has no .d-element form: a 64-bit signed addition is identical to an
; unsigned one, so UADDV is the expected instruction here.
define i64 @saddv_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
; CHECK-LABEL: saddv_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uaddv d0, p0, z0.d
; CHECK-NEXT:    fmov x0, d0
; CHECK-NEXT:    ret
  %out = call i64 @llvm.aarch64.sve.saddv.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a)
  ret i64 %out
}
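
;
; UADDV
;
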
define i64 @uaddv_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
; CHECK-LABEL: uaddv_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uaddv d0, p0, z0.b
; CHECK-NEXT:    fmov x0, d0
; CHECK-NEXT:    ret
  %out = call i64 @llvm.aarch64.sve.uaddv.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a)
  ret i64 %out
}

define i64 @uaddv_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
; CHECK-LABEL: uaddv_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uaddv d0, p0, z0.h
; CHECK-NEXT:    fmov x0, d0
; CHECK-NEXT:    ret
  %out = call i64 @llvm.aarch64.sve.uaddv.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a)
  ret i64 %out
}

define i64 @uaddv_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
; CHECK-LABEL: uaddv_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uaddv d0, p0, z0.s
; CHECK-NEXT:    fmov x0, d0
; CHECK-NEXT:    ret
  %out = call i64 @llvm.aarch64.sve.uaddv.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a)
  ret i64 %out
}

define i64 @uaddv_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
; CHECK-LABEL: uaddv_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uaddv d0, p0, z0.d
; CHECK-NEXT:    fmov x0, d0
; CHECK-NEXT:    ret
  %out = call i64 @llvm.aarch64.sve.uaddv.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a)
  ret i64 %out
}
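
;
; SMAXV
;
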
define i8 @smaxv_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
; CHECK-LABEL: smaxv_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    smaxv b0, p0, z0.b
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %out = call i8 @llvm.aarch64.sve.smaxv.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a)
  ret i8 %out
}

define i16 @smaxv_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
; CHECK-LABEL: smaxv_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    smaxv h0, p0, z0.h
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %out = call i16 @llvm.aarch64.sve.smaxv.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a)
  ret i16 %out
}

define i32 @smaxv_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
; CHECK-LABEL: smaxv_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    smaxv s0, p0, z0.s
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %out = call i32 @llvm.aarch64.sve.smaxv.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a)
  ret i32 %out
}

define i64 @smaxv_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
; CHECK-LABEL: smaxv_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    smaxv d0, p0, z0.d
; CHECK-NEXT:    fmov x0, d0
; CHECK-NEXT:    ret
  %out = call i64 @llvm.aarch64.sve.smaxv.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a)
  ret i64 %out
}
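
;
; UMAXV
;
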
define i8 @umaxv_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
; CHECK-LABEL: umaxv_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    umaxv b0, p0, z0.b
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %out = call i8 @llvm.aarch64.sve.umaxv.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a)
  ret i8 %out
}

define i16 @umaxv_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
; CHECK-LABEL: umaxv_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    umaxv h0, p0, z0.h
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %out = call i16 @llvm.aarch64.sve.umaxv.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a)
  ret i16 %out
}

define i32 @umaxv_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
; CHECK-LABEL: umaxv_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    umaxv s0, p0, z0.s
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %out = call i32 @llvm.aarch64.sve.umaxv.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a)
  ret i32 %out
}

define i64 @umaxv_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
; CHECK-LABEL: umaxv_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    umaxv d0, p0, z0.d
; CHECK-NEXT:    fmov x0, d0
; CHECK-NEXT:    ret
  %out = call i64 @llvm.aarch64.sve.umaxv.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a)
  ret i64 %out
}
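
;
; SMINV
;
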
define i8 @sminv_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
; CHECK-LABEL: sminv_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sminv b0, p0, z0.b
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %out = call i8 @llvm.aarch64.sve.sminv.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a)
  ret i8 %out
}

define i16 @sminv_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
; CHECK-LABEL: sminv_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sminv h0, p0, z0.h
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %out = call i16 @llvm.aarch64.sve.sminv.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a)
  ret i16 %out
}

define i32 @sminv_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
; CHECK-LABEL: sminv_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sminv s0, p0, z0.s
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %out = call i32 @llvm.aarch64.sve.sminv.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a)
  ret i32 %out
}

define i64 @sminv_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
; CHECK-LABEL: sminv_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sminv d0, p0, z0.d
; CHECK-NEXT:    fmov x0, d0
; CHECK-NEXT:    ret
  %out = call i64 @llvm.aarch64.sve.sminv.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a)
  ret i64 %out
}
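
;
; UMINV
;
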
define i8 @uminv_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
; CHECK-LABEL: uminv_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uminv b0, p0, z0.b
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %out = call i8 @llvm.aarch64.sve.uminv.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a)
  ret i8 %out
}

define i16 @uminv_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
; CHECK-LABEL: uminv_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uminv h0, p0, z0.h
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %out = call i16 @llvm.aarch64.sve.uminv.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a)
  ret i16 %out
}

define i32 @uminv_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
; CHECK-LABEL: uminv_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uminv s0, p0, z0.s
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %out = call i32 @llvm.aarch64.sve.uminv.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a)
  ret i32 %out
}

define i64 @uminv_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
; CHECK-LABEL: uminv_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uminv d0, p0, z0.d
; CHECK-NEXT:    fmov x0, d0
; CHECK-NEXT:    ret
  %out = call i64 @llvm.aarch64.sve.uminv.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a)
  ret i64 %out
}
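
;
; ORV
;
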
define i8 @orv_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
; CHECK-LABEL: orv_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    orv b0, p0, z0.b
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %out = call i8 @llvm.aarch64.sve.orv.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a)
  ret i8 %out
}

define i16 @orv_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
; CHECK-LABEL: orv_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    orv h0, p0, z0.h
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %out = call i16 @llvm.aarch64.sve.orv.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a)
  ret i16 %out
}

define i32 @orv_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
; CHECK-LABEL: orv_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    orv s0, p0, z0.s
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %out = call i32 @llvm.aarch64.sve.orv.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a)
  ret i32 %out
}

define i64 @orv_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
; CHECK-LABEL: orv_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    orv d0, p0, z0.d
; CHECK-NEXT:    fmov x0, d0
; CHECK-NEXT:    ret
  %out = call i64 @llvm.aarch64.sve.orv.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a)
  ret i64 %out
}
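
;
; EORV
;
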
define i8 @eorv_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
; CHECK-LABEL: eorv_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    eorv b0, p0, z0.b
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %out = call i8 @llvm.aarch64.sve.eorv.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a)
  ret i8 %out
}

define i16 @eorv_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
; CHECK-LABEL: eorv_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    eorv h0, p0, z0.h
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %out = call i16 @llvm.aarch64.sve.eorv.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a)
  ret i16 %out
}

define i32 @eorv_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
; CHECK-LABEL: eorv_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    eorv s0, p0, z0.s
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %out = call i32 @llvm.aarch64.sve.eorv.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a)
  ret i32 %out
}

define i64 @eorv_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
; CHECK-LABEL: eorv_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    eorv d0, p0, z0.d
; CHECK-NEXT:    fmov x0, d0
; CHECK-NEXT:    ret
  %out = call i64 @llvm.aarch64.sve.eorv.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a)
  ret i64 %out
}
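
;
; ANDV
;
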
define i8 @andv_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
; CHECK-LABEL: andv_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    andv b0, p0, z0.b
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %out = call i8 @llvm.aarch64.sve.andv.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a)
  ret i8 %out
}

define i16 @andv_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
; CHECK-LABEL: andv_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    andv h0, p0, z0.h
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %out = call i16 @llvm.aarch64.sve.andv.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a)
  ret i16 %out
}

define i32 @andv_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
; CHECK-LABEL: andv_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    andv s0, p0, z0.s
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %out = call i32 @llvm.aarch64.sve.andv.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a)
  ret i32 %out
}

define i64 @andv_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
; CHECK-LABEL: andv_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    andv d0, p0, z0.d
; CHECK-NEXT:    fmov x0, d0
; CHECK-NEXT:    ret
  %out = call i64 @llvm.aarch64.sve.andv.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a)
  ret i64 %out
}

declare i64 @llvm.aarch64.sve.saddv.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>)
declare i64 @llvm.aarch64.sve.saddv.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>)
declare i64 @llvm.aarch64.sve.saddv.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>)
declare i64 @llvm.aarch64.sve.saddv.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>)
declare i64 @llvm.aarch64.sve.uaddv.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>)
declare i64 @llvm.aarch64.sve.uaddv.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>)
declare i64 @llvm.aarch64.sve.uaddv.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>)
declare i64 @llvm.aarch64.sve.uaddv.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>)
declare i8 @llvm.aarch64.sve.smaxv.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>)
declare i16 @llvm.aarch64.sve.smaxv.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>)
declare i32 @llvm.aarch64.sve.smaxv.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>)
declare i64 @llvm.aarch64.sve.smaxv.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>)
declare i8 @llvm.aarch64.sve.umaxv.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>)
declare i16 @llvm.aarch64.sve.umaxv.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>)
declare i32 @llvm.aarch64.sve.umaxv.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>)
declare i64 @llvm.aarch64.sve.umaxv.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>)
declare i8 @llvm.aarch64.sve.sminv.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>)
declare i16 @llvm.aarch64.sve.sminv.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>)
declare i32 @llvm.aarch64.sve.sminv.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>)
declare i64 @llvm.aarch64.sve.sminv.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>)
declare i8 @llvm.aarch64.sve.uminv.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>)
declare i16 @llvm.aarch64.sve.uminv.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>)
declare i32 @llvm.aarch64.sve.uminv.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>)
declare i64 @llvm.aarch64.sve.uminv.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>)
declare i8 @llvm.aarch64.sve.orv.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>)
declare i16 @llvm.aarch64.sve.orv.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>)
declare i32 @llvm.aarch64.sve.orv.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>)
declare i64 @llvm.aarch64.sve.orv.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>)
declare i8 @llvm.aarch64.sve.eorv.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>)
declare i16 @llvm.aarch64.sve.eorv.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>)
declare i32 @llvm.aarch64.sve.eorv.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>)
declare i64 @llvm.aarch64.sve.eorv.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>)
declare i8 @llvm.aarch64.sve.andv.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>)
declare i16 @llvm.aarch64.sve.andv.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>)
declare i32 @llvm.aarch64.sve.andv.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>)
declare i64 @llvm.aarch64.sve.andv.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>)