; llvm-for-llvmta/test/CodeGen/RISCV/half-mem.ll

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-zfh -verify-machineinstrs \
; RUN: -target-abi ilp32f < %s | FileCheck -check-prefix=RV32IZFH %s
; RUN: llc -mtriple=riscv64 -mattr=+experimental-zfh -verify-machineinstrs \
; RUN: -target-abi lp64f < %s | FileCheck -check-prefix=RV64IZFH %s
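
; This file exercises half-precision (f16) loads and stores with the Zfh
; extension: flh/fsh through a pointer argument, to a global, to a constant
; address, and to stack slots, for both RV32 (ilp32f) and RV64 (lp64f).
; The assertions can be regenerated by re-running utils/update_llc_test_checks.py
; on this file against a built llc.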
define half @flh(half *%a) nounwind {
; RV32IZFH-LABEL: flh:
; RV32IZFH: # %bb.0:
; RV32IZFH-NEXT: flh ft0, 0(a0)
; RV32IZFH-NEXT: flh ft1, 6(a0)
; RV32IZFH-NEXT: fadd.h fa0, ft0, ft1
; RV32IZFH-NEXT: ret
;
; RV64IZFH-LABEL: flh:
; RV64IZFH: # %bb.0:
; RV64IZFH-NEXT: flh ft0, 0(a0)
; RV64IZFH-NEXT: flh ft1, 6(a0)
; RV64IZFH-NEXT: fadd.h fa0, ft0, ft1
; RV64IZFH-NEXT: ret
%1 = load half, half* %a
%2 = getelementptr half, half* %a, i32 3
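; A GEP index of 3 into an array of half is a byte offset of 6 (3 * 2 bytes),
; matching the flh from 6(a0) expected above.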
%3 = load half, half* %2
; Use both loaded values in an FP op to ensure an flh is used, even for the
; soft half ABI
%4 = fadd half %1, %3
ret half %4
}

define dso_local void @fsh(half *%a, half %b, half %c) nounwind {
; Use %b and %c in an FP op to ensure half precision floating point registers
; are used, even for the soft half ABI
; RV32IZFH-LABEL: fsh:
; RV32IZFH: # %bb.0:
; RV32IZFH-NEXT: fadd.h ft0, fa0, fa1
; RV32IZFH-NEXT: fsh ft0, 0(a0)
; RV32IZFH-NEXT: fsh ft0, 16(a0)
; RV32IZFH-NEXT: ret
;
; RV64IZFH-LABEL: fsh:
; RV64IZFH: # %bb.0:
; RV64IZFH-NEXT: fadd.h ft0, fa0, fa1
; RV64IZFH-NEXT: fsh ft0, 0(a0)
; RV64IZFH-NEXT: fsh ft0, 16(a0)
; RV64IZFH-NEXT: ret
%1 = fadd half %b, %c
store half %1, half* %a
%2 = getelementptr half, half* %a, i32 8
store half %1, half* %2
ret void
}

; Check load and store to a global
@G = dso_local global half 0.0
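; The global is addressed with a lui %hi(G) / %lo(G) pair; the second access
; (GEP index 9, a byte offset of 18) first materialises the full address with
; an addi of %lo(G) and then uses a plain 18-byte offset.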
define half @flh_fsh_global(half %a, half %b) nounwind {
; Use %a and %b in an FP op to ensure half precision floating point registers
; are used, even for the soft half ABI
; RV32IZFH-LABEL: flh_fsh_global:
; RV32IZFH: # %bb.0:
; RV32IZFH-NEXT: fadd.h fa0, fa0, fa1
; RV32IZFH-NEXT: lui a0, %hi(G)
; RV32IZFH-NEXT: flh ft0, %lo(G)(a0)
; RV32IZFH-NEXT: fsh fa0, %lo(G)(a0)
; RV32IZFH-NEXT: addi a0, a0, %lo(G)
; RV32IZFH-NEXT: flh ft0, 18(a0)
; RV32IZFH-NEXT: fsh fa0, 18(a0)
; RV32IZFH-NEXT: ret
;
; RV64IZFH-LABEL: flh_fsh_global:
; RV64IZFH: # %bb.0:
; RV64IZFH-NEXT: fadd.h fa0, fa0, fa1
; RV64IZFH-NEXT: lui a0, %hi(G)
; RV64IZFH-NEXT: flh ft0, %lo(G)(a0)
; RV64IZFH-NEXT: fsh fa0, %lo(G)(a0)
; RV64IZFH-NEXT: addi a0, a0, %lo(G)
; RV64IZFH-NEXT: flh ft0, 18(a0)
; RV64IZFH-NEXT: fsh fa0, 18(a0)
; RV64IZFH-NEXT: ret
%1 = fadd half %a, %b
%2 = load volatile half, half* @G
store half %1, half* @G
%3 = getelementptr half, half* @G, i32 9
%4 = load volatile half, half* %3
store half %1, half* %3
ret half %1
}

; Ensure that 1 is added to the high 20 bits if bit 11 of the low part is 1
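; The address below is 3735928559 (0xdeadbeef): its low 12 bits are 0xeef, so
; bit 11 is set and the 12-bit offset sign-extends to -273. To compensate, the
; lui immediate must be the high 20 bits plus one, 0xdeadc (912092), since
; 912092 << 12 = 0xdeadc000 and 0xdeadc000 - 273 = 0xdeadbeef.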
define half @flh_fsh_constant(half %a) nounwind {
; RV32IZFH-LABEL: flh_fsh_constant:
; RV32IZFH: # %bb.0:
; RV32IZFH-NEXT: lui a0, 912092
; RV32IZFH-NEXT: flh ft0, -273(a0)
; RV32IZFH-NEXT: fadd.h fa0, fa0, ft0
; RV32IZFH-NEXT: fsh fa0, -273(a0)
; RV32IZFH-NEXT: ret
;
; RV64IZFH-LABEL: flh_fsh_constant:
; RV64IZFH: # %bb.0:
; RV64IZFH-NEXT: lui a0, 56
; RV64IZFH-NEXT: addiw a0, a0, -1353
; RV64IZFH-NEXT: slli a0, a0, 14
; RV64IZFH-NEXT: flh ft0, -273(a0)
; RV64IZFH-NEXT: fadd.h fa0, fa0, ft0
; RV64IZFH-NEXT: fsh fa0, -273(a0)
; RV64IZFH-NEXT: ret
%1 = inttoptr i32 3735928559 to half*
%2 = load volatile half, half* %1
%3 = fadd half %a, %2
store half %3, half* %1
ret half %3
}

declare void @notdead(i8*)
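
; Passing the address of the stack slot to the external @notdead keeps the slot
; live across the call, so the flh from (or fsh to) it cannot be folded away.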
define half @flh_stack(half %a) nounwind {
; RV32IZFH-LABEL: flh_stack:
; RV32IZFH: # %bb.0:
; RV32IZFH-NEXT: addi sp, sp, -16
; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFH-NEXT: fsw fs0, 8(sp) # 4-byte Folded Spill
; RV32IZFH-NEXT: fmv.h fs0, fa0
; RV32IZFH-NEXT: addi a0, sp, 4
; RV32IZFH-NEXT: call notdead@plt
; RV32IZFH-NEXT: flh ft0, 4(sp)
; RV32IZFH-NEXT: fadd.h fa0, ft0, fs0
; RV32IZFH-NEXT: flw fs0, 8(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: addi sp, sp, 16
; RV32IZFH-NEXT: ret
;
; RV64IZFH-LABEL: flh_stack:
; RV64IZFH: # %bb.0:
; RV64IZFH-NEXT: addi sp, sp, -16
; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFH-NEXT: fsw fs0, 4(sp) # 4-byte Folded Spill
; RV64IZFH-NEXT: fmv.h fs0, fa0
; RV64IZFH-NEXT: mv a0, sp
; RV64IZFH-NEXT: call notdead@plt
; RV64IZFH-NEXT: flh ft0, 0(sp)
; RV64IZFH-NEXT: fadd.h fa0, ft0, fs0
; RV64IZFH-NEXT: flw fs0, 4(sp) # 4-byte Folded Reload
; RV64IZFH-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFH-NEXT: addi sp, sp, 16
; RV64IZFH-NEXT: ret
%1 = alloca half, align 4
%2 = bitcast half* %1 to i8*
call void @notdead(i8* %2)
%3 = load half, half* %1
%4 = fadd half %3, %a ; force load into FPR16
ret half %4
}

define dso_local void @fsh_stack(half %a, half %b) nounwind {
; RV32IZFH-LABEL: fsh_stack:
; RV32IZFH: # %bb.0:
; RV32IZFH-NEXT: addi sp, sp, -16
; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFH-NEXT: fadd.h ft0, fa0, fa1
; RV32IZFH-NEXT: fsh ft0, 8(sp)
; RV32IZFH-NEXT: addi a0, sp, 8
; RV32IZFH-NEXT: call notdead@plt
; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: addi sp, sp, 16
; RV32IZFH-NEXT: ret
;
; RV64IZFH-LABEL: fsh_stack:
; RV64IZFH: # %bb.0:
; RV64IZFH-NEXT: addi sp, sp, -16
; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFH-NEXT: fadd.h ft0, fa0, fa1
; RV64IZFH-NEXT: fsh ft0, 4(sp)
; RV64IZFH-NEXT: addi a0, sp, 4
; RV64IZFH-NEXT: call notdead@plt
; RV64IZFH-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFH-NEXT: addi sp, sp, 16
; RV64IZFH-NEXT: ret
%1 = fadd half %a, %b ; force store from FPR16
%2 = alloca half, align 4
store half %1, half* %2
%3 = bitcast half* %2 to i8*
call void @notdead(i8* %3)
ret void
}