# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=riscv32 -run-pass=finalize-isel -simplify-mir -o - %s \
# RUN:   | FileCheck -check-prefix=RV32I %s
# RUN: llc -mtriple=riscv32 -mattr=+experimental-zbt -run-pass=finalize-isel -simplify-mir -o - %s \
# RUN:   | FileCheck -check-prefix=RV32IBT %s
# RUN: llc -mtriple=riscv64 -run-pass=finalize-isel -simplify-mir -o - %s \
# RUN:   | FileCheck -check-prefix=RV64I %s
# RUN: llc -mtriple=riscv64 -mattr=+experimental-zbt -run-pass=finalize-isel -simplify-mir -o - %s \
# RUN:   | FileCheck -check-prefix=RV64IBT %s

# Provide dummy definitions of functions and just enough metadata to create a
# DBG_VALUE.
--- |
  define void @cmov_interleaved_bad() {
    ret void
  }
  define void @cmov_interleaved_debug_value() {
    ret void
  }
...
---
# Here we have a sequence of select instructions with a non-select instruction
# in the middle. Because the non-select depends on the result of a previous
# select, we cannot optimize the sequence to share control-flow.
name:            cmov_interleaved_bad
alignment:       4
tracksRegLiveness: true
registers:
  - { id: 0, class: gpr }
  - { id: 1, class: gpr }
  - { id: 2, class: gpr }
  - { id: 3, class: gpr }
  - { id: 4, class: gpr }
  - { id: 5, class: gpr }
  - { id: 6, class: gpr }
  - { id: 7, class: gpr }
  - { id: 8, class: gpr }
  - { id: 9, class: gpr }
  - { id: 10, class: gpr }
liveins:
  - { reg: '$x10', virtual-reg: '%0' }
  - { reg: '$x11', virtual-reg: '%1' }
  - { reg: '$x12', virtual-reg: '%2' }
  - { reg: '$x13', virtual-reg: '%3' }
body:             |
  bb.0:
    liveins: $x10, $x11, $x12, $x13

    ; RV32I-LABEL: name: cmov_interleaved_bad
    ; RV32I: successors: %bb.1, %bb.2
    ; RV32I: liveins: $x10, $x11, $x12, $x13
    ; RV32I: [[COPY:%[0-9]+]]:gpr = COPY $x13
    ; RV32I: [[COPY1:%[0-9]+]]:gpr = COPY $x12
    ; RV32I: [[COPY2:%[0-9]+]]:gpr = COPY $x11
    ; RV32I: [[COPY3:%[0-9]+]]:gpr = COPY $x10
    ; RV32I: [[ANDI:%[0-9]+]]:gpr = ANDI [[COPY3]], 1
    ; RV32I: [[COPY4:%[0-9]+]]:gpr = COPY $x0
    ; RV32I: BNE [[ANDI]], [[COPY4]], %bb.2
    ; RV32I: .1:
    ; RV32I: .2:
    ; RV32I: successors: %bb.3, %bb.4
    ; RV32I: [[PHI:%[0-9]+]]:gpr = PHI [[COPY2]], %bb.0, [[COPY1]], %bb.1
    ; RV32I: [[ADDI:%[0-9]+]]:gpr = ADDI [[PHI]], 1
    ; RV32I: BNE [[ANDI]], [[COPY4]], %bb.4
    ; RV32I: .3:
    ; RV32I: .4:
    ; RV32I: [[PHI1:%[0-9]+]]:gpr = PHI [[COPY]], %bb.2, [[COPY1]], %bb.3
    ; RV32I: [[ADD:%[0-9]+]]:gpr = ADD [[PHI]], killed [[PHI1]]
    ; RV32I: $x10 = COPY [[ADD]]
    ; RV32I: PseudoRET implicit $x10
    ; RV32IBT-LABEL: name: cmov_interleaved_bad
    ; RV32IBT: successors: %bb.1, %bb.2
    ; RV32IBT: liveins: $x10, $x11, $x12, $x13
    ; RV32IBT: [[COPY:%[0-9]+]]:gpr = COPY $x13
    ; RV32IBT: [[COPY1:%[0-9]+]]:gpr = COPY $x12
    ; RV32IBT: [[COPY2:%[0-9]+]]:gpr = COPY $x11
    ; RV32IBT: [[COPY3:%[0-9]+]]:gpr = COPY $x10
    ; RV32IBT: [[ANDI:%[0-9]+]]:gpr = ANDI [[COPY3]], 1
    ; RV32IBT: [[COPY4:%[0-9]+]]:gpr = COPY $x0
    ; RV32IBT: BNE [[ANDI]], [[COPY4]], %bb.2
    ; RV32IBT: .1:
    ; RV32IBT: .2:
    ; RV32IBT: successors: %bb.3, %bb.4
    ; RV32IBT: [[PHI:%[0-9]+]]:gpr = PHI [[COPY2]], %bb.0, [[COPY1]], %bb.1
    ; RV32IBT: [[ADDI:%[0-9]+]]:gpr = ADDI [[PHI]], 1
    ; RV32IBT: BNE [[ANDI]], [[COPY4]], %bb.4
    ; RV32IBT: .3:
    ; RV32IBT: .4:
    ; RV32IBT: [[PHI1:%[0-9]+]]:gpr = PHI [[COPY]], %bb.2, [[COPY1]], %bb.3
    ; RV32IBT: [[ADD:%[0-9]+]]:gpr = ADD [[PHI]], killed [[PHI1]]
    ; RV32IBT: $x10 = COPY [[ADD]]
    ; RV32IBT: PseudoRET implicit $x10
    ; RV64I-LABEL: name: cmov_interleaved_bad
    ; RV64I: successors: %bb.1, %bb.2
    ; RV64I: liveins: $x10, $x11, $x12, $x13
    ; RV64I: [[COPY:%[0-9]+]]:gpr = COPY $x13
    ; RV64I: [[COPY1:%[0-9]+]]:gpr = COPY $x12
    ; RV64I: [[COPY2:%[0-9]+]]:gpr = COPY $x11
    ; RV64I: [[COPY3:%[0-9]+]]:gpr = COPY $x10
    ; RV64I: [[ANDI:%[0-9]+]]:gpr = ANDI [[COPY3]], 1
    ; RV64I: [[COPY4:%[0-9]+]]:gpr = COPY $x0
    ; RV64I: BNE [[ANDI]], [[COPY4]], %bb.2
    ; RV64I: .1:
    ; RV64I: .2:
    ; RV64I: successors: %bb.3, %bb.4
    ; RV64I: [[PHI:%[0-9]+]]:gpr = PHI [[COPY2]], %bb.0, [[COPY1]], %bb.1
    ; RV64I: [[ADDI:%[0-9]+]]:gpr = ADDI [[PHI]], 1
    ; RV64I: BNE [[ANDI]], [[COPY4]], %bb.4
    ; RV64I: .3:
    ; RV64I: .4:
    ; RV64I: [[PHI1:%[0-9]+]]:gpr = PHI [[COPY]], %bb.2, [[COPY1]], %bb.3
    ; RV64I: [[ADD:%[0-9]+]]:gpr = ADD [[PHI]], killed [[PHI1]]
    ; RV64I: $x10 = COPY [[ADD]]
    ; RV64I: PseudoRET implicit $x10
    ; RV64IBT-LABEL: name: cmov_interleaved_bad
    ; RV64IBT: successors: %bb.1, %bb.2
    ; RV64IBT: liveins: $x10, $x11, $x12, $x13
    ; RV64IBT: [[COPY:%[0-9]+]]:gpr = COPY $x13
    ; RV64IBT: [[COPY1:%[0-9]+]]:gpr = COPY $x12
    ; RV64IBT: [[COPY2:%[0-9]+]]:gpr = COPY $x11
    ; RV64IBT: [[COPY3:%[0-9]+]]:gpr = COPY $x10
    ; RV64IBT: [[ANDI:%[0-9]+]]:gpr = ANDI [[COPY3]], 1
    ; RV64IBT: [[COPY4:%[0-9]+]]:gpr = COPY $x0
    ; RV64IBT: BNE [[ANDI]], [[COPY4]], %bb.2
    ; RV64IBT: .1:
    ; RV64IBT: .2:
    ; RV64IBT: successors: %bb.3, %bb.4
    ; RV64IBT: [[PHI:%[0-9]+]]:gpr = PHI [[COPY2]], %bb.0, [[COPY1]], %bb.1
    ; RV64IBT: [[ADDI:%[0-9]+]]:gpr = ADDI [[PHI]], 1
    ; RV64IBT: BNE [[ANDI]], [[COPY4]], %bb.4
    ; RV64IBT: .3:
    ; RV64IBT: .4:
    ; RV64IBT: [[PHI1:%[0-9]+]]:gpr = PHI [[COPY]], %bb.2, [[COPY1]], %bb.3
    ; RV64IBT: [[ADD:%[0-9]+]]:gpr = ADD [[PHI]], killed [[PHI1]]
    ; RV64IBT: $x10 = COPY [[ADD]]
    ; RV64IBT: PseudoRET implicit $x10
    %3:gpr = COPY $x13
    %2:gpr = COPY $x12
    %1:gpr = COPY $x11
    %0:gpr = COPY $x10
    %5:gpr = ANDI %0, 1
    %6:gpr = COPY $x0
    %7:gpr = Select_GPR_Using_CC_GPR %5, %6, 22, %1, %2
    %8:gpr = ADDI %7, 1
    %9:gpr = Select_GPR_Using_CC_GPR %5, %6, 22, %3, %2
    %10:gpr = ADD %7, killed %9
    $x10 = COPY %10
    PseudoRET implicit $x10

...
---
# Demonstrate that debug info associated with selects is correctly moved to
# the tail basic block, while debug info associated with non-selects is left
# in the head basic block.
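#
# For orientation only (a hand-written sketch, not verified by FileCheck):
# finalize-isel expands a select pseudo such as
#   %7:gpr = Select_GPR_Using_CC_GPR %5, %6, 22, %1, %2
#   DBG_VALUE %7, $noreg
# into a head block ending in a conditional branch, an empty fall-through
# block, and a tail block that merges the two values, e.g.
#   bb.0:
#     BNE %5, %6, %bb.2
#   bb.1:
#   bb.2:
#     %7:gpr = PHI %1, %bb.0, %2, %bb.1
#     DBG_VALUE %7, $noreg
# so a DBG_VALUE attached to the select result is expected to land after the
# PHI in the tail block, while the DBG_VALUE for the unrelated ADDI below is
# expected to remain in the head block.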
name:            cmov_interleaved_debug_value
alignment:       4
tracksRegLiveness: true
registers:
  - { id: 0, class: gpr }
  - { id: 1, class: gpr }
  - { id: 2, class: gpr }
  - { id: 3, class: gpr }
  - { id: 4, class: gpr }
  - { id: 5, class: gpr }
  - { id: 6, class: gpr }
  - { id: 7, class: gpr }
  - { id: 8, class: gpr }
  - { id: 9, class: gpr }
  - { id: 10, class: gpr }
liveins:
  - { reg: '$x10', virtual-reg: '%0' }
  - { reg: '$x11', virtual-reg: '%1' }
  - { reg: '$x12', virtual-reg: '%2' }
  - { reg: '$x13', virtual-reg: '%3' }
body:             |
  bb.0:
    liveins: $x10, $x11, $x12, $x13

    ; RV32I-LABEL: name: cmov_interleaved_debug_value
    ; RV32I: successors: %bb.1, %bb.2
    ; RV32I: liveins: $x10, $x11, $x12, $x13
    ; RV32I: [[COPY:%[0-9]+]]:gpr = COPY $x13
    ; RV32I: [[COPY1:%[0-9]+]]:gpr = COPY $x12
    ; RV32I: [[COPY2:%[0-9]+]]:gpr = COPY $x11
    ; RV32I: [[COPY3:%[0-9]+]]:gpr = COPY $x10
    ; RV32I: [[ANDI:%[0-9]+]]:gpr = ANDI [[COPY3]], 1
    ; RV32I: [[COPY4:%[0-9]+]]:gpr = COPY $x0
    ; RV32I: [[ADDI:%[0-9]+]]:gpr = ADDI [[COPY3]], 1
    ; RV32I: DBG_VALUE [[ADDI]], $noreg
    ; RV32I: BNE [[ANDI]], [[COPY4]], %bb.2
    ; RV32I: .1:
    ; RV32I: .2:
    ; RV32I: [[PHI:%[0-9]+]]:gpr = PHI [[COPY2]], %bb.0, [[COPY1]], %bb.1
    ; RV32I: [[PHI1:%[0-9]+]]:gpr = PHI [[COPY]], %bb.0, [[COPY1]], %bb.1
    ; RV32I: DBG_VALUE [[PHI]], $noreg
    ; RV32I: DBG_VALUE [[PHI1]], $noreg
    ; RV32I: [[ADD:%[0-9]+]]:gpr = ADD [[PHI]], killed [[PHI1]]
    ; RV32I: $x10 = COPY [[ADD]]
    ; RV32I: PseudoRET implicit $x10
    ; RV32IBT-LABEL: name: cmov_interleaved_debug_value
    ; RV32IBT: successors: %bb.1, %bb.2
    ; RV32IBT: liveins: $x10, $x11, $x12, $x13
    ; RV32IBT: [[COPY:%[0-9]+]]:gpr = COPY $x13
    ; RV32IBT: [[COPY1:%[0-9]+]]:gpr = COPY $x12
    ; RV32IBT: [[COPY2:%[0-9]+]]:gpr = COPY $x11
    ; RV32IBT: [[COPY3:%[0-9]+]]:gpr = COPY $x10
    ; RV32IBT: [[ANDI:%[0-9]+]]:gpr = ANDI [[COPY3]], 1
    ; RV32IBT: [[COPY4:%[0-9]+]]:gpr = COPY $x0
    ; RV32IBT: [[ADDI:%[0-9]+]]:gpr = ADDI [[COPY3]], 1
    ; RV32IBT: DBG_VALUE [[ADDI]], $noreg
    ; RV32IBT: BNE [[ANDI]], [[COPY4]], %bb.2
    ; RV32IBT: .1:
    ; RV32IBT: .2:
    ; RV32IBT: [[PHI:%[0-9]+]]:gpr = PHI [[COPY2]], %bb.0, [[COPY1]], %bb.1
    ; RV32IBT: [[PHI1:%[0-9]+]]:gpr = PHI [[COPY]], %bb.0, [[COPY1]], %bb.1
    ; RV32IBT: DBG_VALUE [[PHI]], $noreg
    ; RV32IBT: DBG_VALUE [[PHI1]], $noreg
    ; RV32IBT: [[ADD:%[0-9]+]]:gpr = ADD [[PHI]], killed [[PHI1]]
    ; RV32IBT: $x10 = COPY [[ADD]]
    ; RV32IBT: PseudoRET implicit $x10
    ; RV64I-LABEL: name: cmov_interleaved_debug_value
    ; RV64I: successors: %bb.1, %bb.2
    ; RV64I: liveins: $x10, $x11, $x12, $x13
    ; RV64I: [[COPY:%[0-9]+]]:gpr = COPY $x13
    ; RV64I: [[COPY1:%[0-9]+]]:gpr = COPY $x12
    ; RV64I: [[COPY2:%[0-9]+]]:gpr = COPY $x11
    ; RV64I: [[COPY3:%[0-9]+]]:gpr = COPY $x10
    ; RV64I: [[ANDI:%[0-9]+]]:gpr = ANDI [[COPY3]], 1
    ; RV64I: [[COPY4:%[0-9]+]]:gpr = COPY $x0
    ; RV64I: [[ADDI:%[0-9]+]]:gpr = ADDI [[COPY3]], 1
    ; RV64I: DBG_VALUE [[ADDI]], $noreg
    ; RV64I: BNE [[ANDI]], [[COPY4]], %bb.2
    ; RV64I: .1:
    ; RV64I: .2:
    ; RV64I: [[PHI:%[0-9]+]]:gpr = PHI [[COPY2]], %bb.0, [[COPY1]], %bb.1
    ; RV64I: [[PHI1:%[0-9]+]]:gpr = PHI [[COPY]], %bb.0, [[COPY1]], %bb.1
    ; RV64I: DBG_VALUE [[PHI]], $noreg
    ; RV64I: DBG_VALUE [[PHI1]], $noreg
    ; RV64I: [[ADD:%[0-9]+]]:gpr = ADD [[PHI]], killed [[PHI1]]
    ; RV64I: $x10 = COPY [[ADD]]
    ; RV64I: PseudoRET implicit $x10
    ; RV64IBT-LABEL: name: cmov_interleaved_debug_value
    ; RV64IBT: successors: %bb.1, %bb.2
    ; RV64IBT: liveins: $x10, $x11, $x12, $x13
    ; RV64IBT: [[COPY:%[0-9]+]]:gpr = COPY $x13
    ; RV64IBT: [[COPY1:%[0-9]+]]:gpr = COPY $x12
    ; RV64IBT: [[COPY2:%[0-9]+]]:gpr = COPY $x11
    ; RV64IBT: [[COPY3:%[0-9]+]]:gpr = COPY $x10
    ; RV64IBT: [[ANDI:%[0-9]+]]:gpr = ANDI [[COPY3]], 1
    ; RV64IBT: [[COPY4:%[0-9]+]]:gpr = COPY $x0
    ; RV64IBT: [[ADDI:%[0-9]+]]:gpr = ADDI [[COPY3]], 1
    ; RV64IBT: DBG_VALUE [[ADDI]], $noreg
    ; RV64IBT: BNE [[ANDI]], [[COPY4]], %bb.2
    ; RV64IBT: .1:
    ; RV64IBT: .2:
    ; RV64IBT: [[PHI:%[0-9]+]]:gpr = PHI [[COPY2]], %bb.0, [[COPY1]], %bb.1
    ; RV64IBT: [[PHI1:%[0-9]+]]:gpr = PHI [[COPY]], %bb.0, [[COPY1]], %bb.1
    ; RV64IBT: DBG_VALUE [[PHI]], $noreg
    ; RV64IBT: DBG_VALUE [[PHI1]], $noreg
    ; RV64IBT: [[ADD:%[0-9]+]]:gpr = ADD [[PHI]], killed [[PHI1]]
    ; RV64IBT: $x10 = COPY [[ADD]]
    ; RV64IBT: PseudoRET implicit $x10
    %3:gpr = COPY $x13
    %2:gpr = COPY $x12
    %1:gpr = COPY $x11
    %0:gpr = COPY $x10
    %5:gpr = ANDI %0, 1
    %6:gpr = COPY $x0
    %7:gpr = Select_GPR_Using_CC_GPR %5, %6, 22, %1, %2
    DBG_VALUE %7, $noreg
    %8:gpr = ADDI %0, 1
    DBG_VALUE %8, $noreg
    %9:gpr = Select_GPR_Using_CC_GPR %5, %6, 22, %3, %2
    DBG_VALUE %9, $noreg
    %10:gpr = ADD %7, killed %9
    $x10 = COPY %10
    PseudoRET implicit $x10

...
---