diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
index 7afd6def4e4d2..cd4c3b6be19b5 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
@@ -41,24 +41,6 @@ multiclass VPatUSLoadStoreSDNode<ValueType type,
             (store_instr reg_class:$rs2, GPR:$rs1, avl, log2sew)>;
 }
 
-multiclass VPatUSLoadStoreWholeVRSDNode<ValueType type,
-                                        int log2sew,
-                                        LMULInfo vlmul,
-                                        VReg reg_class,
-                                        int sew = !shl(1, log2sew)> {
-  defvar load_instr =
-    !cast<Instruction>("VL"#!substr(vlmul.MX, 1)#"RE"#sew#"_V");
-  defvar store_instr =
-    !cast<Instruction>("VS"#!substr(vlmul.MX, 1)#"R_V");
-
-  // Load
-  def : Pat<(type (load GPR:$rs1)),
-            (load_instr GPR:$rs1)>;
-  // Store
-  def : Pat<(store type:$rs2, GPR:$rs1),
-            (store_instr reg_class:$rs2, GPR:$rs1)>;
-}
-
 multiclass VPatUSLoadStoreMaskSDNode<MTypeInfo m> {
   defvar load_instr = !cast<Instruction>("PseudoVLM_V_"#m.BX);
   defvar store_instr = !cast<Instruction>("PseudoVSM_V_"#m.BX);
@@ -895,23 +877,11 @@ multiclass VPatAVGADD_VV_VX_RM
 //===----------------------------------------------------------------------===//
 
 // 7.4. Vector Unit-Stride Instructions
-foreach vti = !listconcat(FractionalGroupIntegerVectors,
-                          FractionalGroupFloatVectors,
-                          FractionalGroupBFloatVectors) in
+foreach vti = AllVectors in
   let Predicates = !if(!eq(vti.Scalar, f16), [HasVInstructionsF16Minimal],
                        GetVTypePredicates<vti>.Predicates) in
     defm : VPatUSLoadStoreSDNode<vti.Vector, vti.Log2SEW, vti.LMul,
                                  vti.AVL, vti.RegClass>;
-foreach vti = [VI8M1, VI16M1, VI32M1, VI64M1, VBF16M1, VF16M1, VF32M1, VF64M1] in
-  let Predicates = !if(!eq(vti.Scalar, f16), [HasVInstructionsF16Minimal],
-                       GetVTypePredicates<vti>.Predicates) in
-    defm : VPatUSLoadStoreWholeVRSDNode<vti.Vector, vti.Log2SEW, vti.LMul,
-                                        vti.RegClass>;
-foreach vti = !listconcat(GroupIntegerVectors, GroupFloatVectors, GroupBFloatVectors) in
-  let Predicates = !if(!eq(vti.Scalar, f16), [HasVInstructionsF16Minimal],
-                       GetVTypePredicates<vti>.Predicates) in
-    defm : VPatUSLoadStoreWholeVRSDNode<vti.Vector, vti.Log2SEW, vti.LMul,
-                                        vti.RegClass>;
 foreach mti = AllMasks in
   let Predicates = [HasVInstructions] in
     defm : VPatUSLoadStoreMaskSDNode<mti>;
diff --git a/llvm/lib/Target/RISCV/RISCVVectorPeephole.cpp b/llvm/lib/Target/RISCV/RISCVVectorPeephole.cpp
index 3d8fb6b7bbbef..b083e64cfc8d7 100644
--- a/llvm/lib/Target/RISCV/RISCVVectorPeephole.cpp
+++ b/llvm/lib/Target/RISCV/RISCVVectorPeephole.cpp
@@ -59,6 +59,7 @@ class RISCVVectorPeephole : public MachineFunctionPass {
 
 private:
   bool convertToVLMAX(MachineInstr &MI) const;
+  bool convertToWholeRegister(MachineInstr &MI) const;
   bool convertToUnmasked(MachineInstr &MI) const;
   bool convertVMergeToVMv(MachineInstr &MI) const;
 
@@ -155,6 +156,58 @@ bool RISCVVectorPeephole::isAllOnesMask(const MachineInstr *MaskDef) const {
   }
 }
 
+/// Convert unit strided unmasked loads and stores to whole-register equivalents
+/// to avoid the dependency on $vl and $vtype.
+///
+/// %x = PseudoVLE8_V_M1 %passthru, %ptr, %vlmax, policy
+/// PseudoVSE8_V_M1 %v, %ptr, %vlmax
+///
+/// ->
+///
+/// %x = VL1RE8_V %ptr
+/// VS1R_V %v, %ptr
+bool RISCVVectorPeephole::convertToWholeRegister(MachineInstr &MI) const {
+#define CASE_WHOLE_REGISTER_LMUL_SEW(lmul, sew)                                \
+  case RISCV::PseudoVLE##sew##_V_M##lmul:                                      \
+    NewOpc = RISCV::VL##lmul##RE##sew##_V;                                     \
+    break;                                                                     \
+  case RISCV::PseudoVSE##sew##_V_M##lmul:                                      \
+    NewOpc = RISCV::VS##lmul##R_V;                                             \
+    break;
+#define CASE_WHOLE_REGISTER_LMUL(lmul)                                         \
+  CASE_WHOLE_REGISTER_LMUL_SEW(lmul, 8)                                        \
+  CASE_WHOLE_REGISTER_LMUL_SEW(lmul, 16)                                       \
+  CASE_WHOLE_REGISTER_LMUL_SEW(lmul, 32)                                       \
+  CASE_WHOLE_REGISTER_LMUL_SEW(lmul, 64)
+
+  unsigned NewOpc;
+  switch (MI.getOpcode()) {
+    CASE_WHOLE_REGISTER_LMUL(1)
+    CASE_WHOLE_REGISTER_LMUL(2)
+    CASE_WHOLE_REGISTER_LMUL(4)
+    CASE_WHOLE_REGISTER_LMUL(8)
+  default:
+    return false;
+  }
+
+  MachineOperand &VLOp = MI.getOperand(RISCVII::getVLOpNum(MI.getDesc()));
+  if (!VLOp.isImm() || VLOp.getImm() != RISCV::VLMaxSentinel)
+    return false;
+
+  // Whole register instructions aren't pseudos so they don't have
+  // policy/SEW/AVL ops, and they don't have passthrus.
+  if (RISCVII::hasVecPolicyOp(MI.getDesc().TSFlags))
+    MI.removeOperand(RISCVII::getVecPolicyOpNum(MI.getDesc()));
+  MI.removeOperand(RISCVII::getSEWOpNum(MI.getDesc()));
+  MI.removeOperand(RISCVII::getVLOpNum(MI.getDesc()));
+  if (RISCVII::isFirstDefTiedToFirstUse(MI.getDesc()))
+    MI.removeOperand(1);
+
+  MI.setDesc(TII->get(NewOpc));
+
+  return true;
+}
+
 // Transform (VMERGE_VVM_<LMUL> false, false, true, allones, vl, sew) to
 // (VMV_V_V_<LMUL> false, true, vl, sew). It may decrease uses of VMSET.
 bool RISCVVectorPeephole::convertVMergeToVMv(MachineInstr &MI) const {
@@ -281,6 +334,7 @@ bool RISCVVectorPeephole::runOnMachineFunction(MachineFunction &MF) {
     for (MachineInstr &MI : MBB) {
       Changed |= convertToVLMAX(MI);
       Changed |= convertToUnmasked(MI);
+      Changed |= convertToWholeRegister(MI);
       Changed |= convertVMergeToVMv(MI);
     }
   }
diff --git a/llvm/test/CodeGen/RISCV/rvv/rvv-out-arguments.ll b/llvm/test/CodeGen/RISCV/rvv/rvv-out-arguments.ll
index d5ba11c8d19d0..3850d5d6d2c74 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rvv-out-arguments.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rvv-out-arguments.ll
@@ -20,7 +20,8 @@ define dso_local void @lots_args(i32 signext %x0, i32 signext %x1,
diff --git a/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops-mir.ll b/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops-mir.ll
--- a/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops-mir.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops-mir.ll
@@ ... @@ define void @vpmerge_vpload_store(<vscale x 2 x i32> %passthru, ptr %p, <vscale x 2 x i1> %m, i32 zeroext %vl) {
-  ; CHECK-NEXT:   VS1R_V killed [[PseudoVLE32_V_M1_MASK]], [[COPY2]] :: (store (<vscale x 2 x s32>) into %ir.p)
+  ; CHECK-NEXT:   PseudoVSE32_V_M1 killed [[PseudoVLE32_V_M1_MASK]], [[COPY2]], -1, 5 /* e32 */ :: (store (<vscale x 2 x s32>) into %ir.p)
   ; CHECK-NEXT:   PseudoRET
   %a = call <vscale x 2 x i32> @llvm.vp.load.nxv2i32.p0(ptr %p, <vscale x 2 x i1> splat (i1 -1), i32 %vl)
   %b = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
@@ -36,7 +36,7 @@ define void @vpselect_vpload_store(<vscale x 2 x i32> %passthru, ptr %p, <vscale x 2 x i1> %m, i32 zeroext %vl) {
-  ; CHECK-NEXT:   VS1R_V killed [[PseudoVLE32_V_M1_MASK]], [[COPY2]] :: (store (<vscale x 2 x s32>) into %ir.p)
+  ; CHECK-NEXT:   PseudoVSE32_V_M1 killed [[PseudoVLE32_V_M1_MASK]], [[COPY2]], -1, 5 /* e32 */ :: (store (<vscale x 2 x s32>) into %ir.p)
   ; CHECK-NEXT:   PseudoRET
   %a = call <vscale x 2 x i32> @llvm.vp.load.nxv2i32.p0(ptr %p, <vscale x 2 x i1> splat (i1 -1), i32 %vl)
   %b = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %a, <vscale x 2 x i32> %passthru, i32 %vl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vpload.ll b/llvm/test/CodeGen/RISCV/rvv/vpload.ll
index c0a210e680c79..d4f117fad37ee 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vpload.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vpload.ll
@@ -536,3 +536,14 @@ define <vscale x 17 x double> @vpload_nxv17f64(ptr %ptr, ptr %out, <vscale x 17 x i1> %m, i32 zeroext %evl) {
   store <vscale x 1 x double> %hi, ptr %out
   ret <vscale x 16 x double> %lo
 }
+
+define <vscale x 8 x i8> @vpload_all_active_nxv8i8(ptr %ptr) {
+; CHECK-LABEL: vpload_all_active_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vl1r.v v8, (a0)
+; CHECK-NEXT:    ret
+  %vscale = call i32 @llvm.vscale()
+  %evl = mul i32 %vscale, 8
+  %load = call <vscale x 8 x i8> @llvm.vp.load.nxv8i8.p0(ptr %ptr, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  ret <vscale x 8 x i8> %load
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vpstore.ll b/llvm/test/CodeGen/RISCV/rvv/vpstore.ll
index ce0ee38bc7047..015d7645aaa29 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vpstore.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vpstore.ll
@@ -459,3 +459,14 @@ define void @vpstore_nxv17f64(<vscale x 17 x double> %val, ptr %ptr, <vscale x 17 x i1> %m, i32 zeroext %evl) {
   call void @llvm.vp.store.nxv17f64.p0(<vscale x 17 x double> %val, ptr %ptr, <vscale x 17 x i1> %m, i32 %evl)
   ret void
 }
+
+define void @vpstore_all_active_nxv8i8(<vscale x 8 x i8> %val, ptr %ptr) {
+; CHECK-LABEL: vpstore_all_active_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vs1r.v v8, (a0)
+; CHECK-NEXT:    ret
+  %vscale = call i32 @llvm.vscale()
+  %evl = mul i32 %vscale, 8
+  call void @llvm.vp.store.nxv8i8.p0(<vscale x 8 x i8> %val, ptr %ptr, <vscale x 8 x i1> splat (i1 true), i32 %evl)
+  ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vselect-fp.ll b/llvm/test/CodeGen/RISCV/rvv/vselect-fp.ll
index f8274b4cf8037..b1980fcf420a8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vselect-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vselect-fp.ll
@@ -487,42 +487,18 @@ define <vscale x 8 x double> @vfmerge_nzv_nxv8f64(<vscale x 8 x double> %va,
 define <vscale x 16 x double> @vselect_combine_regression(<vscale x 16 x i64> %va, <vscale x 16 x double> %vb) {
 ; CHECK-LABEL: vselect_combine_regression:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-NEXT:    csrr a1, vlenb
-; CHECK-NEXT:    slli a1, a1, 4
-; CHECK-NEXT:    sub sp, sp, a1
-; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; CHECK-NEXT:    addi a1, sp, 16
-; CHECK-NEXT:    vs8r.v v8, (a1) # Unknown-size Folded Spill
+; CHECK-NEXT:    vmv8r.v v24, v16
 ; CHECK-NEXT:    csrr a1, vlenb
 ; CHECK-NEXT:    slli a1, a1, 3
 ; CHECK-NEXT:    add a1, a0, a1
-; CHECK-NEXT:    vl8re64.v v8, (a1)
-; CHECK-NEXT:    csrr a1, vlenb
-; CHECK-NEXT:    slli a1, a1, 3
-; CHECK-NEXT:    add a1, sp, a1
-; CHECK-NEXT:    addi a1, a1, 16
-; CHECK-NEXT:    vs8r.v v8, (a1) # Unknown-size Folded Spill
-; CHECK-NEXT:    vl8re64.v v8, (a0)
-; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT:    vmseq.vi v24, v16, 0
-; CHECK-NEXT:    addi a0, sp, 16
-; CHECK-NEXT:    vl8r.v v16, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT:    vmseq.vi v0, v16, 0
+; CHECK-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
+; CHECK-NEXT:    vmseq.vi v0, v8, 0
 ; CHECK-NEXT:    vmv.v.i v16, 0
-; CHECK-NEXT:    vmerge.vvm v8, v16, v8, v0
-; CHECK-NEXT:    vmv1r.v v0, v24
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    slli a0, a0, 3
-; CHECK-NEXT:    add a0, sp, a0
-; CHECK-NEXT:    addi a0, a0, 16
-; CHECK-NEXT:    vl8r.v v24, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT:    vmerge.vvm v16, v16, v24, v0
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    slli a0, a0, 4
-; CHECK-NEXT:    add sp, sp, a0
-; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    vmseq.vi v7, v24, 0
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    vle64.v v8, (a0), v0.t
+; CHECK-NEXT:    vmv1r.v v0, v7
+; CHECK-NEXT:    vle64.v v16, (a1), v0.t
 ; CHECK-NEXT:    ret
   %cond = icmp eq <vscale x 16 x i64> %va, zeroinitializer
   %sel = select <vscale x 16 x i1> %cond, <vscale x 16 x double> %vb, <vscale x 16 x double> zeroinitializer
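Illustrative note, not part of the patch: the CASE_WHOLE_REGISTER_LMUL macros cover SEW 8/16/32/64 at every LMUL, so the same rewrite should also fire for larger register groups once convertToVLMAX has folded an all-active EVL to the VLMAX sentinel. A hypothetical test in the spirit of the nxv8i8 ones above (the function name and CHECK lines below are assumptions, not taken from this change) might look like:

; Sketch only: an all-active vp.load of <vscale x 16 x i8> (LMUL=2) whose EVL
; equals VLMAX (vscale * 16) is expected to become a whole-register load,
; printed with the vl2r.v alias for vl2re8.v.
define <vscale x 16 x i8> @vpload_all_active_nxv16i8(ptr %ptr) {
; CHECK-LABEL: vpload_all_active_nxv16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl2r.v v8, (a0)
; CHECK-NEXT:    ret
  %vscale = call i32 @llvm.vscale.i32()
  %evl = mul i32 %vscale, 16
  %load = call <vscale x 16 x i8> @llvm.vp.load.nxv16i8.p0(ptr %ptr, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i8> %load
}

The added tests in vpload.ll and vpstore.ll exercise the LMUL=1 path; this sketch is only meant to show how the LMUL argument of the macro maps onto the vl<n>r/vs<n>r whole-register mnemonics.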