
Conversation

llvmbot
Member

@llvmbot llvmbot commented Sep 13, 2025

Backport 058d96f

Requested by: @llvmbot

This adds the simplest implementation of the `PreserveMost` calling
convention for RISC-V: the callee preserves registers `x5`-`x31`, except `x6`, `x7`, and `x28`.

Fixes llvm#148147.

(cherry picked from commit 058d96f)
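
For context, here is a minimal sketch of how the convention is used at the IR level (the function names and the error-reporting scenario are hypothetical, not taken from the patch):

```llvm
; Hypothetical cold-path helper using the new convention: the callee preserves
; x5-x31 except x6, x7 and x28, so the hot-path caller below does not need to
; spill its live argument/temporary registers around the call.
declare preserve_mostcc void @report_error(ptr)

define i32 @hot_path(i32 %v, ptr %msg) {
entry:
  %bad = icmp slt i32 %v, 0
  br i1 %bad, label %cold, label %ok

cold:
  ; The call site and the callee declaration must both use preserve_mostcc.
  call preserve_mostcc void @report_error(ptr %msg)
  br label %ok

ok:
  ret i32 %v
}
```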
@llvmbot
Member Author

llvmbot commented Sep 13, 2025

@wangpc-pp What do you think about merging this PR to the release branch?

@llvmbot
Member Author

llvmbot commented Sep 13, 2025

@llvm/pr-subscribers-llvm-ir

Author: None (llvmbot)

Changes

Backport 058d96f

Requested by: @llvmbot


Patch is 20.40 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/158403.diff

5 Files Affected:

  • (modified) llvm/docs/LangRef.rst (+2)
  • (modified) llvm/lib/Target/RISCV/RISCVCallingConv.td (+4)
  • (modified) llvm/lib/Target/RISCV/RISCVISelLowering.cpp (+1)
  • (modified) llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp (+10-1)
  • (added) llvm/test/CodeGen/RISCV/calling-conv-preserve-most.ll (+449)
diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst
index 8ea850af7a69b..2a9b67b671e11 100644
--- a/llvm/docs/LangRef.rst
+++ b/llvm/docs/LangRef.rst
@@ -413,6 +413,8 @@ added in the future:
     - On AArch64 the callee preserve all general purpose registers, except
       X0-X8 and X16-X18. Not allowed with ``nest``.
 
+    - On RISC-V the callee preserve x5-x31 except x6, x7 and x28 registers.
+
     The idea behind this convention is to support calls to runtime functions
     that have a hot path and a cold path. The hot path is usually a small piece
     of code that doesn't use many registers. The cold path might need to call out to
diff --git a/llvm/lib/Target/RISCV/RISCVCallingConv.td b/llvm/lib/Target/RISCV/RISCVCallingConv.td
index cbf039edec273..d8c52cbde04c7 100644
--- a/llvm/lib/Target/RISCV/RISCVCallingConv.td
+++ b/llvm/lib/Target/RISCV/RISCVCallingConv.td
@@ -93,3 +93,7 @@ def CSR_XLEN_F32_V_Interrupt_RVE: CalleeSavedRegs<(sub CSR_XLEN_F32_V_Interrupt,
 // Same as CSR_XLEN_F64_V_Interrupt, but excluding X16-X31.
 def CSR_XLEN_F64_V_Interrupt_RVE: CalleeSavedRegs<(sub CSR_XLEN_F64_V_Interrupt,
                                                    (sequence "X%u", 16, 31))>;
+
+def CSR_RT_MostRegs : CalleeSavedRegs<(sub CSR_Interrupt, X6, X7, X28)>;
+def CSR_RT_MostRegs_RVE : CalleeSavedRegs<(sub CSR_RT_MostRegs,
+                                               (sequence "X%u", 16, 31))>;
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 5fb16f5ac6b9e..07a03792c2b23 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -22224,6 +22224,7 @@ SDValue RISCVTargetLowering::LowerFormalArguments(
   case CallingConv::C:
   case CallingConv::Fast:
   case CallingConv::SPIR_KERNEL:
+  case CallingConv::PreserveMost:
   case CallingConv::GRAAL:
   case CallingConv::RISCV_VectorCall:
 #define CC_VLS_CASE(ABI_VLEN) case CallingConv::RISCV_VLSCall_##ABI_VLEN:
diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp b/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
index 540412366026b..214536d7f3a74 100644
--- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
@@ -68,6 +68,9 @@ RISCVRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
   auto &Subtarget = MF->getSubtarget<RISCVSubtarget>();
   if (MF->getFunction().getCallingConv() == CallingConv::GHC)
     return CSR_NoRegs_SaveList;
+  if (MF->getFunction().getCallingConv() == CallingConv::PreserveMost)
+    return Subtarget.hasStdExtE() ? CSR_RT_MostRegs_RVE_SaveList
+                                  : CSR_RT_MostRegs_SaveList;
   if (MF->getFunction().hasFnAttribute("interrupt")) {
     if (Subtarget.hasVInstructions()) {
       if (Subtarget.hasStdExtD())
@@ -811,7 +814,13 @@ RISCVRegisterInfo::getCallPreservedMask(const MachineFunction & MF,
 
   if (CC == CallingConv::GHC)
     return CSR_NoRegs_RegMask;
-  switch (Subtarget.getTargetABI()) {
+  RISCVABI::ABI ABI = Subtarget.getTargetABI();
+  if (CC == CallingConv::PreserveMost) {
+    if (ABI == RISCVABI::ABI_ILP32E || ABI == RISCVABI::ABI_LP64E)
+      return CSR_RT_MostRegs_RVE_RegMask;
+    return CSR_RT_MostRegs_RegMask;
+  }
+  switch (ABI) {
   default:
     llvm_unreachable("Unrecognized ABI");
   case RISCVABI::ABI_ILP32E:
diff --git a/llvm/test/CodeGen/RISCV/calling-conv-preserve-most.ll b/llvm/test/CodeGen/RISCV/calling-conv-preserve-most.ll
new file mode 100644
index 0000000000000..08340bbe0013a
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/calling-conv-preserve-most.ll
@@ -0,0 +1,449 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 < %s | FileCheck %s -check-prefix=RV32I
+; RUN: llc -mtriple=riscv64 < %s | FileCheck %s -check-prefix=RV64I
+; RUN: llc -mtriple=riscv32 -mattr=+e -target-abi ilp32e < %s | FileCheck %s -check-prefix=RV32E
+; RUN: llc -mtriple=riscv64 -mattr=+e -target-abi lp64e < %s | FileCheck %s -check-prefix=RV64E
+
+; Check the PreserveMost calling convention works.
+
+declare void @standard_cc_func()
+declare preserve_mostcc void @preserve_mostcc_func()
+
+define preserve_mostcc void @preserve_mostcc1() nounwind {
+; RV32I-LABEL: preserve_mostcc1:
+; RV32I:       # %bb.0: # %entry
+; RV32I-NEXT:    addi sp, sp, -64
+; RV32I-NEXT:    sw ra, 60(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw t0, 56(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw a0, 52(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw a1, 48(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw a2, 44(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw a3, 40(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw a4, 36(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw a5, 32(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw a6, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw a7, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw t4, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw t5, 16(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw t6, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    call standard_cc_func
+; RV32I-NEXT:    lw ra, 60(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw t0, 56(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw a0, 52(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw a1, 48(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw a2, 44(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw a3, 40(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw a4, 36(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw a5, 32(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw a6, 28(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw a7, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw t4, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw t5, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw t6, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 64
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: preserve_mostcc1:
+; RV64I:       # %bb.0: # %entry
+; RV64I-NEXT:    addi sp, sp, -112
+; RV64I-NEXT:    sd ra, 104(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd t0, 96(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd a0, 88(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd a1, 80(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd a2, 72(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd a3, 64(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd a4, 56(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd a5, 48(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd a6, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd a7, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd t4, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd t5, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd t6, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call standard_cc_func
+; RV64I-NEXT:    ld ra, 104(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld t0, 96(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld a0, 88(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld a1, 80(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld a2, 72(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld a3, 64(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld a4, 56(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld a5, 48(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld a6, 40(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld a7, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld t4, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld t5, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld t6, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 112
+; RV64I-NEXT:    ret
+;
+; RV32E-LABEL: preserve_mostcc1:
+; RV32E:       # %bb.0: # %entry
+; RV32E-NEXT:    addi sp, sp, -32
+; RV32E-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32E-NEXT:    sw t0, 24(sp) # 4-byte Folded Spill
+; RV32E-NEXT:    sw a0, 20(sp) # 4-byte Folded Spill
+; RV32E-NEXT:    sw a1, 16(sp) # 4-byte Folded Spill
+; RV32E-NEXT:    sw a2, 12(sp) # 4-byte Folded Spill
+; RV32E-NEXT:    sw a3, 8(sp) # 4-byte Folded Spill
+; RV32E-NEXT:    sw a4, 4(sp) # 4-byte Folded Spill
+; RV32E-NEXT:    sw a5, 0(sp) # 4-byte Folded Spill
+; RV32E-NEXT:    call standard_cc_func
+; RV32E-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
+; RV32E-NEXT:    lw t0, 24(sp) # 4-byte Folded Reload
+; RV32E-NEXT:    lw a0, 20(sp) # 4-byte Folded Reload
+; RV32E-NEXT:    lw a1, 16(sp) # 4-byte Folded Reload
+; RV32E-NEXT:    lw a2, 12(sp) # 4-byte Folded Reload
+; RV32E-NEXT:    lw a3, 8(sp) # 4-byte Folded Reload
+; RV32E-NEXT:    lw a4, 4(sp) # 4-byte Folded Reload
+; RV32E-NEXT:    lw a5, 0(sp) # 4-byte Folded Reload
+; RV32E-NEXT:    addi sp, sp, 32
+; RV32E-NEXT:    ret
+;
+; RV64E-LABEL: preserve_mostcc1:
+; RV64E:       # %bb.0: # %entry
+; RV64E-NEXT:    addi sp, sp, -64
+; RV64E-NEXT:    sd ra, 56(sp) # 8-byte Folded Spill
+; RV64E-NEXT:    sd t0, 48(sp) # 8-byte Folded Spill
+; RV64E-NEXT:    sd a0, 40(sp) # 8-byte Folded Spill
+; RV64E-NEXT:    sd a1, 32(sp) # 8-byte Folded Spill
+; RV64E-NEXT:    sd a2, 24(sp) # 8-byte Folded Spill
+; RV64E-NEXT:    sd a3, 16(sp) # 8-byte Folded Spill
+; RV64E-NEXT:    sd a4, 8(sp) # 8-byte Folded Spill
+; RV64E-NEXT:    sd a5, 0(sp) # 8-byte Folded Spill
+; RV64E-NEXT:    call standard_cc_func
+; RV64E-NEXT:    ld ra, 56(sp) # 8-byte Folded Reload
+; RV64E-NEXT:    ld t0, 48(sp) # 8-byte Folded Reload
+; RV64E-NEXT:    ld a0, 40(sp) # 8-byte Folded Reload
+; RV64E-NEXT:    ld a1, 32(sp) # 8-byte Folded Reload
+; RV64E-NEXT:    ld a2, 24(sp) # 8-byte Folded Reload
+; RV64E-NEXT:    ld a3, 16(sp) # 8-byte Folded Reload
+; RV64E-NEXT:    ld a4, 8(sp) # 8-byte Folded Reload
+; RV64E-NEXT:    ld a5, 0(sp) # 8-byte Folded Reload
+; RV64E-NEXT:    addi sp, sp, 64
+; RV64E-NEXT:    ret
+entry:
+  call void @standard_cc_func()
+  ret void
+}
+
+define preserve_mostcc void @preserve_mostcc2() nounwind {
+; RV32I-LABEL: preserve_mostcc2:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    call preserve_mostcc_func
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: preserve_mostcc2:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    call preserve_mostcc_func
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+;
+; RV32E-LABEL: preserve_mostcc2:
+; RV32E:       # %bb.0:
+; RV32E-NEXT:    addi sp, sp, -4
+; RV32E-NEXT:    sw ra, 0(sp) # 4-byte Folded Spill
+; RV32E-NEXT:    call preserve_mostcc_func
+; RV32E-NEXT:    lw ra, 0(sp) # 4-byte Folded Reload
+; RV32E-NEXT:    addi sp, sp, 4
+; RV32E-NEXT:    ret
+;
+; RV64E-LABEL: preserve_mostcc2:
+; RV64E:       # %bb.0:
+; RV64E-NEXT:    addi sp, sp, -8
+; RV64E-NEXT:    sd ra, 0(sp) # 8-byte Folded Spill
+; RV64E-NEXT:    call preserve_mostcc_func
+; RV64E-NEXT:    ld ra, 0(sp) # 8-byte Folded Reload
+; RV64E-NEXT:    addi sp, sp, 8
+; RV64E-NEXT:    ret
+  call preserve_mostcc void @preserve_mostcc_func()
+  ret void
+}
+
+; X6, X7 and X28 will be saved to registers.
+define void @preserve_mostcc3() nounwind {
+; RV32I-LABEL: preserve_mostcc3:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    #APP
+; RV32I-NEXT:    #NO_APP
+; RV32I-NEXT:    mv a0, t1
+; RV32I-NEXT:    #APP
+; RV32I-NEXT:    #NO_APP
+; RV32I-NEXT:    mv a1, t2
+; RV32I-NEXT:    #APP
+; RV32I-NEXT:    #NO_APP
+; RV32I-NEXT:    #APP
+; RV32I-NEXT:    #NO_APP
+; RV32I-NEXT:    #APP
+; RV32I-NEXT:    #NO_APP
+; RV32I-NEXT:    mv a2, t3
+; RV32I-NEXT:    call preserve_mostcc_func
+; RV32I-NEXT:    mv t1, a0
+; RV32I-NEXT:    mv t2, a1
+; RV32I-NEXT:    mv t3, a2
+; RV32I-NEXT:    #APP
+; RV32I-NEXT:    #NO_APP
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: preserve_mostcc3:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -32
+; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    #APP
+; RV64I-NEXT:    #NO_APP
+; RV64I-NEXT:    mv a0, t1
+; RV64I-NEXT:    #APP
+; RV64I-NEXT:    #NO_APP
+; RV64I-NEXT:    mv a1, t2
+; RV64I-NEXT:    #APP
+; RV64I-NEXT:    #NO_APP
+; RV64I-NEXT:    #APP
+; RV64I-NEXT:    #NO_APP
+; RV64I-NEXT:    #APP
+; RV64I-NEXT:    #NO_APP
+; RV64I-NEXT:    mv a2, t3
+; RV64I-NEXT:    call preserve_mostcc_func
+; RV64I-NEXT:    mv t1, a0
+; RV64I-NEXT:    mv t2, a1
+; RV64I-NEXT:    mv t3, a2
+; RV64I-NEXT:    #APP
+; RV64I-NEXT:    #NO_APP
+; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 32
+; RV64I-NEXT:    ret
+;
+; RV32E-LABEL: preserve_mostcc3:
+; RV32E:       # %bb.0:
+; RV32E-NEXT:    addi sp, sp, -12
+; RV32E-NEXT:    sw ra, 8(sp) # 4-byte Folded Spill
+; RV32E-NEXT:    sw s0, 4(sp) # 4-byte Folded Spill
+; RV32E-NEXT:    sw s1, 0(sp) # 4-byte Folded Spill
+; RV32E-NEXT:    #APP
+; RV32E-NEXT:    #NO_APP
+; RV32E-NEXT:    mv a0, t1
+; RV32E-NEXT:    #APP
+; RV32E-NEXT:    #NO_APP
+; RV32E-NEXT:    mv a1, t2
+; RV32E-NEXT:    #APP
+; RV32E-NEXT:    #NO_APP
+; RV32E-NEXT:    #APP
+; RV32E-NEXT:    #NO_APP
+; RV32E-NEXT:    #APP
+; RV32E-NEXT:    #NO_APP
+; RV32E-NEXT:    mv a2, t3
+; RV32E-NEXT:    call preserve_mostcc_func
+; RV32E-NEXT:    mv t1, a0
+; RV32E-NEXT:    mv t2, a1
+; RV32E-NEXT:    mv t3, a2
+; RV32E-NEXT:    #APP
+; RV32E-NEXT:    #NO_APP
+; RV32E-NEXT:    lw ra, 8(sp) # 4-byte Folded Reload
+; RV32E-NEXT:    lw s0, 4(sp) # 4-byte Folded Reload
+; RV32E-NEXT:    lw s1, 0(sp) # 4-byte Folded Reload
+; RV32E-NEXT:    addi sp, sp, 12
+; RV32E-NEXT:    ret
+;
+; RV64E-LABEL: preserve_mostcc3:
+; RV64E:       # %bb.0:
+; RV64E-NEXT:    addi sp, sp, -24
+; RV64E-NEXT:    sd ra, 16(sp) # 8-byte Folded Spill
+; RV64E-NEXT:    sd s0, 8(sp) # 8-byte Folded Spill
+; RV64E-NEXT:    sd s1, 0(sp) # 8-byte Folded Spill
+; RV64E-NEXT:    #APP
+; RV64E-NEXT:    #NO_APP
+; RV64E-NEXT:    mv a0, t1
+; RV64E-NEXT:    #APP
+; RV64E-NEXT:    #NO_APP
+; RV64E-NEXT:    mv a1, t2
+; RV64E-NEXT:    #APP
+; RV64E-NEXT:    #NO_APP
+; RV64E-NEXT:    #APP
+; RV64E-NEXT:    #NO_APP
+; RV64E-NEXT:    #APP
+; RV64E-NEXT:    #NO_APP
+; RV64E-NEXT:    mv a2, t3
+; RV64E-NEXT:    call preserve_mostcc_func
+; RV64E-NEXT:    mv t1, a0
+; RV64E-NEXT:    mv t2, a1
+; RV64E-NEXT:    mv t3, a2
+; RV64E-NEXT:    #APP
+; RV64E-NEXT:    #NO_APP
+; RV64E-NEXT:    ld ra, 16(sp) # 8-byte Folded Reload
+; RV64E-NEXT:    ld s0, 8(sp) # 8-byte Folded Reload
+; RV64E-NEXT:    ld s1, 0(sp) # 8-byte Folded Reload
+; RV64E-NEXT:    addi sp, sp, 24
+; RV64E-NEXT:    ret
+  %1 = call i32 asm sideeffect "", "={x6}"() nounwind
+  %2 = call i32 asm sideeffect "", "={x7}"() nounwind
+  %3 = call i32 asm sideeffect "", "={x8}"() nounwind
+  %4 = call i32 asm sideeffect "", "={x9}"() nounwind
+  %5 = call i32 asm sideeffect "", "={x28}"() nounwind
+  call preserve_mostcc void @preserve_mostcc_func()
+  call void asm sideeffect "", "{x6},{x7},{x8},{x9},{x28}"(i32 %1, i32 %2, i32 %3, i32 %4, i32 %5)
+  ret void
+}
+
+; X6, X7 and X28 will be saved to the stack.
+define void @preserve_mostcc4() nounwind {
+; RV32I-LABEL: preserve_mostcc4:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -32
+; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    sw s4, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    #APP
+; RV32I-NEXT:    #NO_APP
+; RV32I-NEXT:    mv s2, t1
+; RV32I-NEXT:    #APP
+; RV32I-NEXT:    #NO_APP
+; RV32I-NEXT:    mv s3, t2
+; RV32I-NEXT:    #APP
+; RV32I-NEXT:    #NO_APP
+; RV32I-NEXT:    #APP
+; RV32I-NEXT:    #NO_APP
+; RV32I-NEXT:    #APP
+; RV32I-NEXT:    #NO_APP
+; RV32I-NEXT:    mv s4, t3
+; RV32I-NEXT:    call standard_cc_func
+; RV32I-NEXT:    mv t1, s2
+; RV32I-NEXT:    mv t2, s3
+; RV32I-NEXT:    mv t3, s4
+; RV32I-NEXT:    #APP
+; RV32I-NEXT:    #NO_APP
+; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s3, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    lw s4, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 32
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: preserve_mostcc4:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -48
+; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s3, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s4, 0(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    #APP
+; RV64I-NEXT:    #NO_APP
+; RV64I-NEXT:    mv s2, t1
+; RV64I-NEXT:    #APP
+; RV64I-NEXT:    #NO_APP
+; RV64I-NEXT:    mv s3, t2
+; RV64I-NEXT:    #APP
+; RV64I-NEXT:    #NO_APP
+; RV64I-NEXT:    #APP
+; RV64I-NEXT:    #NO_APP
+; RV64I-NEXT:    #APP
+; RV64I-NEXT:    #NO_APP
+; RV64I-NEXT:    mv s4, t3
+; RV64I-NEXT:    call standard_cc_func
+; RV64I-NEXT:    mv t1, s2
+; RV64I-NEXT:    mv t2, s3
+; RV64I-NEXT:    mv t3, s4
+; RV64I-NEXT:    #APP
+; RV64I-NEXT:    #NO_APP
+; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s3, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s4, 0(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 48
+; RV64I-NEXT:    ret
+;
+; RV32E-LABEL: preserve_mostcc4:
+; RV32E:       # %bb.0:
+; RV32E-NEXT:    addi sp, sp, -24
+; RV32E-NEXT:    sw ra, 20(sp) # 4-byte Folded Spill
+; RV32E-NEXT:    sw s0, 16(sp) # 4-byte Folded Spill
+; RV32E-NEXT:    sw s1, 12(sp) # 4-byte Folded Spill
+; RV32E-NEXT:    #APP
+; RV32E-NEXT:    #NO_APP
+; RV32E-NEXT:    sw t1, 8(sp) # 4-byte Folded Spill
+; RV32E-NEXT:    #APP
+; RV32E-NEXT:    #NO_APP
+; RV32E-NEXT:    sw t2, 4(sp) # 4-byte Folded Spill
+; RV32E-NEXT:    #APP
+; RV32E-NEXT:    #NO_APP
+; RV32E-NEXT:    #APP
+; RV32E-NEXT:    #NO_APP
+; RV32E-NEXT:    #APP
+; RV32E-NEXT:    #NO_APP
+; RV32E-NEXT:    sw t3, 0(sp) # 4-byte Folded Spill
+; RV32E-NEXT:    call standard_cc_func
+; RV32E-NEXT:    lw t1, 8(sp) # 4-byte Folded Reload
+; RV32E-NEXT:    lw t2, 4(sp) # 4-byte Folded Reload
+; RV32E-NEXT:    lw t3, 0(sp) # 4-byte Folded Reload
+; RV32E-NEXT:    #APP
+; RV32E-NEXT:    #NO_APP
+; RV32E-NEXT:    lw ra, 20(sp) # 4-byte Folded Reload
+; RV32E-NEXT:    lw s0, 16(sp) # 4-byte Folded Reload
+; RV32E-NEXT:    lw s1, 12(sp) # 4-byte Folded Reload
+; RV32E-NEXT:    addi sp, sp, 24
+; RV32E-NEXT:    ret
+;
+; RV64E-LABEL: preserve_mostcc4:
+; RV64E:       # %bb.0:
+; RV64E-NEXT:    addi sp, sp, -48
+; RV64E-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; RV64E-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
+; RV64E-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
+; RV64E-NEXT:    #APP
+; RV64E-NEXT:    #NO_APP
+; RV64E-NEXT:    sd t1, 16(sp) # 8-byte Folded Spill
+; RV64E-NEXT:    #APP
+; RV64E-NEXT:    #NO_APP
+; RV64E-NEXT:    sd t2, 8(sp) # 8-byte Folded Spill
+; RV64E-NEXT:    #APP
+; RV64E-NEXT:    #NO_APP
+; RV64E-NEXT:    #APP
+; RV64E-NEXT:    #NO_APP
+; RV64E-NEXT:    #APP
+; RV64E-NEXT:    #NO_APP
+; RV64E-NEXT:    sd t3, 0(sp) # 8-byte Folded Spill
+; RV64E-NEXT:    call standard_cc_func
+; RV64E-NEXT:    ld t1, 16(sp) # 8-byte Folded Reload
+; RV64E-NEXT:    ld t2, 8(sp) # 8-byte Folded Reload
+; RV64E-NEXT:    ld t3, 0(sp) # 8-byte Folded Reload
+; RV64E-NEXT:    #APP
+; RV64E-NEXT:    #NO_APP
+; RV64E-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
+; RV64E-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; RV64E-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
+; RV64E-NEXT:    addi sp, sp, 48
+; RV64E-NEXT:    ret
+  %1 = call...
[truncated]

@llvmbot
Member Author

llvmbot commented Sep 13, 2025

@llvm/pr-subscribers-backend-risc-v

Author: None (llvmbot)

Changes

Backport 058d96f

Requested by: @llvmbot



@nikic
Contributor

nikic commented Sep 13, 2025

Duplicate of #158402.

@nikic nikic closed this Sep 13, 2025
@nikic nikic moved this from Needs Triage to Won't Merge in LLVM Release Status Sep 13, 2025