diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
index 9deda5ca564b3..34fe79ed9af82 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
@@ -8562,13 +8562,14 @@ bool LoongArchTargetLowering::isEligibleForTailCallOptimization(
     return false;
 
   // Do not tail call opt if caller's and callee's byval arguments do not match.
-  for (unsigned i = 0, j = 0; i < Outs.size(); i++) {
+  for (unsigned i = 0, j = 0; i < Outs.size(); ++i) {
     if (!Outs[i].Flags.isByVal())
       continue;
-    if (j++ >= LoongArchFI->getIncomingByValArgsSize())
+    if (j >= LoongArchFI->getIncomingByValArgsSize())
       return false;
-    if (LoongArchFI->getIncomingByValArgs(i).getValueType() != Outs[i].ArgVT)
+    if (LoongArchFI->getIncomingByValArgs(j).getValueType() != Outs[i].ArgVT)
       return false;
+    ++j;
   }
 
   // The callee has to preserve all registers the caller needs to preserve.
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 7c1eacbce3701..5c3807213fd97 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -24752,10 +24752,11 @@ bool RISCVTargetLowering::isEligibleForTailCallOptimization(
   for (unsigned i = 0, j = 0, e = Outs.size(); i != e; ++i) {
     if (!Outs[i].Flags.isByVal())
       continue;
-    if (j++ >= RVFI->getIncomingByValArgsSize())
+    if (j >= RVFI->getIncomingByValArgsSize())
       return false;
-    if (RVFI->getIncomingByValArgs(i).getValueType() != Outs[i].ArgVT)
+    if (RVFI->getIncomingByValArgs(j).getValueType() != Outs[i].ArgVT)
       return false;
+    ++j;
   }
 
   // The callee has to preserve all registers the caller needs to preserve.
diff --git a/llvm/test/CodeGen/LoongArch/issue187832.ll b/llvm/test/CodeGen/LoongArch/issue187832.ll
new file mode 100644
index 0000000000000..b483a7640e171
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/issue187832.ll
@@ -0,0 +1,48 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=loongarch32 %s -o - | FileCheck %s --check-prefix=LA32
+; RUN: llc -mtriple=loongarch64 %s -o - | FileCheck %s --check-prefix=LA64
+
+%Box = type { i32, i32, i32, i8, [3 x i8], i32, i8, [1 x i8], i16, i16, i8, [5 x i8], { i64, ptr }, { i64, ptr }, { i64, ptr } }
+
+define void @test(ptr byval(%Box) %0) nounwind {
+; LA32-LABEL: test:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $sp, $sp, -112
+; LA32-NEXT:    st.w $ra, $sp, 108 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $fp, $sp, 104 # 4-byte Folded Spill
+; LA32-NEXT:    addi.w $fp, $sp, 24
+; LA32-NEXT:    ori $a2, $zero, 80
+; LA32-NEXT:    move $a0, $fp
+; LA32-NEXT:    move $a1, $zero
+; LA32-NEXT:    bl memcpy
+; LA32-NEXT:    st.w $zero, $sp, 8
+; LA32-NEXT:    st.w $zero, $sp, 4
+; LA32-NEXT:    st.w $zero, $sp, 0
+; LA32-NEXT:    move $a0, $zero
+; LA32-NEXT:    move $a1, $zero
+; LA32-NEXT:    move $a2, $zero
+; LA32-NEXT:    move $a3, $fp
+; LA32-NEXT:    move $a4, $zero
+; LA32-NEXT:    move $a5, $zero
+; LA32-NEXT:    move $a6, $zero
+; LA32-NEXT:    move $a7, $zero
+; LA32-NEXT:    jirl $ra, $zero, 0
+; LA32-NEXT:    ld.w $fp, $sp, 104 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $ra, $sp, 108 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 112
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: test:
+; LA64:       # %bb.0:
+; LA64-NEXT:    movgr2fr.d $fa0, $zero
+; LA64-NEXT:    move $a0, $zero
+; LA64-NEXT:    move $a1, $zero
+; LA64-NEXT:    move $a2, $zero
+; LA64-NEXT:    move $a3, $zero
+; LA64-NEXT:    move $a4, $zero
+; LA64-NEXT:    move $a5, $zero
+; LA64-NEXT:    move $a6, $zero
+; LA64-NEXT:    jr $a0
+  tail call void null(ptr null, double 0.000000e+00, ptr byval(%Box) null, { i64, ptr } zeroinitializer, i32 0, i64 0, i1 false)
+  ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/pr187832.ll b/llvm/test/CodeGen/RISCV/pr187832.ll
new file mode 100644
index 0000000000000..dd4c3c6e3487e
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/pr187832.ll
@@ -0,0 +1,48 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=riscv32 %s -o - | FileCheck %s --check-prefix=RV32
+; RUN: llc -mtriple=riscv64 %s -o - | FileCheck %s --check-prefix=RV64
+
+%Box = type { i32, i32, i32, i8, [3 x i8], i32, i8, [1 x i8], i16, i16, i8, [5 x i8], { i64, ptr }, { i64, ptr }, { i64, ptr } }
+
+define void @test(ptr byval(%Box) %0) nounwind {
+; RV32-LABEL: test:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -112
+; RV32-NEXT:    sw ra, 108(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s0, 104(sp) # 4-byte Folded Spill
+; RV32-NEXT:    addi a0, sp, 24
+; RV32-NEXT:    li a2, 80
+; RV32-NEXT:    li s0, 0
+; RV32-NEXT:    li a1, 0
+; RV32-NEXT:    call memcpy
+; RV32-NEXT:    addi a3, sp, 24
+; RV32-NEXT:    sw zero, 0(sp)
+; RV32-NEXT:    sw zero, 4(sp)
+; RV32-NEXT:    sw zero, 8(sp)
+; RV32-NEXT:    li a0, 0
+; RV32-NEXT:    li a1, 0
+; RV32-NEXT:    li a2, 0
+; RV32-NEXT:    li a4, 0
+; RV32-NEXT:    li a5, 0
+; RV32-NEXT:    li a6, 0
+; RV32-NEXT:    li a7, 0
+; RV32-NEXT:    jalr s0
+; RV32-NEXT:    lw ra, 108(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s0, 104(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 112
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: test:
+; RV64:       # %bb.0:
+; RV64-NEXT:    li a0, 0
+; RV64-NEXT:    li a1, 0
+; RV64-NEXT:    li a2, 0
+; RV64-NEXT:    li a3, 0
+; RV64-NEXT:    li a4, 0
+; RV64-NEXT:    li a5, 0
+; RV64-NEXT:    li a6, 0
+; RV64-NEXT:    li a7, 0
+; RV64-NEXT:    jr a0
+  tail call void null(ptr null, double 0.000000e+00, ptr byval(%Box) null, { i64, ptr } zeroinitializer, i32 0, i64 0, i1 false)
+  ret void
+}