//===-- RISCVInstructionSelector.cpp -----------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the InstructionSelector class for
/// RISC-V.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/RISCVMatInt.h"
#include "RISCVRegisterBankInfo.h"
#include "RISCVSubtarget.h"
#include "RISCVTargetMachine.h"
#include "llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h"
#include "llvm/CodeGen/GlobalISel/GISelValueTracking.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/Support/Debug.h"

#define DEBUG_TYPE "riscv-isel"

using namespace llvm;
using namespace MIPatternMatch;

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET

namespace {

class RISCVInstructionSelector : public InstructionSelector {
public:
  RISCVInstructionSelector(const RISCVTargetMachine &TM,
                           const RISCVSubtarget &STI,
                           const RISCVRegisterBankInfo &RBI);

  bool select(MachineInstr &MI) override;

  void setupMF(MachineFunction &MF, GISelValueTracking *VT,
               CodeGenCoverage *CoverageInfo, ProfileSummaryInfo *PSI,
               BlockFrequencyInfo *BFI) override {
    InstructionSelector::setupMF(MF, VT, CoverageInfo, PSI, BFI);
    MRI = &MF.getRegInfo();
  }

  static const char *getName() { return DEBUG_TYPE; }

private:
  const TargetRegisterClass *
  getRegClassForTypeOnBank(LLT Ty, const RegisterBank &RB) const;

  static constexpr unsigned MaxRecursionDepth = 6;

  bool hasAllNBitUsers(const MachineInstr &MI, unsigned Bits,
                       const unsigned Depth = 0) const;
  bool hasAllHUsers(const MachineInstr &MI) const {
    return hasAllNBitUsers(MI, 16);
  }
  bool hasAllWUsers(const MachineInstr &MI) const {
    return hasAllNBitUsers(MI, 32);
  }

  bool isRegInGprb(Register Reg) const;
  bool isRegInFprb(Register Reg) const;

  // tblgen-erated 'select' implementation, used as the initial selector for
  // the patterns that don't require complex C++.
  bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;

  // A lowering phase that runs before any selection attempts.
  // Returns true if the instruction was modified.
  void preISelLower(MachineInstr &MI, MachineIRBuilder &MIB);

  bool replacePtrWithInt(MachineOperand &Op, MachineIRBuilder &MIB);

  // Custom selection methods
  bool selectCopy(MachineInstr &MI) const;
  bool selectImplicitDef(MachineInstr &MI, MachineIRBuilder &MIB) const;
  bool materializeImm(Register Reg, int64_t Imm, MachineIRBuilder &MIB) const;
  bool selectAddr(MachineInstr &MI, MachineIRBuilder &MIB, bool IsLocal = true,
                  bool IsExternWeak = false) const;
  bool selectSelect(MachineInstr &MI, MachineIRBuilder &MIB) const;
  bool selectFPCompare(MachineInstr &MI, MachineIRBuilder &MIB) const;
  void emitFence(AtomicOrdering FenceOrdering, SyncScope::ID FenceSSID,
                 MachineIRBuilder &MIB) const;
  bool selectUnmergeValues(MachineInstr &MI, MachineIRBuilder &MIB) const;

  ComplexRendererFns selectShiftMask(MachineOperand &Root,
                                     unsigned ShiftWidth) const;
  ComplexRendererFns selectShiftMaskXLen(MachineOperand &Root) const {
    return selectShiftMask(Root, STI.getXLen());
  }
  ComplexRendererFns selectShiftMask32(MachineOperand &Root) const {
    return selectShiftMask(Root, 32);
  }
  ComplexRendererFns selectAddrRegImm(MachineOperand &Root) const;

  ComplexRendererFns selectSExtBits(MachineOperand &Root, unsigned Bits) const;
  template <unsigned Bits>
  ComplexRendererFns selectSExtBits(MachineOperand &Root) const {
    return selectSExtBits(Root, Bits);
  }

  ComplexRendererFns selectZExtBits(MachineOperand &Root, unsigned Bits) const;
  template <unsigned Bits>
  ComplexRendererFns selectZExtBits(MachineOperand &Root) const {
    return selectZExtBits(Root, Bits);
  }

  ComplexRendererFns selectSHXADDOp(MachineOperand &Root, unsigned ShAmt) const;
  template <unsigned ShAmt>
  ComplexRendererFns selectSHXADDOp(MachineOperand &Root) const {
    return selectSHXADDOp(Root, ShAmt);
  }

  ComplexRendererFns selectSHXADD_UWOp(MachineOperand &Root,
                                       unsigned ShAmt) const;
  template <unsigned ShAmt>
  ComplexRendererFns selectSHXADD_UWOp(MachineOperand &Root) const {
    return selectSHXADD_UWOp(Root, ShAmt);
  }

  ComplexRendererFns renderVLOp(MachineOperand &Root) const;

  // Custom renderers for tablegen
  void renderNegImm(MachineInstrBuilder &MIB, const MachineInstr &MI,
                    int OpIdx) const;
  void renderImmSubFromXLen(MachineInstrBuilder &MIB, const MachineInstr &MI,
                            int OpIdx) const;
  void renderImmSubFrom32(MachineInstrBuilder &MIB, const MachineInstr &MI,
                          int OpIdx) const;
  void renderImmPlus1(MachineInstrBuilder &MIB, const MachineInstr &MI,
                      int OpIdx) const;
  void renderFrameIndex(MachineInstrBuilder &MIB, const MachineInstr &MI,
                        int OpIdx) const;

  void renderTrailingZeros(MachineInstrBuilder &MIB, const MachineInstr &MI,
                           int OpIdx) const;
  void renderXLenSubTrailingOnes(MachineInstrBuilder &MIB,
                                 const MachineInstr &MI, int OpIdx) const;

  void renderAddiPairImmLarge(MachineInstrBuilder &MIB, const MachineInstr &MI,
                              int OpIdx) const;
  void renderAddiPairImmSmall(MachineInstrBuilder &MIB, const MachineInstr &MI,
                              int OpIdx) const;

  const RISCVSubtarget &STI;
  const RISCVInstrInfo &TII;
  const RISCVRegisterInfo &TRI;
  const RISCVRegisterBankInfo &RBI;
  const RISCVTargetMachine &TM;

  MachineRegisterInfo *MRI = nullptr;

  // FIXME: This is necessary because DAGISel uses "Subtarget->" and GlobalISel
  // uses "STI." in the code generated by TableGen. We need to unify the name of
  // Subtarget variable.
  const RISCVSubtarget *Subtarget = &STI;

#define GET_GLOBALISEL_PREDICATES_DECL
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL
};

} // end anonymous namespace

#define GET_GLOBALISEL_IMPL
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL

RISCVInstructionSelector::RISCVInstructionSelector(
    const RISCVTargetMachine &TM, const RISCVSubtarget &STI,
    const RISCVRegisterBankInfo &RBI)
    : STI(STI), TII(*STI.getInstrInfo()), TRI(*STI.getRegisterInfo()), RBI(RBI),
      TM(TM),

#define GET_GLOBALISEL_PREDICATES_INIT
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

// Mimics optimizations in ISel and RISCVOptWInst Pass
bool RISCVInstructionSelector::hasAllNBitUsers(const MachineInstr &MI,
                                               unsigned Bits,
                                               const unsigned Depth) const {

  assert((MI.getOpcode() == TargetOpcode::G_ADD ||
          MI.getOpcode() == TargetOpcode::G_SUB ||
          MI.getOpcode() == TargetOpcode::G_MUL ||
          MI.getOpcode() == TargetOpcode::G_SHL ||
          MI.getOpcode() == TargetOpcode::G_LSHR ||
          MI.getOpcode() == TargetOpcode::G_AND ||
          MI.getOpcode() == TargetOpcode::G_OR ||
          MI.getOpcode() == TargetOpcode::G_XOR ||
          MI.getOpcode() == TargetOpcode::G_SEXT_INREG || Depth != 0) &&
         "Unexpected opcode");

  if (Depth >= RISCVInstructionSelector::MaxRecursionDepth)
    return false;

  auto DestReg = MI.getOperand(0).getReg();
  for (auto &UserOp : MRI->use_nodbg_operands(DestReg)) {
    assert(UserOp.getParent() && "UserOp must have a parent");
    const MachineInstr &UserMI = *UserOp.getParent();
    unsigned OpIdx = UserOp.getOperandNo();

    switch (UserMI.getOpcode()) {
    default:
      return false;
    case RISCV::ADDW:
    case RISCV::ADDIW:
    case RISCV::SUBW:
    case RISCV::FCVT_D_W:
    case RISCV::FCVT_S_W:
      if (Bits >= 32)
        break;
      return false;
    case RISCV::SLL:
    case RISCV::SRA:
    case RISCV::SRL:
      // Shift amount operands only use log2(Xlen) bits.
      if (OpIdx == 2 && Bits >= Log2_32(Subtarget->getXLen()))
        break;
      return false;
    case RISCV::SLLI:
      // SLLI only uses the lower (XLen - ShAmt) bits.
      if (Bits >= Subtarget->getXLen() - UserMI.getOperand(2).getImm())
        break;
      return false;
    case RISCV::ANDI:
      if (Bits >= (unsigned)llvm::bit_width<uint64_t>(
                      (uint64_t)UserMI.getOperand(2).getImm()))
        break;
      goto RecCheck;
    case RISCV::AND:
    case RISCV::OR:
    case RISCV::XOR:
    RecCheck:
      if (hasAllNBitUsers(UserMI, Bits, Depth + 1))
        break;
      return false;
    case RISCV::SRLI: {
      unsigned ShAmt = UserMI.getOperand(2).getImm();
      // If we are shifting right by less than Bits, and users don't demand any
      // bits that were shifted into [Bits-1:0], then we can consider this as an
      // N-Bit user.
      if (Bits > ShAmt && hasAllNBitUsers(UserMI, Bits - ShAmt, Depth + 1))
        break;
      return false;
    }
    }
  }

  return true;
}
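// Illustrative example (not from the original source): for
//
//   %a:gprb(s64) = G_ADD %x, %y
//   ... ADDW consuming %a ...
//
// hasAllWUsers on the G_ADD returns true, because ADDW only reads the low 32
// bits of its inputs; the G_ADD may therefore itself be selected to a W-form
// instruction without changing any observed result.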

InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectShiftMask(MachineOperand &Root,
                                          unsigned ShiftWidth) const {
  if (!Root.isReg())
    return std::nullopt;

  using namespace llvm::MIPatternMatch;

  Register ShAmtReg = Root.getReg();
  // Peek through zext.
  Register ZExtSrcReg;
  if (mi_match(ShAmtReg, *MRI, m_GZExt(m_Reg(ZExtSrcReg))))
    ShAmtReg = ZExtSrcReg;

  APInt AndMask;
  Register AndSrcReg;
  // Try to combine the following pattern (applicable to other shift
  // instructions as well as 32-bit ones):
  //
  //   %4:gprb(s64) = G_AND %3, %2
  //   %5:gprb(s64) = G_LSHR %1, %4(s64)
  //
  // According to RISC-V's ISA manual, SLL, SRL, and SRA ignore other bits than
  // the lowest log2(XLEN) bits of register rs2. As for the above pattern, if
  // the lowest log2(XLEN) bits of register rd and rs2 of G_AND are the same,
  // then it can be eliminated. Given register rs1 or rs2 holding a constant
  // (the and mask), there are two cases G_AND can be erased:
  //
  // 1. the lowest log2(XLEN) bits of the and mask are all set
  // 2. the bits of the register being masked are already unset (zero set)
  if (mi_match(ShAmtReg, *MRI, m_GAnd(m_Reg(AndSrcReg), m_ICst(AndMask)))) {
    APInt ShMask(AndMask.getBitWidth(), ShiftWidth - 1);
    if (ShMask.isSubsetOf(AndMask)) {
      ShAmtReg = AndSrcReg;
    } else {
      // SimplifyDemandedBits may have optimized the mask so try restoring any
      // bits that are known zero.
      KnownBits Known = VT->getKnownBits(AndSrcReg);
      if (ShMask.isSubsetOf(AndMask | Known.Zero))
        ShAmtReg = AndSrcReg;
    }
  }

  APInt Imm;
  Register Reg;
  if (mi_match(ShAmtReg, *MRI, m_GAdd(m_Reg(Reg), m_ICst(Imm)))) {
    if (Imm != 0 && Imm.urem(ShiftWidth) == 0)
      // If we are shifting by X+N where N == 0 mod Size, then just shift by X
      // to avoid the ADD.
      ShAmtReg = Reg;
  } else if (mi_match(ShAmtReg, *MRI, m_GSub(m_ICst(Imm), m_Reg(Reg)))) {
    if (Imm != 0 && Imm.urem(ShiftWidth) == 0) {
      // If we are shifting by N-X where N == 0 mod Size, then just shift by -X
      // to generate a NEG instead of a SUB of a constant.
      ShAmtReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
      unsigned NegOpc = Subtarget->is64Bit() ? RISCV::SUBW : RISCV::SUB;
      return {{[=](MachineInstrBuilder &MIB) {
        MachineIRBuilder(*MIB.getInstr())
            .buildInstr(NegOpc, {ShAmtReg}, {Register(RISCV::X0), Reg});
        MIB.addReg(ShAmtReg);
      }}};
    }
    if (Imm.urem(ShiftWidth) == ShiftWidth - 1) {
      // If we are shifting by N-X where N == -1 mod Size, then just shift by ~X
      // to generate a NOT instead of a SUB of a constant.
      ShAmtReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
      return {{[=](MachineInstrBuilder &MIB) {
        MachineIRBuilder(*MIB.getInstr())
            .buildInstr(RISCV::XORI, {ShAmtReg}, {Reg})
            .addImm(-1);
        MIB.addReg(ShAmtReg);
      }}};
    }
  }

  return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(ShAmtReg); }}};
}
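// Illustrative example (not from the original source): with XLEN=64,
//
//   %m:gprb(s64) = G_AND %amt, 63
//   %r:gprb(s64) = G_SHL %x, %m(s64)
//
// selects to a plain SLL %x, %amt. The low log2(64) = 6 bits of the mask are
// all set, so the hardware's implicit masking of rs2 already makes the G_AND
// redundant.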

InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectSExtBits(MachineOperand &Root,
                                         unsigned Bits) const {
  if (!Root.isReg())
    return std::nullopt;
  Register RootReg = Root.getReg();
  MachineInstr *RootDef = MRI->getVRegDef(RootReg);

  if (RootDef->getOpcode() == TargetOpcode::G_SEXT_INREG &&
      RootDef->getOperand(2).getImm() == Bits) {
    return {
        {[=](MachineInstrBuilder &MIB) { MIB.add(RootDef->getOperand(1)); }}};
  }

  unsigned Size = MRI->getType(RootReg).getScalarSizeInBits();
  if ((Size - VT->computeNumSignBits(RootReg)) < Bits)
    return {{[=](MachineInstrBuilder &MIB) { MIB.add(Root); }}};

  return std::nullopt;
}

InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectZExtBits(MachineOperand &Root,
                                         unsigned Bits) const {
  if (!Root.isReg())
    return std::nullopt;
  Register RootReg = Root.getReg();

  Register RegX;
  uint64_t Mask = maskTrailingOnes<uint64_t>(Bits);
  if (mi_match(RootReg, *MRI, m_GAnd(m_Reg(RegX), m_SpecificICst(Mask)))) {
    return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(RegX); }}};
  }

  if (mi_match(RootReg, *MRI, m_GZExt(m_Reg(RegX))) &&
      MRI->getType(RegX).getScalarSizeInBits() == Bits)
    return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(RegX); }}};

  unsigned Size = MRI->getType(RootReg).getScalarSizeInBits();
  if (VT->maskedValueIsZero(RootReg, APInt::getBitsSetFrom(Size, Bits)))
    return {{[=](MachineInstrBuilder &MIB) { MIB.add(Root); }}};

  return std::nullopt;
}

InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectSHXADDOp(MachineOperand &Root,
                                         unsigned ShAmt) const {
  using namespace llvm::MIPatternMatch;

  if (!Root.isReg())
    return std::nullopt;
  Register RootReg = Root.getReg();

  const unsigned XLen = STI.getXLen();
  APInt Mask, C2;
  Register RegY;
  std::optional<bool> LeftShift;
  // (and (shl y, c2), mask)
  if (mi_match(RootReg, *MRI,
               m_GAnd(m_GShl(m_Reg(RegY), m_ICst(C2)), m_ICst(Mask))))
    LeftShift = true;
  // (and (lshr y, c2), mask)
  else if (mi_match(RootReg, *MRI,
                    m_GAnd(m_GLShr(m_Reg(RegY), m_ICst(C2)), m_ICst(Mask))))
    LeftShift = false;

  if (LeftShift.has_value()) {
    if (*LeftShift)
      Mask &= maskTrailingZeros<uint64_t>(C2.getLimitedValue());
    else
      Mask &= maskTrailingOnes<uint64_t>(XLen - C2.getLimitedValue());

    if (Mask.isShiftedMask()) {
      unsigned Leading = XLen - Mask.getActiveBits();
      unsigned Trailing = Mask.countr_zero();
      // Given (and (shl y, c2), mask) in which mask has no leading zeros and
      // c3 trailing zeros. We can use an SRLI by c3 - c2 followed by a SHXADD.
      if (*LeftShift && Leading == 0 && C2.ult(Trailing) && Trailing == ShAmt) {
        Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
        return {{[=](MachineInstrBuilder &MIB) {
          MachineIRBuilder(*MIB.getInstr())
              .buildInstr(RISCV::SRLI, {DstReg}, {RegY})
              .addImm(Trailing - C2.getLimitedValue());
          MIB.addReg(DstReg);
        }}};
      }

      // Given (and (lshr y, c2), mask) in which mask has c2 leading zeros and
      // c3 trailing zeros. We can use an SRLI by c2 + c3 followed by a SHXADD.
      if (!*LeftShift && Leading == C2 && Trailing == ShAmt) {
        Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
        return {{[=](MachineInstrBuilder &MIB) {
          MachineIRBuilder(*MIB.getInstr())
              .buildInstr(RISCV::SRLI, {DstReg}, {RegY})
              .addImm(Leading + Trailing);
          MIB.addReg(DstReg);
        }}};
      }
    }
  }

  LeftShift.reset();

  // (shl (and y, mask), c2)
  if (mi_match(RootReg, *MRI,
               m_GShl(m_OneNonDBGUse(m_GAnd(m_Reg(RegY), m_ICst(Mask))),
                      m_ICst(C2))))
    LeftShift = true;
  // (lshr (and y, mask), c2)
  else if (mi_match(RootReg, *MRI,
                    m_GLShr(m_OneNonDBGUse(m_GAnd(m_Reg(RegY), m_ICst(Mask))),
                            m_ICst(C2))))
    LeftShift = false;

  if (LeftShift.has_value() && Mask.isShiftedMask()) {
    unsigned Leading = XLen - Mask.getActiveBits();
    unsigned Trailing = Mask.countr_zero();

    // Given (shl (and y, mask), c2) in which mask has 32 leading zeros and
    // c3 trailing zeros. If c2 + c3 == ShAmt, we can emit SRLIW + SHXADD.
    bool Cond = *LeftShift && Leading == 32 && Trailing > 0 &&
                (Trailing + C2.getLimitedValue()) == ShAmt;
    if (!Cond)
      // Given (lshr (and y, mask), c2) in which mask has 32 leading zeros and
      // c3 trailing zeros. If c3 - c2 == ShAmt, we can emit SRLIW + SHXADD.
      Cond = !*LeftShift && Leading == 32 && C2.ult(Trailing) &&
             (Trailing - C2.getLimitedValue()) == ShAmt;

    if (Cond) {
      Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
      return {{[=](MachineInstrBuilder &MIB) {
        MachineIRBuilder(*MIB.getInstr())
            .buildInstr(RISCV::SRLIW, {DstReg}, {RegY})
            .addImm(Trailing);
        MIB.addReg(DstReg);
      }}};
    }
  }

  return std::nullopt;
}
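// Illustrative example (not from the original source): with XLen=64 and
// ShAmt=2 (an SH2ADD pattern),
//
//   (and (lshr y, 4), 0x0FFFFFFFFFFFFFFC)
//
// has a shifted mask with 4 leading zeros (== c2) and 2 trailing zeros
// (== ShAmt), so the second rule above fires: the operand becomes
// (SRLI y, 6), and the remaining shift-by-2 folds into the SH2ADD for free.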

InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectSHXADD_UWOp(MachineOperand &Root,
                                            unsigned ShAmt) const {
  using namespace llvm::MIPatternMatch;

  if (!Root.isReg())
    return std::nullopt;
  Register RootReg = Root.getReg();

  // Given (and (shl x, c2), mask) in which mask is a shifted mask with
  // 32 - ShAmt leading zeros and c2 trailing zeros. We can use SLLI by
  // c2 - ShAmt followed by SHXADD_UW with ShAmt for x amount.
  APInt Mask, C2;
  Register RegX;
  if (mi_match(
          RootReg, *MRI,
          m_OneNonDBGUse(m_GAnd(m_OneNonDBGUse(m_GShl(m_Reg(RegX), m_ICst(C2))),
                                m_ICst(Mask))))) {
    Mask &= maskTrailingZeros<uint64_t>(C2.getLimitedValue());

    if (Mask.isShiftedMask()) {
      unsigned Leading = Mask.countl_zero();
      unsigned Trailing = Mask.countr_zero();
      if (Leading == 32 - ShAmt && C2 == Trailing && Trailing > ShAmt) {
        Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
        return {{[=](MachineInstrBuilder &MIB) {
          MachineIRBuilder(*MIB.getInstr())
              .buildInstr(RISCV::SLLI, {DstReg}, {RegX})
              .addImm(C2.getLimitedValue() - ShAmt);
          MIB.addReg(DstReg);
        }}};
      }
    }
  }

  return std::nullopt;
}

InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::renderVLOp(MachineOperand &Root) const {
  assert(Root.isReg() && "Expected operand to be a Register");
  MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());

  if (RootDef->getOpcode() == TargetOpcode::G_CONSTANT) {
    auto C = RootDef->getOperand(1).getCImm();
    if (C->getValue().isAllOnes())
      // If the operand is a G_CONSTANT with value of all ones it is larger than
      // VLMAX. We convert it to an immediate with value VLMaxSentinel. This is
      // recognized specially by the vsetvli insertion pass.
      return {{[=](MachineInstrBuilder &MIB) {
        MIB.addImm(RISCV::VLMaxSentinel);
      }}};

    if (isUInt<5>(C->getZExtValue())) {
      uint64_t ZExtC = C->getZExtValue();
      return {{[=](MachineInstrBuilder &MIB) { MIB.addImm(ZExtC); }}};
    }
  }
  return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); }}};
}

InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectAddrRegImm(MachineOperand &Root) const {
  if (!Root.isReg())
    return std::nullopt;

  MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
  if (RootDef->getOpcode() == TargetOpcode::G_FRAME_INDEX) {
    return {{
        [=](MachineInstrBuilder &MIB) { MIB.add(RootDef->getOperand(1)); },
        [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },
    }};
  }

  if (isBaseWithConstantOffset(Root, *MRI)) {
    MachineOperand &LHS = RootDef->getOperand(1);
    MachineOperand &RHS = RootDef->getOperand(2);
    MachineInstr *LHSDef = MRI->getVRegDef(LHS.getReg());
    MachineInstr *RHSDef = MRI->getVRegDef(RHS.getReg());

    int64_t RHSC = RHSDef->getOperand(1).getCImm()->getSExtValue();
    if (isInt<12>(RHSC)) {
      if (LHSDef->getOpcode() == TargetOpcode::G_FRAME_INDEX)
        return {{
            [=](MachineInstrBuilder &MIB) { MIB.add(LHSDef->getOperand(1)); },
            [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC); },
        }};

      return {{[=](MachineInstrBuilder &MIB) { MIB.add(LHS); },
               [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC); }}};
    }
  }

  // TODO: Need to get the immediate from a G_PTR_ADD. Should this be done in
  // the combiner?
  return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); },
           [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }}};
}
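// Illustrative example (not from the original source): a stack access
//
//   %a:gprb(p0) = G_FRAME_INDEX %stack.0
//   %v:gprb(s64) = G_LOAD %a
//
// renders the (base, offset) pair as (%stack.0, 0), so no separate address
// computation is emitted; the base-with-constant-offset path likewise folds
// an offset that fits in a signed 12-bit immediate.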

/// Returns the RISCVCC::CondCode that corresponds to the CmpInst::Predicate
/// CC. CC must be an ICMP Predicate.
static RISCVCC::CondCode getRISCVCCFromICmp(CmpInst::Predicate CC) {
  switch (CC) {
  default:
    llvm_unreachable("Expected ICMP CmpInst::Predicate.");
  case CmpInst::Predicate::ICMP_EQ:
    return RISCVCC::COND_EQ;
  case CmpInst::Predicate::ICMP_NE:
    return RISCVCC::COND_NE;
  case CmpInst::Predicate::ICMP_ULT:
    return RISCVCC::COND_LTU;
  case CmpInst::Predicate::ICMP_SLT:
    return RISCVCC::COND_LT;
  case CmpInst::Predicate::ICMP_UGE:
    return RISCVCC::COND_GEU;
  case CmpInst::Predicate::ICMP_SGE:
    return RISCVCC::COND_GE;
  }
}

static void getOperandsForBranch(Register CondReg, RISCVCC::CondCode &CC,
                                 Register &LHS, Register &RHS,
                                 MachineRegisterInfo &MRI) {
  // Try to fold an ICmp. If that fails, use a NE compare with X0.
  CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE;
  if (!mi_match(CondReg, MRI, m_GICmp(m_Pred(Pred), m_Reg(LHS), m_Reg(RHS)))) {
    LHS = CondReg;
    RHS = RISCV::X0;
    CC = RISCVCC::COND_NE;
    return;
  }

  // We found an ICmp, do some canonicalization.

  // Adjust comparisons to use comparison with 0 if possible.
  if (auto Constant = getIConstantVRegSExtVal(RHS, MRI)) {
    switch (Pred) {
    case CmpInst::Predicate::ICMP_SGT:
      // Convert X > -1 to X >= 0
      if (*Constant == -1) {
        CC = RISCVCC::COND_GE;
        RHS = RISCV::X0;
        return;
      }
      break;
    case CmpInst::Predicate::ICMP_SLT:
      // Convert X < 1 to 0 >= X
      if (*Constant == 1) {
        CC = RISCVCC::COND_GE;
        RHS = LHS;
        LHS = RISCV::X0;
        return;
      }
      break;
    default:
      break;
    }
  }

  switch (Pred) {
  default:
    llvm_unreachable("Expected ICMP CmpInst::Predicate.");
  case CmpInst::Predicate::ICMP_EQ:
  case CmpInst::Predicate::ICMP_NE:
  case CmpInst::Predicate::ICMP_ULT:
  case CmpInst::Predicate::ICMP_SLT:
  case CmpInst::Predicate::ICMP_UGE:
  case CmpInst::Predicate::ICMP_SGE:
    // These CCs are supported directly by RISC-V branches.
    break;
  case CmpInst::Predicate::ICMP_SGT:
  case CmpInst::Predicate::ICMP_SLE:
  case CmpInst::Predicate::ICMP_UGT:
  case CmpInst::Predicate::ICMP_ULE:
    // These CCs are not supported directly by RISC-V branches, but changing the
    // direction of the CC and swapping LHS and RHS are.
    Pred = CmpInst::getSwappedPredicate(Pred);
    std::swap(LHS, RHS);
    break;
  }

  CC = getRISCVCCFromICmp(Pred);
}
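// Illustrative example (not from the original source): a branch on
// (icmp ugt %a, %b) has no direct RISC-V branch, so the predicate is swapped
// to (icmp ult %b, %a) and selects to BLTU %b, %a; a branch on a bare i1
// value %c falls back to BNE %c, x0.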

bool RISCVInstructionSelector::select(MachineInstr &MI) {
  MachineIRBuilder MIB(MI);

  preISelLower(MI, MIB);
  const unsigned Opc = MI.getOpcode();

  if (!MI.isPreISelOpcode() || Opc == TargetOpcode::G_PHI) {
    if (Opc == TargetOpcode::PHI || Opc == TargetOpcode::G_PHI) {
      const Register DefReg = MI.getOperand(0).getReg();
      const LLT DefTy = MRI->getType(DefReg);

      const RegClassOrRegBank &RegClassOrBank =
          MRI->getRegClassOrRegBank(DefReg);

      const TargetRegisterClass *DefRC =
          dyn_cast_if_present<const TargetRegisterClass *>(RegClassOrBank);
      if (!DefRC) {
        if (!DefTy.isValid()) {
          LLVM_DEBUG(dbgs() << "PHI operand has no type, not a gvreg?\n");
          return false;
        }

        const RegisterBank &RB = *cast<const RegisterBank *>(RegClassOrBank);
        DefRC = getRegClassForTypeOnBank(DefTy, RB);
        if (!DefRC) {
          LLVM_DEBUG(dbgs() << "PHI operand has unexpected size/bank\n");
          return false;
        }
      }

      MI.setDesc(TII.get(TargetOpcode::PHI));
      return RBI.constrainGenericRegister(DefReg, *DefRC, *MRI);
    }

    // Certain non-generic instructions also need some special handling.
    if (MI.isCopy())
      return selectCopy(MI);

    return true;
  }

  if (selectImpl(MI, *CoverageInfo))
    return true;

  switch (Opc) {
  case TargetOpcode::G_ANYEXT:
  case TargetOpcode::G_PTRTOINT:
  case TargetOpcode::G_INTTOPTR:
  case TargetOpcode::G_TRUNC:
  case TargetOpcode::G_FREEZE:
    return selectCopy(MI);
  case TargetOpcode::G_CONSTANT: {
    Register DstReg = MI.getOperand(0).getReg();
    int64_t Imm = MI.getOperand(1).getCImm()->getSExtValue();

    if (!materializeImm(DstReg, Imm, MIB))
      return false;

    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_FCONSTANT: {
    // TODO: Use constant pool for complex constants.
    Register DstReg = MI.getOperand(0).getReg();
    const APFloat &FPimm = MI.getOperand(1).getFPImm()->getValueAPF();
    unsigned Size = MRI->getType(DstReg).getSizeInBits();
    if (Size == 16 || Size == 32 || (Size == 64 && Subtarget->is64Bit())) {
      Register GPRReg;
      if (FPimm.isPosZero()) {
        GPRReg = RISCV::X0;
      } else {
        GPRReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
        APInt Imm = FPimm.bitcastToAPInt();
        if (!materializeImm(GPRReg, Imm.getSExtValue(), MIB))
          return false;
      }

      unsigned Opcode = Size == 64   ? RISCV::FMV_D_X
                        : Size == 32 ? RISCV::FMV_W_X
                                     : RISCV::FMV_H_X;
      auto FMV = MIB.buildInstr(Opcode, {DstReg}, {GPRReg});
      if (!FMV.constrainAllUses(TII, TRI, RBI))
        return false;
    } else {
      // s64 on rv32
      assert(Size == 64 && !Subtarget->is64Bit() &&
             "Unexpected size or subtarget");

      if (FPimm.isPosZero()) {
        // Optimize +0.0 to use fcvt.d.w
        MachineInstrBuilder FCVT =
            MIB.buildInstr(RISCV::FCVT_D_W, {DstReg}, {Register(RISCV::X0)})
                .addImm(RISCVFPRndMode::RNE);
        if (!FCVT.constrainAllUses(TII, TRI, RBI))
          return false;

        MI.eraseFromParent();
        return true;
      }

      // Split into two pieces and build through the stack.
      Register GPRRegHigh = MRI->createVirtualRegister(&RISCV::GPRRegClass);
      Register GPRRegLow = MRI->createVirtualRegister(&RISCV::GPRRegClass);
      APInt Imm = FPimm.bitcastToAPInt();
      if (!materializeImm(GPRRegHigh, Imm.extractBits(32, 32).getSExtValue(),
                          MIB))
        return false;
      if (!materializeImm(GPRRegLow, Imm.trunc(32).getSExtValue(), MIB))
        return false;
      MachineInstrBuilder PairF64 = MIB.buildInstr(
          RISCV::BuildPairF64Pseudo, {DstReg}, {GPRRegLow, GPRRegHigh});
      if (!PairF64.constrainAllUses(TII, TRI, RBI))
        return false;
    }

    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_GLOBAL_VALUE: {
    auto *GV = MI.getOperand(1).getGlobal();
    if (GV->isThreadLocal()) {
      // TODO: implement this case.
      return false;
    }

    return selectAddr(MI, MIB, GV->isDSOLocal(), GV->hasExternalWeakLinkage());
  }
  case TargetOpcode::G_JUMP_TABLE:
  case TargetOpcode::G_CONSTANT_POOL:
    return selectAddr(MI, MIB, MRI);
  case TargetOpcode::G_BRCOND: {
    Register LHS, RHS;
    RISCVCC::CondCode CC;
    getOperandsForBranch(MI.getOperand(0).getReg(), CC, LHS, RHS, *MRI);

    auto Bcc = MIB.buildInstr(RISCVCC::getBrCond(CC), {}, {LHS, RHS})
                   .addMBB(MI.getOperand(1).getMBB());
    MI.eraseFromParent();
    return constrainSelectedInstRegOperands(*Bcc, TII, TRI, RBI);
  }
  case TargetOpcode::G_BRINDIRECT:
    MI.setDesc(TII.get(RISCV::PseudoBRIND));
    MI.addOperand(MachineOperand::CreateImm(0));
    return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
  case TargetOpcode::G_SELECT:
    return selectSelect(MI, MIB);
  case TargetOpcode::G_FCMP:
    return selectFPCompare(MI, MIB);
  case TargetOpcode::G_FENCE: {
    AtomicOrdering FenceOrdering =
        static_cast<AtomicOrdering>(MI.getOperand(0).getImm());
    SyncScope::ID FenceSSID =
        static_cast<SyncScope::ID>(MI.getOperand(1).getImm());
    emitFence(FenceOrdering, FenceSSID, MIB);
    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_IMPLICIT_DEF:
    return selectImplicitDef(MI, MIB);
  case TargetOpcode::G_UNMERGE_VALUES:
    return selectUnmergeValues(MI, MIB);
  default:
    return false;
  }
}

bool RISCVInstructionSelector::selectUnmergeValues(
    MachineInstr &MI, MachineIRBuilder &MIB) const {
  assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES);

  if (!Subtarget->hasStdExtZfa())
    return false;

  // Split F64 Src into two s32 parts
  if (MI.getNumOperands() != 3)
    return false;
  Register Src = MI.getOperand(2).getReg();
  Register Lo = MI.getOperand(0).getReg();
  Register Hi = MI.getOperand(1).getReg();
  if (!isRegInFprb(Src) || !isRegInGprb(Lo) || !isRegInGprb(Hi))
    return false;

  MachineInstr *ExtractLo = MIB.buildInstr(RISCV::FMV_X_W_FPR64, {Lo}, {Src});
  if (!constrainSelectedInstRegOperands(*ExtractLo, TII, TRI, RBI))
    return false;

  MachineInstr *ExtractHi = MIB.buildInstr(RISCV::FMVH_X_D, {Hi}, {Src});
  if (!constrainSelectedInstRegOperands(*ExtractHi, TII, TRI, RBI))
    return false;

  MI.eraseFromParent();
  return true;
}
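// Note (editorial): FMV_X_W_FPR64 reads the low 32 bits of the f64 source,
// and the Zfa instruction FMVH.X.D reads the high 32 bits, which is why this
// direct GPR-pair split is only attempted when the Zfa extension is
// available.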

bool RISCVInstructionSelector::replacePtrWithInt(MachineOperand &Op,
                                                 MachineIRBuilder &MIB) {
  Register PtrReg = Op.getReg();
  assert(MRI->getType(PtrReg).isPointer() && "Operand is not a pointer!");

  const LLT sXLen = LLT::scalar(STI.getXLen());
  auto PtrToInt = MIB.buildPtrToInt(sXLen, PtrReg);
  MRI->setRegBank(PtrToInt.getReg(0), RBI.getRegBank(RISCV::GPRBRegBankID));
  Op.setReg(PtrToInt.getReg(0));
  return select(*PtrToInt);
}

void RISCVInstructionSelector::preISelLower(MachineInstr &MI,
                                            MachineIRBuilder &MIB) {
  switch (MI.getOpcode()) {
  case TargetOpcode::G_PTR_ADD: {
    Register DstReg = MI.getOperand(0).getReg();
    const LLT sXLen = LLT::scalar(STI.getXLen());

    replacePtrWithInt(MI.getOperand(1), MIB);
    MI.setDesc(TII.get(TargetOpcode::G_ADD));
    MRI->setType(DstReg, sXLen);
    break;
  }
  case TargetOpcode::G_PTRMASK: {
    Register DstReg = MI.getOperand(0).getReg();
    const LLT sXLen = LLT::scalar(STI.getXLen());
    replacePtrWithInt(MI.getOperand(1), MIB);
    MI.setDesc(TII.get(TargetOpcode::G_AND));
    MRI->setType(DstReg, sXLen);
    break;
  }
  }
}

void RISCVInstructionSelector::renderNegImm(MachineInstrBuilder &MIB,
                                            const MachineInstr &MI,
                                            int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  int64_t CstVal = MI.getOperand(1).getCImm()->getSExtValue();
  MIB.addImm(-CstVal);
}

void RISCVInstructionSelector::renderImmSubFromXLen(MachineInstrBuilder &MIB,
                                                    const MachineInstr &MI,
                                                    int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  uint64_t CstVal = MI.getOperand(1).getCImm()->getZExtValue();
  MIB.addImm(STI.getXLen() - CstVal);
}

void RISCVInstructionSelector::renderImmSubFrom32(MachineInstrBuilder &MIB,
                                                  const MachineInstr &MI,
                                                  int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  uint64_t CstVal = MI.getOperand(1).getCImm()->getZExtValue();
  MIB.addImm(32 - CstVal);
}

void RISCVInstructionSelector::renderImmPlus1(MachineInstrBuilder &MIB,
                                              const MachineInstr &MI,
                                              int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  int64_t CstVal = MI.getOperand(1).getCImm()->getSExtValue();
  MIB.addImm(CstVal + 1);
}

void RISCVInstructionSelector::renderFrameIndex(MachineInstrBuilder &MIB,
                                                const MachineInstr &MI,
                                                int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_FRAME_INDEX && OpIdx == -1 &&
         "Expected G_FRAME_INDEX");
  MIB.add(MI.getOperand(1));
}

void RISCVInstructionSelector::renderTrailingZeros(MachineInstrBuilder &MIB,
                                                   const MachineInstr &MI,
                                                   int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  uint64_t C = MI.getOperand(1).getCImm()->getZExtValue();
  MIB.addImm(llvm::countr_zero(C));
}

void RISCVInstructionSelector::renderXLenSubTrailingOnes(
    MachineInstrBuilder &MIB, const MachineInstr &MI, int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  uint64_t C = MI.getOperand(1).getCImm()->getZExtValue();
  MIB.addImm(Subtarget->getXLen() - llvm::countr_one(C));
}

void RISCVInstructionSelector::renderAddiPairImmSmall(MachineInstrBuilder &MIB,
                                                      const MachineInstr &MI,
                                                      int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  int64_t Imm = MI.getOperand(1).getCImm()->getSExtValue();
  int64_t Adj = Imm < 0 ? -2048 : 2047;
  MIB.addImm(Imm - Adj);
}

void RISCVInstructionSelector::renderAddiPairImmLarge(MachineInstrBuilder &MIB,
                                                      const MachineInstr &MI,
                                                      int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  int64_t Imm = MI.getOperand(1).getCImm()->getSExtValue() < 0 ? -2048 : 2047;
  MIB.addImm(Imm);
}

const TargetRegisterClass *RISCVInstructionSelector::getRegClassForTypeOnBank(
    LLT Ty, const RegisterBank &RB) const {
  if (RB.getID() == RISCV::GPRBRegBankID) {
    if (Ty.getSizeInBits() <= 32 || (STI.is64Bit() && Ty.getSizeInBits() == 64))
      return &RISCV::GPRRegClass;
  }

  if (RB.getID() == RISCV::FPRBRegBankID) {
    if (Ty.getSizeInBits() == 16)
      return &RISCV::FPR16RegClass;
    if (Ty.getSizeInBits() == 32)
      return &RISCV::FPR32RegClass;
    if (Ty.getSizeInBits() == 64)
      return &RISCV::FPR64RegClass;
  }

  if (RB.getID() == RISCV::VRBRegBankID) {
    if (Ty.getSizeInBits().getKnownMinValue() <= 64)
      return &RISCV::VRRegClass;

    if (Ty.getSizeInBits().getKnownMinValue() == 128)
      return &RISCV::VRM2RegClass;

    if (Ty.getSizeInBits().getKnownMinValue() == 256)
      return &RISCV::VRM4RegClass;

    if (Ty.getSizeInBits().getKnownMinValue() == 512)
      return &RISCV::VRM8RegClass;
  }

  return nullptr;
}

bool RISCVInstructionSelector::isRegInGprb(Register Reg) const {
  return RBI.getRegBank(Reg, *MRI, TRI)->getID() == RISCV::GPRBRegBankID;
}

bool RISCVInstructionSelector::isRegInFprb(Register Reg) const {
  return RBI.getRegBank(Reg, *MRI, TRI)->getID() == RISCV::FPRBRegBankID;
}

bool RISCVInstructionSelector::selectCopy(MachineInstr &MI) const {
  Register DstReg = MI.getOperand(0).getReg();

  if (DstReg.isPhysical())
    return true;

  const TargetRegisterClass *DstRC = getRegClassForTypeOnBank(
      MRI->getType(DstReg), *RBI.getRegBank(DstReg, *MRI, TRI));
  assert(DstRC &&
         "Register class not available for LLT, register bank combination");

  // No need to constrain SrcReg. It will get constrained when
  // we hit another of its uses or its defs.
  // Copies do not have constraints.
  if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(MI.getOpcode())
                      << " operand\n");
    return false;
  }

  MI.setDesc(TII.get(RISCV::COPY));
  return true;
}

bool RISCVInstructionSelector::selectImplicitDef(MachineInstr &MI,
                                                 MachineIRBuilder &MIB) const {
  assert(MI.getOpcode() == TargetOpcode::G_IMPLICIT_DEF);

  const Register DstReg = MI.getOperand(0).getReg();
  const TargetRegisterClass *DstRC = getRegClassForTypeOnBank(
      MRI->getType(DstReg), *RBI.getRegBank(DstReg, *MRI, TRI));

  assert(DstRC &&
         "Register class not available for LLT, register bank combination");

  if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(MI.getOpcode())
                      << " operand\n");
  }
  MI.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
  return true;
}

bool RISCVInstructionSelector::materializeImm(Register DstReg, int64_t Imm,
                                              MachineIRBuilder &MIB) const {
  if (Imm == 0) {
    MIB.buildCopy(DstReg, Register(RISCV::X0));
    RBI.constrainGenericRegister(DstReg, RISCV::GPRRegClass, *MRI);
    return true;
  }

  RISCVMatInt::InstSeq Seq = RISCVMatInt::generateInstSeq(Imm, *Subtarget);
  unsigned NumInsts = Seq.size();
  Register SrcReg = RISCV::X0;

  for (unsigned i = 0; i < NumInsts; i++) {
    Register TmpReg = i < NumInsts - 1
                          ? MRI->createVirtualRegister(&RISCV::GPRRegClass)
                          : DstReg;
    const RISCVMatInt::Inst &I = Seq[i];
    MachineInstr *Result;

    switch (I.getOpndKind()) {
    case RISCVMatInt::Imm:
      // clang-format off
      Result = MIB.buildInstr(I.getOpcode(), {TmpReg}, {})
                   .addImm(I.getImm());
      // clang-format on
      break;
    case RISCVMatInt::RegX0:
      Result = MIB.buildInstr(I.getOpcode(), {TmpReg},
                              {SrcReg, Register(RISCV::X0)});
      break;
    case RISCVMatInt::RegReg:
      Result = MIB.buildInstr(I.getOpcode(), {TmpReg}, {SrcReg, SrcReg});
      break;
    case RISCVMatInt::RegImm:
      Result =
          MIB.buildInstr(I.getOpcode(), {TmpReg}, {SrcReg}).addImm(I.getImm());
      break;
    }

    if (!constrainSelectedInstRegOperands(*Result, TII, TRI, RBI))
      return false;

    SrcReg = TmpReg;
  }

  return true;
}
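// Illustrative example (not from the original source): materializing
// Imm = 0x12345 yields the RISCVMatInt sequence
//
//   LUI  tmp, 0x12      ; tmp = 0x12000 (Imm-kind instruction)
//   ADDI dst, tmp, 0x345 ; dst = 0x12345 (RegImm-kind instruction)
//
// so the loop above chains the two through a fresh virtual register.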

bool RISCVInstructionSelector::selectAddr(MachineInstr &MI,
                                          MachineIRBuilder &MIB, bool IsLocal,
                                          bool IsExternWeak) const {
  assert((MI.getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
          MI.getOpcode() == TargetOpcode::G_JUMP_TABLE ||
          MI.getOpcode() == TargetOpcode::G_CONSTANT_POOL) &&
         "Unexpected opcode");

  const MachineOperand &DispMO = MI.getOperand(1);

  Register DefReg = MI.getOperand(0).getReg();
  const LLT DefTy = MRI->getType(DefReg);

  // When HWASAN is used and tagging of global variables is enabled
  // they should be accessed via the GOT, since the tagged address of a global
  // is incompatible with existing code models. This also applies to non-pic
  // mode.
  if (TM.isPositionIndependent() || Subtarget->allowTaggedGlobals()) {
    if (IsLocal && !Subtarget->allowTaggedGlobals()) {
      // Use PC-relative addressing to access the symbol. This generates the
      // pattern (PseudoLLA sym), which expands to (addi (auipc %pcrel_hi(sym))
      // %pcrel_lo(auipc)).
      MI.setDesc(TII.get(RISCV::PseudoLLA));
      return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
    }

    // Use PC-relative addressing to access the GOT for this symbol, then
    // load the address from the GOT. This generates the pattern (PseudoLGA
    // sym), which expands to (ld (addi (auipc %got_pcrel_hi(sym))
    // %pcrel_lo(auipc))).
    MachineFunction &MF = *MI.getParent()->getParent();
    MachineMemOperand *MemOp = MF.getMachineMemOperand(
        MachinePointerInfo::getGOT(MF),
        MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
            MachineMemOperand::MOInvariant,
        DefTy, Align(DefTy.getSizeInBits() / 8));

    auto Result = MIB.buildInstr(RISCV::PseudoLGA, {DefReg}, {})
                      .addDisp(DispMO, 0)
                      .addMemOperand(MemOp);

    if (!constrainSelectedInstRegOperands(*Result, TII, TRI, RBI))
      return false;

    MI.eraseFromParent();
    return true;
  }

  switch (TM.getCodeModel()) {
  default: {
    reportGISelFailure(*MF, *TPC, *MORE, getName(),
                       "Unsupported code model for lowering", MI);
    return false;
  }
  case CodeModel::Small: {
    // Must lie within a single 2 GiB address range and must lie between
    // absolute addresses -2 GiB and +2 GiB. This generates the pattern (addi
    // (lui %hi(sym)) %lo(sym)).
    Register AddrHiDest = MRI->createVirtualRegister(&RISCV::GPRRegClass);
    MachineInstr *AddrHi = MIB.buildInstr(RISCV::LUI, {AddrHiDest}, {})
                               .addDisp(DispMO, 0, RISCVII::MO_HI);

    if (!constrainSelectedInstRegOperands(*AddrHi, TII, TRI, RBI))
      return false;

    auto Result = MIB.buildInstr(RISCV::ADDI, {DefReg}, {AddrHiDest})
                      .addDisp(DispMO, 0, RISCVII::MO_LO);

    if (!constrainSelectedInstRegOperands(*Result, TII, TRI, RBI))
      return false;

    MI.eraseFromParent();
    return true;
  }
  case CodeModel::Medium:
    // Emit LGA/LLA instead of the sequence it expands to because the pcrel_lo
    // relocation needs to reference a label that points to the auipc
    // instruction itself, not the global. This cannot be done inside the
    // instruction selector.
    if (IsExternWeak) {
      // An extern weak symbol may be undefined, i.e. have value 0, which may
      // not be within 2GiB of PC, so use GOT-indirect addressing to access the
      // symbol. This generates the pattern (PseudoLGA sym), which expands to
      // (ld (addi (auipc %got_pcrel_hi(sym)) %pcrel_lo(auipc))).
      MachineFunction &MF = *MI.getParent()->getParent();
      MachineMemOperand *MemOp = MF.getMachineMemOperand(
          MachinePointerInfo::getGOT(MF),
          MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
              MachineMemOperand::MOInvariant,
          DefTy, Align(DefTy.getSizeInBits() / 8));

      auto Result = MIB.buildInstr(RISCV::PseudoLGA, {DefReg}, {})
                        .addDisp(DispMO, 0)
                        .addMemOperand(MemOp);

      if (!constrainSelectedInstRegOperands(*Result, TII, TRI, RBI))
        return false;

      MI.eraseFromParent();
      return true;
    }

    // Generate a sequence for accessing addresses within any 2GiB range
    // within the address space. This generates the pattern (PseudoLLA sym),
    // which expands to (addi (auipc %pcrel_hi(sym)) %pcrel_lo(auipc)).
    MI.setDesc(TII.get(RISCV::PseudoLLA));
    return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
  }

  return false;
}

bool RISCVInstructionSelector::selectSelect(MachineInstr &MI,
                                            MachineIRBuilder &MIB) const {
  auto &SelectMI = cast<GSelect>(MI);

  Register LHS, RHS;
  RISCVCC::CondCode CC;
  getOperandsForBranch(SelectMI.getCondReg(), CC, LHS, RHS, *MRI);

  Register DstReg = SelectMI.getReg(0);

  unsigned Opc = RISCV::Select_GPR_Using_CC_GPR;
  if (RBI.getRegBank(DstReg, *MRI, TRI)->getID() == RISCV::FPRBRegBankID) {
    unsigned Size = MRI->getType(DstReg).getSizeInBits();
    Opc = Size == 32 ? RISCV::Select_FPR32_Using_CC_GPR
                     : RISCV::Select_FPR64_Using_CC_GPR;
  }

  MachineInstr *Result = MIB.buildInstr(Opc)
                             .addDef(DstReg)
                             .addReg(LHS)
                             .addReg(RHS)
                             .addImm(CC)
                             .addReg(SelectMI.getTrueReg())
                             .addReg(SelectMI.getFalseReg());
  MI.eraseFromParent();
  return constrainSelectedInstRegOperands(*Result, TII, TRI, RBI);
}

// Convert an FCMP predicate to one of the supported F or D instructions.
static unsigned getFCmpOpcode(CmpInst::Predicate Pred, unsigned Size) {
  assert((Size == 16 || Size == 32 || Size == 64) && "Unsupported size");
  switch (Pred) {
  default:
    llvm_unreachable("Unsupported predicate");
  case CmpInst::FCMP_OLT:
    return Size == 16 ? RISCV::FLT_H : Size == 32 ? RISCV::FLT_S : RISCV::FLT_D;
  case CmpInst::FCMP_OLE:
    return Size == 16 ? RISCV::FLE_H : Size == 32 ? RISCV::FLE_S : RISCV::FLE_D;
  case CmpInst::FCMP_OEQ:
    return Size == 16 ? RISCV::FEQ_H : Size == 32 ? RISCV::FEQ_S : RISCV::FEQ_D;
  }
}

// Try legalizing an FCMP by swapping or inverting the predicate to one that
// is supported.
static bool legalizeFCmpPredicate(Register &LHS, Register &RHS,
                                  CmpInst::Predicate &Pred, bool &NeedInvert) {
  auto isLegalFCmpPredicate = [](CmpInst::Predicate Pred) {
    return Pred == CmpInst::FCMP_OLT || Pred == CmpInst::FCMP_OLE ||
           Pred == CmpInst::FCMP_OEQ;
  };

  assert(!isLegalFCmpPredicate(Pred) && "Predicate already legal?");

  CmpInst::Predicate InvPred = CmpInst::getSwappedPredicate(Pred);
  if (isLegalFCmpPredicate(InvPred)) {
    Pred = InvPred;
    std::swap(LHS, RHS);
    return true;
  }

  InvPred = CmpInst::getInversePredicate(Pred);
  NeedInvert = true;
  if (isLegalFCmpPredicate(InvPred)) {
    Pred = InvPred;
    return true;
  }
  InvPred = CmpInst::getSwappedPredicate(InvPred);
  if (isLegalFCmpPredicate(InvPred)) {
    Pred = InvPred;
    std::swap(LHS, RHS);
    return true;
  }

  return false;
}
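// Illustrative example (not from the original source): FCMP_OGT LHS, RHS has
// no direct instruction, but its swapped form FCMP_OLT RHS, LHS is legal, so
// the first branch above fires and no inversion is needed. FCMP_UGE instead
// legalizes via its inverse FCMP_OLT with NeedInvert = true, costing one
// trailing XORI.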

// Emit a sequence of instructions to compare LHS and RHS using Pred. Return
// the result in DstReg.
// FIXME: Maybe we should expand this earlier.
bool RISCVInstructionSelector::selectFPCompare(MachineInstr &MI,
                                               MachineIRBuilder &MIB) const {
  auto &CmpMI = cast<GFCmp>(MI);
  CmpInst::Predicate Pred = CmpMI.getCond();

  Register DstReg = CmpMI.getReg(0);
  Register LHS = CmpMI.getLHSReg();
  Register RHS = CmpMI.getRHSReg();

  unsigned Size = MRI->getType(LHS).getSizeInBits();
  assert((Size == 16 || Size == 32 || Size == 64) && "Unexpected size");

  Register TmpReg = DstReg;

  bool NeedInvert = false;
  // First try swapping operands or inverting.
  if (legalizeFCmpPredicate(LHS, RHS, Pred, NeedInvert)) {
    if (NeedInvert)
      TmpReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
    auto Cmp = MIB.buildInstr(getFCmpOpcode(Pred, Size), {TmpReg}, {LHS, RHS});
    if (!Cmp.constrainAllUses(TII, TRI, RBI))
      return false;
  } else if (Pred == CmpInst::FCMP_ONE || Pred == CmpInst::FCMP_UEQ) {
    // fcmp one LHS, RHS => (OR (FLT LHS, RHS), (FLT RHS, LHS))
    NeedInvert = Pred == CmpInst::FCMP_UEQ;
    auto Cmp1 = MIB.buildInstr(getFCmpOpcode(CmpInst::FCMP_OLT, Size),
                               {&RISCV::GPRRegClass}, {LHS, RHS});
    if (!Cmp1.constrainAllUses(TII, TRI, RBI))
      return false;
    auto Cmp2 = MIB.buildInstr(getFCmpOpcode(CmpInst::FCMP_OLT, Size),
                               {&RISCV::GPRRegClass}, {RHS, LHS});
    if (!Cmp2.constrainAllUses(TII, TRI, RBI))
      return false;
    if (NeedInvert)
      TmpReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
    auto Or =
        MIB.buildInstr(RISCV::OR, {TmpReg}, {Cmp1.getReg(0), Cmp2.getReg(0)});
    if (!Or.constrainAllUses(TII, TRI, RBI))
      return false;
  } else if (Pred == CmpInst::FCMP_ORD || Pred == CmpInst::FCMP_UNO) {
    // fcmp ord LHS, RHS => (AND (FEQ LHS, LHS), (FEQ RHS, RHS))
    // FIXME: If LHS and RHS are the same we can use a single FEQ.
    NeedInvert = Pred == CmpInst::FCMP_UNO;
    auto Cmp1 = MIB.buildInstr(getFCmpOpcode(CmpInst::FCMP_OEQ, Size),
                               {&RISCV::GPRRegClass}, {LHS, LHS});
    if (!Cmp1.constrainAllUses(TII, TRI, RBI))
      return false;
    auto Cmp2 = MIB.buildInstr(getFCmpOpcode(CmpInst::FCMP_OEQ, Size),
                               {&RISCV::GPRRegClass}, {RHS, RHS});
    if (!Cmp2.constrainAllUses(TII, TRI, RBI))
      return false;
    if (NeedInvert)
      TmpReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
    auto And =
        MIB.buildInstr(RISCV::AND, {TmpReg}, {Cmp1.getReg(0), Cmp2.getReg(0)});
    if (!And.constrainAllUses(TII, TRI, RBI))
      return false;
  } else
    llvm_unreachable("Unhandled predicate");

  // Emit an XORI to invert the result if needed.
  if (NeedInvert) {
    auto Xor = MIB.buildInstr(RISCV::XORI, {DstReg}, {TmpReg}).addImm(1);
    if (!Xor.constrainAllUses(TII, TRI, RBI))
      return false;
  }

  MI.eraseFromParent();
  return true;
}

void RISCVInstructionSelector::emitFence(AtomicOrdering FenceOrdering,
                                         SyncScope::ID FenceSSID,
                                         MachineIRBuilder &MIB) const {
  if (STI.hasStdExtZtso()) {
    // The only fence that needs an instruction is a sequentially-consistent
    // cross-thread fence.
    if (FenceOrdering == AtomicOrdering::SequentiallyConsistent &&
        FenceSSID == SyncScope::System) {
      // fence rw, rw
      MIB.buildInstr(RISCV::FENCE, {}, {})
          .addImm(RISCVFenceField::R | RISCVFenceField::W)
          .addImm(RISCVFenceField::R | RISCVFenceField::W);
      return;
    }

    // MEMBARRIER is a compiler barrier; it codegens to a no-op.
    MIB.buildInstr(TargetOpcode::MEMBARRIER, {}, {});
    return;
  }

  // singlethread fences only synchronize with signal handlers on the same
  // thread and thus only need to preserve instruction order, not actually
  // enforce memory ordering.
  if (FenceSSID == SyncScope::SingleThread) {
    MIB.buildInstr(TargetOpcode::MEMBARRIER, {}, {});
    return;
  }

  // Refer to Table A.6 in the version 2.3 draft of the RISC-V Instruction Set
  // Manual: Volume I.
  unsigned Pred, Succ;
  switch (FenceOrdering) {
  default:
    llvm_unreachable("Unexpected ordering");
  case AtomicOrdering::AcquireRelease:
    // fence acq_rel -> fence.tso
    MIB.buildInstr(RISCV::FENCE_TSO, {}, {});
    return;
  case AtomicOrdering::Acquire:
    // fence acquire -> fence r, rw
    Pred = RISCVFenceField::R;
    Succ = RISCVFenceField::R | RISCVFenceField::W;
    break;
  case AtomicOrdering::Release:
    // fence release -> fence rw, w
    Pred = RISCVFenceField::R | RISCVFenceField::W;
    Succ = RISCVFenceField::W;
    break;
  case AtomicOrdering::SequentiallyConsistent:
    // fence seq_cst -> fence rw, rw
    Pred = RISCVFenceField::R | RISCVFenceField::W;
    Succ = RISCVFenceField::R | RISCVFenceField::W;
    break;
  }
  MIB.buildInstr(RISCV::FENCE, {}, {}).addImm(Pred).addImm(Succ);
}

namespace llvm {
InstructionSelector *
createRISCVInstructionSelector(const RISCVTargetMachine &TM,
                               const RISCVSubtarget &Subtarget,
                               const RISCVRegisterBankInfo &RBI) {
  return new RISCVInstructionSelector(TM, Subtarget, RBI);
}
} // end namespace llvm