Thanks for visiting codestin.com.
Credit goes to llvm.org.

LLVM 22.0.0git
MipsFastISel.cpp
Go to the documentation of this file.
1//===- MipsFastISel.cpp - Mips FastISel implementation --------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8///
9/// \file
10/// This file defines the MIPS-specific support for the FastISel class.
11/// Some of the target-specific code is generated by tablegen in the file
12/// MipsGenFastISel.inc, which is #included here.
13///
14//===----------------------------------------------------------------------===//
15
18#include "MipsCCState.h"
19#include "MipsISelLowering.h"
20#include "MipsInstrInfo.h"
21#include "MipsMachineFunction.h"
22#include "MipsSubtarget.h"
23#include "MipsTargetMachine.h"
24#include "llvm/ADT/APInt.h"
25#include "llvm/ADT/DenseMap.h"
41#include "llvm/IR/Attributes.h"
42#include "llvm/IR/CallingConv.h"
43#include "llvm/IR/Constant.h"
44#include "llvm/IR/Constants.h"
45#include "llvm/IR/DataLayout.h"
46#include "llvm/IR/Function.h"
48#include "llvm/IR/GlobalValue.h"
50#include "llvm/IR/InstrTypes.h"
51#include "llvm/IR/Instruction.h"
54#include "llvm/IR/Operator.h"
55#include "llvm/IR/Type.h"
56#include "llvm/IR/User.h"
57#include "llvm/IR/Value.h"
58#include "llvm/MC/MCContext.h"
59#include "llvm/MC/MCInstrDesc.h"
60#include "llvm/MC/MCSymbol.h"
63#include "llvm/Support/Debug.h"
67#include <algorithm>
68#include <array>
69#include <cassert>
70#include <cstdint>
71
72#define DEBUG_TYPE "mips-fastisel"
73
74using namespace llvm;
75
77
78namespace {
79
80class MipsFastISel final : public FastISel {
81
82 // All possible address modes.
83 class Address {
84 public:
85 using BaseKind = enum { RegBase, FrameIndexBase };
86
87 private:
88 BaseKind Kind = RegBase;
89 union {
90 unsigned Reg;
91 int FI;
92 } Base;
93
94 int64_t Offset = 0;
95
96 const GlobalValue *GV = nullptr;
97
98 public:
99 // Innocuous defaults for our address.
100 Address() { Base.Reg = 0; }
101
102 void setKind(BaseKind K) { Kind = K; }
103 BaseKind getKind() const { return Kind; }
104 bool isRegBase() const { return Kind == RegBase; }
105 bool isFIBase() const { return Kind == FrameIndexBase; }
106
107 void setReg(unsigned Reg) {
108 assert(isRegBase() && "Invalid base register access!");
109 Base.Reg = Reg;
110 }
111
112 unsigned getReg() const {
113 assert(isRegBase() && "Invalid base register access!");
114 return Base.Reg;
115 }
116
117 void setFI(unsigned FI) {
118 assert(isFIBase() && "Invalid base frame index access!");
119 Base.FI = FI;
120 }
121
122 unsigned getFI() const {
123 assert(isFIBase() && "Invalid base frame index access!");
124 return Base.FI;
125 }
126
127 void setOffset(int64_t Offset_) { Offset = Offset_; }
128 int64_t getOffset() const { return Offset; }
129 void setGlobalValue(const GlobalValue *G) { GV = G; }
130 const GlobalValue *getGlobalValue() { return GV; }
131 };
132
133 /// Subtarget - Keep a pointer to the MipsSubtarget around so that we can
134 /// make the right decision when generating code for different targets.
135 const TargetMachine &TM;
136 const MipsSubtarget *Subtarget;
137 const TargetInstrInfo &TII;
138 const TargetLowering &TLI;
139 MipsFunctionInfo *MFI;
140
141 // Convenience variables to avoid some queries.
142 LLVMContext *Context;
143
144 bool fastLowerArguments() override;
145 bool fastLowerCall(CallLoweringInfo &CLI) override;
146 bool fastLowerIntrinsicCall(const IntrinsicInst *II) override;
147
148 bool UnsupportedFPMode; // To allow fast-isel to proceed and just not handle
149 // floating point but not reject doing fast-isel in other
150 // situations
151
152private:
153 // Selection routines.
154 bool selectLogicalOp(const Instruction *I);
155 bool selectLoad(const Instruction *I);
156 bool selectStore(const Instruction *I);
157 bool selectBranch(const Instruction *I);
158 bool selectSelect(const Instruction *I);
159 bool selectCmp(const Instruction *I);
160 bool selectFPExt(const Instruction *I);
161 bool selectFPTrunc(const Instruction *I);
162 bool selectFPToInt(const Instruction *I, bool IsSigned);
163 bool selectRet(const Instruction *I);
164 bool selectTrunc(const Instruction *I);
165 bool selectIntExt(const Instruction *I);
166 bool selectShift(const Instruction *I);
167 bool selectDivRem(const Instruction *I, unsigned ISDOpcode);
168
169 // Utility helper routines.
170 bool isTypeLegal(Type *Ty, MVT &VT);
171 bool isTypeSupported(Type *Ty, MVT &VT);
172 bool isLoadTypeLegal(Type *Ty, MVT &VT);
173 bool computeAddress(const Value *Obj, Address &Addr);
174 bool computeCallAddress(const Value *V, Address &Addr);
175 void simplifyAddress(Address &Addr);
176
177 // Emit helper routines.
178 bool emitCmp(unsigned DestReg, const CmpInst *CI);
179 bool emitLoad(MVT VT, unsigned &ResultReg, Address &Addr);
180 bool emitStore(MVT VT, unsigned SrcReg, Address &Addr);
181 unsigned emitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, bool isZExt);
182 bool emitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, unsigned DestReg,
183
184 bool IsZExt);
185 bool emitIntZExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, unsigned DestReg);
186
187 bool emitIntSExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, unsigned DestReg);
188 bool emitIntSExt32r1(MVT SrcVT, unsigned SrcReg, MVT DestVT,
189 unsigned DestReg);
190 bool emitIntSExt32r2(MVT SrcVT, unsigned SrcReg, MVT DestVT,
191 unsigned DestReg);
192
193 unsigned getRegEnsuringSimpleIntegerWidening(const Value *, bool IsUnsigned);
194
195 unsigned emitLogicalOp(unsigned ISDOpc, MVT RetVT, const Value *LHS,
196 const Value *RHS);
197
198 unsigned materializeFP(const ConstantFP *CFP, MVT VT);
199 unsigned materializeGV(const GlobalValue *GV, MVT VT);
200 unsigned materializeInt(const Constant *C, MVT VT);
201 unsigned materialize32BitInt(int64_t Imm, const TargetRegisterClass *RC);
202 unsigned materializeExternalCallSym(MCSymbol *Syn);
203
204 MachineInstrBuilder emitInst(unsigned Opc) {
205 return BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc));
206 }
207
208 MachineInstrBuilder emitInst(unsigned Opc, unsigned DstReg) {
209 return BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc),
210 DstReg);
211 }
212
213 MachineInstrBuilder emitInstStore(unsigned Opc, unsigned SrcReg,
214 unsigned MemReg, int64_t MemOffset) {
215 return emitInst(Opc).addReg(SrcReg).addReg(MemReg).addImm(MemOffset);
216 }
217
218 MachineInstrBuilder emitInstLoad(unsigned Opc, unsigned DstReg,
219 unsigned MemReg, int64_t MemOffset) {
220 return emitInst(Opc, DstReg).addReg(MemReg).addImm(MemOffset);
221 }
222
223 unsigned fastEmitInst_rr(unsigned MachineInstOpcode,
224 const TargetRegisterClass *RC,
225 unsigned Op0, unsigned Op1);
226
227 // for some reason, this default is not generated by tablegen
228 // so we explicitly generate it here.
229 unsigned fastEmitInst_riir(uint64_t inst, const TargetRegisterClass *RC,
230 unsigned Op0, uint64_t imm1, uint64_t imm2,
231 unsigned Op3) {
232 return 0;
233 }
234
235 // Call handling routines.
236private:
237 CCAssignFn *CCAssignFnForCall(CallingConv::ID CC) const;
238 bool processCallArgs(CallLoweringInfo &CLI, SmallVectorImpl<MVT> &ArgVTs,
239 unsigned &NumBytes);
240 bool finishCall(CallLoweringInfo &CLI, MVT RetVT, unsigned NumBytes);
241
242 const MipsABIInfo &getABI() const {
243 return static_cast<const MipsTargetMachine &>(TM).getABI();
244 }
245
246public:
247 // Backend specific FastISel code.
248 explicit MipsFastISel(FunctionLoweringInfo &funcInfo,
249 const TargetLibraryInfo *libInfo)
250 : FastISel(funcInfo, libInfo), TM(funcInfo.MF->getTarget()),
251 Subtarget(&funcInfo.MF->getSubtarget<MipsSubtarget>()),
252 TII(*Subtarget->getInstrInfo()), TLI(*Subtarget->getTargetLowering()) {
253 MFI = funcInfo.MF->getInfo<MipsFunctionInfo>();
254 Context = &funcInfo.Fn->getContext();
255 UnsupportedFPMode = Subtarget->isFP64bit() || Subtarget->useSoftFloat();
256 }
257
258 Register fastMaterializeAlloca(const AllocaInst *AI) override;
259 Register fastMaterializeConstant(const Constant *C) override;
260 bool fastSelectInstruction(const Instruction *I) override;
261
262#include "MipsGenFastISel.inc"
263};
264
265} // end anonymous namespace
266
// Forward declaration of the tablegen-generated calling-convention entry
// point (its definition comes from MipsGenCallingConv.inc, included below).
// Marked unused because this file calls CC_MipsO32 directly instead.
static bool CC_Mips(unsigned ValNo, MVT ValVT, MVT LocVT,
                    CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
                    Type *OrigTy, CCState &State) LLVM_ATTRIBUTE_UNUSED;
270
// Stub referenced by the generated calling-convention tables (presumably via
// MipsGenCallingConv.inc — confirm); fast-isel must never reach it at runtime.
static bool CC_MipsO32_FP32(unsigned ValNo, MVT ValVT, MVT LocVT,
                            CCValAssign::LocInfo LocInfo,
                            ISD::ArgFlagsTy ArgFlags, Type *OrigTy,
                            CCState &State) {
  llvm_unreachable("should not be called");
}
277
// FP64 counterpart of the stub above; likewise never called at runtime.
static bool CC_MipsO32_FP64(unsigned ValNo, MVT ValVT, MVT LocVT,
                            CCValAssign::LocInfo LocInfo,
                            ISD::ArgFlagsTy ArgFlags, Type *OrigTy,
                            CCState &State) {
  llvm_unreachable("should not be called");
}
284
285#include "MipsGenCallingConv.inc"
286
// All calls lowered here use the O32 convention regardless of the IR CC.
CCAssignFn *MipsFastISel::CCAssignFnForCall(CallingConv::ID CC) const {
  return CC_MipsO32;
}
290
291unsigned MipsFastISel::emitLogicalOp(unsigned ISDOpc, MVT RetVT,
292 const Value *LHS, const Value *RHS) {
293 // Canonicalize immediates to the RHS first.
295 std::swap(LHS, RHS);
296
297 unsigned Opc;
298 switch (ISDOpc) {
299 case ISD::AND:
300 Opc = Mips::AND;
301 break;
302 case ISD::OR:
303 Opc = Mips::OR;
304 break;
305 case ISD::XOR:
306 Opc = Mips::XOR;
307 break;
308 default:
309 llvm_unreachable("unexpected opcode");
310 }
311
312 Register LHSReg = getRegForValue(LHS);
313 if (!LHSReg)
314 return 0;
315
316 unsigned RHSReg;
317 if (const auto *C = dyn_cast<ConstantInt>(RHS))
318 RHSReg = materializeInt(C, MVT::i32);
319 else
320 RHSReg = getRegForValue(RHS);
321 if (!RHSReg)
322 return 0;
323
324 Register ResultReg = createResultReg(&Mips::GPR32RegClass);
325 if (!ResultReg)
326 return 0;
327
328 emitInst(Opc, ResultReg).addReg(LHSReg).addReg(RHSReg);
329 return ResultReg;
330}
331
332Register MipsFastISel::fastMaterializeAlloca(const AllocaInst *AI) {
333 assert(TLI.getValueType(DL, AI->getType(), true) == MVT::i32 &&
334 "Alloca should always return a pointer.");
335
336 DenseMap<const AllocaInst *, int>::iterator SI =
337 FuncInfo.StaticAllocaMap.find(AI);
338
339 if (SI != FuncInfo.StaticAllocaMap.end()) {
340 Register ResultReg = createResultReg(&Mips::GPR32RegClass);
341 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Mips::LEA_ADDiu),
342 ResultReg)
343 .addFrameIndex(SI->second)
344 .addImm(0);
345 return ResultReg;
346 }
347
348 return Register();
349}
350
351unsigned MipsFastISel::materializeInt(const Constant *C, MVT VT) {
352 if (VT != MVT::i32 && VT != MVT::i16 && VT != MVT::i8 && VT != MVT::i1)
353 return 0;
354 const TargetRegisterClass *RC = &Mips::GPR32RegClass;
355 const ConstantInt *CI = cast<ConstantInt>(C);
356 return materialize32BitInt(CI->getZExtValue(), RC);
357}
358
359unsigned MipsFastISel::materialize32BitInt(int64_t Imm,
360 const TargetRegisterClass *RC) {
361 Register ResultReg = createResultReg(RC);
362
363 if (isInt<16>(Imm)) {
364 unsigned Opc = Mips::ADDiu;
365 emitInst(Opc, ResultReg).addReg(Mips::ZERO).addImm(Imm);
366 return ResultReg;
367 } else if (isUInt<16>(Imm)) {
368 emitInst(Mips::ORi, ResultReg).addReg(Mips::ZERO).addImm(Imm);
369 return ResultReg;
370 }
371 unsigned Lo = Imm & 0xFFFF;
372 unsigned Hi = (Imm >> 16) & 0xFFFF;
373 if (Lo) {
374 // Both Lo and Hi have nonzero bits.
375 Register TmpReg = createResultReg(RC);
376 emitInst(Mips::LUi, TmpReg).addImm(Hi);
377 emitInst(Mips::ORi, ResultReg).addReg(TmpReg).addImm(Lo);
378 } else {
379 emitInst(Mips::LUi, ResultReg).addImm(Hi);
380 }
381 return ResultReg;
382}
383
// Materialize an FP constant by building its bit pattern in GPR(s) and
// transferring it into an FPU register. Returns 0 for unsupported cases.
unsigned MipsFastISel::materializeFP(const ConstantFP *CFP, MVT VT) {
  if (UnsupportedFPMode)
    return 0;
  // Raw IEEE bit pattern of the constant.
  int64_t Imm = CFP->getValueAPF().bitcastToAPInt().getZExtValue();
  if (VT == MVT::f32) {
    const TargetRegisterClass *RC = &Mips::FGR32RegClass;
    Register DestReg = createResultReg(RC);
    unsigned TempReg = materialize32BitInt(Imm, &Mips::GPR32RegClass);
    emitInst(Mips::MTC1, DestReg).addReg(TempReg);
    return DestReg;
  } else if (VT == MVT::f64) {
    const TargetRegisterClass *RC = &Mips::AFGR64RegClass;
    Register DestReg = createResultReg(RC);
    // Build the two 32-bit halves separately: TempReg1 = high word,
    // TempReg2 = low word; BuildPairF64 presumably takes (lo, hi) — the
    // low word is passed first.
    unsigned TempReg1 = materialize32BitInt(Imm >> 32, &Mips::GPR32RegClass);
    unsigned TempReg2 =
        materialize32BitInt(Imm & 0xFFFFFFFF, &Mips::GPR32RegClass);
    emitInst(Mips::BuildPairF64, DestReg).addReg(TempReg2).addReg(TempReg1);
    return DestReg;
  }
  return 0;
}
405
// Materialize the address of a global through a GOT load off the global
// base register. Returns 0 for unsupported cases (non-i32, TLS).
unsigned MipsFastISel::materializeGV(const GlobalValue *GV, MVT VT) {
  // For now 32-bit only.
  if (VT != MVT::i32)
    return 0;
  const TargetRegisterClass *RC = &Mips::GPR32RegClass;
  Register DestReg = createResultReg(RC);
  const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
  bool IsThreadLocal = GVar && GVar->isThreadLocal();
  // TLS not supported at this time.
  if (IsThreadLocal)
    return 0;
  // Load the GOT entry for the symbol.
  emitInst(Mips::LW, DestReg)
      .addReg(MFI->getGlobalBaseReg(*MF))
      .addGlobalAddress(GV, 0, MipsII::MO_GOT);
  if ((GV->hasInternalLinkage() ||
       (GV->hasLocalLinkage() && !isa<Function>(GV)))) {
    // For locally-defined data the GOT entry alone is not the full address;
    // add the low 16 bits of the symbol address (%lo relocation).
    Register TempReg = createResultReg(RC);
    emitInst(Mips::ADDiu, TempReg)
        .addReg(DestReg)
        .addGlobalAddress(GV, 0, MipsII::MO_ABS_LO);
    DestReg = TempReg;
  }
  return DestReg;
}
430
431unsigned MipsFastISel::materializeExternalCallSym(MCSymbol *Sym) {
432 const TargetRegisterClass *RC = &Mips::GPR32RegClass;
433 Register DestReg = createResultReg(RC);
434 emitInst(Mips::LW, DestReg)
435 .addReg(MFI->getGlobalBaseReg(*MF))
436 .addSym(Sym, MipsII::MO_GOT);
437 return DestReg;
438}
439
440// Materialize a constant into a register, and return the register
441// number (or zero if we failed to handle it).
442Register MipsFastISel::fastMaterializeConstant(const Constant *C) {
443 EVT CEVT = TLI.getValueType(DL, C->getType(), true);
444
445 // Only handle simple types.
446 if (!CEVT.isSimple())
447 return Register();
448 MVT VT = CEVT.getSimpleVT();
449
450 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
451 return (UnsupportedFPMode) ? 0 : materializeFP(CFP, VT);
452 else if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
453 return materializeGV(GV, VT);
454 else if (isa<ConstantInt>(C))
455 return materializeInt(C, VT);
456
457 return Register();
458}
459
460bool MipsFastISel::computeAddress(const Value *Obj, Address &Addr) {
461 const User *U = nullptr;
462 unsigned Opcode = Instruction::UserOp1;
463 if (const Instruction *I = dyn_cast<Instruction>(Obj)) {
464 // Don't walk into other basic blocks unless the object is an alloca from
465 // another block, otherwise it may not have a virtual register assigned.
466 if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(Obj)) ||
467 FuncInfo.getMBB(I->getParent()) == FuncInfo.MBB) {
468 Opcode = I->getOpcode();
469 U = I;
470 }
471 } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(Obj)) {
472 Opcode = C->getOpcode();
473 U = C;
474 }
475 switch (Opcode) {
476 default:
477 break;
478 case Instruction::BitCast:
479 // Look through bitcasts.
480 return computeAddress(U->getOperand(0), Addr);
481 case Instruction::GetElementPtr: {
482 Address SavedAddr = Addr;
483 int64_t TmpOffset = Addr.getOffset();
484 // Iterate through the GEP folding the constants into offsets where
485 // we can.
487 for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end(); i != e;
488 ++i, ++GTI) {
489 const Value *Op = *i;
490 if (StructType *STy = GTI.getStructTypeOrNull()) {
491 const StructLayout *SL = DL.getStructLayout(STy);
492 unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
493 TmpOffset += SL->getElementOffset(Idx);
494 } else {
495 uint64_t S = GTI.getSequentialElementStride(DL);
496 while (true) {
497 if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
498 // Constant-offset addressing.
499 TmpOffset += CI->getSExtValue() * S;
500 break;
501 }
502 if (canFoldAddIntoGEP(U, Op)) {
503 // A compatible add with a constant operand. Fold the constant.
504 ConstantInt *CI =
505 cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
506 TmpOffset += CI->getSExtValue() * S;
507 // Iterate on the other operand.
508 Op = cast<AddOperator>(Op)->getOperand(0);
509 continue;
510 }
511 // Unsupported
512 goto unsupported_gep;
513 }
514 }
515 }
516 // Try to grab the base operand now.
517 Addr.setOffset(TmpOffset);
518 if (computeAddress(U->getOperand(0), Addr))
519 return true;
520 // We failed, restore everything and try the other options.
521 Addr = SavedAddr;
522 unsupported_gep:
523 break;
524 }
525 case Instruction::Alloca: {
526 const AllocaInst *AI = cast<AllocaInst>(Obj);
527 DenseMap<const AllocaInst *, int>::iterator SI =
528 FuncInfo.StaticAllocaMap.find(AI);
529 if (SI != FuncInfo.StaticAllocaMap.end()) {
530 Addr.setKind(Address::FrameIndexBase);
531 Addr.setFI(SI->second);
532 return true;
533 }
534 break;
535 }
536 }
537 Addr.setReg(getRegForValue(Obj));
538 return Addr.getReg() != 0;
539}
540
// Compute the callee address for a call: either a GlobalValue recorded in
// Addr, or a register holding the (indirect) target. Returns true on success.
bool MipsFastISel::computeCallAddress(const Value *V, Address &Addr) {
  const User *U = nullptr;
  unsigned Opcode = Instruction::UserOp1;

  if (const auto *I = dyn_cast<Instruction>(V)) {
    // Check if the value is defined in the same basic block. This information
    // is crucial to know whether or not folding an operand is valid.
    if (I->getParent() == FuncInfo.MBB->getBasicBlock()) {
      Opcode = I->getOpcode();
      U = I;
    }
  } else if (const auto *C = dyn_cast<ConstantExpr>(V)) {
    Opcode = C->getOpcode();
    U = C;
  }

  switch (Opcode) {
  default:
    break;
  case Instruction::BitCast:
    // Look past bitcasts if its operand is in the same BB.
    return computeCallAddress(U->getOperand(0), Addr);
    break; // NOTE: unreachable after the return above; kept for symmetry.
  case Instruction::IntToPtr:
    // Look past no-op inttoptrs if its operand is in the same BB.
    if (TLI.getValueType(DL, U->getOperand(0)->getType()) ==
        TLI.getPointerTy(DL))
      return computeCallAddress(U->getOperand(0), Addr);
    break;
  case Instruction::PtrToInt:
    // Look past no-op ptrtoints if its operand is in the same BB.
    if (TLI.getValueType(DL, U->getType()) == TLI.getPointerTy(DL))
      return computeCallAddress(U->getOperand(0), Addr);
    break;
  }

  // Direct call: record the global and let the caller emit the right reloc.
  if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
    Addr.setGlobalValue(GV);
    return true;
  }

  // If all else fails, try to materialize the value in a register.
  if (!Addr.getGlobalValue()) {
    Addr.setReg(getRegForValue(V));
    return Addr.getReg() != 0;
  }

  return false;
}
590
591bool MipsFastISel::isTypeLegal(Type *Ty, MVT &VT) {
592 EVT evt = TLI.getValueType(DL, Ty, true);
593 // Only handle simple types.
594 if (evt == MVT::Other || !evt.isSimple())
595 return false;
596 VT = evt.getSimpleVT();
597
598 // Handle all legal types, i.e. a register that will directly hold this
599 // value.
600 return TLI.isTypeLegal(VT);
601}
602
603bool MipsFastISel::isTypeSupported(Type *Ty, MVT &VT) {
604 if (Ty->isVectorTy())
605 return false;
606
607 if (isTypeLegal(Ty, VT))
608 return true;
609
610 // If this is a type than can be sign or zero-extended to a basic operation
611 // go ahead and accept it now.
612 if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
613 return true;
614
615 return false;
616}
617
618bool MipsFastISel::isLoadTypeLegal(Type *Ty, MVT &VT) {
619 if (isTypeLegal(Ty, VT))
620 return true;
621 // We will extend this in a later patch:
622 // If this is a type than can be sign or zero-extended to a basic operation
623 // go ahead and accept it now.
624 if (VT == MVT::i8 || VT == MVT::i16)
625 return true;
626 return false;
627}
628
629// Because of how EmitCmp is called with fast-isel, you can
630// end up with redundant "andi" instructions after the sequences emitted below.
631// We should try and solve this issue in the future.
632//
633bool MipsFastISel::emitCmp(unsigned ResultReg, const CmpInst *CI) {
634 const Value *Left = CI->getOperand(0), *Right = CI->getOperand(1);
635 bool IsUnsigned = CI->isUnsigned();
636 unsigned LeftReg = getRegEnsuringSimpleIntegerWidening(Left, IsUnsigned);
637 if (LeftReg == 0)
638 return false;
639 unsigned RightReg = getRegEnsuringSimpleIntegerWidening(Right, IsUnsigned);
640 if (RightReg == 0)
641 return false;
643
644 switch (P) {
645 default:
646 return false;
647 case CmpInst::ICMP_EQ: {
648 Register TempReg = createResultReg(&Mips::GPR32RegClass);
649 emitInst(Mips::XOR, TempReg).addReg(LeftReg).addReg(RightReg);
650 emitInst(Mips::SLTiu, ResultReg).addReg(TempReg).addImm(1);
651 break;
652 }
653 case CmpInst::ICMP_NE: {
654 Register TempReg = createResultReg(&Mips::GPR32RegClass);
655 emitInst(Mips::XOR, TempReg).addReg(LeftReg).addReg(RightReg);
656 emitInst(Mips::SLTu, ResultReg).addReg(Mips::ZERO).addReg(TempReg);
657 break;
658 }
660 emitInst(Mips::SLTu, ResultReg).addReg(RightReg).addReg(LeftReg);
661 break;
663 emitInst(Mips::SLTu, ResultReg).addReg(LeftReg).addReg(RightReg);
664 break;
665 case CmpInst::ICMP_UGE: {
666 Register TempReg = createResultReg(&Mips::GPR32RegClass);
667 emitInst(Mips::SLTu, TempReg).addReg(LeftReg).addReg(RightReg);
668 emitInst(Mips::XORi, ResultReg).addReg(TempReg).addImm(1);
669 break;
670 }
671 case CmpInst::ICMP_ULE: {
672 Register TempReg = createResultReg(&Mips::GPR32RegClass);
673 emitInst(Mips::SLTu, TempReg).addReg(RightReg).addReg(LeftReg);
674 emitInst(Mips::XORi, ResultReg).addReg(TempReg).addImm(1);
675 break;
676 }
678 emitInst(Mips::SLT, ResultReg).addReg(RightReg).addReg(LeftReg);
679 break;
681 emitInst(Mips::SLT, ResultReg).addReg(LeftReg).addReg(RightReg);
682 break;
683 case CmpInst::ICMP_SGE: {
684 Register TempReg = createResultReg(&Mips::GPR32RegClass);
685 emitInst(Mips::SLT, TempReg).addReg(LeftReg).addReg(RightReg);
686 emitInst(Mips::XORi, ResultReg).addReg(TempReg).addImm(1);
687 break;
688 }
689 case CmpInst::ICMP_SLE: {
690 Register TempReg = createResultReg(&Mips::GPR32RegClass);
691 emitInst(Mips::SLT, TempReg).addReg(RightReg).addReg(LeftReg);
692 emitInst(Mips::XORi, ResultReg).addReg(TempReg).addImm(1);
693 break;
694 }
700 case CmpInst::FCMP_OGE: {
701 if (UnsupportedFPMode)
702 return false;
703 bool IsFloat = Left->getType()->isFloatTy();
704 bool IsDouble = Left->getType()->isDoubleTy();
705 if (!IsFloat && !IsDouble)
706 return false;
707 unsigned Opc, CondMovOpc;
708 switch (P) {
710 Opc = IsFloat ? Mips::C_EQ_S : Mips::C_EQ_D32;
711 CondMovOpc = Mips::MOVT_I;
712 break;
714 Opc = IsFloat ? Mips::C_EQ_S : Mips::C_EQ_D32;
715 CondMovOpc = Mips::MOVF_I;
716 break;
718 Opc = IsFloat ? Mips::C_OLT_S : Mips::C_OLT_D32;
719 CondMovOpc = Mips::MOVT_I;
720 break;
722 Opc = IsFloat ? Mips::C_OLE_S : Mips::C_OLE_D32;
723 CondMovOpc = Mips::MOVT_I;
724 break;
726 Opc = IsFloat ? Mips::C_ULE_S : Mips::C_ULE_D32;
727 CondMovOpc = Mips::MOVF_I;
728 break;
730 Opc = IsFloat ? Mips::C_ULT_S : Mips::C_ULT_D32;
731 CondMovOpc = Mips::MOVF_I;
732 break;
733 default:
734 llvm_unreachable("Only switching of a subset of CCs.");
735 }
736 Register RegWithZero = createResultReg(&Mips::GPR32RegClass);
737 Register RegWithOne = createResultReg(&Mips::GPR32RegClass);
738 emitInst(Mips::ADDiu, RegWithZero).addReg(Mips::ZERO).addImm(0);
739 emitInst(Mips::ADDiu, RegWithOne).addReg(Mips::ZERO).addImm(1);
740 emitInst(Opc).addReg(Mips::FCC0, RegState::Define).addReg(LeftReg)
741 .addReg(RightReg);
742 emitInst(CondMovOpc, ResultReg)
743 .addReg(RegWithOne)
744 .addReg(Mips::FCC0)
745 .addReg(RegWithZero);
746 break;
747 }
748 }
749 return true;
750}
751
752bool MipsFastISel::emitLoad(MVT VT, unsigned &ResultReg, Address &Addr) {
753 //
754 // more cases will be handled here in following patches.
755 //
756 unsigned Opc;
757 switch (VT.SimpleTy) {
758 case MVT::i32:
759 ResultReg = createResultReg(&Mips::GPR32RegClass);
760 Opc = Mips::LW;
761 break;
762 case MVT::i16:
763 ResultReg = createResultReg(&Mips::GPR32RegClass);
764 Opc = Mips::LHu;
765 break;
766 case MVT::i8:
767 ResultReg = createResultReg(&Mips::GPR32RegClass);
768 Opc = Mips::LBu;
769 break;
770 case MVT::f32:
771 if (UnsupportedFPMode)
772 return false;
773 ResultReg = createResultReg(&Mips::FGR32RegClass);
774 Opc = Mips::LWC1;
775 break;
776 case MVT::f64:
777 if (UnsupportedFPMode)
778 return false;
779 ResultReg = createResultReg(&Mips::AFGR64RegClass);
780 Opc = Mips::LDC1;
781 break;
782 default:
783 return false;
784 }
785 if (Addr.isRegBase()) {
786 simplifyAddress(Addr);
787 emitInstLoad(Opc, ResultReg, Addr.getReg(), Addr.getOffset());
788 return true;
789 }
790 if (Addr.isFIBase()) {
791 unsigned FI = Addr.getFI();
792 int64_t Offset = Addr.getOffset();
793 MachineFrameInfo &MFI = MF->getFrameInfo();
794 MachineMemOperand *MMO = MF->getMachineMemOperand(
796 MFI.getObjectSize(FI), Align(4));
797 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg)
798 .addFrameIndex(FI)
799 .addImm(Offset)
800 .addMemOperand(MMO);
801 return true;
802 }
803 return false;
804}
805
806bool MipsFastISel::emitStore(MVT VT, unsigned SrcReg, Address &Addr) {
807 //
808 // more cases will be handled here in following patches.
809 //
810 unsigned Opc;
811 switch (VT.SimpleTy) {
812 case MVT::i8:
813 Opc = Mips::SB;
814 break;
815 case MVT::i16:
816 Opc = Mips::SH;
817 break;
818 case MVT::i32:
819 Opc = Mips::SW;
820 break;
821 case MVT::f32:
822 if (UnsupportedFPMode)
823 return false;
824 Opc = Mips::SWC1;
825 break;
826 case MVT::f64:
827 if (UnsupportedFPMode)
828 return false;
829 Opc = Mips::SDC1;
830 break;
831 default:
832 return false;
833 }
834 if (Addr.isRegBase()) {
835 simplifyAddress(Addr);
836 emitInstStore(Opc, SrcReg, Addr.getReg(), Addr.getOffset());
837 return true;
838 }
839 if (Addr.isFIBase()) {
840 unsigned FI = Addr.getFI();
841 int64_t Offset = Addr.getOffset();
842 MachineFrameInfo &MFI = MF->getFrameInfo();
843 MachineMemOperand *MMO = MF->getMachineMemOperand(
845 MFI.getObjectSize(FI), Align(4));
846 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc))
847 .addReg(SrcReg)
848 .addFrameIndex(FI)
849 .addImm(Offset)
850 .addMemOperand(MMO);
851 return true;
852 }
853 return false;
854}
855
856bool MipsFastISel::selectLogicalOp(const Instruction *I) {
857 MVT VT;
858 if (!isTypeSupported(I->getType(), VT))
859 return false;
860
861 unsigned ResultReg;
862 switch (I->getOpcode()) {
863 default:
864 llvm_unreachable("Unexpected instruction.");
865 case Instruction::And:
866 ResultReg = emitLogicalOp(ISD::AND, VT, I->getOperand(0), I->getOperand(1));
867 break;
868 case Instruction::Or:
869 ResultReg = emitLogicalOp(ISD::OR, VT, I->getOperand(0), I->getOperand(1));
870 break;
871 case Instruction::Xor:
872 ResultReg = emitLogicalOp(ISD::XOR, VT, I->getOperand(0), I->getOperand(1));
873 break;
874 }
875
876 if (!ResultReg)
877 return false;
878
879 updateValueMap(I, ResultReg);
880 return true;
881}
882
883bool MipsFastISel::selectLoad(const Instruction *I) {
884 const LoadInst *LI = cast<LoadInst>(I);
885
886 // Atomic loads need special handling.
887 if (LI->isAtomic())
888 return false;
889
890 // Verify we have a legal type before going any further.
891 MVT VT;
892 if (!isLoadTypeLegal(LI->getType(), VT))
893 return false;
894
895 // Underaligned loads need special handling.
896 if (LI->getAlign() < VT.getFixedSizeInBits() / 8 &&
897 !Subtarget->systemSupportsUnalignedAccess())
898 return false;
899
900 // See if we can handle this address.
901 Address Addr;
902 if (!computeAddress(LI->getOperand(0), Addr))
903 return false;
904
905 unsigned ResultReg;
906 if (!emitLoad(VT, ResultReg, Addr))
907 return false;
908 updateValueMap(LI, ResultReg);
909 return true;
910}
911
912bool MipsFastISel::selectStore(const Instruction *I) {
913 const StoreInst *SI = cast<StoreInst>(I);
914
915 Value *Op0 = SI->getOperand(0);
916 unsigned SrcReg = 0;
917
918 // Atomic stores need special handling.
919 if (SI->isAtomic())
920 return false;
921
922 // Verify we have a legal type before going any further.
923 MVT VT;
924 if (!isLoadTypeLegal(SI->getOperand(0)->getType(), VT))
925 return false;
926
927 // Underaligned stores need special handling.
928 if (SI->getAlign() < VT.getFixedSizeInBits() / 8 &&
929 !Subtarget->systemSupportsUnalignedAccess())
930 return false;
931
932 // Get the value to be stored into a register.
933 SrcReg = getRegForValue(Op0);
934 if (SrcReg == 0)
935 return false;
936
937 // See if we can handle this address.
938 Address Addr;
939 if (!computeAddress(SI->getOperand(1), Addr))
940 return false;
941
942 if (!emitStore(VT, SrcReg, Addr))
943 return false;
944 return true;
945}
946
// This can cause a redundant sltiu to be generated.
// FIXME: try and eliminate this in a future patch.
bool MipsFastISel::selectBranch(const Instruction *I) {
  const BranchInst *BI = cast<BranchInst>(I);
  MachineBasicBlock *BrBB = FuncInfo.MBB;
  //
  // TBB is the basic block for the case where the comparison is true.
  // FBB is the basic block for the case where the comparison is false.
  // if (cond) goto TBB
  // goto FBB
  // TBB:
  //
  MachineBasicBlock *TBB = FuncInfo.getMBB(BI->getSuccessor(0));
  MachineBasicBlock *FBB = FuncInfo.getMBB(BI->getSuccessor(1));

  // Fold the common case of a conditional branch with a comparison
  // in the same block.
  unsigned ZExtCondReg = 0;
  if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
    // Only fold a single-use compare from this block; emitCmp yields 0/1
    // directly, so no extra masking is needed here.
    if (CI->hasOneUse() && CI->getParent() == I->getParent()) {
      ZExtCondReg = createResultReg(&Mips::GPR32RegClass);
      if (!emitCmp(ZExtCondReg, CI))
        return false;
    }
  }

  // For the general case, we need to mask with 1.
  if (ZExtCondReg == 0) {
    Register CondReg = getRegForValue(BI->getCondition());
    if (CondReg == 0)
      return false;

    ZExtCondReg = emitIntExt(MVT::i1, CondReg, MVT::i32, true);
    if (ZExtCondReg == 0)
      return false;
  }

  // Condition is 0 or 1, so "greater than zero" branches exactly when true.
  BuildMI(*BrBB, FuncInfo.InsertPt, MIMD, TII.get(Mips::BGTZ))
      .addReg(ZExtCondReg)
      .addMBB(TBB);
  finishCondBranch(BI->getParent(), TBB, FBB);
  return true;
}
990
// Select a standalone compare: produce its 0/1 result in a GPR32 and
// record it for later users.
bool MipsFastISel::selectCmp(const Instruction *I) {
  const CmpInst *CI = cast<CmpInst>(I);
  Register ResultReg = createResultReg(&Mips::GPR32RegClass);
  if (!emitCmp(ResultReg, CI))
    return false;
  updateValueMap(I, ResultReg);
  return true;
}
999
1000// Attempt to fast-select a floating-point extend instruction.
1001bool MipsFastISel::selectFPExt(const Instruction *I) {
1002 if (UnsupportedFPMode)
1003 return false;
1004 Value *Src = I->getOperand(0);
1005 EVT SrcVT = TLI.getValueType(DL, Src->getType(), true);
1006 EVT DestVT = TLI.getValueType(DL, I->getType(), true);
1007
1008 if (SrcVT != MVT::f32 || DestVT != MVT::f64)
1009 return false;
1010
1011 Register SrcReg =
1012 getRegForValue(Src); // this must be a 32bit floating point register class
1013 // maybe we should handle this differently
1014 if (!SrcReg)
1015 return false;
1016
1017 Register DestReg = createResultReg(&Mips::AFGR64RegClass);
1018 emitInst(Mips::CVT_D32_S, DestReg).addReg(SrcReg);
1019 updateValueMap(I, DestReg);
1020 return true;
1021}
1022
// Lower a select to a conditional move keyed on the zero-extended i1
// condition.
bool MipsFastISel::selectSelect(const Instruction *I) {
  assert(isa<SelectInst>(I) && "Expected a select instruction.");

  LLVM_DEBUG(dbgs() << "selectSelect\n");

  MVT VT;
  if (!isTypeSupported(I->getType(), VT) || UnsupportedFPMode) {
    LLVM_DEBUG(
        dbgs() << ".. .. gave up (!isTypeSupported || UnsupportedFPMode)\n");
    return false;
  }

  // Pick the conditional-move opcode and register class for the value type.
  unsigned CondMovOpc;
  const TargetRegisterClass *RC;

  if (VT.isInteger() && !VT.isVector() && VT.getSizeInBits() <= 32) {
    CondMovOpc = Mips::MOVN_I_I;
    RC = &Mips::GPR32RegClass;
  } else if (VT == MVT::f32) {
    CondMovOpc = Mips::MOVN_I_S;
    RC = &Mips::FGR32RegClass;
  } else if (VT == MVT::f64) {
    CondMovOpc = Mips::MOVN_I_D32;
    RC = &Mips::AFGR64RegClass;
  } else
    return false;

  const SelectInst *SI = cast<SelectInst>(I);
  const Value *Cond = SI->getCondition();
  Register Src1Reg = getRegForValue(SI->getTrueValue());
  Register Src2Reg = getRegForValue(SI->getFalseValue());
  Register CondReg = getRegForValue(Cond);

  if (!Src1Reg || !Src2Reg || !CondReg)
    return false;

  // Widen the i1 condition to i32 so the condmove can test it.
  Register ZExtCondReg = createResultReg(&Mips::GPR32RegClass);
  if (!ZExtCondReg)
    return false;

  if (!emitIntExt(MVT::i1, CondReg, MVT::i32, ZExtCondReg, true))
    return false;

  Register ResultReg = createResultReg(RC);
  Register TempReg = createResultReg(RC);

  if (!ResultReg || !TempReg)
    return false;

  // Copy the false value first, then MOVN overwrites with the true value
  // when the condition is non-zero (presumed MOVN semantics — the third
  // operand acts as the tied fallback; confirm against the Mips tablegen
  // condmove definitions).
  emitInst(TargetOpcode::COPY, TempReg).addReg(Src2Reg);
  emitInst(CondMovOpc, ResultReg)
      .addReg(Src1Reg).addReg(ZExtCondReg).addReg(TempReg);
  updateValueMap(I, ResultReg);
  return true;
}
1078
1079// Attempt to fast-select a floating-point truncate instruction.
1080bool MipsFastISel::selectFPTrunc(const Instruction *I) {
1081 if (UnsupportedFPMode)
1082 return false;
1083 Value *Src = I->getOperand(0);
1084 EVT SrcVT = TLI.getValueType(DL, Src->getType(), true);
1085 EVT DestVT = TLI.getValueType(DL, I->getType(), true);
1086
1087 if (SrcVT != MVT::f64 || DestVT != MVT::f32)
1088 return false;
1089
1090 Register SrcReg = getRegForValue(Src);
1091 if (!SrcReg)
1092 return false;
1093
1094 Register DestReg = createResultReg(&Mips::FGR32RegClass);
1095 if (!DestReg)
1096 return false;
1097
1098 emitInst(Mips::CVT_S_D32, DestReg).addReg(SrcReg);
1099 updateValueMap(I, DestReg);
1100 return true;
1101}
1102
1103// Attempt to fast-select a floating-point-to-integer conversion.
1104bool MipsFastISel::selectFPToInt(const Instruction *I, bool IsSigned) {
1105 if (UnsupportedFPMode)
1106 return false;
1107 MVT DstVT, SrcVT;
1108 if (!IsSigned)
1109 return false; // We don't handle this case yet. There is no native
1110 // instruction for this but it can be synthesized.
1111 Type *DstTy = I->getType();
1112 if (!isTypeLegal(DstTy, DstVT))
1113 return false;
1114
1115 if (DstVT != MVT::i32)
1116 return false;
1117
1118 Value *Src = I->getOperand(0);
1119 Type *SrcTy = Src->getType();
1120 if (!isTypeLegal(SrcTy, SrcVT))
1121 return false;
1122
1123 if (SrcVT != MVT::f32 && SrcVT != MVT::f64)
1124 return false;
1125
1126 Register SrcReg = getRegForValue(Src);
1127 if (SrcReg == 0)
1128 return false;
1129
1130 // Determine the opcode for the conversion, which takes place
1131 // entirely within FPRs.
1132 Register DestReg = createResultReg(&Mips::GPR32RegClass);
1133 Register TempReg = createResultReg(&Mips::FGR32RegClass);
1134 unsigned Opc = (SrcVT == MVT::f32) ? Mips::TRUNC_W_S : Mips::TRUNC_W_D32;
1135
1136 // Generate the convert.
1137 emitInst(Opc, TempReg).addReg(SrcReg);
1138 emitInst(Mips::MFC1, DestReg).addReg(TempReg);
1139
1140 updateValueMap(I, DestReg);
1141 return true;
1142}
1143
// Analyze the outgoing call arguments against the calling convention, reserve
// the outgoing argument area, and copy each argument value into its assigned
// physical register. Returns false (aborting FastISel for this call) if any
// argument would have to be stored to the stack — that path is not finished
// and deliberately bails out below.
bool MipsFastISel::processCallArgs(CallLoweringInfo &CLI,
                                   SmallVectorImpl<MVT> &OutVTs,
                                   unsigned &NumBytes) {
  CallingConv::ID CC = CLI.CallConv;
  for (const ArgListEntry &Arg : CLI.Args)
    ArgTys.push_back(Arg.Val->getType());
  CCState CCInfo(CC, false, *FuncInfo.MF, ArgLocs, *Context);
  CCInfo.AnalyzeCallOperands(OutVTs, CLI.OutFlags, ArgTys,
                             CCAssignFnForCall(CC));
  // Get a count of how many bytes are to be pushed on the stack.
  NumBytes = CCInfo.getStackSize();
  // This is the minimum argument area used for A0-A3.
  if (NumBytes < 16)
    NumBytes = 16;

  emitInst(Mips::ADJCALLSTACKDOWN).addImm(16).addImm(0);
  // Process the args.
  MVT firstMVT;
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    const Value *ArgVal = CLI.OutVals[VA.getValNo()];
    MVT ArgVT = OutVTs[VA.getValNo()];

    // Retarget the first (and, if the first was FP, the second) floating
    // point argument onto the FP argument registers F12/F14 or D6/D7,
    // overriding whatever location the generic CC analysis picked.
    if (i == 0) {
      firstMVT = ArgVT;
      if (ArgVT == MVT::f32) {
        VA.convertToReg(Mips::F12);
      } else if (ArgVT == MVT::f64) {
        if (Subtarget->isFP64bit())
          VA.convertToReg(Mips::D6_64);
        else
          VA.convertToReg(Mips::D6);
      }
    } else if (i == 1) {
      if ((firstMVT == MVT::f32) || (firstMVT == MVT::f64)) {
        if (ArgVT == MVT::f32) {
          VA.convertToReg(Mips::F14);
        } else if (ArgVT == MVT::f64) {
          if (Subtarget->isFP64bit())
            VA.convertToReg(Mips::D7_64);
          else
            VA.convertToReg(Mips::D7);
        }
      }
    }
    // Word-sized arguments that were assigned to the first 16 bytes of the
    // outgoing area are passed in A0-A3 instead of being stored.
    if (((ArgVT == MVT::i32) || (ArgVT == MVT::f32) || (ArgVT == MVT::i16) ||
         (ArgVT == MVT::i8)) &&
        VA.isMemLoc()) {
      switch (VA.getLocMemOffset()) {
      case 0:
        VA.convertToReg(Mips::A0);
        break;
      case 4:
        VA.convertToReg(Mips::A1);
        break;
      case 8:
        VA.convertToReg(Mips::A2);
        break;
      case 12:
        VA.convertToReg(Mips::A3);
        break;
      default:
        break;
      }
    }
    Register ArgReg = getRegForValue(ArgVal);
    if (!ArgReg)
      return false;

    // Handle arg promotion: SExt, ZExt, AExt.
    switch (VA.getLocInfo()) {
    case CCValAssign::Full:
      break;
    case CCValAssign::AExt:
    case CCValAssign::SExt: {
      MVT DestVT = VA.getLocVT();
      MVT SrcVT = ArgVT;
      ArgReg = emitIntExt(SrcVT, ArgReg, DestVT, /*isZExt=*/false);
      if (!ArgReg)
        return false;
      break;
    }
    case CCValAssign::ZExt: {
      MVT DestVT = VA.getLocVT();
      MVT SrcVT = ArgVT;
      ArgReg = emitIntExt(SrcVT, ArgReg, DestVT, /*isZExt=*/true);
      if (!ArgReg)
        return false;
      break;
    }
    default:
      llvm_unreachable("Unknown arg promotion!");
    }

    // Now copy/store arg to correct locations.
    if (VA.isRegLoc() && !VA.needsCustom()) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
              TII.get(TargetOpcode::COPY), VA.getLocReg()).addReg(ArgReg);
      CLI.OutRegs.push_back(VA.getLocReg());
    } else if (VA.needsCustom()) {
      llvm_unreachable("Mips does not use custom args.");
      return false;
    } else {
      //
      // FIXME: This path will currently return false. It was copied
      // from the AArch64 port and should be essentially fine for Mips too.
      // The work to finish up this path will be done in a follow-on patch.
      //
      assert(VA.isMemLoc() && "Assuming store on stack.");
      // Don't emit stores for undef values.
      if (isa<UndefValue>(ArgVal))
        continue;

      // Need to store on the stack.
      // FIXME: This alignment is incorrect but this path is disabled
      // for now (will return false). We need to determine the right alignment
      // based on the normal alignment for the underlying machine type.
      //
      unsigned ArgSize = alignTo(ArgVT.getSizeInBits(), 4);

      unsigned BEAlign = 0;
      if (ArgSize < 8 && !Subtarget->isLittle())
        BEAlign = 8 - ArgSize;

      Address Addr;
      Addr.setKind(Address::RegBase);
      Addr.setReg(Mips::SP);
      Addr.setOffset(VA.getLocMemOffset() + BEAlign);

      Align Alignment = DL.getABITypeAlign(ArgVal->getType());
      MachineMemOperand *MMO = FuncInfo.MF->getMachineMemOperand(
          MachinePointerInfo::getStack(*FuncInfo.MF, Addr.getOffset()),
          MachineMemOperand::MOStore, ArgVT.getStoreSize(), Alignment);
      (void)(MMO);
      // if (!emitStore(ArgVT, ArgReg, Addr, MMO))
      return false; // can't store on the stack yet.
    }
  }

  return true;
}
1287
// Finish off a lowered call: pop the reserved outgoing-argument area and copy
// the (single) return value out of its convention-assigned physical register
// into a virtual register recorded on CLI.
bool MipsFastISel::finishCall(CallLoweringInfo &CLI, MVT RetVT,
                              unsigned NumBytes) {
  CallingConv::ID CC = CLI.CallConv;
  // The fixed 16-byte pop matches the ADJCALLSTACKDOWN in processCallArgs.
  emitInst(Mips::ADJCALLSTACKUP).addImm(16).addImm(0);
  if (RetVT != MVT::isVoid) {
    MipsCCState CCInfo(CC, false, *FuncInfo.MF, RVLocs, *Context);

    CCInfo.AnalyzeCallResult(CLI.Ins, RetCC_Mips);

    // Only handle a single return value.
    if (RVLocs.size() != 1)
      return false;
    // Copy all of the result registers out of their specified physreg.
    MVT CopyVT = RVLocs[0].getValVT();
    // Special handling for extended integers: copy the full i32 register.
    if (RetVT == MVT::i1 || RetVT == MVT::i8 || RetVT == MVT::i16)
      CopyVT = MVT::i32;

    Register ResultReg = createResultReg(TLI.getRegClassFor(CopyVT));
    if (!ResultReg)
      return false;
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
            TII.get(TargetOpcode::COPY),
            ResultReg).addReg(RVLocs[0].getLocReg());
    CLI.InRegs.push_back(RVLocs[0].getLocReg());

    CLI.ResultReg = ResultReg;
    CLI.NumResultRegs = 1;
  }
  return true;
}
1320
// Lower the function's formal arguments directly, bypassing SelectionDAG.
// Only the simple case is handled, where every argument maps straight onto an
// argument register (A0-A3 / F12,F14 / D6,D7); anything else returns false so
// SelectionDAG can take over.
bool MipsFastISel::fastLowerArguments() {
  LLVM_DEBUG(dbgs() << "fastLowerArguments\n");

  if (!FuncInfo.CanLowerReturn) {
    LLVM_DEBUG(dbgs() << ".. gave up (!CanLowerReturn)\n");
    return false;
  }

  const Function *F = FuncInfo.Fn;
  if (F->isVarArg()) {
    LLVM_DEBUG(dbgs() << ".. gave up (varargs)\n");
    return false;
  }

  CallingConv::ID CC = F->getCallingConv();
  if (CC != CallingConv::C) {
    LLVM_DEBUG(dbgs() << ".. gave up (calling convention is not C)\n");
    return false;
  }

  // Cursors into the three argument-register files; advancing one file may
  // also consume registers from the others (see the ABI notes below).
  std::array<MCPhysReg, 4> GPR32ArgRegs = {{Mips::A0, Mips::A1, Mips::A2,
                                            Mips::A3}};
  std::array<MCPhysReg, 2> FGR32ArgRegs = {{Mips::F12, Mips::F14}};
  std::array<MCPhysReg, 2> AFGR64ArgRegs = {{Mips::D6, Mips::D7}};
  auto NextGPR32 = GPR32ArgRegs.begin();
  auto NextFGR32 = FGR32ArgRegs.begin();
  auto NextAFGR64 = AFGR64ArgRegs.begin();

  // Records, per formal argument, the register class and physical register
  // chosen in the allocation pass below.
  struct AllocatedReg {
    const TargetRegisterClass *RC;
    unsigned Reg;
    AllocatedReg(const TargetRegisterClass *RC, unsigned Reg)
        : RC(RC), Reg(Reg) {}
  };

  // Only handle simple cases. i.e. All arguments are directly mapped to
  // registers of the appropriate type.
  for (const auto &FormalArg : F->args()) {
    if (FormalArg.hasAttribute(Attribute::InReg) ||
        FormalArg.hasAttribute(Attribute::StructRet) ||
        FormalArg.hasAttribute(Attribute::ByVal)) {
      LLVM_DEBUG(dbgs() << ".. gave up (inreg, structret, byval)\n");
      return false;
    }

    Type *ArgTy = FormalArg.getType();
    if (ArgTy->isStructTy() || ArgTy->isArrayTy() || ArgTy->isVectorTy()) {
      LLVM_DEBUG(dbgs() << ".. gave up (struct, array, or vector)\n");
      return false;
    }

    EVT ArgVT = TLI.getValueType(DL, ArgTy);
    LLVM_DEBUG(dbgs() << ".. " << FormalArg.getArgNo() << ": "
                      << ArgVT << "\n");
    if (!ArgVT.isSimple()) {
      LLVM_DEBUG(dbgs() << ".. .. gave up (not a simple type)\n");
      return false;
    }

    switch (ArgVT.getSimpleVT().SimpleTy) {
    case MVT::i1:
    case MVT::i8:
    case MVT::i16:
      if (!FormalArg.hasAttribute(Attribute::SExt) &&
          !FormalArg.hasAttribute(Attribute::ZExt)) {
        // It must be any extend, this shouldn't happen for clang-generated IR
        // so just fall back on SelectionDAG.
        LLVM_DEBUG(dbgs() << ".. .. gave up (i8/i16 arg is not extended)\n");
        return false;
      }

      if (NextGPR32 == GPR32ArgRegs.end()) {
        LLVM_DEBUG(dbgs() << ".. .. gave up (ran out of GPR32 arguments)\n");
        return false;
      }

      LLVM_DEBUG(dbgs() << ".. .. GPR32(" << *NextGPR32 << ")\n");
      Allocation.emplace_back(&Mips::GPR32RegClass, *NextGPR32++);

      // Allocating any GPR32 prohibits further use of floating point arguments.
      NextFGR32 = FGR32ArgRegs.end();
      NextAFGR64 = AFGR64ArgRegs.end();
      break;

    case MVT::i32:
      if (FormalArg.hasAttribute(Attribute::ZExt)) {
        // The O32 ABI does not permit a zero-extended i32.
        LLVM_DEBUG(dbgs() << ".. .. gave up (i32 arg is zero extended)\n");
        return false;
      }

      if (NextGPR32 == GPR32ArgRegs.end()) {
        LLVM_DEBUG(dbgs() << ".. .. gave up (ran out of GPR32 arguments)\n");
        return false;
      }

      LLVM_DEBUG(dbgs() << ".. .. GPR32(" << *NextGPR32 << ")\n");
      Allocation.emplace_back(&Mips::GPR32RegClass, *NextGPR32++);

      // Allocating any GPR32 prohibits further use of floating point arguments.
      NextFGR32 = FGR32ArgRegs.end();
      NextAFGR64 = AFGR64ArgRegs.end();
      break;

    case MVT::f32:
      if (UnsupportedFPMode) {
        LLVM_DEBUG(dbgs() << ".. .. gave up (UnsupportedFPMode)\n");
        return false;
      }
      if (NextFGR32 == FGR32ArgRegs.end()) {
        LLVM_DEBUG(dbgs() << ".. .. gave up (ran out of FGR32 arguments)\n");
        return false;
      }
      LLVM_DEBUG(dbgs() << ".. .. FGR32(" << *NextFGR32 << ")\n");
      Allocation.emplace_back(&Mips::FGR32RegClass, *NextFGR32++);
      // Allocating an FGR32 also allocates the super-register AFGR64, and
      // ABI rules require us to skip the corresponding GPR32.
      if (NextGPR32 != GPR32ArgRegs.end())
        NextGPR32++;
      if (NextAFGR64 != AFGR64ArgRegs.end())
        NextAFGR64++;
      break;

    case MVT::f64:
      if (UnsupportedFPMode) {
        LLVM_DEBUG(dbgs() << ".. .. gave up (UnsupportedFPMode)\n");
        return false;
      }
      if (NextAFGR64 == AFGR64ArgRegs.end()) {
        LLVM_DEBUG(dbgs() << ".. .. gave up (ran out of AFGR64 arguments)\n");
        return false;
      }
      LLVM_DEBUG(dbgs() << ".. .. AFGR64(" << *NextAFGR64 << ")\n");
      Allocation.emplace_back(&Mips::AFGR64RegClass, *NextAFGR64++);
      // Allocating an FGR32 also allocates the super-register AFGR64, and
      // ABI rules require us to skip the corresponding GPR32 pair.
      if (NextGPR32 != GPR32ArgRegs.end())
        NextGPR32++;
      if (NextGPR32 != GPR32ArgRegs.end())
        NextGPR32++;
      if (NextFGR32 != FGR32ArgRegs.end())
        NextFGR32++;
      break;

    default:
      LLVM_DEBUG(dbgs() << ".. .. gave up (unknown type)\n");
      return false;
    }
  }

  // Second pass: turn each allocated physical register into a live-in and
  // copy it into a fresh virtual register mapped to the formal argument.
  for (const auto &FormalArg : F->args()) {
    unsigned ArgNo = FormalArg.getArgNo();
    unsigned SrcReg = Allocation[ArgNo].Reg;
    Register DstReg = FuncInfo.MF->addLiveIn(SrcReg, Allocation[ArgNo].RC);
    // FIXME: Unfortunately it's necessary to emit a copy from the livein copy.
    // Without this, EmitLiveInCopies may eliminate the livein if its only
    // use is a bitcast (which isn't turned into an instruction).
    Register ResultReg = createResultReg(Allocation[ArgNo].RC);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
            TII.get(TargetOpcode::COPY), ResultReg)
        .addReg(DstReg, getKillRegState(true));
    updateValueMap(&FormalArg, ResultReg);
  }

  // Calculate the size of the incoming arguments area.
  // We currently reject all the cases where this would be non-zero.
  unsigned IncomingArgSizeInBytes = 0;

  // Account for the reserved argument area on ABI's that have one (O32).
  // It seems strange to do this on the caller side but it's necessary in
  // SelectionDAG's implementation.
  IncomingArgSizeInBytes = std::min(getABI().GetCalleeAllocdArgSizeInBytes(CC),
                                    IncomingArgSizeInBytes);

  MF->getInfo<MipsFunctionInfo>()->setFormalArgInfo(IncomingArgSizeInBytes,
                                                    false);

  return true;
}
1501
// Lower a call instruction: screen out cases we can't handle (FastCC, tail
// calls, varargs, aggregate/vector arguments), compute the callee address,
// marshal the arguments via processCallArgs, emit the T9-indirect JALR, and
// copy back the return value via finishCall.
bool MipsFastISel::fastLowerCall(CallLoweringInfo &CLI) {
  CallingConv::ID CC = CLI.CallConv;
  bool IsTailCall = CLI.IsTailCall;
  bool IsVarArg = CLI.IsVarArg;
  const Value *Callee = CLI.Callee;
  MCSymbol *Symbol = CLI.Symbol;

  // Do not handle FastCC.
  if (CC == CallingConv::Fast)
    return false;

  // Allow SelectionDAG isel to handle tail calls.
  if (IsTailCall)
    return false;

  // Let SDISel handle vararg functions.
  if (IsVarArg)
    return false;

  // FIXME: Only handle *simple* calls for now.
  MVT RetVT;
  if (CLI.RetTy->isVoidTy())
    RetVT = MVT::isVoid;
  else if (!isTypeSupported(CLI.RetTy, RetVT))
    return false;

  for (auto Flag : CLI.OutFlags)
    if (Flag.isInReg() || Flag.isSRet() || Flag.isNest() || Flag.isByVal())
      return false;

  // Set up the argument vectors.
  SmallVector<MVT, 16> OutVTs;
  OutVTs.reserve(CLI.OutVals.size());

  for (auto *Val : CLI.OutVals) {
    MVT VT;
    if (!isTypeLegal(Val->getType(), VT) &&
        !(VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16))
      return false;

    // We don't handle vector parameters yet.
    if (VT.isVector() || VT.getSizeInBits() > 64)
      return false;

    OutVTs.push_back(VT);
  }

  Address Addr;
  if (!computeCallAddress(Callee, Addr))
    return false;

  // Handle the arguments now that we've gotten them.
  unsigned NumBytes;
  if (!processCallArgs(CLI, OutVTs, NumBytes))
    return false;

  if (!Addr.getGlobalValue())
    return false;

  // Issue the call: materialize the callee address into T9 (PIC convention)
  // and jump through it with JALR, clobbering RA.
  unsigned DestAddress;
  if (Symbol)
    DestAddress = materializeExternalCallSym(Symbol);
  else
    DestAddress = materializeGV(Addr.getGlobalValue(), MVT::i32);
  emitInst(TargetOpcode::COPY, Mips::T9).addReg(DestAddress);
  MachineInstrBuilder MIB =
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Mips::JALR),
              Mips::RA).addReg(Mips::T9);

  // Add implicit physical register uses to the call.
  for (auto Reg : CLI.OutRegs)

  // Add a register mask with the call-preserved registers.
  // Proper defs for return values will be added by setPhysRegsDeadExcept().
  MIB.addRegMask(TRI.getCallPreservedMask(*FuncInfo.MF, CC));

  CLI.Call = MIB;

  if (EmitJalrReloc && !Subtarget->inMips16Mode()) {
    // Attach callee address to the instruction, let asm printer emit
    // .reloc R_MIPS_JALR.
    if (Symbol)
      MIB.addSym(Symbol, MipsII::MO_JALR);
    else
      MIB.addSym(FuncInfo.MF->getContext().getOrCreateSymbol(
                     Addr.getGlobalValue()->getName()), MipsII::MO_JALR);
  }

  // Finish off the call including any return values.
  return finishCall(CLI, RetVT, NumBytes);
}
1595
// Lower the few intrinsics FastISel can handle directly: bswap (expanded
// inline), and memcpy/memmove/memset (rewritten as libcalls).
bool MipsFastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
  switch (II->getIntrinsicID()) {
  default:
    return false;
  case Intrinsic::bswap: {
    Type *RetTy = II->getCalledFunction()->getReturnType();

    MVT VT;
    if (!isTypeSupported(RetTy, VT))
      return false;

    Register SrcReg = getRegForValue(II->getOperand(0));
    if (SrcReg == 0)
      return false;
    Register DestReg = createResultReg(&Mips::GPR32RegClass);
    if (DestReg == 0)
      return false;
    if (VT == MVT::i16) {
      if (Subtarget->hasMips32r2()) {
        // MIPS32r2 has a byte-swap-within-halfwords instruction.
        emitInst(Mips::WSBH, DestReg).addReg(SrcReg);
        updateValueMap(II, DestReg);
        return true;
      } else {
        // Pre-r2 16-bit bswap: (x << 8) | ((x >> 8) & 0xff).
        unsigned TempReg[3];
        for (unsigned &R : TempReg) {
          R = createResultReg(&Mips::GPR32RegClass);
          if (R == 0)
            return false;
        }
        emitInst(Mips::SLL, TempReg[0]).addReg(SrcReg).addImm(8);
        emitInst(Mips::SRL, TempReg[1]).addReg(SrcReg).addImm(8);
        emitInst(Mips::ANDi, TempReg[2]).addReg(TempReg[1]).addImm(0xFF);
        emitInst(Mips::OR, DestReg).addReg(TempReg[0]).addReg(TempReg[2]);
        updateValueMap(II, DestReg);
        return true;
      }
    } else if (VT == MVT::i32) {
      if (Subtarget->hasMips32r2()) {
        // Swap bytes within each halfword, then rotate the halfwords.
        Register TempReg = createResultReg(&Mips::GPR32RegClass);
        emitInst(Mips::WSBH, TempReg).addReg(SrcReg);
        emitInst(Mips::ROTR, DestReg).addReg(TempReg).addImm(16);
        updateValueMap(II, DestReg);
        return true;
      } else {
        // Pre-r2 32-bit bswap: move each of the four bytes of SrcReg into
        // its mirrored position and OR the pieces together.
        unsigned TempReg[8];
        for (unsigned &R : TempReg) {
          R = createResultReg(&Mips::GPR32RegClass);
          if (R == 0)
            return false;
        }

        emitInst(Mips::SRL, TempReg[0]).addReg(SrcReg).addImm(8);
        emitInst(Mips::SRL, TempReg[1]).addReg(SrcReg).addImm(24);
        emitInst(Mips::ANDi, TempReg[2]).addReg(TempReg[0]).addImm(0xFF00);
        emitInst(Mips::OR, TempReg[3]).addReg(TempReg[1]).addReg(TempReg[2]);

        emitInst(Mips::ANDi, TempReg[4]).addReg(SrcReg).addImm(0xFF00);
        emitInst(Mips::SLL, TempReg[5]).addReg(TempReg[4]).addImm(8);

        emitInst(Mips::SLL, TempReg[6]).addReg(SrcReg).addImm(24);
        emitInst(Mips::OR, TempReg[7]).addReg(TempReg[3]).addReg(TempReg[5]);
        emitInst(Mips::OR, DestReg).addReg(TempReg[6]).addReg(TempReg[7]);
        updateValueMap(II, DestReg);
        return true;
      }
    }
    return false;
  }
  case Intrinsic::memcpy:
  case Intrinsic::memmove: {
    const auto *MTI = cast<MemTransferInst>(II);
    // Don't handle volatile.
    if (MTI->isVolatile())
      return false;
    if (!MTI->getLength()->getType()->isIntegerTy(32))
      return false;
    const char *IntrMemName = isa<MemCpyInst>(II) ? "memcpy" : "memmove";
    // Drop the trailing argument (the volatile flag) when forming the call.
    return lowerCallTo(II, IntrMemName, II->arg_size() - 1);
  }
  case Intrinsic::memset: {
    const MemSetInst *MSI = cast<MemSetInst>(II);
    // Don't handle volatile.
    if (MSI->isVolatile())
      return false;
    if (!MSI->getLength()->getType()->isIntegerTy(32))
      return false;
    return lowerCallTo(II, "memset", II->arg_size() - 1);
  }
  }
  return false;
}
1687
// Lower a return instruction. Handles void returns and a single register
// return value (with i1/i8/i16 widened per the sext/zext return attribute);
// everything else falls back to SelectionDAG.
bool MipsFastISel::selectRet(const Instruction *I) {
  const Function &F = *I->getParent()->getParent();
  const ReturnInst *Ret = cast<ReturnInst>(I);

  LLVM_DEBUG(dbgs() << "selectRet\n");

  if (!FuncInfo.CanLowerReturn)
    return false;

  // Build a list of return value registers.
  SmallVector<unsigned, 4> RetRegs;

  if (Ret->getNumOperands() > 0) {
    CallingConv::ID CC = F.getCallingConv();

    // Do not handle FastCC.
    if (CC == CallingConv::Fast)
      return false;

    GetReturnInfo(CC, F.getReturnType(), F.getAttributes(), Outs, TLI, DL);

    // Analyze operands of the call, assigning locations to each operand.
    MipsCCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, ValLocs,
                       I->getContext());
    CCAssignFn *RetCC = RetCC_Mips;
    CCInfo.AnalyzeReturn(Outs, RetCC);

    // Only handle a single return value for now.
    if (ValLocs.size() != 1)
      return false;

    CCValAssign &VA = ValLocs[0];
    const Value *RV = Ret->getOperand(0);

    // Don't bother handling odd stuff for now.
    if ((VA.getLocInfo() != CCValAssign::Full) &&
        (VA.getLocInfo() != CCValAssign::BCvt))
      return false;

    // Only handle register returns for now.
    if (!VA.isRegLoc())
      return false;

    Register Reg = getRegForValue(RV);
    if (Reg == 0)
      return false;

    unsigned SrcReg = Reg + VA.getValNo();
    Register DestReg = VA.getLocReg();
    // Avoid a cross-class copy. This is very unlikely.
    if (!MRI.getRegClass(SrcReg)->contains(DestReg))
      return false;

    EVT RVEVT = TLI.getValueType(DL, RV->getType());
    if (!RVEVT.isSimple())
      return false;

    if (RVEVT.isVector())
      return false;

    MVT RVVT = RVEVT.getSimpleVT();
    if (RVVT == MVT::f128)
      return false;

    // Do not handle FGR64 returns for now.
    if (RVVT == MVT::f64 && UnsupportedFPMode) {
      LLVM_DEBUG(dbgs() << ".. .. gave up (UnsupportedFPMode\n");
      return false;
    }

    MVT DestVT = VA.getValVT();
    // Special handling for extended integers: widen i1/i8/i16 per the
    // return attribute before copying into the return register.
    if (RVVT != DestVT) {
      if (RVVT != MVT::i1 && RVVT != MVT::i8 && RVVT != MVT::i16)
        return false;

      if (Outs[0].Flags.isZExt() || Outs[0].Flags.isSExt()) {
        bool IsZExt = Outs[0].Flags.isZExt();
        SrcReg = emitIntExt(RVVT, SrcReg, DestVT, IsZExt);
        if (SrcReg == 0)
          return false;
      }
    }

    // Make the copy.
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
            TII.get(TargetOpcode::COPY), DestReg).addReg(SrcReg);

    // Add register to return instruction.
    RetRegs.push_back(VA.getLocReg());
  }
  MachineInstrBuilder MIB = emitInst(Mips::RetRA);
  for (unsigned Reg : RetRegs)
  return true;
}
1786
1787bool MipsFastISel::selectTrunc(const Instruction *I) {
1788 // The high bits for a type smaller than the register size are assumed to be
1789 // undefined.
1790 Value *Op = I->getOperand(0);
1791
1792 EVT SrcVT, DestVT;
1793 SrcVT = TLI.getValueType(DL, Op->getType(), true);
1794 DestVT = TLI.getValueType(DL, I->getType(), true);
1795
1796 if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)
1797 return false;
1798 if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1)
1799 return false;
1800
1801 Register SrcReg = getRegForValue(Op);
1802 if (!SrcReg)
1803 return false;
1804
1805 // Because the high bits are undefined, a truncate doesn't generate
1806 // any code.
1807 updateValueMap(I, SrcReg);
1808 return true;
1809}
1810
1811bool MipsFastISel::selectIntExt(const Instruction *I) {
1812 Type *DestTy = I->getType();
1813 Value *Src = I->getOperand(0);
1814 Type *SrcTy = Src->getType();
1815
1816 bool isZExt = isa<ZExtInst>(I);
1817 Register SrcReg = getRegForValue(Src);
1818 if (!SrcReg)
1819 return false;
1820
1821 EVT SrcEVT, DestEVT;
1822 SrcEVT = TLI.getValueType(DL, SrcTy, true);
1823 DestEVT = TLI.getValueType(DL, DestTy, true);
1824 if (!SrcEVT.isSimple())
1825 return false;
1826 if (!DestEVT.isSimple())
1827 return false;
1828
1829 MVT SrcVT = SrcEVT.getSimpleVT();
1830 MVT DestVT = DestEVT.getSimpleVT();
1831 Register ResultReg = createResultReg(&Mips::GPR32RegClass);
1832
1833 if (!emitIntExt(SrcVT, SrcReg, DestVT, ResultReg, isZExt))
1834 return false;
1835 updateValueMap(I, ResultReg);
1836 return true;
1837}
1838
1839bool MipsFastISel::emitIntSExt32r1(MVT SrcVT, unsigned SrcReg, MVT DestVT,
1840 unsigned DestReg) {
1841 unsigned ShiftAmt;
1842 switch (SrcVT.SimpleTy) {
1843 default:
1844 return false;
1845 case MVT::i8:
1846 ShiftAmt = 24;
1847 break;
1848 case MVT::i16:
1849 ShiftAmt = 16;
1850 break;
1851 }
1852 Register TempReg = createResultReg(&Mips::GPR32RegClass);
1853 emitInst(Mips::SLL, TempReg).addReg(SrcReg).addImm(ShiftAmt);
1854 emitInst(Mips::SRA, DestReg).addReg(TempReg).addImm(ShiftAmt);
1855 return true;
1856}
1857
1858bool MipsFastISel::emitIntSExt32r2(MVT SrcVT, unsigned SrcReg, MVT DestVT,
1859 unsigned DestReg) {
1860 switch (SrcVT.SimpleTy) {
1861 default:
1862 return false;
1863 case MVT::i8:
1864 emitInst(Mips::SEB, DestReg).addReg(SrcReg);
1865 break;
1866 case MVT::i16:
1867 emitInst(Mips::SEH, DestReg).addReg(SrcReg);
1868 break;
1869 }
1870 return true;
1871}
1872
1873bool MipsFastISel::emitIntSExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
1874 unsigned DestReg) {
1875 if ((DestVT != MVT::i32) && (DestVT != MVT::i16))
1876 return false;
1877 if (Subtarget->hasMips32r2())
1878 return emitIntSExt32r2(SrcVT, SrcReg, DestVT, DestReg);
1879 return emitIntSExt32r1(SrcVT, SrcReg, DestVT, DestReg);
1880}
1881
1882bool MipsFastISel::emitIntZExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
1883 unsigned DestReg) {
1884 int64_t Imm;
1885
1886 switch (SrcVT.SimpleTy) {
1887 default:
1888 return false;
1889 case MVT::i1:
1890 Imm = 1;
1891 break;
1892 case MVT::i8:
1893 Imm = 0xff;
1894 break;
1895 case MVT::i16:
1896 Imm = 0xffff;
1897 break;
1898 }
1899
1900 emitInst(Mips::ANDi, DestReg).addReg(SrcReg).addImm(Imm);
1901 return true;
1902}
1903
1904bool MipsFastISel::emitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
1905 unsigned DestReg, bool IsZExt) {
1906 // FastISel does not have plumbing to deal with extensions where the SrcVT or
1907 // DestVT are odd things, so test to make sure that they are both types we can
1908 // handle (i1/i8/i16/i32 for SrcVT and i8/i16/i32/i64 for DestVT), otherwise
1909 // bail out to SelectionDAG.
1910 if (((DestVT != MVT::i8) && (DestVT != MVT::i16) && (DestVT != MVT::i32)) ||
1911 ((SrcVT != MVT::i1) && (SrcVT != MVT::i8) && (SrcVT != MVT::i16)))
1912 return false;
1913 if (IsZExt)
1914 return emitIntZExt(SrcVT, SrcReg, DestVT, DestReg);
1915 return emitIntSExt(SrcVT, SrcReg, DestVT, DestReg);
1916}
1917
1918unsigned MipsFastISel::emitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
1919 bool isZExt) {
1920 unsigned DestReg = createResultReg(&Mips::GPR32RegClass);
1921 bool Success = emitIntExt(SrcVT, SrcReg, DestVT, DestReg, isZExt);
1922 return Success ? DestReg : 0;
1923}
1924
1925bool MipsFastISel::selectDivRem(const Instruction *I, unsigned ISDOpcode) {
1926 EVT DestEVT = TLI.getValueType(DL, I->getType(), true);
1927 if (!DestEVT.isSimple())
1928 return false;
1929
1930 MVT DestVT = DestEVT.getSimpleVT();
1931 if (DestVT != MVT::i32)
1932 return false;
1933
1934 unsigned DivOpc;
1935 switch (ISDOpcode) {
1936 default:
1937 return false;
1938 case ISD::SDIV:
1939 case ISD::SREM:
1940 DivOpc = Mips::SDIV;
1941 break;
1942 case ISD::UDIV:
1943 case ISD::UREM:
1944 DivOpc = Mips::UDIV;
1945 break;
1946 }
1947
1948 Register Src0Reg = getRegForValue(I->getOperand(0));
1949 Register Src1Reg = getRegForValue(I->getOperand(1));
1950 if (!Src0Reg || !Src1Reg)
1951 return false;
1952
1953 emitInst(DivOpc).addReg(Src0Reg).addReg(Src1Reg);
1954 if (!isa<ConstantInt>(I->getOperand(1)) ||
1955 dyn_cast<ConstantInt>(I->getOperand(1))->isZero()) {
1956 emitInst(Mips::TEQ).addReg(Src1Reg).addReg(Mips::ZERO).addImm(7);
1957 }
1958
1959 Register ResultReg = createResultReg(&Mips::GPR32RegClass);
1960 if (!ResultReg)
1961 return false;
1962
1963 unsigned MFOpc = (ISDOpcode == ISD::SREM || ISDOpcode == ISD::UREM)
1964 ? Mips::MFHI
1965 : Mips::MFLO;
1966 emitInst(MFOpc, ResultReg);
1967
1968 updateValueMap(I, ResultReg);
1969 return true;
1970}
1971
1972bool MipsFastISel::selectShift(const Instruction *I) {
1973 MVT RetVT;
1974
1975 if (!isTypeSupported(I->getType(), RetVT))
1976 return false;
1977
1978 Register ResultReg = createResultReg(&Mips::GPR32RegClass);
1979 if (!ResultReg)
1980 return false;
1981
1982 unsigned Opcode = I->getOpcode();
1983 const Value *Op0 = I->getOperand(0);
1984 Register Op0Reg = getRegForValue(Op0);
1985 if (!Op0Reg)
1986 return false;
1987
1988 // If AShr or LShr, then we need to make sure the operand0 is sign extended.
1989 if (Opcode == Instruction::AShr || Opcode == Instruction::LShr) {
1990 Register TempReg = createResultReg(&Mips::GPR32RegClass);
1991 if (!TempReg)
1992 return false;
1993
1994 MVT Op0MVT = TLI.getValueType(DL, Op0->getType(), true).getSimpleVT();
1995 bool IsZExt = Opcode == Instruction::LShr;
1996 if (!emitIntExt(Op0MVT, Op0Reg, MVT::i32, TempReg, IsZExt))
1997 return false;
1998
1999 Op0Reg = TempReg;
2000 }
2001
2002 if (const auto *C = dyn_cast<ConstantInt>(I->getOperand(1))) {
2003 uint64_t ShiftVal = C->getZExtValue();
2004
2005 switch (Opcode) {
2006 default:
2007 llvm_unreachable("Unexpected instruction.");
2008 case Instruction::Shl:
2009 Opcode = Mips::SLL;
2010 break;
2011 case Instruction::AShr:
2012 Opcode = Mips::SRA;
2013 break;
2014 case Instruction::LShr:
2015 Opcode = Mips::SRL;
2016 break;
2017 }
2018
2019 emitInst(Opcode, ResultReg).addReg(Op0Reg).addImm(ShiftVal);
2020 updateValueMap(I, ResultReg);
2021 return true;
2022 }
2023
2024 Register Op1Reg = getRegForValue(I->getOperand(1));
2025 if (!Op1Reg)
2026 return false;
2027
2028 switch (Opcode) {
2029 default:
2030 llvm_unreachable("Unexpected instruction.");
2031 case Instruction::Shl:
2032 Opcode = Mips::SLLV;
2033 break;
2034 case Instruction::AShr:
2035 Opcode = Mips::SRAV;
2036 break;
2037 case Instruction::LShr:
2038 Opcode = Mips::SRLV;
2039 break;
2040 }
2041
2042 emitInst(Opcode, ResultReg).addReg(Op0Reg).addReg(Op1Reg);
2043 updateValueMap(I, ResultReg);
2044 return true;
2045}
2046
// Dispatch a single IR instruction to the matching select* routine.
// Returning false hands the instruction back to SelectionDAG.
bool MipsFastISel::fastSelectInstruction(const Instruction *I) {
  switch (I->getOpcode()) {
  default:
    break;
  case Instruction::Load:
    return selectLoad(I);
  case Instruction::Store:
    return selectStore(I);
  // For div/rem, first try a tablegen-generated pattern via selectBinaryOp,
  // and only fall back to the manual selectDivRem lowering if that fails.
  case Instruction::SDiv:
    if (!selectBinaryOp(I, ISD::SDIV))
      return selectDivRem(I, ISD::SDIV);
    return true;
  case Instruction::UDiv:
    if (!selectBinaryOp(I, ISD::UDIV))
      return selectDivRem(I, ISD::UDIV);
    return true;
  case Instruction::SRem:
    if (!selectBinaryOp(I, ISD::SREM))
      return selectDivRem(I, ISD::SREM);
    return true;
  case Instruction::URem:
    if (!selectBinaryOp(I, ISD::UREM))
      return selectDivRem(I, ISD::UREM);
    return true;
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    return selectShift(I);
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
    return selectLogicalOp(I);
  case Instruction::Br:
    return selectBranch(I);
  case Instruction::Ret:
    return selectRet(I);
  case Instruction::Trunc:
    return selectTrunc(I);
  case Instruction::ZExt:
  case Instruction::SExt:
    return selectIntExt(I);
  case Instruction::FPTrunc:
    return selectFPTrunc(I);
  case Instruction::FPExt:
    return selectFPExt(I);
  case Instruction::FPToSI:
    return selectFPToInt(I, /*isSigned*/ true);
  case Instruction::FPToUI:
    return selectFPToInt(I, /*isSigned*/ false);
  case Instruction::ICmp:
  case Instruction::FCmp:
    return selectCmp(I);
  case Instruction::Select:
    return selectSelect(I);
  }
  return false;
}
2104
2105unsigned MipsFastISel::getRegEnsuringSimpleIntegerWidening(const Value *V,
2106 bool IsUnsigned) {
2107 Register VReg = getRegForValue(V);
2108 if (VReg == 0)
2109 return 0;
2110 MVT VMVT = TLI.getValueType(DL, V->getType(), true).getSimpleVT();
2111
2112 if (VMVT == MVT::i1)
2113 return 0;
2114
2115 if ((VMVT == MVT::i8) || (VMVT == MVT::i16)) {
2116 Register TempReg = createResultReg(&Mips::GPR32RegClass);
2117 if (!emitIntExt(VMVT, VReg, MVT::i32, TempReg, IsUnsigned))
2118 return 0;
2119 VReg = TempReg;
2120 }
2121 return VReg;
2122}
2123
2124void MipsFastISel::simplifyAddress(Address &Addr) {
2125 if (!isInt<16>(Addr.getOffset())) {
2126 unsigned TempReg =
2127 materialize32BitInt(Addr.getOffset(), &Mips::GPR32RegClass);
2128 Register DestReg = createResultReg(&Mips::GPR32RegClass);
2129 emitInst(Mips::ADDu, DestReg).addReg(TempReg).addReg(Addr.getReg());
2130 Addr.setReg(DestReg);
2131 Addr.setOffset(0);
2132 }
2133}
2134
// Override of FastISel::fastEmitInst_rr that special-cases MUL; all other
// opcodes are delegated to the base class implementation.
unsigned MipsFastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
                                       const TargetRegisterClass *RC,
                                       unsigned Op0, unsigned Op1) {
  // We treat the MUL instruction in a special way because it clobbers
  // the HI0 & LO0 registers. The TableGen definition of this instruction can
  // mark these registers only as implicitly defined. As a result, the
  // register allocator runs out of registers when this instruction is
  // followed by another instruction that defines the same registers too.
  // We can fix this by explicitly marking those registers as dead.
  if (MachineInstOpcode == Mips::MUL) {
    Register ResultReg = createResultReg(RC);
    const MCInstrDesc &II = TII.get(MachineInstOpcode);
    // Constrain both source operands to the classes the instruction expects.
    Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
    Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
        .addReg(Op0)
        .addReg(Op1)
    return ResultReg;
  }

  return FastISel::fastEmitInst_rr(MachineInstOpcode, RC, Op0, Op1);
}
2159
namespace llvm {

                                     const TargetLibraryInfo *libInfo) {
  // Factory used by the MIPS target to instantiate its FastISel.
  return new MipsFastISel(funcInfo, libInfo);
}

} // end namespace llvm
unsigned const MachineRegisterInfo * MRI
#define Success
static unsigned selectBinaryOp(unsigned GenericOpc, unsigned RegBankID, unsigned OpSize)
Select the AArch64 opcode for the basic binary operation GenericOpc (such as G_OR or G_SDIV),...
static void emitLoad(MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator Pos, const TargetInstrInfo &TII, unsigned Reg1, unsigned Reg2, int Offset, bool IsPostDec)
Emit a load-pair instruction for frame-destroy.
static void emitStore(MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator Pos, const TargetInstrInfo &TII, unsigned Reg1, unsigned Reg2, int Offset, bool IsPreDec)
Emit a store-pair instruction for frame-setup.
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
This file contains the simple types necessary to represent the attributes associated with functions a...
#define LLVM_ATTRIBUTE_UNUSED
Definition Compiler.h:298
This file contains the declarations for the subclasses of Constant, which represent the different fla...
This file defines the DenseMap class.
This file defines the FastISel class.
const HexagonInstrInfo * TII
#define F(x, y, z)
Definition MD5.cpp:55
#define I(x, y, z)
Definition MD5.cpp:58
#define G(x, y, z)
Definition MD5.cpp:56
Register Reg
Register const TargetRegisterInfo * TRI
Promote Memory to Register
Definition Mem2Reg.cpp:110
cl::opt< bool > EmitJalrReloc
static unsigned getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
static bool CC_Mips(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State) LLVM_ATTRIBUTE_UNUSED
static bool CC_MipsO32_FP64(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State)
static bool CC_MipsO32_FP32(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State)
static bool CC_MipsO32(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State, ArrayRef< MCPhysReg > F64Regs)
uint64_t IntrinsicInst * II
#define P(N)
if(PassOpts->AAPipeline)
const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB
const SmallVectorImpl< MachineOperand > & Cond
This file defines the SmallVector class.
#define LLVM_DEBUG(...)
Definition Debug.h:114
static SDValue emitCmp(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
This file describes how to lower LLVM code to machine code.
Value * RHS
Value * LHS
APInt bitcastToAPInt() const
Definition APFloat.h:1353
uint64_t getZExtValue() const
Get zero extended value.
Definition APInt.h:1540
an instruction to allocate memory on the stack
PointerType * getType() const
Overload to return most specific pointer type.
BasicBlock * getSuccessor(unsigned i) const
Value * getCondition() const
CCState - This class holds information needed while lowering arguments and return values.
void convertToReg(MCRegister Reg)
Register getLocReg() const
LocInfo getLocInfo() const
bool needsCustom() const
int64_t getLocMemOffset() const
unsigned getValNo() const
This class is the base class for the comparison instructions.
Definition InstrTypes.h:666
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition InstrTypes.h:678
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
Definition InstrTypes.h:681
@ ICMP_SLT
signed less than
Definition InstrTypes.h:707
@ ICMP_SLE
signed less or equal
Definition InstrTypes.h:708
@ FCMP_OLT
0 1 0 0 True if ordered and less than
Definition InstrTypes.h:684
@ FCMP_OGT
0 0 1 0 True if ordered and greater than
Definition InstrTypes.h:682
@ FCMP_OGE
0 0 1 1 True if ordered and greater than or equal
Definition InstrTypes.h:683
@ ICMP_UGE
unsigned greater or equal
Definition InstrTypes.h:702
@ ICMP_UGT
unsigned greater than
Definition InstrTypes.h:701
@ ICMP_SGT
signed greater than
Definition InstrTypes.h:705
@ ICMP_ULT
unsigned less than
Definition InstrTypes.h:703
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
Definition InstrTypes.h:685
@ ICMP_NE
not equal
Definition InstrTypes.h:700
@ ICMP_SGE
signed greater or equal
Definition InstrTypes.h:706
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
Definition InstrTypes.h:694
@ ICMP_ULE
unsigned less or equal
Definition InstrTypes.h:704
Predicate getPredicate() const
Return the predicate for this instruction.
Definition InstrTypes.h:767
bool isUnsigned() const
Definition InstrTypes.h:938
ConstantFP - Floating Point Values [float, double].
Definition Constants.h:277
const APFloat & getValueAPF() const
Definition Constants.h:320
int64_t getSExtValue() const
Return the constant as a 64-bit integer value after it has been sign extended as appropriate for the ...
Definition Constants.h:169
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
Definition Constants.h:163
This is an important base class in LLVM.
Definition Constant.h:43
This is a fast-path instruction selection class that generates poor code and doesn't support illegal ...
Definition FastISel.h:66
Register fastEmitInst_rr(unsigned MachineInstOpcode, const TargetRegisterClass *RC, Register Op0, Register Op1)
Emit a MachineInstr with two register operands and a result register in the given register class.
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition Function.cpp:359
bool isThreadLocal() const
If the value is "Thread Local", its value isn't shared by the threads.
bool hasLocalLinkage() const
bool hasInternalLinkage() const
LLVM_ABI bool isAtomic() const LLVM_READONLY
Return true if this instruction has an AtomicOrdering of unordered or higher.
A wrapper class for inspecting calls to intrinsic functions.
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
Align getAlign() const
Return the alignment of the access that is being performed.
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
Definition MCSymbol.h:42
Machine Value Type.
SimpleValueType SimpleTy
bool isVector() const
Return true if this is a vector value type.
bool isInteger() const
Return true if this is an integer or a vector integer type.
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addSym(MCSymbol *Sym, unsigned char TargetFlags=0) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
const MachineInstrBuilder & addRegMask(const uint32_t *Mask) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
@ MOLoad
The memory access reads data.
@ MOStore
The memory access writes data.
Value * getLength() const
bool isVolatile() const
MipsFunctionInfo - This class is derived from MachineFunction private Mips target-specific informatio...
Register getGlobalBaseReg(MachineFunction &MF)
bool isFP64bit() const
bool useSoftFloat() const
const MipsInstrInfo * getInstrInfo() const override
bool inMips16Mode() const
bool systemSupportsUnalignedAccess() const
Does the system support unaligned memory access.
bool hasMips32r2() const
const MipsTargetLowering * getTargetLowering() const override
Wrapper class representing virtual and physical registers.
Definition Register.h:19
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void reserve(size_type N)
void push_back(const T &Elt)
TypeSize getElementOffset(unsigned Idx) const
Definition DataLayout.h:652
TargetInstrInfo - Interface to description of machine instruction set.
Provides information about what library functions are available for the current target.
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
Primary interface to the complete machine description for the target machine.
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
bool isVectorTy() const
True if this is an instance of VectorType.
Definition Type.h:273
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition Type.h:240
const Use * const_op_iterator
Definition User.h:280
Value * getOperand(unsigned i) const
Definition User.h:232
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
bool hasOneUse() const
Return true if there is exactly one use of this value.
Definition Value.h:439
TypeSize getSequentialElementStride(const DataLayout &DL) const
const ParentTy * getParent() const
Definition ilist_node.h:34
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ AND
Bitwise operators - logical and, logical or, logical xor.
Definition ISDOpcodes.h:730
Flag
These should be considered private to the implementation of the MCInstrDesc class.
FastISel * createFastISel(FunctionLoweringInfo &funcInfo, const TargetLibraryInfo *libInfo)
@ Implicit
Not emitted register (e.g. carry, or temporary result).
@ Dead
Unused definition.
@ Define
Register definition.
@ User
could "use" a pointer
This is an optimization pass for GlobalISel generic memory operations.
@ Offset
Definition DWP.cpp:477
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
LLVM_ABI Register constrainOperandRegClass(const MachineFunction &MF, const TargetRegisterInfo &TRI, MachineRegisterInfo &MRI, const TargetInstrInfo &TII, const RegisterBankInfo &RBI, MachineInstr &InsertPt, const TargetRegisterClass &RegClass, MachineOperand &RegMO)
Constrain the Register operand OpIdx, so that it is now constrained to the TargetRegisterClass passed...
Definition Utils.cpp:56
LLVM_ABI void GetReturnInfo(CallingConv::ID CC, Type *ReturnType, AttributeList attr, SmallVectorImpl< ISD::OutputArg > &Outs, const TargetLowering &TLI, const DataLayout &DL)
Given an LLVM IR type and return type attributes, compute the return value EVTs and flags,...
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
Definition MathExtras.h:174
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:649
bool CCAssignFn(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State)
CCAssignFn - This function assigns a location for Val, updating State to reflect the change.
static Error getOffset(const SymbolRef &Sym, SectionRef Sec, uint64_t &Result)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
generic_gep_type_iterator<> gep_type_iterator
constexpr bool isUInt(uint64_t x)
Checks if an unsigned integer fits into the given bit width.
Definition MathExtras.h:198
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:548
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition Alignment.h:155
DWARFExpression::Operation Op
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:565
gep_type_iterator gep_type_begin(const User *GEP)
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition BitVector.h:853
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
Definition ValueTypes.h:137
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
Definition ValueTypes.h:316
bool isVector() const
Return true if this is a vector value type.
Definition ValueTypes.h:168
static LLVM_ABI MachinePointerInfo getStack(MachineFunction &MF, int64_t Offset, uint8_t ID=0)
Stack pointer relative access.
static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.