
LLVM 22.0.0git
X86InstrInfo.cpp
1//===-- X86InstrInfo.cpp - X86 Instruction Information --------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains the X86 implementation of the TargetInstrInfo class.
10//
11//===----------------------------------------------------------------------===//
12
13#include "X86InstrInfo.h"
14#include "X86.h"
15#include "X86InstrBuilder.h"
16#include "X86InstrFoldTables.h"
18#include "X86Subtarget.h"
19#include "X86TargetMachine.h"
20#include "llvm/ADT/STLExtras.h"
21#include "llvm/ADT/Sequence.h"
36#include "llvm/IR/Function.h"
37#include "llvm/IR/InstrTypes.h"
38#include "llvm/IR/Module.h"
39#include "llvm/MC/MCAsmInfo.h"
40#include "llvm/MC/MCExpr.h"
41#include "llvm/MC/MCInst.h"
43#include "llvm/Support/Debug.h"
47#include <optional>
48
49using namespace llvm;
50
51#define DEBUG_TYPE "x86-instr-info"
52
53#define GET_INSTRINFO_CTOR_DTOR
54#include "X86GenInstrInfo.inc"
55
57
58static cl::opt<bool>
59 NoFusing("disable-spill-fusing",
60 cl::desc("Disable fusing of spill code into instructions"),
62static cl::opt<bool>
63 PrintFailedFusing("print-failed-fuse-candidates",
64 cl::desc("Print instructions that the allocator wants to"
65 " fuse, but the X86 backend currently can't"),
67static cl::opt<bool>
68 ReMatPICStubLoad("remat-pic-stub-load",
69 cl::desc("Re-materialize load from stub in PIC mode"),
70 cl::init(false), cl::Hidden);
72 PartialRegUpdateClearance("partial-reg-update-clearance",
73 cl::desc("Clearance between two register writes "
74 "for inserting XOR to avoid partial "
75 "register update"),
76 cl::init(64), cl::Hidden);
78 "undef-reg-clearance",
79 cl::desc("How many idle instructions we would like before "
80 "certain undef register reads"),
81 cl::init(128), cl::Hidden);
82
83// Pin the vtable to this file.
84void X86InstrInfo::anchor() {}
85
87 : X86GenInstrInfo(STI,
88 (STI.isTarget64BitLP64() ? X86::ADJCALLSTACKDOWN64
89 : X86::ADJCALLSTACKDOWN32),
90 (STI.isTarget64BitLP64() ? X86::ADJCALLSTACKUP64
91 : X86::ADJCALLSTACKUP32),
92 X86::CATCHRET, (STI.is64Bit() ? X86::RET64 : X86::RET32)),
93 Subtarget(STI), RI(STI.getTargetTriple()) {}
94
97 const TargetRegisterInfo *TRI) const {
98 auto *RC = TargetInstrInfo::getRegClass(MCID, OpNum, TRI);
99 // If the target does not have EGPR, then r16-r31 will be reserved for all
100 // instructions.
101 if (!RC || !Subtarget.hasEGPR())
102 return RC;
103
105 return RC;
106
107 const X86RegisterInfo *RI = Subtarget.getRegisterInfo();
108 return RI->constrainRegClassToNonRex2(RC);
109}
110
112 Register &SrcReg, Register &DstReg,
113 unsigned &SubIdx) const {
114 switch (MI.getOpcode()) {
115 default:
116 break;
117 case X86::MOVSX16rr8:
118 case X86::MOVZX16rr8:
119 case X86::MOVSX32rr8:
120 case X86::MOVZX32rr8:
121 case X86::MOVSX64rr8:
122 if (!Subtarget.is64Bit())
123 // It's not always legal to reference the low 8 bits of the larger
124 // register in 32-bit mode.
125 return false;
126 [[fallthrough]];
127 case X86::MOVSX32rr16:
128 case X86::MOVZX32rr16:
129 case X86::MOVSX64rr16:
130 case X86::MOVSX64rr32: {
131 if (MI.getOperand(0).getSubReg() || MI.getOperand(1).getSubReg())
132 // Be conservative.
133 return false;
134 SrcReg = MI.getOperand(1).getReg();
135 DstReg = MI.getOperand(0).getReg();
136 switch (MI.getOpcode()) {
137 default:
138 llvm_unreachable("Unreachable!");
139 case X86::MOVSX16rr8:
140 case X86::MOVZX16rr8:
141 case X86::MOVSX32rr8:
142 case X86::MOVZX32rr8:
143 case X86::MOVSX64rr8:
144 SubIdx = X86::sub_8bit;
145 break;
146 case X86::MOVSX32rr16:
147 case X86::MOVZX32rr16:
148 case X86::MOVSX64rr16:
149 SubIdx = X86::sub_16bit;
150 break;
151 case X86::MOVSX64rr32:
152 SubIdx = X86::sub_32bit;
153 break;
154 }
155 return true;
156 }
157 }
158 return false;
159}
160
162 if (MI.mayLoad() || MI.mayStore())
163 return false;
164
165 // Some target-independent operations that trivially lower to data-invariant
166 // instructions.
167 if (MI.isCopyLike() || MI.isInsertSubreg())
168 return true;
169
170 unsigned Opcode = MI.getOpcode();
171 using namespace X86;
172 // On x86 it is believed that imul is constant time w.r.t. the loaded data.
173 // However, they set flags and are perhaps the most surprisingly constant
174 // time operations so we call them out here separately.
175 if (isIMUL(Opcode))
176 return true;
177 // Bit scanning and counting instructions that are somewhat surprisingly
178 // constant time as they scan across bits and do other fairly complex
179 // operations like popcnt, but are believed to be constant time on x86.
180 // However, these set flags.
181 if (isBSF(Opcode) || isBSR(Opcode) || isLZCNT(Opcode) || isPOPCNT(Opcode) ||
182 isTZCNT(Opcode))
183 return true;
184 // Bit manipulation instructions are effectively combinations of basic
185 // arithmetic ops, and should still execute in constant time. These also
186 // set flags.
187 if (isBLCFILL(Opcode) || isBLCI(Opcode) || isBLCIC(Opcode) ||
188 isBLCMSK(Opcode) || isBLCS(Opcode) || isBLSFILL(Opcode) ||
189 isBLSI(Opcode) || isBLSIC(Opcode) || isBLSMSK(Opcode) || isBLSR(Opcode) ||
190 isTZMSK(Opcode))
191 return true;
192 // Bit extracting and clearing instructions should execute in constant time,
193 // and set flags.
194 if (isBEXTR(Opcode) || isBZHI(Opcode))
195 return true;
196 // Shift and rotate.
197 if (isROL(Opcode) || isROR(Opcode) || isSAR(Opcode) || isSHL(Opcode) ||
198 isSHR(Opcode) || isSHLD(Opcode) || isSHRD(Opcode))
199 return true;
200 // Basic arithmetic is constant time on the input but does set flags.
201 if (isADC(Opcode) || isADD(Opcode) || isAND(Opcode) || isOR(Opcode) ||
202 isSBB(Opcode) || isSUB(Opcode) || isXOR(Opcode))
203 return true;
204 // Arithmetic with just 32-bit and 64-bit variants and no immediates.
205 if (isANDN(Opcode))
206 return true;
207 // Unary arithmetic operations.
208 if (isDEC(Opcode) || isINC(Opcode) || isNEG(Opcode))
209 return true;
210 // Unlike other arithmetic, NOT doesn't set EFLAGS.
211 if (isNOT(Opcode))
212 return true;
213 // Various move instructions used to zero or sign extend things. Note that we
214 // intentionally don't support the _NOREX variants as we can't handle that
215 // register constraint anyways.
216 if (isMOVSX(Opcode) || isMOVZX(Opcode) || isMOVSXD(Opcode) || isMOV(Opcode))
217 return true;
218 // Arithmetic instructions that are both constant time and don't set flags.
219 if (isRORX(Opcode) || isSARX(Opcode) || isSHLX(Opcode) || isSHRX(Opcode))
220 return true;
221 // LEA doesn't actually access memory, and its arithmetic is constant time.
222 if (isLEA(Opcode))
223 return true;
224 // By default, assume that the instruction is not data invariant.
225 return false;
226}
227
229 switch (MI.getOpcode()) {
230 default:
231 // By default, assume that the load will immediately leak.
232 return false;
233
234 // On x86 it is believed that imul is constant time w.r.t. the loaded data.
235 // However, they set flags and are perhaps the most surprisingly constant
236 // time operations so we call them out here separately.
237 case X86::IMUL16rm:
238 case X86::IMUL16rmi:
239 case X86::IMUL32rm:
240 case X86::IMUL32rmi:
241 case X86::IMUL64rm:
242 case X86::IMUL64rmi32:
243
244 // Bit scanning and counting instructions that are somewhat surprisingly
245 // constant time as they scan across bits and do other fairly complex
246 // operations like popcnt, but are believed to be constant time on x86.
247 // However, these set flags.
248 case X86::BSF16rm:
249 case X86::BSF32rm:
250 case X86::BSF64rm:
251 case X86::BSR16rm:
252 case X86::BSR32rm:
253 case X86::BSR64rm:
254 case X86::LZCNT16rm:
255 case X86::LZCNT32rm:
256 case X86::LZCNT64rm:
257 case X86::POPCNT16rm:
258 case X86::POPCNT32rm:
259 case X86::POPCNT64rm:
260 case X86::TZCNT16rm:
261 case X86::TZCNT32rm:
262 case X86::TZCNT64rm:
263
264 // Bit manipulation instructions are effectively combinations of basic
265 // arithmetic ops, and should still execute in constant time. These also
266 // set flags.
267 case X86::BLCFILL32rm:
268 case X86::BLCFILL64rm:
269 case X86::BLCI32rm:
270 case X86::BLCI64rm:
271 case X86::BLCIC32rm:
272 case X86::BLCIC64rm:
273 case X86::BLCMSK32rm:
274 case X86::BLCMSK64rm:
275 case X86::BLCS32rm:
276 case X86::BLCS64rm:
277 case X86::BLSFILL32rm:
278 case X86::BLSFILL64rm:
279 case X86::BLSI32rm:
280 case X86::BLSI64rm:
281 case X86::BLSIC32rm:
282 case X86::BLSIC64rm:
283 case X86::BLSMSK32rm:
284 case X86::BLSMSK64rm:
285 case X86::BLSR32rm:
286 case X86::BLSR64rm:
287 case X86::TZMSK32rm:
288 case X86::TZMSK64rm:
289
290 // Bit extracting and clearing instructions should execute in constant time,
291 // and set flags.
292 case X86::BEXTR32rm:
293 case X86::BEXTR64rm:
294 case X86::BEXTRI32mi:
295 case X86::BEXTRI64mi:
296 case X86::BZHI32rm:
297 case X86::BZHI64rm:
298
299 // Basic arithmetic is constant time on the input but does set flags.
300 case X86::ADC8rm:
301 case X86::ADC16rm:
302 case X86::ADC32rm:
303 case X86::ADC64rm:
304 case X86::ADD8rm:
305 case X86::ADD16rm:
306 case X86::ADD32rm:
307 case X86::ADD64rm:
308 case X86::AND8rm:
309 case X86::AND16rm:
310 case X86::AND32rm:
311 case X86::AND64rm:
312 case X86::ANDN32rm:
313 case X86::ANDN64rm:
314 case X86::OR8rm:
315 case X86::OR16rm:
316 case X86::OR32rm:
317 case X86::OR64rm:
318 case X86::SBB8rm:
319 case X86::SBB16rm:
320 case X86::SBB32rm:
321 case X86::SBB64rm:
322 case X86::SUB8rm:
323 case X86::SUB16rm:
324 case X86::SUB32rm:
325 case X86::SUB64rm:
326 case X86::XOR8rm:
327 case X86::XOR16rm:
328 case X86::XOR32rm:
329 case X86::XOR64rm:
330
331 // Integer multiply w/o affecting flags is still believed to be constant
332 // time on x86. Called out separately as this is among the most surprising
333 // instructions to exhibit that behavior.
334 case X86::MULX32rm:
335 case X86::MULX64rm:
336
337 // Arithmetic instructions that are both constant time and don't set flags.
338 case X86::RORX32mi:
339 case X86::RORX64mi:
340 case X86::SARX32rm:
341 case X86::SARX64rm:
342 case X86::SHLX32rm:
343 case X86::SHLX64rm:
344 case X86::SHRX32rm:
345 case X86::SHRX64rm:
346
347 // Conversions are believed to be constant time and don't set flags.
348 case X86::CVTTSD2SI64rm:
349 case X86::VCVTTSD2SI64rm:
350 case X86::VCVTTSD2SI64Zrm:
351 case X86::CVTTSD2SIrm:
352 case X86::VCVTTSD2SIrm:
353 case X86::VCVTTSD2SIZrm:
354 case X86::CVTTSS2SI64rm:
355 case X86::VCVTTSS2SI64rm:
356 case X86::VCVTTSS2SI64Zrm:
357 case X86::CVTTSS2SIrm:
358 case X86::VCVTTSS2SIrm:
359 case X86::VCVTTSS2SIZrm:
360 case X86::CVTSI2SDrm:
361 case X86::VCVTSI2SDrm:
362 case X86::VCVTSI2SDZrm:
363 case X86::CVTSI2SSrm:
364 case X86::VCVTSI2SSrm:
365 case X86::VCVTSI2SSZrm:
366 case X86::CVTSI642SDrm:
367 case X86::VCVTSI642SDrm:
368 case X86::VCVTSI642SDZrm:
369 case X86::CVTSI642SSrm:
370 case X86::VCVTSI642SSrm:
371 case X86::VCVTSI642SSZrm:
372 case X86::CVTSS2SDrm:
373 case X86::VCVTSS2SDrm:
374 case X86::VCVTSS2SDZrm:
375 case X86::CVTSD2SSrm:
376 case X86::VCVTSD2SSrm:
377 case X86::VCVTSD2SSZrm:
378 // AVX512 added unsigned integer conversions.
379 case X86::VCVTTSD2USI64Zrm:
380 case X86::VCVTTSD2USIZrm:
381 case X86::VCVTTSS2USI64Zrm:
382 case X86::VCVTTSS2USIZrm:
383 case X86::VCVTUSI2SDZrm:
384 case X86::VCVTUSI642SDZrm:
385 case X86::VCVTUSI2SSZrm:
386 case X86::VCVTUSI642SSZrm:
387
388 // Loads to register don't set flags.
389 case X86::MOV8rm:
390 case X86::MOV8rm_NOREX:
391 case X86::MOV16rm:
392 case X86::MOV32rm:
393 case X86::MOV64rm:
394 case X86::MOVSX16rm8:
395 case X86::MOVSX32rm16:
396 case X86::MOVSX32rm8:
397 case X86::MOVSX32rm8_NOREX:
398 case X86::MOVSX64rm16:
399 case X86::MOVSX64rm32:
400 case X86::MOVSX64rm8:
401 case X86::MOVZX16rm8:
402 case X86::MOVZX32rm16:
403 case X86::MOVZX32rm8:
404 case X86::MOVZX32rm8_NOREX:
405 case X86::MOVZX64rm16:
406 case X86::MOVZX64rm8:
407 return true;
408 }
409}
410
412 const MachineFunction *MF = MI.getParent()->getParent();
414
415 if (isFrameInstr(MI)) {
416 int SPAdj = alignTo(getFrameSize(MI), TFI->getStackAlign());
417 SPAdj -= getFrameAdjustment(MI);
418 if (!isFrameSetup(MI))
419 SPAdj = -SPAdj;
420 return SPAdj;
421 }
422
423 // To know whether a call adjusts the stack, we need information
424 // that is bound to the following ADJCALLSTACKUP pseudo.
425 // Look for the next ADJCALLSTACKUP that follows the call.
426 if (MI.isCall()) {
427 const MachineBasicBlock *MBB = MI.getParent();
429 for (auto E = MBB->end(); I != E; ++I) {
430 if (I->getOpcode() == getCallFrameDestroyOpcode() || I->isCall())
431 break;
432 }
433
434 // If we could not find a frame destroy opcode, then it has already
435 // been simplified, so we don't care.
436 if (I->getOpcode() != getCallFrameDestroyOpcode())
437 return 0;
438
439 return -(I->getOperand(1).getImm());
440 }
441
442 // Currently we handle only PUSHes we can reasonably expect to see
443 // in call sequences.
444 switch (MI.getOpcode()) {
445 default:
446 return 0;
447 case X86::PUSH32r:
448 case X86::PUSH32rmm:
449 case X86::PUSH32rmr:
450 case X86::PUSH32i:
451 return 4;
452 case X86::PUSH64r:
453 case X86::PUSH64rmm:
454 case X86::PUSH64rmr:
455 case X86::PUSH64i32:
456 return 8;
457 }
458}
459
460/// Return true and the FrameIndex if the specified
461/// operand and the following operands form a reference to the stack frame.
462bool X86InstrInfo::isFrameOperand(const MachineInstr &MI, unsigned int Op,
463 int &FrameIndex) const {
464 if (MI.getOperand(Op + X86::AddrBaseReg).isFI() &&
465 MI.getOperand(Op + X86::AddrScaleAmt).isImm() &&
466 MI.getOperand(Op + X86::AddrIndexReg).isReg() &&
467 MI.getOperand(Op + X86::AddrDisp).isImm() &&
468 MI.getOperand(Op + X86::AddrScaleAmt).getImm() == 1 &&
469 MI.getOperand(Op + X86::AddrIndexReg).getReg() == 0 &&
470 MI.getOperand(Op + X86::AddrDisp).getImm() == 0) {
471 FrameIndex = MI.getOperand(Op + X86::AddrBaseReg).getIndex();
472 return true;
473 }
474 return false;
475}
476
477static bool isFrameLoadOpcode(int Opcode, TypeSize &MemBytes) {
478 switch (Opcode) {
479 default:
480 return false;
481 case X86::MOV8rm:
482 case X86::KMOVBkm:
483 case X86::KMOVBkm_EVEX:
484 MemBytes = TypeSize::getFixed(1);
485 return true;
486 case X86::MOV16rm:
487 case X86::KMOVWkm:
488 case X86::KMOVWkm_EVEX:
489 case X86::VMOVSHZrm:
490 case X86::VMOVSHZrm_alt:
491 MemBytes = TypeSize::getFixed(2);
492 return true;
493 case X86::MOV32rm:
494 case X86::MOVSSrm:
495 case X86::MOVSSrm_alt:
496 case X86::VMOVSSrm:
497 case X86::VMOVSSrm_alt:
498 case X86::VMOVSSZrm:
499 case X86::VMOVSSZrm_alt:
500 case X86::KMOVDkm:
501 case X86::KMOVDkm_EVEX:
502 MemBytes = TypeSize::getFixed(4);
503 return true;
504 case X86::MOV64rm:
505 case X86::LD_Fp64m:
506 case X86::MOVSDrm:
507 case X86::MOVSDrm_alt:
508 case X86::VMOVSDrm:
509 case X86::VMOVSDrm_alt:
510 case X86::VMOVSDZrm:
511 case X86::VMOVSDZrm_alt:
512 case X86::MMX_MOVD64rm:
513 case X86::MMX_MOVQ64rm:
514 case X86::KMOVQkm:
515 case X86::KMOVQkm_EVEX:
516 MemBytes = TypeSize::getFixed(8);
517 return true;
518 case X86::MOVAPSrm:
519 case X86::MOVUPSrm:
520 case X86::MOVAPDrm:
521 case X86::MOVUPDrm:
522 case X86::MOVDQArm:
523 case X86::MOVDQUrm:
524 case X86::VMOVAPSrm:
525 case X86::VMOVUPSrm:
526 case X86::VMOVAPDrm:
527 case X86::VMOVUPDrm:
528 case X86::VMOVDQArm:
529 case X86::VMOVDQUrm:
530 case X86::VMOVAPSZ128rm:
531 case X86::VMOVUPSZ128rm:
532 case X86::VMOVAPSZ128rm_NOVLX:
533 case X86::VMOVUPSZ128rm_NOVLX:
534 case X86::VMOVAPDZ128rm:
535 case X86::VMOVUPDZ128rm:
536 case X86::VMOVDQU8Z128rm:
537 case X86::VMOVDQU16Z128rm:
538 case X86::VMOVDQA32Z128rm:
539 case X86::VMOVDQU32Z128rm:
540 case X86::VMOVDQA64Z128rm:
541 case X86::VMOVDQU64Z128rm:
542 MemBytes = TypeSize::getFixed(16);
543 return true;
544 case X86::VMOVAPSYrm:
545 case X86::VMOVUPSYrm:
546 case X86::VMOVAPDYrm:
547 case X86::VMOVUPDYrm:
548 case X86::VMOVDQAYrm:
549 case X86::VMOVDQUYrm:
550 case X86::VMOVAPSZ256rm:
551 case X86::VMOVUPSZ256rm:
552 case X86::VMOVAPSZ256rm_NOVLX:
553 case X86::VMOVUPSZ256rm_NOVLX:
554 case X86::VMOVAPDZ256rm:
555 case X86::VMOVUPDZ256rm:
556 case X86::VMOVDQU8Z256rm:
557 case X86::VMOVDQU16Z256rm:
558 case X86::VMOVDQA32Z256rm:
559 case X86::VMOVDQU32Z256rm:
560 case X86::VMOVDQA64Z256rm:
561 case X86::VMOVDQU64Z256rm:
562 MemBytes = TypeSize::getFixed(32);
563 return true;
564 case X86::VMOVAPSZrm:
565 case X86::VMOVUPSZrm:
566 case X86::VMOVAPDZrm:
567 case X86::VMOVUPDZrm:
568 case X86::VMOVDQU8Zrm:
569 case X86::VMOVDQU16Zrm:
570 case X86::VMOVDQA32Zrm:
571 case X86::VMOVDQU32Zrm:
572 case X86::VMOVDQA64Zrm:
573 case X86::VMOVDQU64Zrm:
574 MemBytes = TypeSize::getFixed(64);
575 return true;
576 }
577}
578
579static bool isFrameStoreOpcode(int Opcode, TypeSize &MemBytes) {
580 switch (Opcode) {
581 default:
582 return false;
583 case X86::MOV8mr:
584 case X86::KMOVBmk:
585 case X86::KMOVBmk_EVEX:
586 MemBytes = TypeSize::getFixed(1);
587 return true;
588 case X86::MOV16mr:
589 case X86::KMOVWmk:
590 case X86::KMOVWmk_EVEX:
591 case X86::VMOVSHZmr:
592 MemBytes = TypeSize::getFixed(2);
593 return true;
594 case X86::MOV32mr:
595 case X86::MOVSSmr:
596 case X86::VMOVSSmr:
597 case X86::VMOVSSZmr:
598 case X86::KMOVDmk:
599 case X86::KMOVDmk_EVEX:
600 MemBytes = TypeSize::getFixed(4);
601 return true;
602 case X86::MOV64mr:
603 case X86::ST_FpP64m:
604 case X86::MOVSDmr:
605 case X86::VMOVSDmr:
606 case X86::VMOVSDZmr:
607 case X86::MMX_MOVD64mr:
608 case X86::MMX_MOVQ64mr:
609 case X86::MMX_MOVNTQmr:
610 case X86::KMOVQmk:
611 case X86::KMOVQmk_EVEX:
612 MemBytes = TypeSize::getFixed(8);
613 return true;
614 case X86::MOVAPSmr:
615 case X86::MOVUPSmr:
616 case X86::MOVAPDmr:
617 case X86::MOVUPDmr:
618 case X86::MOVDQAmr:
619 case X86::MOVDQUmr:
620 case X86::VMOVAPSmr:
621 case X86::VMOVUPSmr:
622 case X86::VMOVAPDmr:
623 case X86::VMOVUPDmr:
624 case X86::VMOVDQAmr:
625 case X86::VMOVDQUmr:
626 case X86::VMOVUPSZ128mr:
627 case X86::VMOVAPSZ128mr:
628 case X86::VMOVUPSZ128mr_NOVLX:
629 case X86::VMOVAPSZ128mr_NOVLX:
630 case X86::VMOVUPDZ128mr:
631 case X86::VMOVAPDZ128mr:
632 case X86::VMOVDQA32Z128mr:
633 case X86::VMOVDQU32Z128mr:
634 case X86::VMOVDQA64Z128mr:
635 case X86::VMOVDQU64Z128mr:
636 case X86::VMOVDQU8Z128mr:
637 case X86::VMOVDQU16Z128mr:
638 MemBytes = TypeSize::getFixed(16);
639 return true;
640 case X86::VMOVUPSYmr:
641 case X86::VMOVAPSYmr:
642 case X86::VMOVUPDYmr:
643 case X86::VMOVAPDYmr:
644 case X86::VMOVDQUYmr:
645 case X86::VMOVDQAYmr:
646 case X86::VMOVUPSZ256mr:
647 case X86::VMOVAPSZ256mr:
648 case X86::VMOVUPSZ256mr_NOVLX:
649 case X86::VMOVAPSZ256mr_NOVLX:
650 case X86::VMOVUPDZ256mr:
651 case X86::VMOVAPDZ256mr:
652 case X86::VMOVDQU8Z256mr:
653 case X86::VMOVDQU16Z256mr:
654 case X86::VMOVDQA32Z256mr:
655 case X86::VMOVDQU32Z256mr:
656 case X86::VMOVDQA64Z256mr:
657 case X86::VMOVDQU64Z256mr:
658 MemBytes = TypeSize::getFixed(32);
659 return true;
660 case X86::VMOVUPSZmr:
661 case X86::VMOVAPSZmr:
662 case X86::VMOVUPDZmr:
663 case X86::VMOVAPDZmr:
664 case X86::VMOVDQU8Zmr:
665 case X86::VMOVDQU16Zmr:
666 case X86::VMOVDQA32Zmr:
667 case X86::VMOVDQU32Zmr:
668 case X86::VMOVDQA64Zmr:
669 case X86::VMOVDQU64Zmr:
670 MemBytes = TypeSize::getFixed(64);
671 return true;
672 }
673 return false;
674}
675
677 int &FrameIndex) const {
678 TypeSize Dummy = TypeSize::getZero();
679 return X86InstrInfo::isLoadFromStackSlot(MI, FrameIndex, Dummy);
680}
681
683 int &FrameIndex,
684 TypeSize &MemBytes) const {
685 if (isFrameLoadOpcode(MI.getOpcode(), MemBytes))
686 if (MI.getOperand(0).getSubReg() == 0 && isFrameOperand(MI, 1, FrameIndex))
687 return MI.getOperand(0).getReg();
688 return Register();
689}
690
692 int &FrameIndex) const {
693 TypeSize Dummy = TypeSize::getZero();
694 if (isFrameLoadOpcode(MI.getOpcode(), Dummy)) {
695 if (Register Reg = isLoadFromStackSlot(MI, FrameIndex))
696 return Reg;
697 // Check for post-frame index elimination operations
699 if (hasLoadFromStackSlot(MI, Accesses)) {
700 FrameIndex =
701 cast<FixedStackPseudoSourceValue>(Accesses.front()->getPseudoValue())
702 ->getFrameIndex();
703 return MI.getOperand(0).getReg();
704 }
705 }
706 return Register();
707}
708
710 int &FrameIndex) const {
711 TypeSize Dummy = TypeSize::getZero();
712 return X86InstrInfo::isStoreToStackSlot(MI, FrameIndex, Dummy);
713}
714
716 int &FrameIndex,
717 TypeSize &MemBytes) const {
718 if (isFrameStoreOpcode(MI.getOpcode(), MemBytes))
719 if (MI.getOperand(X86::AddrNumOperands).getSubReg() == 0 &&
720 isFrameOperand(MI, 0, FrameIndex))
721 return MI.getOperand(X86::AddrNumOperands).getReg();
722 return Register();
723}
724
726 int &FrameIndex) const {
727 TypeSize Dummy = TypeSize::getZero();
728 if (isFrameStoreOpcode(MI.getOpcode(), Dummy)) {
729 if (Register Reg = isStoreToStackSlot(MI, FrameIndex))
730 return Reg;
731 // Check for post-frame index elimination operations
733 if (hasStoreToStackSlot(MI, Accesses)) {
734 FrameIndex =
735 cast<FixedStackPseudoSourceValue>(Accesses.front()->getPseudoValue())
736 ->getFrameIndex();
737 return MI.getOperand(X86::AddrNumOperands).getReg();
738 }
739 }
740 return Register();
741}
742
743/// Return true if the register is a PIC base, i.e. defined by X86::MOVPC32r.
744static bool regIsPICBase(Register BaseReg, const MachineRegisterInfo &MRI) {
745 // Don't waste compile time scanning use-def chains of physregs.
746 if (!BaseReg.isVirtual())
747 return false;
748 bool isPICBase = false;
749 for (const MachineInstr &DefMI : MRI.def_instructions(BaseReg)) {
750 if (DefMI.getOpcode() != X86::MOVPC32r)
751 return false;
752 assert(!isPICBase && "More than one PIC base?");
753 isPICBase = true;
754 }
755 return isPICBase;
756}
757
759 const MachineInstr &MI) const {
760 switch (MI.getOpcode()) {
761 default:
762 // This function should only be called for opcodes with the ReMaterializable
763 // flag set.
764 llvm_unreachable("Unknown rematerializable operation!");
765 break;
766 case X86::IMPLICIT_DEF:
767 // Defer to generic logic.
768 break;
769 case X86::LOAD_STACK_GUARD:
770 case X86::LD_Fp032:
771 case X86::LD_Fp064:
772 case X86::LD_Fp080:
773 case X86::LD_Fp132:
774 case X86::LD_Fp164:
775 case X86::LD_Fp180:
776 case X86::AVX1_SETALLONES:
777 case X86::AVX2_SETALLONES:
778 case X86::AVX512_128_SET0:
779 case X86::AVX512_256_SET0:
780 case X86::AVX512_512_SET0:
781 case X86::AVX512_512_SETALLONES:
782 case X86::AVX512_FsFLD0SD:
783 case X86::AVX512_FsFLD0SH:
784 case X86::AVX512_FsFLD0SS:
785 case X86::AVX512_FsFLD0F128:
786 case X86::AVX_SET0:
787 case X86::FsFLD0SD:
788 case X86::FsFLD0SS:
789 case X86::FsFLD0SH:
790 case X86::FsFLD0F128:
791 case X86::KSET0D:
792 case X86::KSET0Q:
793 case X86::KSET0W:
794 case X86::KSET1D:
795 case X86::KSET1Q:
796 case X86::KSET1W:
797 case X86::MMX_SET0:
798 case X86::MOV32ImmSExti8:
799 case X86::MOV32r0:
800 case X86::MOV32r1:
801 case X86::MOV32r_1:
802 case X86::MOV32ri64:
803 case X86::MOV64ImmSExti8:
804 case X86::V_SET0:
805 case X86::V_SETALLONES:
806 case X86::MOV16ri:
807 case X86::MOV32ri:
808 case X86::MOV64ri:
809 case X86::MOV64ri32:
810 case X86::MOV8ri:
811 case X86::PTILEZEROV:
812 return true;
813
814 case X86::MOV8rm:
815 case X86::MOV8rm_NOREX:
816 case X86::MOV16rm:
817 case X86::MOV32rm:
818 case X86::MOV64rm:
819 case X86::MOVSSrm:
820 case X86::MOVSSrm_alt:
821 case X86::MOVSDrm:
822 case X86::MOVSDrm_alt:
823 case X86::MOVAPSrm:
824 case X86::MOVUPSrm:
825 case X86::MOVAPDrm:
826 case X86::MOVUPDrm:
827 case X86::MOVDQArm:
828 case X86::MOVDQUrm:
829 case X86::VMOVSSrm:
830 case X86::VMOVSSrm_alt:
831 case X86::VMOVSDrm:
832 case X86::VMOVSDrm_alt:
833 case X86::VMOVAPSrm:
834 case X86::VMOVUPSrm:
835 case X86::VMOVAPDrm:
836 case X86::VMOVUPDrm:
837 case X86::VMOVDQArm:
838 case X86::VMOVDQUrm:
839 case X86::VMOVAPSYrm:
840 case X86::VMOVUPSYrm:
841 case X86::VMOVAPDYrm:
842 case X86::VMOVUPDYrm:
843 case X86::VMOVDQAYrm:
844 case X86::VMOVDQUYrm:
845 case X86::MMX_MOVD64rm:
846 case X86::MMX_MOVQ64rm:
847 case X86::VBROADCASTSSrm:
848 case X86::VBROADCASTSSYrm:
849 case X86::VBROADCASTSDYrm:
850 // AVX-512
851 case X86::VPBROADCASTBZ128rm:
852 case X86::VPBROADCASTBZ256rm:
853 case X86::VPBROADCASTBZrm:
854 case X86::VBROADCASTF32X2Z256rm:
855 case X86::VBROADCASTF32X2Zrm:
856 case X86::VBROADCASTI32X2Z128rm:
857 case X86::VBROADCASTI32X2Z256rm:
858 case X86::VBROADCASTI32X2Zrm:
859 case X86::VPBROADCASTWZ128rm:
860 case X86::VPBROADCASTWZ256rm:
861 case X86::VPBROADCASTWZrm:
862 case X86::VPBROADCASTDZ128rm:
863 case X86::VPBROADCASTDZ256rm:
864 case X86::VPBROADCASTDZrm:
865 case X86::VBROADCASTSSZ128rm:
866 case X86::VBROADCASTSSZ256rm:
867 case X86::VBROADCASTSSZrm:
868 case X86::VPBROADCASTQZ128rm:
869 case X86::VPBROADCASTQZ256rm:
870 case X86::VPBROADCASTQZrm:
871 case X86::VBROADCASTSDZ256rm:
872 case X86::VBROADCASTSDZrm:
873 case X86::VMOVSSZrm:
874 case X86::VMOVSSZrm_alt:
875 case X86::VMOVSDZrm:
876 case X86::VMOVSDZrm_alt:
877 case X86::VMOVSHZrm:
878 case X86::VMOVSHZrm_alt:
879 case X86::VMOVAPDZ128rm:
880 case X86::VMOVAPDZ256rm:
881 case X86::VMOVAPDZrm:
882 case X86::VMOVAPSZ128rm:
883 case X86::VMOVAPSZ256rm:
884 case X86::VMOVAPSZ128rm_NOVLX:
885 case X86::VMOVAPSZ256rm_NOVLX:
886 case X86::VMOVAPSZrm:
887 case X86::VMOVDQA32Z128rm:
888 case X86::VMOVDQA32Z256rm:
889 case X86::VMOVDQA32Zrm:
890 case X86::VMOVDQA64Z128rm:
891 case X86::VMOVDQA64Z256rm:
892 case X86::VMOVDQA64Zrm:
893 case X86::VMOVDQU16Z128rm:
894 case X86::VMOVDQU16Z256rm:
895 case X86::VMOVDQU16Zrm:
896 case X86::VMOVDQU32Z128rm:
897 case X86::VMOVDQU32Z256rm:
898 case X86::VMOVDQU32Zrm:
899 case X86::VMOVDQU64Z128rm:
900 case X86::VMOVDQU64Z256rm:
901 case X86::VMOVDQU64Zrm:
902 case X86::VMOVDQU8Z128rm:
903 case X86::VMOVDQU8Z256rm:
904 case X86::VMOVDQU8Zrm:
905 case X86::VMOVUPDZ128rm:
906 case X86::VMOVUPDZ256rm:
907 case X86::VMOVUPDZrm:
908 case X86::VMOVUPSZ128rm:
909 case X86::VMOVUPSZ256rm:
910 case X86::VMOVUPSZ128rm_NOVLX:
911 case X86::VMOVUPSZ256rm_NOVLX:
912 case X86::VMOVUPSZrm: {
913 // Loads from constant pools are trivially rematerializable.
914 if (MI.getOperand(1 + X86::AddrBaseReg).isReg() &&
915 MI.getOperand(1 + X86::AddrScaleAmt).isImm() &&
916 MI.getOperand(1 + X86::AddrIndexReg).isReg() &&
917 MI.getOperand(1 + X86::AddrIndexReg).getReg() == 0 &&
918 MI.isDereferenceableInvariantLoad()) {
919 Register BaseReg = MI.getOperand(1 + X86::AddrBaseReg).getReg();
920 if (BaseReg == 0 || BaseReg == X86::RIP)
921 return true;
922 // Allow re-materialization of PIC load.
923 if (!(!ReMatPICStubLoad && MI.getOperand(1 + X86::AddrDisp).isGlobal())) {
924 const MachineFunction &MF = *MI.getParent()->getParent();
925 const MachineRegisterInfo &MRI = MF.getRegInfo();
926 if (regIsPICBase(BaseReg, MRI))
927 return true;
928 }
929 }
930 break;
931 }
932
933 case X86::LEA32r:
934 case X86::LEA64r: {
935 if (MI.getOperand(1 + X86::AddrScaleAmt).isImm() &&
936 MI.getOperand(1 + X86::AddrIndexReg).isReg() &&
937 MI.getOperand(1 + X86::AddrIndexReg).getReg() == 0 &&
938 !MI.getOperand(1 + X86::AddrDisp).isReg()) {
939 // lea fi#, lea GV, etc. are all rematerializable.
940 if (!MI.getOperand(1 + X86::AddrBaseReg).isReg())
941 return true;
942 Register BaseReg = MI.getOperand(1 + X86::AddrBaseReg).getReg();
943 if (BaseReg == 0)
944 return true;
945 // Allow re-materialization of lea PICBase + x.
946 const MachineFunction &MF = *MI.getParent()->getParent();
947 const MachineRegisterInfo &MRI = MF.getRegInfo();
948 if (regIsPICBase(BaseReg, MRI))
949 return true;
950 }
951 break;
952 }
953 }
955}
956
959 Register DestReg, unsigned SubIdx,
960 const MachineInstr &Orig,
961 const TargetRegisterInfo &TRI) const {
962 bool ClobbersEFLAGS = Orig.modifiesRegister(X86::EFLAGS, &TRI);
963 if (ClobbersEFLAGS && MBB.computeRegisterLiveness(&TRI, X86::EFLAGS, I) !=
965 // The instruction clobbers EFLAGS. Re-materialize as MOV32ri to avoid side
966 // effects.
967 int Value;
968 switch (Orig.getOpcode()) {
969 case X86::MOV32r0:
970 Value = 0;
971 break;
972 case X86::MOV32r1:
973 Value = 1;
974 break;
975 case X86::MOV32r_1:
976 Value = -1;
977 break;
978 default:
979 llvm_unreachable("Unexpected instruction!");
980 }
981
982 const DebugLoc &DL = Orig.getDebugLoc();
983 BuildMI(MBB, I, DL, get(X86::MOV32ri))
984 .add(Orig.getOperand(0))
985 .addImm(Value);
986 } else {
987 MachineInstr *MI = MBB.getParent()->CloneMachineInstr(&Orig);
988 MBB.insert(I, MI);
989 }
990
991 MachineInstr &NewMI = *std::prev(I);
992 NewMI.substituteRegister(Orig.getOperand(0).getReg(), DestReg, SubIdx, TRI);
993}
994
995/// True if MI has a condition code def, e.g. EFLAGS, that is not marked dead.
997 for (const MachineOperand &MO : MI.operands()) {
998 if (MO.isReg() && MO.isDef() && MO.getReg() == X86::EFLAGS &&
999 !MO.isDead()) {
1000 return true;
1001 }
1002 }
1003 return false;
1004}
1005
1006/// Return the shift count of a machine operand, truncated to the bits the
1006/// instruction actually uses.
1007inline static unsigned getTruncatedShiftCount(const MachineInstr &MI,
1008 unsigned ShiftAmtOperandIdx) {
1009 // The shift count is six bits with the REX.W prefix and five bits without.
1010 unsigned ShiftCountMask = (MI.getDesc().TSFlags & X86II::REX_W) ? 63 : 31;
1011 unsigned Imm = MI.getOperand(ShiftAmtOperandIdx).getImm();
1012 return Imm & ShiftCountMask;
1013}
1014
1015/// Check whether the given shift count is appropriate, i.e. whether it
1016/// can be represented by the scale factor of a LEA instruction.
1017inline static bool isTruncatedShiftCountForLEA(unsigned ShAmt) {
1018 // Left shift instructions can be transformed into load-effective-address
1019 // instructions if we can encode them appropriately.
1020 // A LEA instruction utilizes a SIB byte to encode its scale factor.
1021 // The SIB.scale field is two bits wide which means that we can encode any
1022 // shift amount less than 4.
1023 return ShAmt < 4 && ShAmt > 0;
1024}
1025
1026static bool
1028 const MachineRegisterInfo *MRI, MachineInstr **AndInstr,
1029 const TargetRegisterInfo *TRI, const X86Subtarget &ST,
1030 bool &NoSignFlag, bool &ClearsOverflowFlag) {
1031 if (!(CmpValDefInstr.getOpcode() == X86::SUBREG_TO_REG &&
1032 CmpInstr.getOpcode() == X86::TEST64rr) &&
1033 !(CmpValDefInstr.getOpcode() == X86::COPY &&
1034 CmpInstr.getOpcode() == X86::TEST16rr))
1035 return false;
1036
1037 // CmpInstr is a TEST16rr/TEST64rr instruction, and
1038 // `X86InstrInfo::analyzeCompare` guarantees that it's analyzable only if two
1039 // registers are identical.
1040 assert((CmpInstr.getOperand(0).getReg() == CmpInstr.getOperand(1).getReg()) &&
1041 "CmpInstr is an analyzable TEST16rr/TEST64rr, and "
1042 "`X86InstrInfo::analyzeCompare` requires two reg operands are the"
1043 "same.");
1044
1045 // Caller (`X86InstrInfo::optimizeCompareInstr`) guarantees that
1046 // `CmpValDefInstr` defines the value that's used by `CmpInstr`; in this case
1047 // if `CmpValDefInstr` sets the EFLAGS, it is likely that `CmpInstr` is
1048 // redundant.
1049 assert(
1050 (MRI->getVRegDef(CmpInstr.getOperand(0).getReg()) == &CmpValDefInstr) &&
1051 "Caller guarantees that TEST64rr is a user of SUBREG_TO_REG or TEST16rr "
1052 "is a user of COPY sub16bit.");
1053 MachineInstr *VregDefInstr = nullptr;
1054 if (CmpInstr.getOpcode() == X86::TEST16rr) {
1055 if (!CmpValDefInstr.getOperand(1).getReg().isVirtual())
1056 return false;
1057 VregDefInstr = MRI->getVRegDef(CmpValDefInstr.getOperand(1).getReg());
1058 if (!VregDefInstr)
1059 return false;
1060 // We can only remove the TEST when the AND is an AND32ri or AND64ri32 whose
1061 // immediate fits in 16 bits; other 32/64-bit ops would test higher bits that
1062 // TEST16rr does not want to test.
1063 if (!((VregDefInstr->getOpcode() == X86::AND32ri ||
1064 VregDefInstr->getOpcode() == X86::AND64ri32) &&
1065 isUInt<16>(VregDefInstr->getOperand(2).getImm())))
1066 return false;
1067 }
1068
1069 if (CmpInstr.getOpcode() == X86::TEST64rr) {
1070 // As seen in X86 td files, CmpValDefInstr.getOperand(1).getImm() is
1071 // typically 0.
1072 if (CmpValDefInstr.getOperand(1).getImm() != 0)
1073 return false;
1074
1075 // As seen in X86 td files, CmpValDefInstr.getOperand(3) is typically
1076 // sub_32bit or sub_xmm.
1077 if (CmpValDefInstr.getOperand(3).getImm() != X86::sub_32bit)
1078 return false;
1079
1080 VregDefInstr = MRI->getVRegDef(CmpValDefInstr.getOperand(2).getReg());
1081 }
1082
1083 assert(VregDefInstr && "Must have a definition (SSA)");
1084
1085 // Requires `CmpValDefInstr` and `VregDefInstr` are from the same MBB
1086 // to simplify the subsequent analysis.
1087 //
1088 // FIXME: If `VregDefInstr->getParent()` is the only predecessor of
1089 // `CmpValDefInstr.getParent()`, this could be handled.
1090 if (VregDefInstr->getParent() != CmpValDefInstr.getParent())
1091 return false;
1092
1093 if (X86::isAND(VregDefInstr->getOpcode()) &&
1094 (!ST.hasNF() || VregDefInstr->modifiesRegister(X86::EFLAGS, TRI))) {
1095 // Get a sequence of instructions like
1096 // %reg = and* ... // Set EFLAGS
1097 // ... // EFLAGS not changed
1098 // %extended_reg = subreg_to_reg 0, %reg, %subreg.sub_32bit
1099 // test64rr %extended_reg, %extended_reg, implicit-def $eflags
1100 // or
1101 // %reg = and32* ...
1102 // ... // EFLAGS not changed.
1103 // %src_reg = copy %reg.sub_16bit:gr32
1104 // test16rr %src_reg, %src_reg, implicit-def $eflags
1105 //
1106 // If subsequent readers use a subset of bits that don't change
1107 // after `and*` instructions, it's likely that the test64rr could
1108 // be optimized away.
1109 for (const MachineInstr &Instr :
1110 make_range(std::next(MachineBasicBlock::iterator(VregDefInstr)),
1111 MachineBasicBlock::iterator(CmpValDefInstr))) {
1112 // There are instructions between 'VregDefInstr' and
1113 // 'CmpValDefInstr' that modifies EFLAGS.
1114 if (Instr.modifiesRegister(X86::EFLAGS, TRI))
1115 return false;
1116 }
1117
1118 *AndInstr = VregDefInstr;
1119
1120 // AND instruction will essentially update SF and clear OF, so
1121 // NoSignFlag should be false in the sense that SF is modified by `AND`.
1122 //
1123 // However, the implementation artificially sets `NoSignFlag` to true
1124 // to poison the SF bit; that is to say, if SF is looked at later, the
1125 // optimization (to erase TEST64rr) will be disabled.
1126 //
1127 // The reason to poison the SF bit is that its value could be different
1128 // after the `AND` and the `TEST` operation; the sign bit is not known for
1129 // `AND`, and is known to be 0 as a result of `TEST64rr`.
1130 //
1131 // FIXME: As opposed to poisoning the SF bit directly, consider peeking into
1132 // the AND instruction and using the static information to guide peephole
1133 // optimization if possible. For example, it's possible to fold a
1134 // conditional move into a copy if the relevant EFLAGS bits could be deduced
1135 // from an immediate operand of the AND operation.
1136 //
1137 NoSignFlag = true;
1138 // ClearsOverflowFlag is true for AND operation (no surprise).
1139 ClearsOverflowFlag = true;
1140 return true;
1141 }
1142 return false;
1143}
1144
1146 unsigned Opc, bool AllowSP, Register &NewSrc,
1147 unsigned &NewSrcSubReg, bool &isKill,
1148 MachineOperand &ImplicitOp, LiveVariables *LV,
1149 LiveIntervals *LIS) const {
1150 MachineFunction &MF = *MI.getParent()->getParent();
1151 const TargetRegisterClass *RC;
1152 if (AllowSP) {
1153 RC = Opc != X86::LEA32r ? &X86::GR64RegClass : &X86::GR32RegClass;
1154 } else {
1155 RC = Opc != X86::LEA32r ? &X86::GR64_NOSPRegClass : &X86::GR32_NOSPRegClass;
1156 }
1157 Register SrcReg = Src.getReg();
1158 unsigned SubReg = Src.getSubReg();
1159 isKill = MI.killsRegister(SrcReg, /*TRI=*/nullptr);
1160
1161 NewSrcSubReg = X86::NoSubRegister;
1162
1163 // For both LEA64 and LEA32 the register already has essentially the right
1164 // type (32-bit or 64-bit); we may just need to forbid SP.
1165 if (Opc != X86::LEA64_32r) {
1166 NewSrc = SrcReg;
1167 NewSrcSubReg = SubReg;
1168 assert(!Src.isUndef() && "Undef op doesn't need optimization");
1169
1170 if (NewSrc.isVirtual() && !MF.getRegInfo().constrainRegClass(NewSrc, RC))
1171 return false;
1172
1173 return true;
1174 }
1175
1176 // This is for an LEA64_32r and incoming registers are 32-bit. One way or
1177 // another we need to add 64-bit registers to the final MI.
1178 if (SrcReg.isPhysical()) {
1179 ImplicitOp = Src;
1180 ImplicitOp.setImplicit();
1181
1182 NewSrc = getX86SubSuperRegister(SrcReg, 64);
1183 assert(!SubReg && "no superregister for source");
1184 assert(NewSrc.isValid() && "Invalid Operand");
1185 assert(!Src.isUndef() && "Undef op doesn't need optimization");
1186 } else {
1187 // Virtual register of the wrong class; we have to create a temporary 64-bit
1188 // vreg to feed into the LEA.
1189 NewSrc = MF.getRegInfo().createVirtualRegister(RC);
1190 NewSrcSubReg = X86::NoSubRegister;
1191 MachineInstr *Copy =
1192 BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(TargetOpcode::COPY))
1193 .addReg(NewSrc, RegState::Define | RegState::Undef, X86::sub_32bit)
1194 .addReg(SrcReg, getKillRegState(isKill), SubReg);
1195
1196 // Which is obviously going to be dead after we're done with it.
1197 isKill = true;
1198
1199 if (LV)
1200 LV->replaceKillInstruction(SrcReg, MI, *Copy);
1201
1202 if (LIS) {
1203 SlotIndex CopyIdx = LIS->InsertMachineInstrInMaps(*Copy);
1204 SlotIndex Idx = LIS->getInstructionIndex(MI);
1205 LiveInterval &LI = LIS->getInterval(SrcReg);
1207 if (S->end.getBaseIndex() == Idx)
1208 S->end = CopyIdx.getRegSlot();
1209 }
1210 }
1211
1212 // We've set all the parameters without issue.
1213 return true;
1214}
1215
1216MachineInstr *X86InstrInfo::convertToThreeAddressWithLEA(unsigned MIOpc,
1218 LiveVariables *LV,
1219 LiveIntervals *LIS,
1220 bool Is8BitOp) const {
1221 // We handle 8-bit adds and various 16-bit opcodes in the switch below.
1222 MachineBasicBlock &MBB = *MI.getParent();
1223 MachineRegisterInfo &RegInfo = MBB.getParent()->getRegInfo();
1224 assert((Is8BitOp ||
1225 RegInfo.getTargetRegisterInfo()->getRegSizeInBits(
1226 *RegInfo.getRegClass(MI.getOperand(0).getReg())) == 16) &&
1227 "Unexpected type for LEA transform");
1228
1229 // TODO: For a 32-bit target, we need to adjust the LEA variables with
1230 // something like this:
1231 // Opcode = X86::LEA32r;
1232 // InRegLEA = RegInfo.createVirtualRegister(&X86::GR32_NOSPRegClass);
1233 // OutRegLEA =
1234 // Is8BitOp ? RegInfo.createVirtualRegister(&X86::GR32ABCD_RegClass)
1235 // : RegInfo.createVirtualRegister(&X86::GR32RegClass);
1236 if (!Subtarget.is64Bit())
1237 return nullptr;
1238
1239 unsigned Opcode = X86::LEA64_32r;
1240 Register InRegLEA = RegInfo.createVirtualRegister(&X86::GR64_NOSPRegClass);
1241 Register OutRegLEA = RegInfo.createVirtualRegister(&X86::GR32RegClass);
1242 Register InRegLEA2;
1243
1244 // Build and insert into an implicit UNDEF value. This is OK because
1245 // we will be shifting and then extracting the lower 8/16-bits.
1246 // This has the potential to cause a partial register stall, e.g.:
1247 // movw (%rbp,%rcx,2), %dx
1248 // leal -65(%rdx), %esi
1249 // But testing has shown this *does* help performance in 64-bit mode (at
1250 // least on modern x86 machines).
1251 MachineBasicBlock::iterator MBBI = MI.getIterator();
1252 Register Dest = MI.getOperand(0).getReg();
1253 Register Src = MI.getOperand(1).getReg();
1254 unsigned SrcSubReg = MI.getOperand(1).getSubReg();
1255 Register Src2;
1256 unsigned Src2SubReg;
1257 bool IsDead = MI.getOperand(0).isDead();
1258 bool IsKill = MI.getOperand(1).isKill();
1259 unsigned SubReg = Is8BitOp ? X86::sub_8bit : X86::sub_16bit;
1260 assert(!MI.getOperand(1).isUndef() && "Undef op doesn't need optimization");
1261 MachineInstr *ImpDef =
1262 BuildMI(MBB, MBBI, MI.getDebugLoc(), get(X86::IMPLICIT_DEF), InRegLEA);
1263 MachineInstr *InsMI =
1264 BuildMI(MBB, MBBI, MI.getDebugLoc(), get(TargetOpcode::COPY))
1265 .addReg(InRegLEA, RegState::Define, SubReg)
1266 .addReg(Src, getKillRegState(IsKill), SrcSubReg);
1267 MachineInstr *ImpDef2 = nullptr;
1268 MachineInstr *InsMI2 = nullptr;
1269
1271 BuildMI(MBB, MBBI, MI.getDebugLoc(), get(Opcode), OutRegLEA);
1272#define CASE_NF(OP) \
1273 case X86::OP: \
1274 case X86::OP##_NF:
1275 switch (MIOpc) {
1276 default:
1277 llvm_unreachable("Unreachable!");
1278 CASE_NF(SHL8ri)
1279 CASE_NF(SHL16ri) {
1280 unsigned ShAmt = MI.getOperand(2).getImm();
1281 MIB.addReg(0)
1282 .addImm(1LL << ShAmt)
1283 .addReg(InRegLEA, RegState::Kill)
1284 .addImm(0)
1285 .addReg(0);
1286 break;
1287 }
1288 CASE_NF(INC8r)
1289 CASE_NF(INC16r)
1290 addRegOffset(MIB, InRegLEA, true, 1);
1291 break;
1292 CASE_NF(DEC8r)
1293 CASE_NF(DEC16r)
1294 addRegOffset(MIB, InRegLEA, true, -1);
1295 break;
1296 CASE_NF(ADD8ri)
1297 CASE_NF(ADD16ri)
1298 case X86::ADD8ri_DB:
1299 case X86::ADD16ri_DB:
1300 addRegOffset(MIB, InRegLEA, true, MI.getOperand(2).getImm());
1301 break;
1302 CASE_NF(ADD8rr)
1303 CASE_NF(ADD16rr)
1304 case X86::ADD8rr_DB:
1305 case X86::ADD16rr_DB: {
1306 Src2 = MI.getOperand(2).getReg();
1307 Src2SubReg = MI.getOperand(2).getSubReg();
1308 bool IsKill2 = MI.getOperand(2).isKill();
1309 assert(!MI.getOperand(2).isUndef() && "Undef op doesn't need optimization");
1310 if (Src == Src2) {
1311 // ADD8rr/ADD16rr killed %reg1028, %reg1028
1312 // just a single insert_subreg.
1313 addRegReg(MIB, InRegLEA, true, X86::NoSubRegister, InRegLEA, false,
1314 X86::NoSubRegister);
1315 } else {
1316 if (Subtarget.is64Bit())
1317 InRegLEA2 = RegInfo.createVirtualRegister(&X86::GR64_NOSPRegClass);
1318 else
1319 InRegLEA2 = RegInfo.createVirtualRegister(&X86::GR32_NOSPRegClass);
1320 // Build and insert into an implicit UNDEF value. This is OK because
1321 // we will be shifting and then extracting the lower 8/16-bits.
1322 ImpDef2 = BuildMI(MBB, &*MIB, MI.getDebugLoc(), get(X86::IMPLICIT_DEF),
1323 InRegLEA2);
1324 InsMI2 = BuildMI(MBB, &*MIB, MI.getDebugLoc(), get(TargetOpcode::COPY))
1325 .addReg(InRegLEA2, RegState::Define, SubReg)
1326 .addReg(Src2, getKillRegState(IsKill2), Src2SubReg);
1327 addRegReg(MIB, InRegLEA, true, X86::NoSubRegister, InRegLEA2, true,
1328 X86::NoSubRegister);
1329 }
1330 if (LV && IsKill2 && InsMI2)
1331 LV->replaceKillInstruction(Src2, MI, *InsMI2);
1332 break;
1333 }
1334 }
1335
1336 MachineInstr *NewMI = MIB;
1337 MachineInstr *ExtMI =
1338 BuildMI(MBB, MBBI, MI.getDebugLoc(), get(TargetOpcode::COPY))
1340 .addReg(OutRegLEA, RegState::Kill, SubReg);
1341
1342 if (LV) {
1343 // Update live variables.
1344 LV->getVarInfo(InRegLEA).Kills.push_back(NewMI);
1345 if (InRegLEA2)
1346 LV->getVarInfo(InRegLEA2).Kills.push_back(NewMI);
1347 LV->getVarInfo(OutRegLEA).Kills.push_back(ExtMI);
1348 if (IsKill)
1349 LV->replaceKillInstruction(Src, MI, *InsMI);
1350 if (IsDead)
1351 LV->replaceKillInstruction(Dest, MI, *ExtMI);
1352 }
1353
1354 if (LIS) {
1355 LIS->InsertMachineInstrInMaps(*ImpDef);
1356 SlotIndex InsIdx = LIS->InsertMachineInstrInMaps(*InsMI);
1357 if (ImpDef2)
1358 LIS->InsertMachineInstrInMaps(*ImpDef2);
1359 SlotIndex Ins2Idx;
1360 if (InsMI2)
1361 Ins2Idx = LIS->InsertMachineInstrInMaps(*InsMI2);
1362 SlotIndex NewIdx = LIS->ReplaceMachineInstrInMaps(MI, *NewMI);
1363 SlotIndex ExtIdx = LIS->InsertMachineInstrInMaps(*ExtMI);
1364 LIS->getInterval(InRegLEA);
1365 LIS->getInterval(OutRegLEA);
1366 if (InRegLEA2)
1367 LIS->getInterval(InRegLEA2);
1368
1369 // Move the use of Src up to InsMI.
1370 LiveInterval &SrcLI = LIS->getInterval(Src);
1371 LiveRange::Segment *SrcSeg = SrcLI.getSegmentContaining(NewIdx);
1372 if (SrcSeg->end == NewIdx.getRegSlot())
1373 SrcSeg->end = InsIdx.getRegSlot();
1374
1375 if (InsMI2) {
1376 // Move the use of Src2 up to InsMI2.
1377 LiveInterval &Src2LI = LIS->getInterval(Src2);
1378 LiveRange::Segment *Src2Seg = Src2LI.getSegmentContaining(NewIdx);
1379 if (Src2Seg->end == NewIdx.getRegSlot())
1380 Src2Seg->end = Ins2Idx.getRegSlot();
1381 }
1382
1383 // Move the definition of Dest down to ExtMI.
1384 LiveInterval &DestLI = LIS->getInterval(Dest);
1385 LiveRange::Segment *DestSeg =
1386 DestLI.getSegmentContaining(NewIdx.getRegSlot());
1387 assert(DestSeg->start == NewIdx.getRegSlot() &&
1388 DestSeg->valno->def == NewIdx.getRegSlot());
1389 DestSeg->start = ExtIdx.getRegSlot();
1390 DestSeg->valno->def = ExtIdx.getRegSlot();
1391 }
1392
1393 return ExtMI;
1394}
1395
1396/// This method must be implemented by targets that
1397/// set the M_CONVERTIBLE_TO_3_ADDR flag. When this flag is set, the target
1398/// may be able to convert a two-address instruction into a true
1399/// three-address instruction on demand. This allows the X86 target (for
1400/// example) to convert ADD and SHL instructions into LEA instructions if they
1401/// would require register copies due to two-addressness.
1402///
1403/// This method returns a null pointer if the transformation cannot be
1404/// performed, otherwise it returns the new instruction.
1405///
1407 LiveVariables *LV,
1408 LiveIntervals *LIS) const {
1409 // The following opcodes also set the condition code register(s). Only
1410 // convert them to an equivalent LEA if the condition code register defs
1411 // are dead!
1413 return nullptr;
1414
1415 MachineFunction &MF = *MI.getParent()->getParent();
1416 // All instructions input are two-addr instructions. Get the known operands.
1417 const MachineOperand &Dest = MI.getOperand(0);
1418 const MachineOperand &Src = MI.getOperand(1);
1419
1420 // Ideally, operations with undef should be folded before we get here, but we
1421 // can't guarantee it. Bail out because optimizing undefs is a waste of time.
1422 // Without this, we have to forward undef state to new register operands to
1423 // avoid machine verifier errors.
1424 if (Src.isUndef())
1425 return nullptr;
1426 if (MI.getNumOperands() > 2)
1427 if (MI.getOperand(2).isReg() && MI.getOperand(2).isUndef())
1428 return nullptr;
1429
1430 MachineInstr *NewMI = nullptr;
1431 Register SrcReg, SrcReg2;
1432 unsigned SrcSubReg, SrcSubReg2;
1433 bool Is64Bit = Subtarget.is64Bit();
1434
1435 bool Is8BitOp = false;
1436 unsigned NumRegOperands = 2;
1437 unsigned MIOpc = MI.getOpcode();
1438 switch (MIOpc) {
1439 default:
1440 llvm_unreachable("Unreachable!");
1441 CASE_NF(SHL64ri) {
1442 assert(MI.getNumOperands() >= 3 && "Unknown shift instruction!");
1443 unsigned ShAmt = getTruncatedShiftCount(MI, 2);
1444 if (!isTruncatedShiftCountForLEA(ShAmt))
1445 return nullptr;
1446
1447 // LEA can't handle RSP.
1448 if (Src.getReg().isVirtual() && !MF.getRegInfo().constrainRegClass(
1449 Src.getReg(), &X86::GR64_NOSPRegClass))
1450 return nullptr;
1451
1452 NewMI = BuildMI(MF, MI.getDebugLoc(), get(X86::LEA64r))
1453 .add(Dest)
1454 .addReg(0)
1455 .addImm(1LL << ShAmt)
1456 .add(Src)
1457 .addImm(0)
1458 .addReg(0);
1459 break;
1460 }
1461 CASE_NF(SHL32ri) {
1462 assert(MI.getNumOperands() >= 3 && "Unknown shift instruction!");
1463 unsigned ShAmt = getTruncatedShiftCount(MI, 2);
1464 if (!isTruncatedShiftCountForLEA(ShAmt))
1465 return nullptr;
1466
1467 unsigned Opc = Is64Bit ? X86::LEA64_32r : X86::LEA32r;
1468
1469 // LEA can't handle ESP.
1470 bool isKill;
1471 MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
1472 if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/false, SrcReg, SrcSubReg,
1473 isKill, ImplicitOp, LV, LIS))
1474 return nullptr;
1475
1477 BuildMI(MF, MI.getDebugLoc(), get(Opc))
1478 .add(Dest)
1479 .addReg(0)
1480 .addImm(1LL << ShAmt)
1481 .addReg(SrcReg, getKillRegState(isKill), SrcSubReg)
1482 .addImm(0)
1483 .addReg(0);
1484 if (ImplicitOp.getReg() != 0)
1485 MIB.add(ImplicitOp);
1486 NewMI = MIB;
1487
1488 // Add kills if classifyLEAReg created a new register.
1489 if (LV && SrcReg != Src.getReg())
1490 LV->getVarInfo(SrcReg).Kills.push_back(NewMI);
1491 break;
1492 }
1493 CASE_NF(SHL8ri)
1494 Is8BitOp = true;
1495 [[fallthrough]];
1496 CASE_NF(SHL16ri) {
1497 assert(MI.getNumOperands() >= 3 && "Unknown shift instruction!");
1498 unsigned ShAmt = getTruncatedShiftCount(MI, 2);
1499 if (!isTruncatedShiftCountForLEA(ShAmt))
1500 return nullptr;
1501 return convertToThreeAddressWithLEA(MIOpc, MI, LV, LIS, Is8BitOp);
1502 }
1503 CASE_NF(INC64r)
1504 CASE_NF(INC32r) {
1505 assert(MI.getNumOperands() >= 2 && "Unknown inc instruction!");
1506 unsigned Opc = (MIOpc == X86::INC64r || MIOpc == X86::INC64r_NF)
1507 ? X86::LEA64r
1508 : (Is64Bit ? X86::LEA64_32r : X86::LEA32r);
1509 bool isKill;
1510 MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
1511 if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/false, SrcReg, SrcSubReg,
1512 isKill, ImplicitOp, LV, LIS))
1513 return nullptr;
1514
1515 MachineInstrBuilder MIB = BuildMI(MF, MI.getDebugLoc(), get(Opc))
1516 .add(Dest)
1517 .addReg(SrcReg, getKillRegState(isKill));
1518 if (ImplicitOp.getReg() != 0)
1519 MIB.add(ImplicitOp);
1520
1521 NewMI = addOffset(MIB, 1);
1522
1523 // Add kills if classifyLEAReg created a new register.
1524 if (LV && SrcReg != Src.getReg())
1525 LV->getVarInfo(SrcReg).Kills.push_back(NewMI);
1526 break;
1527 }
1528 CASE_NF(DEC64r)
1529 CASE_NF(DEC32r) {
1530 assert(MI.getNumOperands() >= 2 && "Unknown dec instruction!");
1531 unsigned Opc = (MIOpc == X86::DEC64r || MIOpc == X86::DEC64r_NF)
1532 ? X86::LEA64r
1533 : (Is64Bit ? X86::LEA64_32r : X86::LEA32r);
1534
1535 bool isKill;
1536 MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
1537 if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/false, SrcReg, SrcSubReg,
1538 isKill, ImplicitOp, LV, LIS))
1539 return nullptr;
1540
1541 MachineInstrBuilder MIB = BuildMI(MF, MI.getDebugLoc(), get(Opc))
1542 .add(Dest)
1543 .addReg(SrcReg, getKillRegState(isKill));
1544 if (ImplicitOp.getReg() != 0)
1545 MIB.add(ImplicitOp);
1546
1547 NewMI = addOffset(MIB, -1);
1548
1549 // Add kills if classifyLEAReg created a new register.
1550 if (LV && SrcReg != Src.getReg())
1551 LV->getVarInfo(SrcReg).Kills.push_back(NewMI);
1552 break;
1553 }
1554 CASE_NF(DEC8r)
1555 CASE_NF(INC8r)
1556 Is8BitOp = true;
1557 [[fallthrough]];
1558 CASE_NF(DEC16r)
1559 CASE_NF(INC16r)
1560 return convertToThreeAddressWithLEA(MIOpc, MI, LV, LIS, Is8BitOp);
1561 CASE_NF(ADD64rr)
1562 CASE_NF(ADD32rr)
1563 case X86::ADD64rr_DB:
1564 case X86::ADD32rr_DB: {
1565 assert(MI.getNumOperands() >= 3 && "Unknown add instruction!");
1566 unsigned Opc;
1567 if (MIOpc == X86::ADD64rr || MIOpc == X86::ADD64rr_NF ||
1568 MIOpc == X86::ADD64rr_DB)
1569 Opc = X86::LEA64r;
1570 else
1571 Opc = Is64Bit ? X86::LEA64_32r : X86::LEA32r;
1572
1573 const MachineOperand &Src2 = MI.getOperand(2);
1574 bool isKill2;
1575 MachineOperand ImplicitOp2 = MachineOperand::CreateReg(0, false);
1576 if (!classifyLEAReg(MI, Src2, Opc, /*AllowSP=*/false, SrcReg2, SrcSubReg2,
1577 isKill2, ImplicitOp2, LV, LIS))
1578 return nullptr;
1579
1580 bool isKill;
1581 MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
1582 if (Src.getReg() == Src2.getReg()) {
1583 // Don't call classifyLEAReg a second time on the same register, in case
1584 // the first call inserted a COPY from Src2 and marked it as killed.
1585 isKill = isKill2;
1586 SrcReg = SrcReg2;
1587 SrcSubReg = SrcSubReg2;
1588 } else {
1589 if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/true, SrcReg, SrcSubReg,
1590 isKill, ImplicitOp, LV, LIS))
1591 return nullptr;
1592 }
1593
1594 MachineInstrBuilder MIB = BuildMI(MF, MI.getDebugLoc(), get(Opc)).add(Dest);
1595 if (ImplicitOp.getReg() != 0)
1596 MIB.add(ImplicitOp);
1597 if (ImplicitOp2.getReg() != 0)
1598 MIB.add(ImplicitOp2);
1599
1600 NewMI =
1601 addRegReg(MIB, SrcReg, isKill, SrcSubReg, SrcReg2, isKill2, SrcSubReg2);
1602
1603 // Add kills if classifyLEAReg created a new register.
1604 if (LV) {
1605 if (SrcReg2 != Src2.getReg())
1606 LV->getVarInfo(SrcReg2).Kills.push_back(NewMI);
1607 if (SrcReg != SrcReg2 && SrcReg != Src.getReg())
1608 LV->getVarInfo(SrcReg).Kills.push_back(NewMI);
1609 }
1610 NumRegOperands = 3;
1611 break;
1612 }
1613 CASE_NF(ADD8rr)
1614 case X86::ADD8rr_DB:
1615 Is8BitOp = true;
1616 [[fallthrough]];
1617 CASE_NF(ADD16rr)
1618 case X86::ADD16rr_DB:
1619 return convertToThreeAddressWithLEA(MIOpc, MI, LV, LIS, Is8BitOp);
1620 CASE_NF(ADD64ri32)
1621 case X86::ADD64ri32_DB:
1622 assert(MI.getNumOperands() >= 3 && "Unknown add instruction!");
1623 NewMI = addOffset(
1624 BuildMI(MF, MI.getDebugLoc(), get(X86::LEA64r)).add(Dest).add(Src),
1625 MI.getOperand(2));
1626 break;
1627 CASE_NF(ADD32ri)
1628 case X86::ADD32ri_DB: {
1629 assert(MI.getNumOperands() >= 3 && "Unknown add instruction!");
1630 unsigned Opc = Is64Bit ? X86::LEA64_32r : X86::LEA32r;
1631
1632 bool isKill;
1633 MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
1634 if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/true, SrcReg, SrcSubReg,
1635 isKill, ImplicitOp, LV, LIS))
1636 return nullptr;
1637
1639 BuildMI(MF, MI.getDebugLoc(), get(Opc))
1640 .add(Dest)
1641 .addReg(SrcReg, getKillRegState(isKill), SrcSubReg);
1642 if (ImplicitOp.getReg() != 0)
1643 MIB.add(ImplicitOp);
1644
1645 NewMI = addOffset(MIB, MI.getOperand(2));
1646
1647 // Add kills if classifyLEAReg created a new register.
1648 if (LV && SrcReg != Src.getReg())
1649 LV->getVarInfo(SrcReg).Kills.push_back(NewMI);
1650 break;
1651 }
1652 CASE_NF(ADD8ri)
1653 case X86::ADD8ri_DB:
1654 Is8BitOp = true;
1655 [[fallthrough]];
1656 CASE_NF(ADD16ri)
1657 case X86::ADD16ri_DB:
1658 return convertToThreeAddressWithLEA(MIOpc, MI, LV, LIS, Is8BitOp);
1659 CASE_NF(SUB8ri)
1660 CASE_NF(SUB16ri)
1661 /// FIXME: Support these similar to ADD8ri/ADD16ri*.
1662 return nullptr;
1663 CASE_NF(SUB32ri) {
1664 if (!MI.getOperand(2).isImm())
1665 return nullptr;
1666 int64_t Imm = MI.getOperand(2).getImm();
1667 if (!isInt<32>(-Imm))
1668 return nullptr;
1669
1670 assert(MI.getNumOperands() >= 3 && "Unknown add instruction!");
1671 unsigned Opc = Is64Bit ? X86::LEA64_32r : X86::LEA32r;
1672
1673 bool isKill;
1674 MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
1675 if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/true, SrcReg, SrcSubReg,
1676 isKill, ImplicitOp, LV, LIS))
1677 return nullptr;
1678
1680 BuildMI(MF, MI.getDebugLoc(), get(Opc))
1681 .add(Dest)
1682 .addReg(SrcReg, getKillRegState(isKill), SrcSubReg);
1683 if (ImplicitOp.getReg() != 0)
1684 MIB.add(ImplicitOp);
1685
1686 NewMI = addOffset(MIB, -Imm);
1687
1688 // Add kills if classifyLEAReg created a new register.
1689 if (LV && SrcReg != Src.getReg())
1690 LV->getVarInfo(SrcReg).Kills.push_back(NewMI);
1691 break;
1692 }
1693
1694 CASE_NF(SUB64ri32) {
1695 if (!MI.getOperand(2).isImm())
1696 return nullptr;
1697 int64_t Imm = MI.getOperand(2).getImm();
1698 if (!isInt<32>(-Imm))
1699 return nullptr;
1700
1701 assert(MI.getNumOperands() >= 3 && "Unknown sub instruction!");
1702
1704 BuildMI(MF, MI.getDebugLoc(), get(X86::LEA64r)).add(Dest).add(Src);
1705 NewMI = addOffset(MIB, -Imm);
1706 break;
1707 }
1708
1709 case X86::VMOVDQU8Z128rmk:
1710 case X86::VMOVDQU8Z256rmk:
1711 case X86::VMOVDQU8Zrmk:
1712 case X86::VMOVDQU16Z128rmk:
1713 case X86::VMOVDQU16Z256rmk:
1714 case X86::VMOVDQU16Zrmk:
1715 case X86::VMOVDQU32Z128rmk:
1716 case X86::VMOVDQA32Z128rmk:
1717 case X86::VMOVDQU32Z256rmk:
1718 case X86::VMOVDQA32Z256rmk:
1719 case X86::VMOVDQU32Zrmk:
1720 case X86::VMOVDQA32Zrmk:
1721 case X86::VMOVDQU64Z128rmk:
1722 case X86::VMOVDQA64Z128rmk:
1723 case X86::VMOVDQU64Z256rmk:
1724 case X86::VMOVDQA64Z256rmk:
1725 case X86::VMOVDQU64Zrmk:
1726 case X86::VMOVDQA64Zrmk:
1727 case X86::VMOVUPDZ128rmk:
1728 case X86::VMOVAPDZ128rmk:
1729 case X86::VMOVUPDZ256rmk:
1730 case X86::VMOVAPDZ256rmk:
1731 case X86::VMOVUPDZrmk:
1732 case X86::VMOVAPDZrmk:
1733 case X86::VMOVUPSZ128rmk:
1734 case X86::VMOVAPSZ128rmk:
1735 case X86::VMOVUPSZ256rmk:
1736 case X86::VMOVAPSZ256rmk:
1737 case X86::VMOVUPSZrmk:
1738 case X86::VMOVAPSZrmk:
1739 case X86::VBROADCASTSDZ256rmk:
1740 case X86::VBROADCASTSDZrmk:
1741 case X86::VBROADCASTSSZ128rmk:
1742 case X86::VBROADCASTSSZ256rmk:
1743 case X86::VBROADCASTSSZrmk:
1744 case X86::VPBROADCASTDZ128rmk:
1745 case X86::VPBROADCASTDZ256rmk:
1746 case X86::VPBROADCASTDZrmk:
1747 case X86::VPBROADCASTQZ128rmk:
1748 case X86::VPBROADCASTQZ256rmk:
1749 case X86::VPBROADCASTQZrmk: {
1750 unsigned Opc;
1751 switch (MIOpc) {
1752 default:
1753 llvm_unreachable("Unreachable!");
1754 case X86::VMOVDQU8Z128rmk:
1755 Opc = X86::VPBLENDMBZ128rmk;
1756 break;
1757 case X86::VMOVDQU8Z256rmk:
1758 Opc = X86::VPBLENDMBZ256rmk;
1759 break;
1760 case X86::VMOVDQU8Zrmk:
1761 Opc = X86::VPBLENDMBZrmk;
1762 break;
1763 case X86::VMOVDQU16Z128rmk:
1764 Opc = X86::VPBLENDMWZ128rmk;
1765 break;
1766 case X86::VMOVDQU16Z256rmk:
1767 Opc = X86::VPBLENDMWZ256rmk;
1768 break;
1769 case X86::VMOVDQU16Zrmk:
1770 Opc = X86::VPBLENDMWZrmk;
1771 break;
1772 case X86::VMOVDQU32Z128rmk:
1773 Opc = X86::VPBLENDMDZ128rmk;
1774 break;
1775 case X86::VMOVDQU32Z256rmk:
1776 Opc = X86::VPBLENDMDZ256rmk;
1777 break;
1778 case X86::VMOVDQU32Zrmk:
1779 Opc = X86::VPBLENDMDZrmk;
1780 break;
1781 case X86::VMOVDQU64Z128rmk:
1782 Opc = X86::VPBLENDMQZ128rmk;
1783 break;
1784 case X86::VMOVDQU64Z256rmk:
1785 Opc = X86::VPBLENDMQZ256rmk;
1786 break;
1787 case X86::VMOVDQU64Zrmk:
1788 Opc = X86::VPBLENDMQZrmk;
1789 break;
1790 case X86::VMOVUPDZ128rmk:
1791 Opc = X86::VBLENDMPDZ128rmk;
1792 break;
1793 case X86::VMOVUPDZ256rmk:
1794 Opc = X86::VBLENDMPDZ256rmk;
1795 break;
1796 case X86::VMOVUPDZrmk:
1797 Opc = X86::VBLENDMPDZrmk;
1798 break;
1799 case X86::VMOVUPSZ128rmk:
1800 Opc = X86::VBLENDMPSZ128rmk;
1801 break;
1802 case X86::VMOVUPSZ256rmk:
1803 Opc = X86::VBLENDMPSZ256rmk;
1804 break;
1805 case X86::VMOVUPSZrmk:
1806 Opc = X86::VBLENDMPSZrmk;
1807 break;
1808 case X86::VMOVDQA32Z128rmk:
1809 Opc = X86::VPBLENDMDZ128rmk;
1810 break;
1811 case X86::VMOVDQA32Z256rmk:
1812 Opc = X86::VPBLENDMDZ256rmk;
1813 break;
1814 case X86::VMOVDQA32Zrmk:
1815 Opc = X86::VPBLENDMDZrmk;
1816 break;
1817 case X86::VMOVDQA64Z128rmk:
1818 Opc = X86::VPBLENDMQZ128rmk;
1819 break;
1820 case X86::VMOVDQA64Z256rmk:
1821 Opc = X86::VPBLENDMQZ256rmk;
1822 break;
1823 case X86::VMOVDQA64Zrmk:
1824 Opc = X86::VPBLENDMQZrmk;
1825 break;
1826 case X86::VMOVAPDZ128rmk:
1827 Opc = X86::VBLENDMPDZ128rmk;
1828 break;
1829 case X86::VMOVAPDZ256rmk:
1830 Opc = X86::VBLENDMPDZ256rmk;
1831 break;
1832 case X86::VMOVAPDZrmk:
1833 Opc = X86::VBLENDMPDZrmk;
1834 break;
1835 case X86::VMOVAPSZ128rmk:
1836 Opc = X86::VBLENDMPSZ128rmk;
1837 break;
1838 case X86::VMOVAPSZ256rmk:
1839 Opc = X86::VBLENDMPSZ256rmk;
1840 break;
1841 case X86::VMOVAPSZrmk:
1842 Opc = X86::VBLENDMPSZrmk;
1843 break;
1844 case X86::VBROADCASTSDZ256rmk:
1845 Opc = X86::VBLENDMPDZ256rmbk;
1846 break;
1847 case X86::VBROADCASTSDZrmk:
1848 Opc = X86::VBLENDMPDZrmbk;
1849 break;
1850 case X86::VBROADCASTSSZ128rmk:
1851 Opc = X86::VBLENDMPSZ128rmbk;
1852 break;
1853 case X86::VBROADCASTSSZ256rmk:
1854 Opc = X86::VBLENDMPSZ256rmbk;
1855 break;
1856 case X86::VBROADCASTSSZrmk:
1857 Opc = X86::VBLENDMPSZrmbk;
1858 break;
1859 case X86::VPBROADCASTDZ128rmk:
1860 Opc = X86::VPBLENDMDZ128rmbk;
1861 break;
1862 case X86::VPBROADCASTDZ256rmk:
1863 Opc = X86::VPBLENDMDZ256rmbk;
1864 break;
1865 case X86::VPBROADCASTDZrmk:
1866 Opc = X86::VPBLENDMDZrmbk;
1867 break;
1868 case X86::VPBROADCASTQZ128rmk:
1869 Opc = X86::VPBLENDMQZ128rmbk;
1870 break;
1871 case X86::VPBROADCASTQZ256rmk:
1872 Opc = X86::VPBLENDMQZ256rmbk;
1873 break;
1874 case X86::VPBROADCASTQZrmk:
1875 Opc = X86::VPBLENDMQZrmbk;
1876 break;
1877 }
1878
1879 NewMI = BuildMI(MF, MI.getDebugLoc(), get(Opc))
1880 .add(Dest)
1881 .add(MI.getOperand(2))
1882 .add(Src)
1883 .add(MI.getOperand(3))
1884 .add(MI.getOperand(4))
1885 .add(MI.getOperand(5))
1886 .add(MI.getOperand(6))
1887 .add(MI.getOperand(7));
1888 NumRegOperands = 4;
1889 break;
1890 }
1891
1892 case X86::VMOVDQU8Z128rrk:
1893 case X86::VMOVDQU8Z256rrk:
1894 case X86::VMOVDQU8Zrrk:
1895 case X86::VMOVDQU16Z128rrk:
1896 case X86::VMOVDQU16Z256rrk:
1897 case X86::VMOVDQU16Zrrk:
1898 case X86::VMOVDQU32Z128rrk:
1899 case X86::VMOVDQA32Z128rrk:
1900 case X86::VMOVDQU32Z256rrk:
1901 case X86::VMOVDQA32Z256rrk:
1902 case X86::VMOVDQU32Zrrk:
1903 case X86::VMOVDQA32Zrrk:
1904 case X86::VMOVDQU64Z128rrk:
1905 case X86::VMOVDQA64Z128rrk:
1906 case X86::VMOVDQU64Z256rrk:
1907 case X86::VMOVDQA64Z256rrk:
1908 case X86::VMOVDQU64Zrrk:
1909 case X86::VMOVDQA64Zrrk:
1910 case X86::VMOVUPDZ128rrk:
1911 case X86::VMOVAPDZ128rrk:
1912 case X86::VMOVUPDZ256rrk:
1913 case X86::VMOVAPDZ256rrk:
1914 case X86::VMOVUPDZrrk:
1915 case X86::VMOVAPDZrrk:
1916 case X86::VMOVUPSZ128rrk:
1917 case X86::VMOVAPSZ128rrk:
1918 case X86::VMOVUPSZ256rrk:
1919 case X86::VMOVAPSZ256rrk:
1920 case X86::VMOVUPSZrrk:
1921 case X86::VMOVAPSZrrk: {
1922 unsigned Opc;
1923 switch (MIOpc) {
1924 default:
1925 llvm_unreachable("Unreachable!");
1926 case X86::VMOVDQU8Z128rrk:
1927 Opc = X86::VPBLENDMBZ128rrk;
1928 break;
1929 case X86::VMOVDQU8Z256rrk:
1930 Opc = X86::VPBLENDMBZ256rrk;
1931 break;
1932 case X86::VMOVDQU8Zrrk:
1933 Opc = X86::VPBLENDMBZrrk;
1934 break;
1935 case X86::VMOVDQU16Z128rrk:
1936 Opc = X86::VPBLENDMWZ128rrk;
1937 break;
1938 case X86::VMOVDQU16Z256rrk:
1939 Opc = X86::VPBLENDMWZ256rrk;
1940 break;
1941 case X86::VMOVDQU16Zrrk:
1942 Opc = X86::VPBLENDMWZrrk;
1943 break;
1944 case X86::VMOVDQU32Z128rrk:
1945 Opc = X86::VPBLENDMDZ128rrk;
1946 break;
1947 case X86::VMOVDQU32Z256rrk:
1948 Opc = X86::VPBLENDMDZ256rrk;
1949 break;
1950 case X86::VMOVDQU32Zrrk:
1951 Opc = X86::VPBLENDMDZrrk;
1952 break;
1953 case X86::VMOVDQU64Z128rrk:
1954 Opc = X86::VPBLENDMQZ128rrk;
1955 break;
1956 case X86::VMOVDQU64Z256rrk:
1957 Opc = X86::VPBLENDMQZ256rrk;
1958 break;
1959 case X86::VMOVDQU64Zrrk:
1960 Opc = X86::VPBLENDMQZrrk;
1961 break;
1962 case X86::VMOVUPDZ128rrk:
1963 Opc = X86::VBLENDMPDZ128rrk;
1964 break;
1965 case X86::VMOVUPDZ256rrk:
1966 Opc = X86::VBLENDMPDZ256rrk;
1967 break;
1968 case X86::VMOVUPDZrrk:
1969 Opc = X86::VBLENDMPDZrrk;
1970 break;
1971 case X86::VMOVUPSZ128rrk:
1972 Opc = X86::VBLENDMPSZ128rrk;
1973 break;
1974 case X86::VMOVUPSZ256rrk:
1975 Opc = X86::VBLENDMPSZ256rrk;
1976 break;
1977 case X86::VMOVUPSZrrk:
1978 Opc = X86::VBLENDMPSZrrk;
1979 break;
1980 case X86::VMOVDQA32Z128rrk:
1981 Opc = X86::VPBLENDMDZ128rrk;
1982 break;
1983 case X86::VMOVDQA32Z256rrk:
1984 Opc = X86::VPBLENDMDZ256rrk;
1985 break;
1986 case X86::VMOVDQA32Zrrk:
1987 Opc = X86::VPBLENDMDZrrk;
1988 break;
1989 case X86::VMOVDQA64Z128rrk:
1990 Opc = X86::VPBLENDMQZ128rrk;
1991 break;
1992 case X86::VMOVDQA64Z256rrk:
1993 Opc = X86::VPBLENDMQZ256rrk;
1994 break;
1995 case X86::VMOVDQA64Zrrk:
1996 Opc = X86::VPBLENDMQZrrk;
1997 break;
1998 case X86::VMOVAPDZ128rrk:
1999 Opc = X86::VBLENDMPDZ128rrk;
2000 break;
2001 case X86::VMOVAPDZ256rrk:
2002 Opc = X86::VBLENDMPDZ256rrk;
2003 break;
2004 case X86::VMOVAPDZrrk:
2005 Opc = X86::VBLENDMPDZrrk;
2006 break;
2007 case X86::VMOVAPSZ128rrk:
2008 Opc = X86::VBLENDMPSZ128rrk;
2009 break;
2010 case X86::VMOVAPSZ256rrk:
2011 Opc = X86::VBLENDMPSZ256rrk;
2012 break;
2013 case X86::VMOVAPSZrrk:
2014 Opc = X86::VBLENDMPSZrrk;
2015 break;
2016 }
2017
2018 NewMI = BuildMI(MF, MI.getDebugLoc(), get(Opc))
2019 .add(Dest)
2020 .add(MI.getOperand(2))
2021 .add(Src)
2022 .add(MI.getOperand(3));
2023 NumRegOperands = 4;
2024 break;
2025 }
2026 }
2027#undef CASE_NF
2028
2029 if (!NewMI)
2030 return nullptr;
2031
2032 if (LV) { // Update live variables
2033 for (unsigned I = 0; I < NumRegOperands; ++I) {
2034 MachineOperand &Op = MI.getOperand(I);
2035 if (Op.isReg() && (Op.isDead() || Op.isKill()))
2036 LV->replaceKillInstruction(Op.getReg(), MI, *NewMI);
2037 }
2038 }
2039
2040 MachineBasicBlock &MBB = *MI.getParent();
2041 MBB.insert(MI.getIterator(), NewMI); // Insert the new inst
2042
2043 if (LIS) {
2044 LIS->ReplaceMachineInstrInMaps(MI, *NewMI);
2045 if (SrcReg)
2046 LIS->getInterval(SrcReg);
2047 if (SrcReg2)
2048 LIS->getInterval(SrcReg2);
2049 }
2050
2051 return NewMI;
2052}
2053
2054/// This determines which of three possible cases of a three source commute
2055 /// the source indexes correspond to, taking into account any mask operands.
2056 /// Commuting a passthru operand is not allowed in any case; an unknown
2057 /// combination of source indices is unreachable.
2058/// Case 0 - Possible to commute the first and second operands.
2059/// Case 1 - Possible to commute the first and third operands.
2060/// Case 2 - Possible to commute the second and third operands.
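/// e.g. for a k-masked operation laid out as (dst, src1, mask, src2, src3),
/// the three source operands sit at operand indices 1, 3 and 4.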
2061static unsigned getThreeSrcCommuteCase(uint64_t TSFlags, unsigned SrcOpIdx1,
2062 unsigned SrcOpIdx2) {
2063 // Put the lowest index to SrcOpIdx1 to simplify the checks below.
2064 if (SrcOpIdx1 > SrcOpIdx2)
2065 std::swap(SrcOpIdx1, SrcOpIdx2);
2066
2067 unsigned Op1 = 1, Op2 = 2, Op3 = 3;
2068 if (X86II::isKMasked(TSFlags)) {
2069 Op2++;
2070 Op3++;
2071 }
2072
2073 if (SrcOpIdx1 == Op1 && SrcOpIdx2 == Op2)
2074 return 0;
2075 if (SrcOpIdx1 == Op1 && SrcOpIdx2 == Op3)
2076 return 1;
2077 if (SrcOpIdx1 == Op2 && SrcOpIdx2 == Op3)
2078 return 2;
2079 llvm_unreachable("Unknown three src commute case.");
2080}
2081
2082 unsigned X86InstrInfo::getFMA3OpcodeToCommuteOperands(
2083 const MachineInstr &MI, unsigned SrcOpIdx1, unsigned SrcOpIdx2,
2084 const X86InstrFMA3Group &FMA3Group) const {
2085
2086 unsigned Opc = MI.getOpcode();
2087
2088 // TODO: Commuting the 1st operand of FMA*_Int requires some additional
2089 // analysis. The commute optimization is legal only if all users of FMA*_Int
2090 // use only the lowest element of the FMA*_Int instruction. Such analysis is
2091 // not implemented yet, so such commutes are rejected before reaching this
2092 // point. When that analysis becomes available, this will be the right place
2093 // to call it.
2094 assert(!(FMA3Group.isIntrinsic() && (SrcOpIdx1 == 1 || SrcOpIdx2 == 1)) &&
2095 "Intrinsic instructions can't commute operand 1");
2096
2097 // Determine which case this commute is or if it can't be done.
2098 unsigned Case =
2099 getThreeSrcCommuteCase(MI.getDesc().TSFlags, SrcOpIdx1, SrcOpIdx2);
2100 assert(Case < 3 && "Unexpected case number!");
2101
2102 // Define the FMA forms mapping array that helps to map input FMA form
2103 // to output FMA form to preserve the operation semantics after
2104 // commuting the operands.
2105 const unsigned Form132Index = 0;
2106 const unsigned Form213Index = 1;
2107 const unsigned Form231Index = 2;
2108 static const unsigned FormMapping[][3] = {
2109 // 0: SrcOpIdx1 == 1 && SrcOpIdx2 == 2;
2110 // FMA132 A, C, b; ==> FMA231 C, A, b;
2111 // FMA213 B, A, c; ==> FMA213 A, B, c;
2112 // FMA231 C, A, b; ==> FMA132 A, C, b;
2113 {Form231Index, Form213Index, Form132Index},
2114 // 1: SrcOpIdx1 == 1 && SrcOpIdx2 == 3;
2115 // FMA132 A, c, B; ==> FMA132 B, c, A;
2116 // FMA213 B, a, C; ==> FMA231 C, a, B;
2117 // FMA231 C, a, B; ==> FMA213 B, a, C;
2118 {Form132Index, Form231Index, Form213Index},
2119 // 2: SrcOpIdx1 == 2 && SrcOpIdx2 == 3;
2120 // FMA132 a, C, B; ==> FMA213 a, B, C;
2121 // FMA213 b, A, C; ==> FMA132 b, C, A;
2122 // FMA231 c, A, B; ==> FMA231 c, B, A;
2123 {Form213Index, Form132Index, Form231Index}};
2124
2125 unsigned FMAForms[3];
2126 FMAForms[0] = FMA3Group.get132Opcode();
2127 FMAForms[1] = FMA3Group.get213Opcode();
2128 FMAForms[2] = FMA3Group.get231Opcode();
2129
2130 // Everything is ready, just adjust the FMA opcode and return it.
2131 for (unsigned FormIndex = 0; FormIndex < 3; FormIndex++)
2132 if (Opc == FMAForms[FormIndex])
2133 return FMAForms[FormMapping[Case][FormIndex]];
2134
2135 llvm_unreachable("Illegal FMA3 format");
2136}
2137
2138static void commuteVPTERNLOG(MachineInstr &MI, unsigned SrcOpIdx1,
2139 unsigned SrcOpIdx2) {
2140 // Determine which case this commute is or if it can't be done.
2141 unsigned Case =
2142 getThreeSrcCommuteCase(MI.getDesc().TSFlags, SrcOpIdx1, SrcOpIdx2);
2143 assert(Case < 3 && "Unexpected case value!");
2144
2145 // For each case we need to swap two pairs of bits in the final immediate.
2146 static const uint8_t SwapMasks[3][4] = {
2147 {0x04, 0x10, 0x08, 0x20}, // Swap bits 2/4 and 3/5.
2148 {0x02, 0x10, 0x08, 0x40}, // Swap bits 1/4 and 3/6.
2149 {0x02, 0x04, 0x20, 0x40}, // Swap bits 1/2 and 5/6.
2150 };
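// e.g. for case 0 (swap operands 1 and 2), Imm = 0xCA (src1 ? src2 : src3)
// becomes 0xE2 (src2 ? src1 : src3).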
2151
2152 uint8_t Imm = MI.getOperand(MI.getNumOperands() - 1).getImm();
2153 // Clear out the bits we are swapping.
2154 uint8_t NewImm = Imm & ~(SwapMasks[Case][0] | SwapMasks[Case][1] |
2155 SwapMasks[Case][2] | SwapMasks[Case][3]);
2156 // If the immediate had a bit of the pair set, then set the opposite bit.
2157 if (Imm & SwapMasks[Case][0])
2158 NewImm |= SwapMasks[Case][1];
2159 if (Imm & SwapMasks[Case][1])
2160 NewImm |= SwapMasks[Case][0];
2161 if (Imm & SwapMasks[Case][2])
2162 NewImm |= SwapMasks[Case][3];
2163 if (Imm & SwapMasks[Case][3])
2164 NewImm |= SwapMasks[Case][2];
2165 MI.getOperand(MI.getNumOperands() - 1).setImm(NewImm);
2166}
2167
2168// Returns true if this is a VPERMI2 or VPERMT2 instruction that can be
2169// commuted.
2170static bool isCommutableVPERMV3Instruction(unsigned Opcode) {
2171#define VPERM_CASES(Suffix) \
2172 case X86::VPERMI2##Suffix##Z128rr: \
2173 case X86::VPERMT2##Suffix##Z128rr: \
2174 case X86::VPERMI2##Suffix##Z256rr: \
2175 case X86::VPERMT2##Suffix##Z256rr: \
2176 case X86::VPERMI2##Suffix##Zrr: \
2177 case X86::VPERMT2##Suffix##Zrr: \
2178 case X86::VPERMI2##Suffix##Z128rm: \
2179 case X86::VPERMT2##Suffix##Z128rm: \
2180 case X86::VPERMI2##Suffix##Z256rm: \
2181 case X86::VPERMT2##Suffix##Z256rm: \
2182 case X86::VPERMI2##Suffix##Zrm: \
2183 case X86::VPERMT2##Suffix##Zrm: \
2184 case X86::VPERMI2##Suffix##Z128rrkz: \
2185 case X86::VPERMT2##Suffix##Z128rrkz: \
2186 case X86::VPERMI2##Suffix##Z256rrkz: \
2187 case X86::VPERMT2##Suffix##Z256rrkz: \
2188 case X86::VPERMI2##Suffix##Zrrkz: \
2189 case X86::VPERMT2##Suffix##Zrrkz: \
2190 case X86::VPERMI2##Suffix##Z128rmkz: \
2191 case X86::VPERMT2##Suffix##Z128rmkz: \
2192 case X86::VPERMI2##Suffix##Z256rmkz: \
2193 case X86::VPERMT2##Suffix##Z256rmkz: \
2194 case X86::VPERMI2##Suffix##Zrmkz: \
2195 case X86::VPERMT2##Suffix##Zrmkz:
2196
2197#define VPERM_CASES_BROADCAST(Suffix) \
2198 VPERM_CASES(Suffix) \
2199 case X86::VPERMI2##Suffix##Z128rmb: \
2200 case X86::VPERMT2##Suffix##Z128rmb: \
2201 case X86::VPERMI2##Suffix##Z256rmb: \
2202 case X86::VPERMT2##Suffix##Z256rmb: \
2203 case X86::VPERMI2##Suffix##Zrmb: \
2204 case X86::VPERMT2##Suffix##Zrmb: \
2205 case X86::VPERMI2##Suffix##Z128rmbkz: \
2206 case X86::VPERMT2##Suffix##Z128rmbkz: \
2207 case X86::VPERMI2##Suffix##Z256rmbkz: \
2208 case X86::VPERMT2##Suffix##Z256rmbkz: \
2209 case X86::VPERMI2##Suffix##Zrmbkz: \
2210 case X86::VPERMT2##Suffix##Zrmbkz:
2211
2212 switch (Opcode) {
2213 default:
2214 return false;
2215 VPERM_CASES(B)
2216 VPERM_CASES_BROADCAST(D)
2217 VPERM_CASES_BROADCAST(PD)
2218 VPERM_CASES_BROADCAST(PS)
2219 VPERM_CASES_BROADCAST(Q)
2220 VPERM_CASES(W)
2221 return true;
2222 }
2223#undef VPERM_CASES_BROADCAST
2224#undef VPERM_CASES
2225}
2226
2227// Returns commuted opcode for VPERMI2 and VPERMT2 instructions by switching
2228// from the I opcode to the T opcode and vice versa.
2229static unsigned getCommutedVPERMV3Opcode(unsigned Opcode) {
2230#define VPERM_CASES(Orig, New) \
2231 case X86::Orig##Z128rr: \
2232 return X86::New##Z128rr; \
2233 case X86::Orig##Z128rrkz: \
2234 return X86::New##Z128rrkz; \
2235 case X86::Orig##Z128rm: \
2236 return X86::New##Z128rm; \
2237 case X86::Orig##Z128rmkz: \
2238 return X86::New##Z128rmkz; \
2239 case X86::Orig##Z256rr: \
2240 return X86::New##Z256rr; \
2241 case X86::Orig##Z256rrkz: \
2242 return X86::New##Z256rrkz; \
2243 case X86::Orig##Z256rm: \
2244 return X86::New##Z256rm; \
2245 case X86::Orig##Z256rmkz: \
2246 return X86::New##Z256rmkz; \
2247 case X86::Orig##Zrr: \
2248 return X86::New##Zrr; \
2249 case X86::Orig##Zrrkz: \
2250 return X86::New##Zrrkz; \
2251 case X86::Orig##Zrm: \
2252 return X86::New##Zrm; \
2253 case X86::Orig##Zrmkz: \
2254 return X86::New##Zrmkz;
2255
2256#define VPERM_CASES_BROADCAST(Orig, New) \
2257 VPERM_CASES(Orig, New) \
2258 case X86::Orig##Z128rmb: \
2259 return X86::New##Z128rmb; \
2260 case X86::Orig##Z128rmbkz: \
2261 return X86::New##Z128rmbkz; \
2262 case X86::Orig##Z256rmb: \
2263 return X86::New##Z256rmb; \
2264 case X86::Orig##Z256rmbkz: \
2265 return X86::New##Z256rmbkz; \
2266 case X86::Orig##Zrmb: \
2267 return X86::New##Zrmb; \
2268 case X86::Orig##Zrmbkz: \
2269 return X86::New##Zrmbkz;
2270
2271 switch (Opcode) {
2272 VPERM_CASES(VPERMI2B, VPERMT2B)
2273 VPERM_CASES_BROADCAST(VPERMI2D, VPERMT2D)
2274 VPERM_CASES_BROADCAST(VPERMI2PD, VPERMT2PD)
2275 VPERM_CASES_BROADCAST(VPERMI2PS, VPERMT2PS)
2276 VPERM_CASES_BROADCAST(VPERMI2Q, VPERMT2Q)
2277 VPERM_CASES(VPERMI2W, VPERMT2W)
2278 VPERM_CASES(VPERMT2B, VPERMI2B)
2279 VPERM_CASES_BROADCAST(VPERMT2D, VPERMI2D)
2280 VPERM_CASES_BROADCAST(VPERMT2PD, VPERMI2PD)
2281 VPERM_CASES_BROADCAST(VPERMT2PS, VPERMI2PS)
2282 VPERM_CASES_BROADCAST(VPERMT2Q, VPERMI2Q)
2283 VPERM_CASES(VPERMT2W, VPERMI2W)
2284 }
2285
2286 llvm_unreachable("Unreachable!");
2287#undef VPERM_CASES_BROADCAST
2288#undef VPERM_CASES
2289}
2290
2291 MachineInstr *X86InstrInfo::commuteInstructionImpl(MachineInstr &MI, bool NewMI,
2292 unsigned OpIdx1,
2293 unsigned OpIdx2) const {
2294 auto CloneIfNew = [&](MachineInstr &MI) {
2295 return std::exchange(NewMI, false)
2296 ? MI.getParent()->getParent()->CloneMachineInstr(&MI)
2297 : &MI;
2298 };
2299 MachineInstr *WorkingMI = nullptr;
2300 unsigned Opc = MI.getOpcode();
2301
2302#define CASE_ND(OP) \
2303 case X86::OP: \
2304 case X86::OP##_ND:
2305
2306 switch (Opc) {
2307 // SHLD B, C, I <-> SHRD C, B, (BitWidth - I)
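// e.g. SHLD32rri8 a, b, 5 computes the same value as SHRD32rri8 b, a, 27.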
2308 CASE_ND(SHRD16rri8)
2309 CASE_ND(SHLD16rri8)
2310 CASE_ND(SHRD32rri8)
2311 CASE_ND(SHLD32rri8)
2312 CASE_ND(SHRD64rri8)
2313 CASE_ND(SHLD64rri8) {
2314 unsigned Size;
2315 switch (Opc) {
2316 default:
2317 llvm_unreachable("Unreachable!");
2318#define FROM_TO_SIZE(A, B, S) \
2319 case X86::A: \
2320 Opc = X86::B; \
2321 Size = S; \
2322 break; \
2323 case X86::A##_ND: \
2324 Opc = X86::B##_ND; \
2325 Size = S; \
2326 break; \
2327 case X86::B: \
2328 Opc = X86::A; \
2329 Size = S; \
2330 break; \
2331 case X86::B##_ND: \
2332 Opc = X86::A##_ND; \
2333 Size = S; \
2334 break;
2335
2336 FROM_TO_SIZE(SHRD16rri8, SHLD16rri8, 16)
2337 FROM_TO_SIZE(SHRD32rri8, SHLD32rri8, 32)
2338 FROM_TO_SIZE(SHRD64rri8, SHLD64rri8, 64)
2339#undef FROM_TO_SIZE
2340 }
2341 WorkingMI = CloneIfNew(MI);
2342 WorkingMI->setDesc(get(Opc));
2343 WorkingMI->getOperand(3).setImm(Size - MI.getOperand(3).getImm());
2344 break;
2345 }
2346 case X86::PFSUBrr:
2347 case X86::PFSUBRrr:
2348 // PFSUB x, y: x = x - y
2349 // PFSUBR x, y: x = y - x
2350 WorkingMI = CloneIfNew(MI);
2351 WorkingMI->setDesc(
2352 get(X86::PFSUBRrr == Opc ? X86::PFSUBrr : X86::PFSUBRrr));
2353 break;
2354 case X86::BLENDPDrri:
2355 case X86::BLENDPSrri:
2356 case X86::PBLENDWrri:
2357 case X86::VBLENDPDrri:
2358 case X86::VBLENDPSrri:
2359 case X86::VBLENDPDYrri:
2360 case X86::VBLENDPSYrri:
2361 case X86::VPBLENDDrri:
2362 case X86::VPBLENDWrri:
2363 case X86::VPBLENDDYrri:
2364 case X86::VPBLENDWYrri: {
2365 int8_t Mask;
2366 switch (Opc) {
2367 default:
2368 llvm_unreachable("Unreachable!");
2369 case X86::BLENDPDrri:
2370 Mask = (int8_t)0x03;
2371 break;
2372 case X86::BLENDPSrri:
2373 Mask = (int8_t)0x0F;
2374 break;
2375 case X86::PBLENDWrri:
2376 Mask = (int8_t)0xFF;
2377 break;
2378 case X86::VBLENDPDrri:
2379 Mask = (int8_t)0x03;
2380 break;
2381 case X86::VBLENDPSrri:
2382 Mask = (int8_t)0x0F;
2383 break;
2384 case X86::VBLENDPDYrri:
2385 Mask = (int8_t)0x0F;
2386 break;
2387 case X86::VBLENDPSYrri:
2388 Mask = (int8_t)0xFF;
2389 break;
2390 case X86::VPBLENDDrri:
2391 Mask = (int8_t)0x0F;
2392 break;
2393 case X86::VPBLENDWrri:
2394 Mask = (int8_t)0xFF;
2395 break;
2396 case X86::VPBLENDDYrri:
2397 Mask = (int8_t)0xFF;
2398 break;
2399 case X86::VPBLENDWYrri:
2400 Mask = (int8_t)0xFF;
2401 break;
2402 }
2403 // Only the least significant bits of Imm are used.
2404 // Using int8_t to ensure it will be sign extended to the int64_t that
2405 // setImm takes in order to match isel behavior.
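// Swapping the two sources inverts the per-element selection, so the new
// immediate is the old one XORed with the full lane mask
// (e.g. BLENDPS 0x05 -> 0x0A).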
2406 int8_t Imm = MI.getOperand(3).getImm() & Mask;
2407 WorkingMI = CloneIfNew(MI);
2408 WorkingMI->getOperand(3).setImm(Mask ^ Imm);
2409 break;
2410 }
2411 case X86::INSERTPSrri:
2412 case X86::VINSERTPSrri:
2413 case X86::VINSERTPSZrri: {
2414 unsigned Imm = MI.getOperand(MI.getNumOperands() - 1).getImm();
2415 unsigned ZMask = Imm & 15;
2416 unsigned DstIdx = (Imm >> 4) & 3;
2417 unsigned SrcIdx = (Imm >> 6) & 3;
2418
2419 // We can commute insertps if we zero 2 of the elements, the insertion is
2420 // "inline" and we don't override the insertion with a zero.
2421 if (DstIdx == SrcIdx && (ZMask & (1 << DstIdx)) == 0 &&
2422 llvm::popcount(ZMask) == 2) {
2423 unsigned AltIdx = llvm::countr_zero((ZMask | (1 << DstIdx)) ^ 15);
2424 assert(AltIdx < 4 && "Illegal insertion index");
2425 unsigned AltImm = (AltIdx << 6) | (AltIdx << 4) | ZMask;
2426 WorkingMI = CloneIfNew(MI);
2427 WorkingMI->getOperand(MI.getNumOperands() - 1).setImm(AltImm);
2428 break;
2429 }
2430 return nullptr;
2431 }
2432 case X86::MOVSDrr:
2433 case X86::MOVSSrr:
2434 case X86::VMOVSDrr:
2435 case X86::VMOVSSrr: {
2436 // On SSE41 or later we can commute a MOVSS/MOVSD to a BLENDPS/BLENDPD.
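// MOVSD takes the low element from the second source and the rest from the
// first, so once the sources are swapped a blend immediate of 0x02
// (0x0E for MOVSS) selects the same elements.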
2437 if (Subtarget.hasSSE41()) {
2438 unsigned Mask;
2439 switch (Opc) {
2440 default:
2441 llvm_unreachable("Unreachable!");
2442 case X86::MOVSDrr:
2443 Opc = X86::BLENDPDrri;
2444 Mask = 0x02;
2445 break;
2446 case X86::MOVSSrr:
2447 Opc = X86::BLENDPSrri;
2448 Mask = 0x0E;
2449 break;
2450 case X86::VMOVSDrr:
2451 Opc = X86::VBLENDPDrri;
2452 Mask = 0x02;
2453 break;
2454 case X86::VMOVSSrr:
2455 Opc = X86::VBLENDPSrri;
2456 Mask = 0x0E;
2457 break;
2458 }
2459
2460 WorkingMI = CloneIfNew(MI);
2461 WorkingMI->setDesc(get(Opc));
2462 WorkingMI->addOperand(MachineOperand::CreateImm(Mask));
2463 break;
2464 }
2465
2466 assert(Opc == X86::MOVSDrr && "Only MOVSD can commute to SHUFPD");
2467 WorkingMI = CloneIfNew(MI);
2468 WorkingMI->setDesc(get(X86::SHUFPDrri));
2469 WorkingMI->addOperand(MachineOperand::CreateImm(0x02));
2470 break;
2471 }
2472 case X86::SHUFPDrri: {
2473 // Commute to MOVSD.
2474 assert(MI.getOperand(3).getImm() == 0x02 && "Unexpected immediate!");
2475 WorkingMI = CloneIfNew(MI);
2476 WorkingMI->setDesc(get(X86::MOVSDrr));
2477 WorkingMI->removeOperand(3);
2478 break;
2479 }
2480 case X86::PCLMULQDQrri:
2481 case X86::VPCLMULQDQrri:
2482 case X86::VPCLMULQDQYrri:
2483 case X86::VPCLMULQDQZrri:
2484 case X86::VPCLMULQDQZ128rri:
2485 case X86::VPCLMULQDQZ256rri: {
2486 // SRC1 64bits = Imm[0] ? SRC1[127:64] : SRC1[63:0]
2487 // SRC2 64bits = Imm[4] ? SRC2[127:64] : SRC2[63:0]
2488 unsigned Imm = MI.getOperand(3).getImm();
2489 unsigned Src1Hi = Imm & 0x01;
2490 unsigned Src2Hi = Imm & 0x10;
2491 WorkingMI = CloneIfNew(MI);
2492 WorkingMI->getOperand(3).setImm((Src1Hi << 4) | (Src2Hi >> 4));
2493 break;
2494 }
2495 case X86::VPCMPBZ128rri:
2496 case X86::VPCMPUBZ128rri:
2497 case X86::VPCMPBZ256rri:
2498 case X86::VPCMPUBZ256rri:
2499 case X86::VPCMPBZrri:
2500 case X86::VPCMPUBZrri:
2501 case X86::VPCMPDZ128rri:
2502 case X86::VPCMPUDZ128rri:
2503 case X86::VPCMPDZ256rri:
2504 case X86::VPCMPUDZ256rri:
2505 case X86::VPCMPDZrri:
2506 case X86::VPCMPUDZrri:
2507 case X86::VPCMPQZ128rri:
2508 case X86::VPCMPUQZ128rri:
2509 case X86::VPCMPQZ256rri:
2510 case X86::VPCMPUQZ256rri:
2511 case X86::VPCMPQZrri:
2512 case X86::VPCMPUQZrri:
2513 case X86::VPCMPWZ128rri:
2514 case X86::VPCMPUWZ128rri:
2515 case X86::VPCMPWZ256rri:
2516 case X86::VPCMPUWZ256rri:
2517 case X86::VPCMPWZrri:
2518 case X86::VPCMPUWZrri:
2519 case X86::VPCMPBZ128rrik:
2520 case X86::VPCMPUBZ128rrik:
2521 case X86::VPCMPBZ256rrik:
2522 case X86::VPCMPUBZ256rrik:
2523 case X86::VPCMPBZrrik:
2524 case X86::VPCMPUBZrrik:
2525 case X86::VPCMPDZ128rrik:
2526 case X86::VPCMPUDZ128rrik:
2527 case X86::VPCMPDZ256rrik:
2528 case X86::VPCMPUDZ256rrik:
2529 case X86::VPCMPDZrrik:
2530 case X86::VPCMPUDZrrik:
2531 case X86::VPCMPQZ128rrik:
2532 case X86::VPCMPUQZ128rrik:
2533 case X86::VPCMPQZ256rrik:
2534 case X86::VPCMPUQZ256rrik:
2535 case X86::VPCMPQZrrik:
2536 case X86::VPCMPUQZrrik:
2537 case X86::VPCMPWZ128rrik:
2538 case X86::VPCMPUWZ128rrik:
2539 case X86::VPCMPWZ256rrik:
2540 case X86::VPCMPUWZ256rrik:
2541 case X86::VPCMPWZrrik:
2542 case X86::VPCMPUWZrrik:
2543 WorkingMI = CloneIfNew(MI);
2544 // Flip comparison mode immediate (if necessary).
2545 WorkingMI->getOperand(MI.getNumOperands() - 1)
2546 .setImm(X86::getSwappedVPCMPImm(
2547 MI.getOperand(MI.getNumOperands() - 1).getImm() & 0x7));
2548 break;
2549 case X86::VPCOMBri:
2550 case X86::VPCOMUBri:
2551 case X86::VPCOMDri:
2552 case X86::VPCOMUDri:
2553 case X86::VPCOMQri:
2554 case X86::VPCOMUQri:
2555 case X86::VPCOMWri:
2556 case X86::VPCOMUWri:
2557 WorkingMI = CloneIfNew(MI);
2558 // Flip comparison mode immediate (if necessary).
2559 WorkingMI->getOperand(3).setImm(
2560 X86::getSwappedVPCOMImm(MI.getOperand(3).getImm() & 0x7));
2561 break;
2562 case X86::VCMPSDZrri:
2563 case X86::VCMPSSZrri:
2564 case X86::VCMPPDZrri:
2565 case X86::VCMPPSZrri:
2566 case X86::VCMPSHZrri:
2567 case X86::VCMPPHZrri:
2568 case X86::VCMPPHZ128rri:
2569 case X86::VCMPPHZ256rri:
2570 case X86::VCMPPDZ128rri:
2571 case X86::VCMPPSZ128rri:
2572 case X86::VCMPPDZ256rri:
2573 case X86::VCMPPSZ256rri:
2574 case X86::VCMPPDZrrik:
2575 case X86::VCMPPSZrrik:
2576 case X86::VCMPPHZrrik:
2577 case X86::VCMPPDZ128rrik:
2578 case X86::VCMPPSZ128rrik:
2579 case X86::VCMPPHZ128rrik:
2580 case X86::VCMPPDZ256rrik:
2581 case X86::VCMPPSZ256rrik:
2582 case X86::VCMPPHZ256rrik:
2583 WorkingMI = CloneIfNew(MI);
2584 WorkingMI->getOperand(MI.getNumExplicitOperands() - 1)
2585 .setImm(X86::getSwappedVCMPImm(
2586 MI.getOperand(MI.getNumExplicitOperands() - 1).getImm() & 0x1f));
2587 break;
2588 case X86::VPERM2F128rri:
2589 case X86::VPERM2I128rri:
2590 // Flip permute source immediate.
2591 // Imm & 0x02: lo = if set, select Op1.lo/hi else Op0.lo/hi.
2592 // Imm & 0x20: hi = if set, select Op1.lo/hi else Op0.lo/hi.
2593 WorkingMI = CloneIfNew(MI);
2594 WorkingMI->getOperand(3).setImm((MI.getOperand(3).getImm() & 0xFF) ^ 0x22);
2595 break;
2596 case X86::MOVHLPSrr:
2597 case X86::UNPCKHPDrr:
2598 case X86::VMOVHLPSrr:
2599 case X86::VUNPCKHPDrr:
2600 case X86::VMOVHLPSZrr:
2601 case X86::VUNPCKHPDZ128rr:
2602 assert(Subtarget.hasSSE2() && "Commuting MOVHLP/UNPCKHPD requires SSE2!");
2603
2604 switch (Opc) {
2605 default:
2606 llvm_unreachable("Unreachable!");
2607 case X86::MOVHLPSrr:
2608 Opc = X86::UNPCKHPDrr;
2609 break;
2610 case X86::UNPCKHPDrr:
2611 Opc = X86::MOVHLPSrr;
2612 break;
2613 case X86::VMOVHLPSrr:
2614 Opc = X86::VUNPCKHPDrr;
2615 break;
2616 case X86::VUNPCKHPDrr:
2617 Opc = X86::VMOVHLPSrr;
2618 break;
2619 case X86::VMOVHLPSZrr:
2620 Opc = X86::VUNPCKHPDZ128rr;
2621 break;
2622 case X86::VUNPCKHPDZ128rr:
2623 Opc = X86::VMOVHLPSZrr;
2624 break;
2625 }
2626 WorkingMI = CloneIfNew(MI);
2627 WorkingMI->setDesc(get(Opc));
2628 break;
2629 CASE_ND(CMOV16rr)
2630 CASE_ND(CMOV32rr)
2631 CASE_ND(CMOV64rr) {
2632 WorkingMI = CloneIfNew(MI);
2633 unsigned OpNo = MI.getDesc().getNumOperands() - 1;
2634 X86::CondCode CC = static_cast<X86::CondCode>(MI.getOperand(OpNo).getImm());
2635 WorkingMI->getOperand(OpNo).setImm(X86::GetOppositeBranchCondition(CC));
2636 break;
2637 }
2638 case X86::VPTERNLOGDZrri:
2639 case X86::VPTERNLOGDZrmi:
2640 case X86::VPTERNLOGDZ128rri:
2641 case X86::VPTERNLOGDZ128rmi:
2642 case X86::VPTERNLOGDZ256rri:
2643 case X86::VPTERNLOGDZ256rmi:
2644 case X86::VPTERNLOGQZrri:
2645 case X86::VPTERNLOGQZrmi:
2646 case X86::VPTERNLOGQZ128rri:
2647 case X86::VPTERNLOGQZ128rmi:
2648 case X86::VPTERNLOGQZ256rri:
2649 case X86::VPTERNLOGQZ256rmi:
2650 case X86::VPTERNLOGDZrrik:
2651 case X86::VPTERNLOGDZ128rrik:
2652 case X86::VPTERNLOGDZ256rrik:
2653 case X86::VPTERNLOGQZrrik:
2654 case X86::VPTERNLOGQZ128rrik:
2655 case X86::VPTERNLOGQZ256rrik:
2656 case X86::VPTERNLOGDZrrikz:
2657 case X86::VPTERNLOGDZrmikz:
2658 case X86::VPTERNLOGDZ128rrikz:
2659 case X86::VPTERNLOGDZ128rmikz:
2660 case X86::VPTERNLOGDZ256rrikz:
2661 case X86::VPTERNLOGDZ256rmikz:
2662 case X86::VPTERNLOGQZrrikz:
2663 case X86::VPTERNLOGQZrmikz:
2664 case X86::VPTERNLOGQZ128rrikz:
2665 case X86::VPTERNLOGQZ128rmikz:
2666 case X86::VPTERNLOGQZ256rrikz:
2667 case X86::VPTERNLOGQZ256rmikz:
2668 case X86::VPTERNLOGDZ128rmbi:
2669 case X86::VPTERNLOGDZ256rmbi:
2670 case X86::VPTERNLOGDZrmbi:
2671 case X86::VPTERNLOGQZ128rmbi:
2672 case X86::VPTERNLOGQZ256rmbi:
2673 case X86::VPTERNLOGQZrmbi:
2674 case X86::VPTERNLOGDZ128rmbikz:
2675 case X86::VPTERNLOGDZ256rmbikz:
2676 case X86::VPTERNLOGDZrmbikz:
2677 case X86::VPTERNLOGQZ128rmbikz:
2678 case X86::VPTERNLOGQZ256rmbikz:
2679 case X86::VPTERNLOGQZrmbikz: {
2680 WorkingMI = CloneIfNew(MI);
2681 commuteVPTERNLOG(*WorkingMI, OpIdx1, OpIdx2);
2682 break;
2683 }
2684 default:
2685 if (isCommutableVPERMV3Instruction(Opc)) {
2686 WorkingMI = CloneIfNew(MI);
2687 WorkingMI->setDesc(get(getCommutedVPERMV3Opcode(Opc)));
2688 break;
2689 }
2690
2691 if (auto *FMA3Group = getFMA3Group(Opc, MI.getDesc().TSFlags)) {
2692 WorkingMI = CloneIfNew(MI);
2693 WorkingMI->setDesc(
2694 get(getFMA3OpcodeToCommuteOperands(MI, OpIdx1, OpIdx2, *FMA3Group)));
2695 break;
2696 }
2697 }
2698 return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
2699}
2700
2701bool X86InstrInfo::findThreeSrcCommutedOpIndices(const MachineInstr &MI,
2702 unsigned &SrcOpIdx1,
2703 unsigned &SrcOpIdx2,
2704 bool IsIntrinsic) const {
2705 uint64_t TSFlags = MI.getDesc().TSFlags;
2706
2707 unsigned FirstCommutableVecOp = 1;
2708 unsigned LastCommutableVecOp = 3;
2709 unsigned KMaskOp = -1U;
2710 if (X86II::isKMasked(TSFlags)) {
2711 // For k-zero-masked operations it is OK to commute the first vector
2712 // operand, unless this is an intrinsic instruction.
2713 // For regular k-masked operations a conservative choice is done as the
2714 // elements of the first vector operand, for which the corresponding bit
2715 // in the k-mask operand is set to 0, are copied to the result of the
2716 // instruction.
2717 // TODO/FIXME: The commute still may be legal if it is known that the
2718 // k-mask operand is set to either all ones or all zeroes.
2719 // It is also Ok to commute the 1st operand if all users of MI use only
2720 // the elements enabled by the k-mask operand. For example,
2721 // v4 = VFMADD213PSZrk v1, k, v2, v3; // v1[i] = k[i] ? v2[i]*v1[i]+v3[i]
2722 // : v1[i];
2723 // VMOVAPSZmrk <mem_addr>, k, v4; // this is the ONLY user of v4 ->
2724 // // Ok, to commute v1 in FMADD213PSZrk.
2725
2726 // The k-mask operand has index = 2 for masked and zero-masked operations.
2727 KMaskOp = 2;
2728
2729 // The operand with index = 1 is used as a source for those elements for
2730 // which the corresponding bit in the k-mask is set to 0.
2731 if (X86II::isKMergeMasked(TSFlags) || IsIntrinsic)
2732 FirstCommutableVecOp = 3;
2733
2734 LastCommutableVecOp++;
2735 } else if (IsIntrinsic) {
2736 // Commuting the first operand of an intrinsic instruction isn't possible
2737 // unless we can prove that only the lowest element of the result is used.
2738 FirstCommutableVecOp = 2;
2739 }
2740
2741 if (isMem(MI, LastCommutableVecOp))
2742 LastCommutableVecOp--;
2743
2744 // Only the first RegOpsNum operands are commutable.
2745 // Also, the value 'CommuteAnyOperandIndex' is valid here as it means
2746 // that the operand is not specified/fixed.
2747 if (SrcOpIdx1 != CommuteAnyOperandIndex &&
2748 (SrcOpIdx1 < FirstCommutableVecOp || SrcOpIdx1 > LastCommutableVecOp ||
2749 SrcOpIdx1 == KMaskOp))
2750 return false;
2751 if (SrcOpIdx2 != CommuteAnyOperandIndex &&
2752 (SrcOpIdx2 < FirstCommutableVecOp || SrcOpIdx2 > LastCommutableVecOp ||
2753 SrcOpIdx2 == KMaskOp))
2754 return false;
2755
2756 // Look for two different register operands assumed to be commutable
2757 // regardless of the FMA opcode. The FMA opcode is adjusted later.
2758 if (SrcOpIdx1 == CommuteAnyOperandIndex ||
2759 SrcOpIdx2 == CommuteAnyOperandIndex) {
2760 unsigned CommutableOpIdx2 = SrcOpIdx2;
2761
2762 // At least one of the operands to be commuted is not specified and
2763 // this method is free to choose appropriate commutable operands.
2764 if (SrcOpIdx1 == SrcOpIdx2)
2765 // Neither operand is fixed. By default set one of the commutable
2766 // operands to the last register operand of the instruction.
2767 CommutableOpIdx2 = LastCommutableVecOp;
2768 else if (SrcOpIdx2 == CommuteAnyOperandIndex)
2769 // Only one of the operands is not fixed.
2770 CommutableOpIdx2 = SrcOpIdx1;
2771
2772 // CommutableOpIdx2 is well defined now. Let's choose another commutable
2773 // operand and assign its index to CommutableOpIdx1.
2774 Register Op2Reg = MI.getOperand(CommutableOpIdx2).getReg();
2775
2776 unsigned CommutableOpIdx1;
2777 for (CommutableOpIdx1 = LastCommutableVecOp;
2778 CommutableOpIdx1 >= FirstCommutableVecOp; CommutableOpIdx1--) {
2779 // Just ignore and skip the k-mask operand.
2780 if (CommutableOpIdx1 == KMaskOp)
2781 continue;
2782
2783 // The commuted operands must have different registers.
2784 // Otherwise, the commute transformation does not change anything and
2785 // is therefore useless.
2786 if (Op2Reg != MI.getOperand(CommutableOpIdx1).getReg())
2787 break;
2788 }
2789
2790 // No appropriate commutable operands were found.
2791 if (CommutableOpIdx1 < FirstCommutableVecOp)
2792 return false;
2793
2794 // Assign the found pair of commutable indices to SrcOpIdx1 and SrcOpIdx2
2795 // to return those values.
2796 if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
2797 CommutableOpIdx2))
2798 return false;
2799 }
2800
2801 return true;
2802}
2803
2804 bool X86InstrInfo::findCommutedOpIndices(const MachineInstr &MI,
2805 unsigned &SrcOpIdx1,
2806 unsigned &SrcOpIdx2) const {
2807 const MCInstrDesc &Desc = MI.getDesc();
2808 if (!Desc.isCommutable())
2809 return false;
2810
2811 switch (MI.getOpcode()) {
2812 case X86::CMPSDrri:
2813 case X86::CMPSSrri:
2814 case X86::CMPPDrri:
2815 case X86::CMPPSrri:
2816 case X86::VCMPSDrri:
2817 case X86::VCMPSSrri:
2818 case X86::VCMPPDrri:
2819 case X86::VCMPPSrri:
2820 case X86::VCMPPDYrri:
2821 case X86::VCMPPSYrri:
2822 case X86::VCMPSDZrri:
2823 case X86::VCMPSSZrri:
2824 case X86::VCMPPDZrri:
2825 case X86::VCMPPSZrri:
2826 case X86::VCMPSHZrri:
2827 case X86::VCMPPHZrri:
2828 case X86::VCMPPHZ128rri:
2829 case X86::VCMPPHZ256rri:
2830 case X86::VCMPPDZ128rri:
2831 case X86::VCMPPSZ128rri:
2832 case X86::VCMPPDZ256rri:
2833 case X86::VCMPPSZ256rri:
2834 case X86::VCMPPDZrrik:
2835 case X86::VCMPPSZrrik:
2836 case X86::VCMPPHZrrik:
2837 case X86::VCMPPDZ128rrik:
2838 case X86::VCMPPSZ128rrik:
2839 case X86::VCMPPHZ128rrik:
2840 case X86::VCMPPDZ256rrik:
2841 case X86::VCMPPSZ256rrik:
2842 case X86::VCMPPHZ256rrik: {
2843 unsigned OpOffset = X86II::isKMasked(Desc.TSFlags) ? 1 : 0;
2844
2845 // Float comparison can be safely commuted for
2846 // Ordered/Unordered/Equal/NotEqual tests
2847 unsigned Imm = MI.getOperand(3 + OpOffset).getImm() & 0x7;
2848 switch (Imm) {
2849 default:
2850 // EVEX versions can be commuted.
2851 if ((Desc.TSFlags & X86II::EncodingMask) == X86II::EVEX)
2852 break;
2853 return false;
2854 case 0x00: // EQUAL
2855 case 0x03: // UNORDERED
2856 case 0x04: // NOT EQUAL
2857 case 0x07: // ORDERED
2858 break;
2859 }
2860
2861 // The indices of the commutable operands are 1 and 2 (or 2 and 3
2862 // when masked).
2863 // Assign them to the returned operand indices here.
2864 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1 + OpOffset,
2865 2 + OpOffset);
2866 }
2867 case X86::MOVSSrr:
2868 // X86::MOVSDrr is always commutable. MOVSS is only commutable if we can
2869 // form an SSE4.1 blend. We assume VMOVSSrr/VMOVSDrr are always commutable
2870 // since AVX implies SSE4.1.
2871 if (Subtarget.hasSSE41())
2872 return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2);
2873 return false;
2874 case X86::SHUFPDrri:
2875 // We can commute this to MOVSD.
2876 if (MI.getOperand(3).getImm() == 0x02)
2877 return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2);
2878 return false;
2879 case X86::MOVHLPSrr:
2880 case X86::UNPCKHPDrr:
2881 case X86::VMOVHLPSrr:
2882 case X86::VUNPCKHPDrr:
2883 case X86::VMOVHLPSZrr:
2884 case X86::VUNPCKHPDZ128rr:
2885 if (Subtarget.hasSSE2())
2886 return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2);
2887 return false;
2888 case X86::VPTERNLOGDZrri:
2889 case X86::VPTERNLOGDZrmi:
2890 case X86::VPTERNLOGDZ128rri:
2891 case X86::VPTERNLOGDZ128rmi:
2892 case X86::VPTERNLOGDZ256rri:
2893 case X86::VPTERNLOGDZ256rmi:
2894 case X86::VPTERNLOGQZrri:
2895 case X86::VPTERNLOGQZrmi:
2896 case X86::VPTERNLOGQZ128rri:
2897 case X86::VPTERNLOGQZ128rmi:
2898 case X86::VPTERNLOGQZ256rri:
2899 case X86::VPTERNLOGQZ256rmi:
2900 case X86::VPTERNLOGDZrrik:
2901 case X86::VPTERNLOGDZ128rrik:
2902 case X86::VPTERNLOGDZ256rrik:
2903 case X86::VPTERNLOGQZrrik:
2904 case X86::VPTERNLOGQZ128rrik:
2905 case X86::VPTERNLOGQZ256rrik:
2906 case X86::VPTERNLOGDZrrikz:
2907 case X86::VPTERNLOGDZrmikz:
2908 case X86::VPTERNLOGDZ128rrikz:
2909 case X86::VPTERNLOGDZ128rmikz:
2910 case X86::VPTERNLOGDZ256rrikz:
2911 case X86::VPTERNLOGDZ256rmikz:
2912 case X86::VPTERNLOGQZrrikz:
2913 case X86::VPTERNLOGQZrmikz:
2914 case X86::VPTERNLOGQZ128rrikz:
2915 case X86::VPTERNLOGQZ128rmikz:
2916 case X86::VPTERNLOGQZ256rrikz:
2917 case X86::VPTERNLOGQZ256rmikz:
2918 case X86::VPTERNLOGDZ128rmbi:
2919 case X86::VPTERNLOGDZ256rmbi:
2920 case X86::VPTERNLOGDZrmbi:
2921 case X86::VPTERNLOGQZ128rmbi:
2922 case X86::VPTERNLOGQZ256rmbi:
2923 case X86::VPTERNLOGQZrmbi:
2924 case X86::VPTERNLOGDZ128rmbikz:
2925 case X86::VPTERNLOGDZ256rmbikz:
2926 case X86::VPTERNLOGDZrmbikz:
2927 case X86::VPTERNLOGQZ128rmbikz:
2928 case X86::VPTERNLOGQZ256rmbikz:
2929 case X86::VPTERNLOGQZrmbikz:
2930 return findThreeSrcCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2);
2931 case X86::VPDPWSSDYrr:
2932 case X86::VPDPWSSDrr:
2933 case X86::VPDPWSSDSYrr:
2934 case X86::VPDPWSSDSrr:
2935 case X86::VPDPWUUDrr:
2936 case X86::VPDPWUUDYrr:
2937 case X86::VPDPWUUDSrr:
2938 case X86::VPDPWUUDSYrr:
2939 case X86::VPDPBSSDSrr:
2940 case X86::VPDPBSSDSYrr:
2941 case X86::VPDPBSSDrr:
2942 case X86::VPDPBSSDYrr:
2943 case X86::VPDPBUUDSrr:
2944 case X86::VPDPBUUDSYrr:
2945 case X86::VPDPBUUDrr:
2946 case X86::VPDPBUUDYrr:
2947 case X86::VPDPBSSDSZ128rr:
2948 case X86::VPDPBSSDSZ128rrk:
2949 case X86::VPDPBSSDSZ128rrkz:
2950 case X86::VPDPBSSDSZ256rr:
2951 case X86::VPDPBSSDSZ256rrk:
2952 case X86::VPDPBSSDSZ256rrkz:
2953 case X86::VPDPBSSDSZrr:
2954 case X86::VPDPBSSDSZrrk:
2955 case X86::VPDPBSSDSZrrkz:
2956 case X86::VPDPBSSDZ128rr:
2957 case X86::VPDPBSSDZ128rrk:
2958 case X86::VPDPBSSDZ128rrkz:
2959 case X86::VPDPBSSDZ256rr:
2960 case X86::VPDPBSSDZ256rrk:
2961 case X86::VPDPBSSDZ256rrkz:
2962 case X86::VPDPBSSDZrr:
2963 case X86::VPDPBSSDZrrk:
2964 case X86::VPDPBSSDZrrkz:
2965 case X86::VPDPBUUDSZ128rr:
2966 case X86::VPDPBUUDSZ128rrk:
2967 case X86::VPDPBUUDSZ128rrkz:
2968 case X86::VPDPBUUDSZ256rr:
2969 case X86::VPDPBUUDSZ256rrk:
2970 case X86::VPDPBUUDSZ256rrkz:
2971 case X86::VPDPBUUDSZrr:
2972 case X86::VPDPBUUDSZrrk:
2973 case X86::VPDPBUUDSZrrkz:
2974 case X86::VPDPBUUDZ128rr:
2975 case X86::VPDPBUUDZ128rrk:
2976 case X86::VPDPBUUDZ128rrkz:
2977 case X86::VPDPBUUDZ256rr:
2978 case X86::VPDPBUUDZ256rrk:
2979 case X86::VPDPBUUDZ256rrkz:
2980 case X86::VPDPBUUDZrr:
2981 case X86::VPDPBUUDZrrk:
2982 case X86::VPDPBUUDZrrkz:
2983 case X86::VPDPWSSDZ128rr:
2984 case X86::VPDPWSSDZ128rrk:
2985 case X86::VPDPWSSDZ128rrkz:
2986 case X86::VPDPWSSDZ256rr:
2987 case X86::VPDPWSSDZ256rrk:
2988 case X86::VPDPWSSDZ256rrkz:
2989 case X86::VPDPWSSDZrr:
2990 case X86::VPDPWSSDZrrk:
2991 case X86::VPDPWSSDZrrkz:
2992 case X86::VPDPWSSDSZ128rr:
2993 case X86::VPDPWSSDSZ128rrk:
2994 case X86::VPDPWSSDSZ128rrkz:
2995 case X86::VPDPWSSDSZ256rr:
2996 case X86::VPDPWSSDSZ256rrk:
2997 case X86::VPDPWSSDSZ256rrkz:
2998 case X86::VPDPWSSDSZrr:
2999 case X86::VPDPWSSDSZrrk:
3000 case X86::VPDPWSSDSZrrkz:
3001 case X86::VPDPWUUDZ128rr:
3002 case X86::VPDPWUUDZ128rrk:
3003 case X86::VPDPWUUDZ128rrkz:
3004 case X86::VPDPWUUDZ256rr:
3005 case X86::VPDPWUUDZ256rrk:
3006 case X86::VPDPWUUDZ256rrkz:
3007 case X86::VPDPWUUDZrr:
3008 case X86::VPDPWUUDZrrk:
3009 case X86::VPDPWUUDZrrkz:
3010 case X86::VPDPWUUDSZ128rr:
3011 case X86::VPDPWUUDSZ128rrk:
3012 case X86::VPDPWUUDSZ128rrkz:
3013 case X86::VPDPWUUDSZ256rr:
3014 case X86::VPDPWUUDSZ256rrk:
3015 case X86::VPDPWUUDSZ256rrkz:
3016 case X86::VPDPWUUDSZrr:
3017 case X86::VPDPWUUDSZrrk:
3018 case X86::VPDPWUUDSZrrkz:
3019 case X86::VPMADD52HUQrr:
3020 case X86::VPMADD52HUQYrr:
3021 case X86::VPMADD52HUQZ128r:
3022 case X86::VPMADD52HUQZ128rk:
3023 case X86::VPMADD52HUQZ128rkz:
3024 case X86::VPMADD52HUQZ256r:
3025 case X86::VPMADD52HUQZ256rk:
3026 case X86::VPMADD52HUQZ256rkz:
3027 case X86::VPMADD52HUQZr:
3028 case X86::VPMADD52HUQZrk:
3029 case X86::VPMADD52HUQZrkz:
3030 case X86::VPMADD52LUQrr:
3031 case X86::VPMADD52LUQYrr:
3032 case X86::VPMADD52LUQZ128r:
3033 case X86::VPMADD52LUQZ128rk:
3034 case X86::VPMADD52LUQZ128rkz:
3035 case X86::VPMADD52LUQZ256r:
3036 case X86::VPMADD52LUQZ256rk:
3037 case X86::VPMADD52LUQZ256rkz:
3038 case X86::VPMADD52LUQZr:
3039 case X86::VPMADD52LUQZrk:
3040 case X86::VPMADD52LUQZrkz:
3041 case X86::VFMADDCPHZr:
3042 case X86::VFMADDCPHZrk:
3043 case X86::VFMADDCPHZrkz:
3044 case X86::VFMADDCPHZ128r:
3045 case X86::VFMADDCPHZ128rk:
3046 case X86::VFMADDCPHZ128rkz:
3047 case X86::VFMADDCPHZ256r:
3048 case X86::VFMADDCPHZ256rk:
3049 case X86::VFMADDCPHZ256rkz:
3050 case X86::VFMADDCSHZr:
3051 case X86::VFMADDCSHZrk:
3052 case X86::VFMADDCSHZrkz: {
3053 unsigned CommutableOpIdx1 = 2;
3054 unsigned CommutableOpIdx2 = 3;
3055 if (X86II::isKMasked(Desc.TSFlags)) {
3056 // Skip the mask register.
3057 ++CommutableOpIdx1;
3058 ++CommutableOpIdx2;
3059 }
3060 if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
3061 CommutableOpIdx2))
3062 return false;
3063 if (!MI.getOperand(SrcOpIdx1).isReg() || !MI.getOperand(SrcOpIdx2).isReg())
3064 // No idea.
3065 return false;
3066 return true;
3067 }
3068
3069 default:
3070 const X86InstrFMA3Group *FMA3Group =
3071 getFMA3Group(MI.getOpcode(), MI.getDesc().TSFlags);
3072 if (FMA3Group)
3073 return findThreeSrcCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2,
3074 FMA3Group->isIntrinsic());
3075
3076 // Handle masked instructions since we need to skip over the mask input
3077 // and the preserved input.
3078 if (X86II::isKMasked(Desc.TSFlags)) {
3079 // First assume that the first input is the mask operand and skip past it.
3080 unsigned CommutableOpIdx1 = Desc.getNumDefs() + 1;
3081 unsigned CommutableOpIdx2 = Desc.getNumDefs() + 2;
3082 // Check if the first input is tied. If there isn't one then we only
3083 // need to skip the mask operand which we did above.
3084 if ((MI.getDesc().getOperandConstraint(Desc.getNumDefs(),
3085 MCOI::TIED_TO) != -1)) {
3086 // If this is zero masking instruction with a tied operand, we need to
3087 // move the first index back to the first input since this must
3088 // be a 3 input instruction and we want the first two non-mask inputs.
3089 // Otherwise this is a 2 input instruction with a preserved input and
3090 // mask, so we need to move the indices to skip one more input.
3091 if (X86II::isKMergeMasked(Desc.TSFlags)) {
3092 ++CommutableOpIdx1;
3093 ++CommutableOpIdx2;
3094 } else {
3095 --CommutableOpIdx1;
3096 }
3097 }
3098
3099 if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
3100 CommutableOpIdx2))
3101 return false;
3102
3103 if (!MI.getOperand(SrcOpIdx1).isReg() ||
3104 !MI.getOperand(SrcOpIdx2).isReg())
3105 // No idea.
3106 return false;
3107 return true;
3108 }
3109
3110 return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2);
3111 }
3112 return false;
3113}
3114
3115 static bool isConvertibleLEA(MachineInstr *MI) {
3116 unsigned Opcode = MI->getOpcode();
3117 if (Opcode != X86::LEA32r && Opcode != X86::LEA64r &&
3118 Opcode != X86::LEA64_32r)
3119 return false;
3120
3121 const MachineOperand &Scale = MI->getOperand(1 + X86::AddrScaleAmt);
3122 const MachineOperand &Disp = MI->getOperand(1 + X86::AddrDisp);
3123 const MachineOperand &Segment = MI->getOperand(1 + X86::AddrSegmentReg);
3124
3125 if (Segment.getReg() != 0 || !Disp.isImm() || Disp.getImm() != 0 ||
3126 Scale.getImm() > 1)
3127 return false;
3128
3129 return true;
3130}
3131
3132 bool X86InstrInfo::hasCommutePreference(MachineInstr &MI, bool &Commute) const {
3133 // Currently we're interested only in the following sequence:
3134 // r3 = lea r1, r2
3135 // r5 = add r3, r4
3136 // Both r3 and r4 are killed in the add; we hope the add instruction has the
3137 // operand order
3138 // r5 = add r4, r3
3139 // So later in X86FixupLEAs the lea instruction can be rewritten as add.
3140 unsigned Opcode = MI.getOpcode();
3141 if (Opcode != X86::ADD32rr && Opcode != X86::ADD64rr)
3142 return false;
3143
3144 const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
3145 Register Reg1 = MI.getOperand(1).getReg();
3146 Register Reg2 = MI.getOperand(2).getReg();
3147
3148 // Check if Reg1 comes from LEA in the same MBB.
3149 if (MachineInstr *Inst = MRI.getUniqueVRegDef(Reg1)) {
3150 if (isConvertibleLEA(Inst) && Inst->getParent() == MI.getParent()) {
3151 Commute = true;
3152 return true;
3153 }
3154 }
3155
3156 // Check if Reg2 comes from LEA in the same MBB.
3157 if (MachineInstr *Inst = MRI.getUniqueVRegDef(Reg2)) {
3158 if (isConvertibleLEA(Inst) && Inst->getParent() == MI.getParent()) {
3159 Commute = false;
3160 return true;
3161 }
3162 }
3163
3164 return false;
3165}
3166
3167 int X86::getCondSrcNoFromDesc(const MCInstrDesc &MCID) {
3168 unsigned Opcode = MCID.getOpcode();
3169 if (!(X86::isJCC(Opcode) || X86::isSETCC(Opcode) || X86::isSETZUCC(Opcode) ||
3170 X86::isCMOVCC(Opcode) || X86::isCFCMOVCC(Opcode) ||
3171 X86::isCCMPCC(Opcode) || X86::isCTESTCC(Opcode)))
3172 return -1;
3173 // Assume that the condition code is always the last use operand.
3174 unsigned NumUses = MCID.getNumOperands() - MCID.getNumDefs();
3175 return NumUses - 1;
3176}
3177
3178 X86::CondCode X86::getCondFromMI(const MachineInstr &MI) {
3179 const MCInstrDesc &MCID = MI.getDesc();
3180 int CondNo = getCondSrcNoFromDesc(MCID);
3181 if (CondNo < 0)
3182 return X86::COND_INVALID;
3183 CondNo += MCID.getNumDefs();
3184 return static_cast<X86::CondCode>(MI.getOperand(CondNo).getImm());
3185}
3186
3187 X86::CondCode X86::getCondFromBranch(const MachineInstr &MI) {
3188 return X86::isJCC(MI.getOpcode()) ? X86::getCondFromMI(MI)
3189 : X86::COND_INVALID;
3190}
3191
3192 X86::CondCode X86::getCondFromSETCC(const MachineInstr &MI) {
3193 return X86::isSETCC(MI.getOpcode()) || X86::isSETZUCC(MI.getOpcode())
3194 ? X86::getCondFromMI(MI)
3195 : X86::COND_INVALID;
3196}
3197
3198 X86::CondCode X86::getCondFromCMov(const MachineInstr &MI) {
3199 return X86::isCMOVCC(MI.getOpcode()) ? X86::getCondFromMI(MI)
3200 : X86::COND_INVALID;
3201}
3202
3203 X86::CondCode X86::getCondFromCFCMov(const MachineInstr &MI) {
3204 return X86::isCFCMOVCC(MI.getOpcode()) ? X86::getCondFromMI(MI)
3205 : X86::COND_INVALID;
3206}
3207
3208 X86::CondCode X86::getCondFromCCMP(const MachineInstr &MI) {
3209 return X86::isCCMPCC(MI.getOpcode()) || X86::isCTESTCC(MI.getOpcode())
3210 ? X86::getCondFromMI(MI)
3211 : X86::COND_INVALID;
3212}
3213
3214 int X86::getCCMPCondFlagsFromCondCode(X86::CondCode CC) {
3215 // CCMP/CTEST has two conditional operands:
3216 // - SCC: source conditional code (same as CMOV)
3217 // - DCF: destination conditional flags, which has 4 valid bits
3218 //
3219 // +----+----+----+----+
3220 // | OF | SF | ZF | CF |
3221 // +----+----+----+----+
3222 //
3223 // If SCC (the source conditional code) evaluates to false, CCMP/CTEST will
3224 // update the conditional flags as follows:
3225 //
3226 // OF = DCF.OF
3227 // SF = DCF.SF
3228 // ZF = DCF.ZF
3229 // CF = DCF.CF
3230 // PF = DCF.CF
3231 // AF = 0 (Auxiliary Carry Flag)
3232 //
3233 // Otherwise, the CMP or TEST is executed and it updates the
3234 // CSPAZO flags normally.
3235 //
3236 // NOTE:
3237 // If SCC = P, then SCC evaluates to true regardless of the CSPAZO value.
3238 // If SCC = NP, then SCC evaluates to false regardless of the CSPAZO value.
3239
3240 enum { CF = 1, ZF = 2, SF = 4, OF = 8, PF = CF };
3241
3242 switch (CC) {
3243 default:
3244 llvm_unreachable("Illegal condition code!");
3245 case X86::COND_NO:
3246 case X86::COND_NE:
3247 case X86::COND_GE:
3248 case X86::COND_G:
3249 case X86::COND_AE:
3250 case X86::COND_A:
3251 case X86::COND_NS:
3252 case X86::COND_NP:
3253 return 0;
3254 case X86::COND_O:
3255 return OF;
3256 case X86::COND_B:
3257 case X86::COND_BE:
3258 return CF;
3259 break;
3260 case X86::COND_E:
3261 case X86::COND_LE:
3262 return ZF;
3263 case X86::COND_S:
3264 case X86::COND_L:
3265 return SF;
3266 case X86::COND_P:
3267 return PF;
3268 }
3269}
3270
3271#define GET_X86_NF_TRANSFORM_TABLE
3272#define GET_X86_ND2NONND_TABLE
3273#include "X86GenInstrMapping.inc"
3274
3275 static unsigned getNewOpcFromTable(ArrayRef<X86TableEntry> Table,
3276 unsigned Opc) {
3277 const auto I = llvm::lower_bound(Table, Opc);
3278 return (I == Table.end() || I->OldOpc != Opc) ? 0U : I->NewOpc;
3279}
3280unsigned X86::getNFVariant(unsigned Opc) {
3281#if defined(EXPENSIVE_CHECKS) && !defined(NDEBUG)
3282 // Make sure the tables are sorted.
3283 static std::atomic<bool> NFTableChecked(false);
3284 if (!NFTableChecked.load(std::memory_order_relaxed)) {
3285 assert(llvm::is_sorted(X86NFTransformTable) &&
3286 "X86NFTransformTable is not sorted!");
3287 NFTableChecked.store(true, std::memory_order_relaxed);
3288 }
3289#endif
3290 return getNewOpcFromTable(X86NFTransformTable, Opc);
3291}
3292
3293unsigned X86::getNonNDVariant(unsigned Opc) {
3294#if defined(EXPENSIVE_CHECKS) && !defined(NDEBUG)
3295 // Make sure the tables are sorted.
3296 static std::atomic<bool> NDTableChecked(false);
3297 if (!NDTableChecked.load(std::memory_order_relaxed)) {
3298 assert(llvm::is_sorted(X86ND2NonNDTable) &&
3299 "X86ND2NonNDTableis not sorted!");
3300 NDTableChecked.store(true, std::memory_order_relaxed);
3301 }
3302#endif
3303 return getNewOpcFromTable(X86ND2NonNDTable, Opc);
3304}
3305
3306/// Return the inverse of the specified condition,
3307/// e.g. turning COND_E to COND_NE.
3308 X86::CondCode X86::GetOppositeBranchCondition(X86::CondCode CC) {
3309 switch (CC) {
3310 default:
3311 llvm_unreachable("Illegal condition code!");
3312 case X86::COND_E:
3313 return X86::COND_NE;
3314 case X86::COND_NE:
3315 return X86::COND_E;
3316 case X86::COND_L:
3317 return X86::COND_GE;
3318 case X86::COND_LE:
3319 return X86::COND_G;
3320 case X86::COND_G:
3321 return X86::COND_LE;
3322 case X86::COND_GE:
3323 return X86::COND_L;
3324 case X86::COND_B:
3325 return X86::COND_AE;
3326 case X86::COND_BE:
3327 return X86::COND_A;
3328 case X86::COND_A:
3329 return X86::COND_BE;
3330 case X86::COND_AE:
3331 return X86::COND_B;
3332 case X86::COND_S:
3333 return X86::COND_NS;
3334 case X86::COND_NS:
3335 return X86::COND_S;
3336 case X86::COND_P:
3337 return X86::COND_NP;
3338 case X86::COND_NP:
3339 return X86::COND_P;
3340 case X86::COND_O:
3341 return X86::COND_NO;
3342 case X86::COND_NO:
3343 return X86::COND_O;
3344 case X86::COND_NE_OR_P:
3345 return X86::COND_E_AND_NP;
3346 case X86::COND_E_AND_NP:
3347 return X86::COND_NE_OR_P;
3348 }
3349}
3350
3351/// Assuming the flags are set by MI(a,b), return the condition code if we
3352/// modify the instructions such that flags are set by MI(b,a).
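/// e.g. CMP a, b checked with COND_L is equivalent to CMP b, a checked with
/// COND_G.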
3353 X86::CondCode X86::getSwappedCondition(X86::CondCode CC) {
3354 switch (CC) {
3355 default:
3356 return X86::COND_INVALID;
3357 case X86::COND_E:
3358 return X86::COND_E;
3359 case X86::COND_NE:
3360 return X86::COND_NE;
3361 case X86::COND_L:
3362 return X86::COND_G;
3363 case X86::COND_LE:
3364 return X86::COND_GE;
3365 case X86::COND_G:
3366 return X86::COND_L;
3367 case X86::COND_GE:
3368 return X86::COND_LE;
3369 case X86::COND_B:
3370 return X86::COND_A;
3371 case X86::COND_BE:
3372 return X86::COND_AE;
3373 case X86::COND_A:
3374 return X86::COND_B;
3375 case X86::COND_AE:
3376 return X86::COND_BE;
3377 }
3378}
3379
3380std::pair<X86::CondCode, bool>
3381 X86::getX86ConditionCode(CmpInst::Predicate Predicate) {
3382 X86::CondCode CC = X86::COND_INVALID;
3383 bool NeedSwap = false;
3384 switch (Predicate) {
3385 default:
3386 break;
3387 // Floating-point Predicates
3388 case CmpInst::FCMP_UEQ:
3389 CC = X86::COND_E;
3390 break;
3391 case CmpInst::FCMP_OLT:
3392 NeedSwap = true;
3393 [[fallthrough]];
3394 case CmpInst::FCMP_OGT:
3395 CC = X86::COND_A;
3396 break;
3397 case CmpInst::FCMP_OLE:
3398 NeedSwap = true;
3399 [[fallthrough]];
3400 case CmpInst::FCMP_OGE:
3401 CC = X86::COND_AE;
3402 break;
3403 case CmpInst::FCMP_UGT:
3404 NeedSwap = true;
3405 [[fallthrough]];
3406 case CmpInst::FCMP_ULT:
3407 CC = X86::COND_B;
3408 break;
3409 case CmpInst::FCMP_UGE:
3410 NeedSwap = true;
3411 [[fallthrough]];
3412 case CmpInst::FCMP_ULE:
3413 CC = X86::COND_BE;
3414 break;
3415 case CmpInst::FCMP_ONE:
3416 CC = X86::COND_NE;
3417 break;
3418 case CmpInst::FCMP_UNO:
3419 CC = X86::COND_P;
3420 break;
3421 case CmpInst::FCMP_ORD:
3422 CC = X86::COND_NP;
3423 break;
3424 case CmpInst::FCMP_OEQ:
3425 [[fallthrough]];
3426 case CmpInst::FCMP_UNE:
3427 CC = X86::COND_INVALID;
3428 break;
3429
3430 // Integer Predicates
3431 case CmpInst::ICMP_EQ:
3432 CC = X86::COND_E;
3433 break;
3434 case CmpInst::ICMP_NE:
3435 CC = X86::COND_NE;
3436 break;
3437 case CmpInst::ICMP_UGT:
3438 CC = X86::COND_A;
3439 break;
3440 case CmpInst::ICMP_UGE:
3441 CC = X86::COND_AE;
3442 break;
3443 case CmpInst::ICMP_ULT:
3444 CC = X86::COND_B;
3445 break;
3446 case CmpInst::ICMP_ULE:
3447 CC = X86::COND_BE;
3448 break;
3449 case CmpInst::ICMP_SGT:
3450 CC = X86::COND_G;
3451 break;
3452 case CmpInst::ICMP_SGE:
3453 CC = X86::COND_GE;
3454 break;
3455 case CmpInst::ICMP_SLT:
3456 CC = X86::COND_L;
3457 break;
3458 case CmpInst::ICMP_SLE:
3459 CC = X86::COND_LE;
3460 break;
3461 }
3462
3463 return std::make_pair(CC, NeedSwap);
3464}
3465
3466/// Return a cmov opcode for the given register size in bytes, and operand type.
3467unsigned X86::getCMovOpcode(unsigned RegBytes, bool HasMemoryOperand,
3468 bool HasNDD) {
3469 switch (RegBytes) {
3470 default:
3471 llvm_unreachable("Illegal register size!");
3472#define GET_ND_IF_ENABLED(OPC) (HasNDD ? OPC##_ND : OPC)
3473 case 2:
3474 return HasMemoryOperand ? GET_ND_IF_ENABLED(X86::CMOV16rm)
3475 : GET_ND_IF_ENABLED(X86::CMOV16rr);
3476 case 4:
3477 return HasMemoryOperand ? GET_ND_IF_ENABLED(X86::CMOV32rm)
3478 : GET_ND_IF_ENABLED(X86::CMOV32rr);
3479 case 8:
3480 return HasMemoryOperand ? GET_ND_IF_ENABLED(X86::CMOV64rm)
3481 : GET_ND_IF_ENABLED(X86::CMOV64rr);
3482 }
3483}
3484
3485/// Get the VPCMP immediate for the given condition.
3486 unsigned X86::getVPCMPImmForCond(ISD::CondCode CC) {
3487 switch (CC) {
3488 default:
3489 llvm_unreachable("Unexpected SETCC condition");
3490 case ISD::SETNE:
3491 return 4;
3492 case ISD::SETEQ:
3493 return 0;
3494 case ISD::SETULT:
3495 case ISD::SETLT:
3496 return 1;
3497 case ISD::SETUGT:
3498 case ISD::SETGT:
3499 return 6;
3500 case ISD::SETUGE:
3501 case ISD::SETGE:
3502 return 5;
3503 case ISD::SETULE:
3504 case ISD::SETLE:
3505 return 2;
3506 }
3507}
3508
3509/// Get the VPCMP immediate if the operands are swapped.
3510unsigned X86::getSwappedVPCMPImm(unsigned Imm) {
3511 switch (Imm) {
3512 default:
3513 llvm_unreachable("Unreachable!");
3514 case 0x01:
3515 Imm = 0x06;
3516 break; // LT -> NLE
3517 case 0x02:
3518 Imm = 0x05;
3519 break; // LE -> NLT
3520 case 0x05:
3521 Imm = 0x02;
3522 break; // NLT -> LE
3523 case 0x06:
3524 Imm = 0x01;
3525 break; // NLE -> LT
3526 case 0x00: // EQ
3527 case 0x03: // FALSE
3528 case 0x04: // NE
3529 case 0x07: // TRUE
3530 break;
3531 }
3532
3533 return Imm;
3534}
3535
3536/// Get the VPCOM immediate if the operands are swapped.
3537unsigned X86::getSwappedVPCOMImm(unsigned Imm) {
3538 switch (Imm) {
3539 default:
3540 llvm_unreachable("Unreachable!");
3541 case 0x00:
3542 Imm = 0x02;
3543 break; // LT -> GT
3544 case 0x01:
3545 Imm = 0x03;
3546 break; // LE -> GE
3547 case 0x02:
3548 Imm = 0x00;
3549 break; // GT -> LT
3550 case 0x03:
3551 Imm = 0x01;
3552 break; // GE -> LE
3553 case 0x04: // EQ
3554 case 0x05: // NE
3555 case 0x06: // FALSE
3556 case 0x07: // TRUE
3557 break;
3558 }
3559
3560 return Imm;
3561}
3562
3563/// Get the VCMP immediate if the operands are swapped.
3564unsigned X86::getSwappedVCMPImm(unsigned Imm) {
3565 // Only need the lower 2 bits to distinguish.
3566 switch (Imm & 0x3) {
3567 default:
3568 llvm_unreachable("Unreachable!");
3569 case 0x00:
3570 case 0x03:
3571 // EQ/NE/TRUE/FALSE/ORD/UNORD don't change immediate when commuted.
3572 break;
3573 case 0x01:
3574 case 0x02:
3575 // Need to toggle bits 3:0. Bit 4 stays the same.
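// e.g. LT_OS (0x01) becomes GT_OS (0x0E) and LE_OS (0x02) becomes GE_OS (0x0D).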
3576 Imm ^= 0xf;
3577 break;
3578 }
3579
3580 return Imm;
3581}
3582
3583 unsigned X86::getVectorRegisterWidth(const MCOperandInfo &Info) {
3584 if (Info.RegClass == X86::VR128RegClassID ||
3585 Info.RegClass == X86::VR128XRegClassID)
3586 return 128;
3587 if (Info.RegClass == X86::VR256RegClassID ||
3588 Info.RegClass == X86::VR256XRegClassID)
3589 return 256;
3590 if (Info.RegClass == X86::VR512RegClassID)
3591 return 512;
3592 llvm_unreachable("Unknown register class!");
3593}
3594
3595 /// Return true if Reg is an X87 register.
3596static bool isX87Reg(Register Reg) {
3597 return (Reg == X86::FPCW || Reg == X86::FPSW ||
3598 (Reg >= X86::ST0 && Reg <= X86::ST7));
3599}
3600
3601 /// Check if the instruction is an X87 instruction.
3602 bool X86::isX87Instruction(MachineInstr &MI) {
3603 // Calls and inline asm can def X87 registers, so we special-case them
3604 // here; otherwise they would incorrectly be flagged as X87
3605 // instructions.
3606 if (MI.isCall() || MI.isInlineAsm())
3607 return false;
3608 for (const MachineOperand &MO : MI.operands()) {
3609 if (!MO.isReg())
3610 continue;
3611 if (isX87Reg(MO.getReg()))
3612 return true;
3613 }
3614 return false;
3615}
3616
3617 int X86::getFirstAddrOperandIdx(const MachineInstr &MI) {
3618 auto IsMemOp = [](const MCOperandInfo &OpInfo) {
3619 return OpInfo.OperandType == MCOI::OPERAND_MEMORY;
3620 };
3621
3622 const MCInstrDesc &Desc = MI.getDesc();
3623
3624 // Directly invoke the MC-layer routine for real (i.e., non-pseudo)
3625 // instructions (fast case).
3626 if (!X86II::isPseudo(Desc.TSFlags)) {
3627 int MemRefIdx = X86II::getMemoryOperandNo(Desc.TSFlags);
3628 if (MemRefIdx >= 0)
3629 return MemRefIdx + X86II::getOperandBias(Desc);
3630#ifdef EXPENSIVE_CHECKS
3631 assert(none_of(Desc.operands(), IsMemOp) &&
3632 "Got false negative from X86II::getMemoryOperandNo()!");
3633#endif
3634 return -1;
3635 }
3636
3637 // Otherwise, handle pseudo instructions by examining the type of their
3638 // operands (slow case). An instruction cannot have a memory reference if it
3639 // has fewer than AddrNumOperands (= 5) explicit operands.
3640 unsigned NumOps = Desc.getNumOperands();
3641 if (NumOps < X86::AddrNumOperands) {
3642#ifdef EXPENSIVE_CHECKS
3643 assert(none_of(Desc.operands(), IsMemOp) &&
3644 "Expected no operands to have OPERAND_MEMORY type!");
3645#endif
3646 return -1;
3647 }
3648
3649 // The first operand with type OPERAND_MEMORY indicates the start of a memory
3650 // reference. We expect the following AddrNumOperands-1 operands to also have
3651 // OPERAND_MEMORY type.
3652 for (unsigned I = 0, E = NumOps - X86::AddrNumOperands; I != E; ++I) {
3653 if (IsMemOp(Desc.operands()[I])) {
3654#ifdef EXPENSIVE_CHECKS
3655 assert(std::all_of(Desc.operands().begin() + I,
3656 Desc.operands().begin() + I + X86::AddrNumOperands,
3657 IsMemOp) &&
3658 "Expected all five operands in the memory reference to have "
3659 "OPERAND_MEMORY type!");
3660#endif
3661 return I;
3662 }
3663 }
3664
3665 return -1;
3666}
3667
3668const Constant *X86::getConstantFromPool(const MachineInstr &MI,
3669 unsigned OpNo) {
3670 assert(MI.getNumOperands() >= (OpNo + X86::AddrNumOperands) &&
3671 "Unexpected number of operands!");
3672
3673 const MachineOperand &Index = MI.getOperand(OpNo + X86::AddrIndexReg);
3674 if (!Index.isReg() || Index.getReg() != X86::NoRegister)
3675 return nullptr;
3676
3677 const MachineOperand &Disp = MI.getOperand(OpNo + X86::AddrDisp);
3678 if (!Disp.isCPI() || Disp.getOffset() != 0)
3679 return nullptr;
3680
3681 ArrayRef<MachineConstantPoolEntry> Constants =
3682 MI.getParent()->getParent()->getConstantPool()->getConstants();
3683 const MachineConstantPoolEntry &ConstantEntry = Constants[Disp.getIndex()];
3684
3685 // Bail if this is a machine constant pool entry; we won't be able to dig out
3686 // anything useful.
3687 if (ConstantEntry.isMachineConstantPoolEntry())
3688 return nullptr;
3689
3690 return ConstantEntry.Val.ConstVal;
3691}
3692
3693bool X86InstrInfo::isUnconditionalTailCall(const MachineInstr &MI) const {
3694 switch (MI.getOpcode()) {
3695 case X86::TCRETURNdi:
3696 case X86::TCRETURNri:
3697 case X86::TCRETURNmi:
3698 case X86::TCRETURNdi64:
3699 case X86::TCRETURNri64:
3700 case X86::TCRETURNri64_ImpCall:
3701 case X86::TCRETURNmi64:
3702 return true;
3703 default:
3704 return false;
3705 }
3706}
3707
3708bool X86InstrInfo::canMakeTailCallConditional(
3709 SmallVectorImpl<MachineOperand> &BranchCond,
3710 const MachineInstr &TailCall) const {
3711
3712 const MachineFunction *MF = TailCall.getMF();
3713
3714 if (MF->getTarget().getCodeModel() == CodeModel::Kernel) {
3715 // The kernel patches thunk calls at runtime; these must never be conditional.
3716 const MachineOperand &Target = TailCall.getOperand(0);
3717 if (Target.isSymbol()) {
3718 StringRef Symbol(Target.getSymbolName());
3719 // This is currently only relevant to the r11/kernel indirect thunk.
3720 if (Symbol == "__x86_indirect_thunk_r11")
3721 return false;
3722 }
3723 }
3724
3725 if (TailCall.getOpcode() != X86::TCRETURNdi &&
3726 TailCall.getOpcode() != X86::TCRETURNdi64) {
3727 // Only direct calls can be done with a conditional branch.
3728 return false;
3729 }
3730
3731 if (Subtarget.isTargetWin64() && MF->hasWinCFI()) {
3732 // Conditional tail calls confuse the Win64 unwinder.
3733 return false;
3734 }
3735
3736 assert(BranchCond.size() == 1);
3737 if (BranchCond[0].getImm() > X86::LAST_VALID_COND) {
3738 // Can't make a conditional tail call with this condition.
3739 return false;
3740 }
3741
3742 const X86MachineFunctionInfo *X86FI = MF->getInfo<X86MachineFunctionInfo>();
3743 if (X86FI->getTCReturnAddrDelta() != 0 ||
3744 TailCall.getOperand(1).getImm() != 0) {
3745 // A conditional tail call cannot do any stack adjustment.
3746 return false;
3747 }
3748
3749 return true;
3750}
3751
3752void X86InstrInfo::replaceBranchWithTailCall(
3753 MachineBasicBlock &MBB, SmallVectorImpl<MachineOperand> &BranchCond,
3754 const MachineInstr &TailCall) const {
3755 assert(canMakeTailCallConditional(BranchCond, TailCall));
3756
3757 MachineBasicBlock::iterator I = MBB.end();
3758 while (I != MBB.begin()) {
3759 --I;
3760 if (I->isDebugInstr())
3761 continue;
3762 if (!I->isBranch())
3763 assert(0 && "Can't find the branch to replace!");
3764
3765 X86::CondCode CC = X86::getCondFromBranch(*I);
3766 assert(BranchCond.size() == 1);
3767 if (CC != BranchCond[0].getImm())
3768 continue;
3769
3770 break;
3771 }
3772
3773 unsigned Opc = TailCall.getOpcode() == X86::TCRETURNdi ? X86::TCRETURNdicc
3774 : X86::TCRETURNdi64cc;
3775
3776 auto MIB = BuildMI(MBB, I, MBB.findDebugLoc(I), get(Opc));
3777 MIB->addOperand(TailCall.getOperand(0)); // Destination.
3778 MIB.addImm(0); // Stack offset (not used).
3779 MIB->addOperand(BranchCond[0]); // Condition.
3780 MIB.copyImplicitOps(TailCall); // Regmask and (imp-used) parameters.
3781
3782 // Add implicit uses and defs of all live regs potentially clobbered by the
3783 // call. This way they still appear live across the call.
3784 LivePhysRegs LiveRegs(getRegisterInfo());
3785 LiveRegs.addLiveOuts(MBB);
3786 SmallVector<std::pair<MCPhysReg, const MachineOperand *>, 8> Clobbers;
3787 LiveRegs.stepForward(*MIB, Clobbers);
3788 for (const auto &C : Clobbers) {
3789 MIB.addReg(C.first, RegState::Implicit);
3790 MIB.addReg(C.first, RegState::Implicit | RegState::Define);
3791 }
3792
3793 I->eraseFromParent();
3794}
3795
3796// Given a MBB and its TBB, find the FBB which was a fallthrough MBB (it may
3797// not be a fallthrough MBB now due to layout changes). Return nullptr if the
3798// fallthrough MBB cannot be identified.
3799static MachineBasicBlock *getFallThroughMBB(MachineBasicBlock *MBB,
3800 MachineBasicBlock *TBB) {
3801 // Look for non-EHPad successors other than TBB. If we find exactly one, it
3802 // is the fallthrough MBB. If we find zero, then TBB is both the target MBB
3803 // and fallthrough MBB. If we find more than one, we cannot identify the
3804 // fallthrough MBB and should return nullptr.
3805 MachineBasicBlock *FallthroughBB = nullptr;
3806 for (MachineBasicBlock *Succ : MBB->successors()) {
3807 if (Succ->isEHPad() || (Succ == TBB && FallthroughBB))
3808 continue;
3809 // Return a nullptr if we found more than one fallthrough successor.
3810 if (FallthroughBB && FallthroughBB != TBB)
3811 return nullptr;
3812 FallthroughBB = Succ;
3813 }
3814 return FallthroughBB;
3815}
3816
3817bool X86InstrInfo::analyzeBranchImpl(
3818 MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB,
3819 SmallVectorImpl<MachineOperand> &Cond,
3820 SmallVectorImpl<MachineInstr *> &CondBranches, bool AllowModify) const {
3821
3822 // Start from the bottom of the block and work up, examining the
3823 // terminator instructions.
3824 MachineBasicBlock::iterator I = MBB.end();
3825 MachineBasicBlock::iterator UnCondBrIter = MBB.end();
3826 while (I != MBB.begin()) {
3827 --I;
3828 if (I->isDebugInstr())
3829 continue;
3830
3831 // Working from the bottom, when we see a non-terminator instruction, we're
3832 // done.
3833 if (!isUnpredicatedTerminator(*I))
3834 break;
3835
3836 // A terminator that isn't a branch can't easily be handled by this
3837 // analysis.
3838 if (!I->isBranch())
3839 return true;
3840
3841 // Handle unconditional branches.
3842 if (I->getOpcode() == X86::JMP_1) {
3843 UnCondBrIter = I;
3844
3845 if (!AllowModify) {
3846 TBB = I->getOperand(0).getMBB();
3847 continue;
3848 }
3849
3850 // If the block has any instructions after a JMP, delete them.
3851 MBB.erase(std::next(I), MBB.end());
3852
3853 Cond.clear();
3854 FBB = nullptr;
3855
3856 // Delete the JMP if it's equivalent to a fall-through.
3857 if (MBB.isLayoutSuccessor(I->getOperand(0).getMBB())) {
3858 TBB = nullptr;
3859 I->eraseFromParent();
3860 I = MBB.end();
3861 UnCondBrIter = MBB.end();
3862 continue;
3863 }
3864
3865 // TBB is used to indicate the unconditional destination.
3866 TBB = I->getOperand(0).getMBB();
3867 continue;
3868 }
3869
3870 // Handle conditional branches.
3871 X86::CondCode BranchCode = X86::getCondFromBranch(*I);
3872 if (BranchCode == X86::COND_INVALID)
3873 return true; // Can't handle indirect branch.
3874
3875 // In practice we should never have an undef EFLAGS operand; if we do,
3876 // abort here as we are not prepared to preserve the flag.
3877 if (I->findRegisterUseOperand(X86::EFLAGS, /*TRI=*/nullptr)->isUndef())
3878 return true;
3879
3880 // Working from the bottom, handle the first conditional branch.
3881 if (Cond.empty()) {
3882 FBB = TBB;
3883 TBB = I->getOperand(0).getMBB();
3884 Cond.push_back(MachineOperand::CreateImm(BranchCode));
3885 CondBranches.push_back(&*I);
3886 continue;
3887 }
3888
3889 // Handle subsequent conditional branches. Only handle the case where all
3890 // conditional branches branch to the same destination and their condition
3891 // opcodes fit one of the special multi-branch idioms.
3892 assert(Cond.size() == 1);
3893 assert(TBB);
3894
3895 // If the conditions are the same, we can leave them alone.
3896 X86::CondCode OldBranchCode = (X86::CondCode)Cond[0].getImm();
3897 auto NewTBB = I->getOperand(0).getMBB();
3898 if (OldBranchCode == BranchCode && TBB == NewTBB)
3899 continue;
3900
3901 // If they differ, see if they fit one of the known patterns. Theoretically,
3902 // we could handle more patterns here, but we shouldn't expect to see them
3903 // if instruction selection has done a reasonable job.
3904 if (TBB == NewTBB &&
3905 ((OldBranchCode == X86::COND_P && BranchCode == X86::COND_NE) ||
3906 (OldBranchCode == X86::COND_NE && BranchCode == X86::COND_P))) {
3907 BranchCode = X86::COND_NE_OR_P;
3908 } else if ((OldBranchCode == X86::COND_NP && BranchCode == X86::COND_NE) ||
3909 (OldBranchCode == X86::COND_E && BranchCode == X86::COND_P)) {
3910 if (NewTBB != (FBB ? FBB : getFallThroughMBB(&MBB, TBB)))
3911 return true;
3912
3913 // X86::COND_E_AND_NP usually has two different branch destinations.
3914 //
3915 // JP B1
3916 // JE B2
3917 // JMP B1
3918 // B1:
3919 // B2:
3920 //
3921 // Here this condition branches to B2 only if NP && E. It has another
3922 // equivalent form:
3923 //
3924 // JNE B1
3925 // JNP B2
3926 // JMP B1
3927 // B1:
3928 // B2:
3929 //
3930 // Similarly it branches to B2 only if E && NP. That is why this condition
3931 // is named COND_E_AND_NP.
3932 BranchCode = X86::COND_E_AND_NP;
3933 } else
3934 return true;
3935
3936 // Update the MachineOperand.
3937 Cond[0].setImm(BranchCode);
3938 CondBranches.push_back(&*I);
3939 }
3940
3941 return false;
3942}
3943
3944bool X86InstrInfo::analyzeBranch(MachineBasicBlock &MBB,
3945 MachineBasicBlock *&TBB,
3946 MachineBasicBlock *&FBB,
3947 SmallVectorImpl<MachineOperand> &Cond,
3948 bool AllowModify) const {
3949 SmallVector<MachineInstr *, 4> CondBranches;
3950 return analyzeBranchImpl(MBB, TBB, FBB, Cond, CondBranches, AllowModify);
3951}
3952
3953static int getJumpTableIndexFromAddr(const MachineInstr &MI) {
3954 const MCInstrDesc &Desc = MI.getDesc();
3955 int MemRefBegin = X86II::getMemoryOperandNo(Desc.TSFlags);
3956 assert(MemRefBegin >= 0 && "instr should have memory operand");
3957 MemRefBegin += X86II::getOperandBias(Desc);
3958
3959 const MachineOperand &MO = MI.getOperand(MemRefBegin + X86::AddrDisp);
3960 if (!MO.isJTI())
3961 return -1;
3962
3963 return MO.getIndex();
3964}
3965
3966static int getJumpTableIndexFromReg(const MachineRegisterInfo &MRI,
3967 Register Reg) {
3968 if (!Reg.isVirtual())
3969 return -1;
3970 MachineInstr *MI = MRI.getUniqueVRegDef(Reg);
3971 if (MI == nullptr)
3972 return -1;
3973 unsigned Opcode = MI->getOpcode();
3974 if (Opcode != X86::LEA64r && Opcode != X86::LEA32r)
3975 return -1;
3976 return getJumpTableIndexFromAddr(*MI);
3977}
3978
3979int X86InstrInfo::getJumpTableIndex(const MachineInstr &MI) const {
3980 unsigned Opcode = MI.getOpcode();
3981 // Switch-jump pattern for non-PIC code looks like:
3982 // JMP64m $noreg, 8, %X, %jump-table.X, $noreg
3983 if (Opcode == X86::JMP64m || Opcode == X86::JMP32m) {
3984 return getJumpTableIndexFromAddr(MI);
3985 }
3986 // The pattern for PIC code looks like:
3987 // %0 = LEA64r $rip, 1, $noreg, %jump-table.X
3988 // %1 = MOVSX64rm32 %0, 4, XX, 0, $noreg
3989 // %2 = ADD64rr %1, %0
3990 // JMP64r %2
3991 if (Opcode == X86::JMP64r || Opcode == X86::JMP32r) {
3992 Register Reg = MI.getOperand(0).getReg();
3993 if (!Reg.isVirtual())
3994 return -1;
3995 const MachineFunction &MF = *MI.getParent()->getParent();
3996 const MachineRegisterInfo &MRI = MF.getRegInfo();
3997 MachineInstr *Add = MRI.getUniqueVRegDef(Reg);
3998 if (Add == nullptr)
3999 return -1;
4000 if (Add->getOpcode() != X86::ADD64rr && Add->getOpcode() != X86::ADD32rr)
4001 return -1;
4002 int JTI1 = getJumpTableIndexFromReg(MRI, Add->getOperand(1).getReg());
4003 if (JTI1 >= 0)
4004 return JTI1;
4005 int JTI2 = getJumpTableIndexFromReg(MRI, Add->getOperand(2).getReg());
4006 if (JTI2 >= 0)
4007 return JTI2;
4008 }
4009 return -1;
4010}
4011
4012bool X86InstrInfo::analyzeBranchPredicate(MachineBasicBlock &MBB,
4013 MachineBranchPredicate &MBP,
4014 bool AllowModify) const {
4015 using namespace std::placeholders;
4016
4017 SmallVector<MachineOperand, 4> Cond;
4018 SmallVector<MachineInstr *, 4> CondBranches;
4019 if (analyzeBranchImpl(MBB, MBP.TrueDest, MBP.FalseDest, Cond, CondBranches,
4020 AllowModify))
4021 return true;
4022
4023 if (Cond.size() != 1)
4024 return true;
4025
4026 assert(MBP.TrueDest && "expected!");
4027
4028 if (!MBP.FalseDest)
4029 MBP.FalseDest = MBB.getNextNode();
4030
4031 const TargetRegisterInfo *TRI = &getRegisterInfo();
4032
4033 MachineInstr *ConditionDef = nullptr;
4034 bool SingleUseCondition = true;
4035
4036 for (MachineInstr &MI : reverse(MBB)) {
4037 if (MI.modifiesRegister(X86::EFLAGS, TRI)) {
4038 ConditionDef = &MI;
4039 break;
4040 }
4041
4042 if (MI.readsRegister(X86::EFLAGS, TRI))
4043 SingleUseCondition = false;
4044 }
4045
4046 if (!ConditionDef)
4047 return true;
4048
4049 if (SingleUseCondition) {
4050 for (auto *Succ : MBB.successors())
4051 if (Succ->isLiveIn(X86::EFLAGS))
4052 SingleUseCondition = false;
4053 }
4054
4055 MBP.ConditionDef = ConditionDef;
4056 MBP.SingleUseCondition = SingleUseCondition;
4057
4058 // Currently we only recognize the simple pattern:
4059 //
4060 // test %reg, %reg
4061 // je %label
4062 //
4063 const unsigned TestOpcode =
4064 Subtarget.is64Bit() ? X86::TEST64rr : X86::TEST32rr;
4065
4066 if (ConditionDef->getOpcode() == TestOpcode &&
4067 ConditionDef->getNumOperands() == 3 &&
4068 ConditionDef->getOperand(0).isIdenticalTo(ConditionDef->getOperand(1)) &&
4069 (Cond[0].getImm() == X86::COND_NE || Cond[0].getImm() == X86::COND_E)) {
4070 MBP.LHS = ConditionDef->getOperand(0);
4071 MBP.RHS = MachineOperand::CreateImm(0);
4072 MBP.Predicate = Cond[0].getImm() == X86::COND_NE
4073 ? MachineBranchPredicate::PRED_NE
4074 : MachineBranchPredicate::PRED_EQ;
4075 return false;
4076 }
4077
4078 return true;
4079}
4080
4081unsigned X86InstrInfo::removeBranch(MachineBasicBlock &MBB,
4082 int *BytesRemoved) const {
4083 assert(!BytesRemoved && "code size not handled");
4084
4085 MachineBasicBlock::iterator I = MBB.end();
4086 unsigned Count = 0;
4087
4088 while (I != MBB.begin()) {
4089 --I;
4090 if (I->isDebugInstr())
4091 continue;
4092 if (I->getOpcode() != X86::JMP_1 &&
4093 X86::getCondFromBranch(*I) == X86::COND_INVALID)
4094 break;
4095 // Remove the branch.
4096 I->eraseFromParent();
4097 I = MBB.end();
4098 ++Count;
4099 }
4100
4101 return Count;
4102}
4103
4104unsigned X86InstrInfo::insertBranch(MachineBasicBlock &MBB,
4105 MachineBasicBlock *TBB,
4106 MachineBasicBlock *FBB,
4107 ArrayRef<MachineOperand> Cond,
4108 const DebugLoc &DL, int *BytesAdded) const {
4109 // Shouldn't be a fall through.
4110 assert(TBB && "insertBranch must not be told to insert a fallthrough");
4111 assert((Cond.size() == 1 || Cond.size() == 0) &&
4112 "X86 branch conditions have one component!");
4113 assert(!BytesAdded && "code size not handled");
4114
4115 if (Cond.empty()) {
4116 // Unconditional branch?
4117 assert(!FBB && "Unconditional branch with multiple successors!");
4118 BuildMI(&MBB, DL, get(X86::JMP_1)).addMBB(TBB);
4119 return 1;
4120 }
4121
4122 // If FBB is null, it is implied to be a fall-through block.
4123 bool FallThru = FBB == nullptr;
4124
4125 // Conditional branch.
4126 unsigned Count = 0;
4127 X86::CondCode CC = (X86::CondCode)Cond[0].getImm();
4128 switch (CC) {
4129 case X86::COND_NE_OR_P:
4130 // Synthesize NE_OR_P with two branches.
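 // i.e. emit "JNE TBB; JP TBB": either condition being true reaches TBB.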
4131 BuildMI(&MBB, DL, get(X86::JCC_1)).addMBB(TBB).addImm(X86::COND_NE);
4132 ++Count;
4133 BuildMI(&MBB, DL, get(X86::JCC_1)).addMBB(TBB).addImm(X86::COND_P);
4134 ++Count;
4135 break;
4136 case X86::COND_E_AND_NP:
4137 // Use the next block of MBB as FBB if it is null.
4138 if (FBB == nullptr) {
4139 FBB = getFallThroughMBB(&MBB, TBB);
4140 assert(FBB && "MBB cannot be the last block in function when the false "
4141 "body is a fall-through.");
4142 }
4143 // Synthesize COND_E_AND_NP with two branches.
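 // i.e. emit "JNE FBB; JNP TBB": TBB is reached only when ZF is set and
 // PF is clear.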
4144 BuildMI(&MBB, DL, get(X86::JCC_1)).addMBB(FBB).addImm(X86::COND_NE);
4145 ++Count;
4146 BuildMI(&MBB, DL, get(X86::JCC_1)).addMBB(TBB).addImm(X86::COND_NP);
4147 ++Count;
4148 break;
4149 default: {
4150 BuildMI(&MBB, DL, get(X86::JCC_1)).addMBB(TBB).addImm(CC);
4151 ++Count;
4152 }
4153 }
4154 if (!FallThru) {
4155 // Two-way Conditional branch. Insert the second branch.
4156 BuildMI(&MBB, DL, get(X86::JMP_1)).addMBB(FBB);
4157 ++Count;
4158 }
4159 return Count;
4160}
4161
4162bool X86InstrInfo::canInsertSelect(const MachineBasicBlock &MBB,
4163 ArrayRef<MachineOperand> Cond,
4164 Register DstReg, Register TrueReg,
4165 Register FalseReg, int &CondCycles,
4166 int &TrueCycles, int &FalseCycles) const {
4167 // Not all subtargets have cmov instructions.
4168 if (!Subtarget.canUseCMOV())
4169 return false;
4170 if (Cond.size() != 1)
4171 return false;
4172 // We cannot do the composite conditions, at least not in SSA form.
4173 if (Cond[0].getImm() > X86::LAST_VALID_COND)
4174 return false;
4175
4176 // Check register classes.
4177 const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
4178 const TargetRegisterClass *RC =
4179 RI.getCommonSubClass(MRI.getRegClass(TrueReg), MRI.getRegClass(FalseReg));
4180 if (!RC)
4181 return false;
4182
4183 // We have cmov instructions for 16, 32, and 64 bit general purpose registers.
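 // (There is no 8-bit CMOV, so 8-bit selects fall through and are rejected.)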
4184 if (X86::GR16RegClass.hasSubClassEq(RC) ||
4185 X86::GR32RegClass.hasSubClassEq(RC) ||
4186 X86::GR64RegClass.hasSubClassEq(RC)) {
4187 // This latency applies to Pentium M, Merom, Wolfdale, Nehalem, and Sandy
4188 // Bridge. Probably Ivy Bridge as well.
4189 CondCycles = 2;
4190 TrueCycles = 2;
4191 FalseCycles = 2;
4192 return true;
4193 }
4194
4195 // Can't do vectors.
4196 return false;
4197}
4198
4199void X86InstrInfo::insertSelect(MachineBasicBlock &MBB,
4200 MachineBasicBlock::iterator I,
4201 const DebugLoc &DL, Register DstReg,
4202 ArrayRef<MachineOperand> Cond, Register TrueReg,
4203 Register FalseReg) const {
4204 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
4205 const TargetRegisterInfo &TRI = *MRI.getTargetRegisterInfo();
4206 const TargetRegisterClass &RC = *MRI.getRegClass(DstReg);
4207 assert(Cond.size() == 1 && "Invalid Cond array");
4208 unsigned Opc =
4209 X86::getCMovOpcode(TRI.getRegSizeInBits(RC) / 8,
4210 false /*HasMemoryOperand*/, Subtarget.hasNDD());
4211 BuildMI(MBB, I, DL, get(Opc), DstReg)
4212 .addReg(FalseReg)
4213 .addReg(TrueReg)
4214 .addImm(Cond[0].getImm());
4215}
4216
4217/// Test if the given register is a physical h register.
4218static bool isHReg(Register Reg) {
4219 return X86::GR8_ABCD_HRegClass.contains(Reg);
4220}
4221
4222// Try and copy between VR128/VR64 and GR64 registers.
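// e.g. "movq %xmm0, %rax" is (V)MOVPQIto64rr and "movq %rax, %xmm0" is
// (V)MOV64toPQIrr.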
4223static unsigned CopyToFromAsymmetricReg(Register DestReg, Register SrcReg,
4224 const X86Subtarget &Subtarget) {
4225 bool HasAVX = Subtarget.hasAVX();
4226 bool HasAVX512 = Subtarget.hasAVX512();
4227 bool HasEGPR = Subtarget.hasEGPR();
4228
4229 // SrcReg(MaskReg) -> DestReg(GR64)
4230 // SrcReg(MaskReg) -> DestReg(GR32)
4231
4232 // All KMASK register classes hold the same k registers, so membership can be
4233 // tested against any one of them.
4234 if (X86::VK16RegClass.contains(SrcReg)) {
4235 if (X86::GR64RegClass.contains(DestReg)) {
4236 assert(Subtarget.hasBWI());
4237 return HasEGPR ? X86::KMOVQrk_EVEX : X86::KMOVQrk;
4238 }
4239 if (X86::GR32RegClass.contains(DestReg))
4240 return Subtarget.hasBWI() ? (HasEGPR ? X86::KMOVDrk_EVEX : X86::KMOVDrk)
4241 : (HasEGPR ? X86::KMOVWrk_EVEX : X86::KMOVWrk);
4242 }
4243
4244 // SrcReg(GR64) -> DestReg(MaskReg)
4245 // SrcReg(GR32) -> DestReg(MaskReg)
4246
4247 // All KMASK register classes hold the same k registers, so membership can be
4248 // tested against any one of them.
4249 if (X86::VK16RegClass.contains(DestReg)) {
4250 if (X86::GR64RegClass.contains(SrcReg)) {
4251 assert(Subtarget.hasBWI());
4252 return HasEGPR ? X86::KMOVQkr_EVEX : X86::KMOVQkr;
4253 }
4254 if (X86::GR32RegClass.contains(SrcReg))
4255 return Subtarget.hasBWI() ? (HasEGPR ? X86::KMOVDkr_EVEX : X86::KMOVDkr)
4256 : (HasEGPR ? X86::KMOVWkr_EVEX : X86::KMOVWkr);
4257 }
4258
4259 // SrcReg(VR128) -> DestReg(GR64)
4260 // SrcReg(VR64) -> DestReg(GR64)
4261 // SrcReg(GR64) -> DestReg(VR128)
4262 // SrcReg(GR64) -> DestReg(VR64)
4263
4264 if (X86::GR64RegClass.contains(DestReg)) {
4265 if (X86::VR128XRegClass.contains(SrcReg))
4266 // Copy from a VR128 register to a GR64 register.
4267 return HasAVX512 ? X86::VMOVPQIto64Zrr
4268 : HasAVX ? X86::VMOVPQIto64rr
4269 : X86::MOVPQIto64rr;
4270 if (X86::VR64RegClass.contains(SrcReg))
4271 // Copy from a VR64 register to a GR64 register.
4272 return X86::MMX_MOVD64from64rr;
4273 } else if (X86::GR64RegClass.contains(SrcReg)) {
4274 // Copy from a GR64 register to a VR128 register.
4275 if (X86::VR128XRegClass.contains(DestReg))
4276 return HasAVX512 ? X86::VMOV64toPQIZrr
4277 : HasAVX ? X86::VMOV64toPQIrr
4278 : X86::MOV64toPQIrr;
4279 // Copy from a GR64 register to a VR64 register.
4280 if (X86::VR64RegClass.contains(DestReg))
4281 return X86::MMX_MOVD64to64rr;
4282 }
4283
4284 // SrcReg(VR128) -> DestReg(GR32)
4285 // SrcReg(GR32) -> DestReg(VR128)
4286
4287 if (X86::GR32RegClass.contains(DestReg) &&
4288 X86::VR128XRegClass.contains(SrcReg))
4289 // Copy from a VR128 register to a GR32 register.
4290 return HasAVX512 ? X86::VMOVPDI2DIZrr
4291 : HasAVX ? X86::VMOVPDI2DIrr
4292 : X86::MOVPDI2DIrr;
4293
4294 if (X86::VR128XRegClass.contains(DestReg) &&
4295 X86::GR32RegClass.contains(SrcReg))
4296 // Copy from a GR32 register to a VR128 register.
4297 return HasAVX512 ? X86::VMOVDI2PDIZrr
4298 : HasAVX ? X86::VMOVDI2PDIrr
4299 : X86::MOVDI2PDIrr;
4300 return 0;
4301}
4302
4303void X86InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
4304 MachineBasicBlock::iterator MI,
4305 const DebugLoc &DL, Register DestReg,
4306 Register SrcReg, bool KillSrc,
4307 bool RenamableDest, bool RenamableSrc) const {
4308 // First deal with the normal symmetric copies.
4309 bool HasAVX = Subtarget.hasAVX();
4310 bool HasVLX = Subtarget.hasVLX();
4311 bool HasEGPR = Subtarget.hasEGPR();
4312 unsigned Opc = 0;
4313 if (X86::GR64RegClass.contains(DestReg, SrcReg))
4314 Opc = X86::MOV64rr;
4315 else if (X86::GR32RegClass.contains(DestReg, SrcReg))
4316 Opc = X86::MOV32rr;
4317 else if (X86::GR16RegClass.contains(DestReg, SrcReg))
4318 Opc = X86::MOV16rr;
4319 else if (X86::GR8RegClass.contains(DestReg, SrcReg)) {
4320 // Copying to or from a physical H register on x86-64 requires a NOREX
4321 // move. Otherwise use a normal move.
4322 if ((isHReg(DestReg) || isHReg(SrcReg)) && Subtarget.is64Bit()) {
4323 Opc = X86::MOV8rr_NOREX;
4324 // Both operands must be encodable without a REX prefix.
4325 assert(X86::GR8_NOREXRegClass.contains(SrcReg, DestReg) &&
4326 "8-bit H register can not be copied outside GR8_NOREX");
4327 } else
4328 Opc = X86::MOV8rr;
4329 } else if (X86::VR64RegClass.contains(DestReg, SrcReg))
4330 Opc = X86::MMX_MOVQ64rr;
4331 else if (X86::VR128XRegClass.contains(DestReg, SrcReg)) {
4332 if (HasVLX)
4333 Opc = X86::VMOVAPSZ128rr;
4334 else if (X86::VR128RegClass.contains(DestReg, SrcReg))
4335 Opc = HasAVX ? X86::VMOVAPSrr : X86::MOVAPSrr;
4336 else {
4337 // If this is an extended register and we don't have VLX, we need to use a
4338 // 512-bit move.
4339 Opc = X86::VMOVAPSZrr;
4340 const TargetRegisterInfo *TRI = &getRegisterInfo();
4341 DestReg =
4342 TRI->getMatchingSuperReg(DestReg, X86::sub_xmm, &X86::VR512RegClass);
4343 SrcReg =
4344 TRI->getMatchingSuperReg(SrcReg, X86::sub_xmm, &X86::VR512RegClass);
4345 }
4346 } else if (X86::VR256XRegClass.contains(DestReg, SrcReg)) {
4347 if (HasVLX)
4348 Opc = X86::VMOVAPSZ256rr;
4349 else if (X86::VR256RegClass.contains(DestReg, SrcReg))
4350 Opc = X86::VMOVAPSYrr;
4351 else {
4352 // If this is an extended register and we don't have VLX, we need to use a
4353 // 512-bit move.
4354 Opc = X86::VMOVAPSZrr;
4355 const TargetRegisterInfo *TRI = &getRegisterInfo();
4356 DestReg =
4357 TRI->getMatchingSuperReg(DestReg, X86::sub_ymm, &X86::VR512RegClass);
4358 SrcReg =
4359 TRI->getMatchingSuperReg(SrcReg, X86::sub_ymm, &X86::VR512RegClass);
4360 }
4361 } else if (X86::VR512RegClass.contains(DestReg, SrcReg))
4362 Opc = X86::VMOVAPSZrr;
4363 // All KMASK register classes hold the same k registers, so membership can be
4364 // tested against any one of them.
4365 else if (X86::VK16RegClass.contains(DestReg, SrcReg))
4366 Opc = Subtarget.hasBWI() ? (HasEGPR ? X86::KMOVQkk_EVEX : X86::KMOVQkk)
4367 : (HasEGPR ? X86::KMOVWkk_EVEX : X86::KMOVWkk);
4368 if (!Opc)
4369 Opc = CopyToFromAsymmetricReg(DestReg, SrcReg, Subtarget);
4370
4371 if (Opc) {
4372 BuildMI(MBB, MI, DL, get(Opc), DestReg)
4373 .addReg(SrcReg, getKillRegState(KillSrc));
4374 return;
4375 }
4376
4377 if (SrcReg == X86::EFLAGS || DestReg == X86::EFLAGS) {
4378 // FIXME: We use a fatal error here because historically LLVM has tried
4379 // lower some of these physreg copies and we want to ensure we get
4380 // reasonable bug reports if someone encounters a case no other testing
4381 // found. This path should be removed after the LLVM 7 release.
4382 report_fatal_error("Unable to copy EFLAGS physical register!");
4383 }
4384
4385 LLVM_DEBUG(dbgs() << "Cannot copy " << RI.getName(SrcReg) << " to "
4386 << RI.getName(DestReg) << '\n');
4387 report_fatal_error("Cannot emit physreg copy instruction");
4388}
4389
4390std::optional<DestSourcePair>
4391X86InstrInfo::isCopyInstrImpl(const MachineInstr &MI) const {
4392 if (MI.isMoveReg()) {
4393 // FIXME: Dirty hack for apparent invariant that doesn't hold when
4394 // subreg_to_reg is coalesced with ordinary copies, such that the bits that
4395 // were asserted as 0 are now undef.
4396 if (MI.getOperand(0).isUndef() && MI.getOperand(0).getSubReg())
4397 return std::nullopt;
4398
4399 return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
4400 }
4401 return std::nullopt;
4402}
4403
4404static unsigned getLoadStoreOpcodeForFP16(bool Load, const X86Subtarget &STI) {
4405 if (STI.hasFP16())
4406 return Load ? X86::VMOVSHZrm_alt : X86::VMOVSHZmr;
4407 if (Load)
4408 return X86::MOVSHPrm;
4409 return X86::MOVSHPmr;
4410}
4411
4412static unsigned getLoadStoreRegOpcode(Register Reg,
4413 const TargetRegisterClass *RC,
4414 bool IsStackAligned,
4415 const X86Subtarget &STI, bool Load) {
4416 bool HasAVX = STI.hasAVX();
4417 bool HasAVX512 = STI.hasAVX512();
4418 bool HasVLX = STI.hasVLX();
4419 bool HasEGPR = STI.hasEGPR();
4420
4421 assert(RC != nullptr && "Invalid target register class");
4422 switch (STI.getRegisterInfo()->getSpillSize(*RC)) {
4423 default:
4424 llvm_unreachable("Unknown spill size");
4425 case 1:
4426 assert(X86::GR8RegClass.hasSubClassEq(RC) && "Unknown 1-byte regclass");
4427 if (STI.is64Bit())
4428 // Copying to or from a physical H register on x86-64 requires a NOREX
4429 // move. Otherwise use a normal move.
4430 if (isHReg(Reg) || X86::GR8_ABCD_HRegClass.hasSubClassEq(RC))
4431 return Load ? X86::MOV8rm_NOREX : X86::MOV8mr_NOREX;
4432 return Load ? X86::MOV8rm : X86::MOV8mr;
4433 case 2:
4434 if (X86::VK16RegClass.hasSubClassEq(RC))
4435 return Load ? (HasEGPR ? X86::KMOVWkm_EVEX : X86::KMOVWkm)
4436 : (HasEGPR ? X86::KMOVWmk_EVEX : X86::KMOVWmk);
4437 assert(X86::GR16RegClass.hasSubClassEq(RC) && "Unknown 2-byte regclass");
4438 return Load ? X86::MOV16rm : X86::MOV16mr;
4439 case 4:
4440 if (X86::GR32RegClass.hasSubClassEq(RC))
4441 return Load ? X86::MOV32rm : X86::MOV32mr;
4442 if (X86::FR32XRegClass.hasSubClassEq(RC))
4443 return Load ? (HasAVX512 ? X86::VMOVSSZrm_alt
4444 : HasAVX ? X86::VMOVSSrm_alt
4445 : X86::MOVSSrm_alt)
4446 : (HasAVX512 ? X86::VMOVSSZmr
4447 : HasAVX ? X86::VMOVSSmr
4448 : X86::MOVSSmr);
4449 if (X86::RFP32RegClass.hasSubClassEq(RC))
4450 return Load ? X86::LD_Fp32m : X86::ST_Fp32m;
4451 if (X86::VK32RegClass.hasSubClassEq(RC)) {
4452 assert(STI.hasBWI() && "KMOVD requires BWI");
4453 return Load ? (HasEGPR ? X86::KMOVDkm_EVEX : X86::KMOVDkm)
4454 : (HasEGPR ? X86::KMOVDmk_EVEX : X86::KMOVDmk);
4455 }
4456 // All of these mask pair classes have the same spill size, so the same kind
4457 // of kmov instructions can be used with all of them.
4458 if (X86::VK1PAIRRegClass.hasSubClassEq(RC) ||
4459 X86::VK2PAIRRegClass.hasSubClassEq(RC) ||
4460 X86::VK4PAIRRegClass.hasSubClassEq(RC) ||
4461 X86::VK8PAIRRegClass.hasSubClassEq(RC) ||
4462 X86::VK16PAIRRegClass.hasSubClassEq(RC))
4463 return Load ? X86::MASKPAIR16LOAD : X86::MASKPAIR16STORE;
4464 if (X86::FR16RegClass.hasSubClassEq(RC) ||
4465 X86::FR16XRegClass.hasSubClassEq(RC))
4466 return getLoadStoreOpcodeForFP16(Load, STI);
4467 llvm_unreachable("Unknown 4-byte regclass");
4468 case 8:
4469 if (X86::GR64RegClass.hasSubClassEq(RC))
4470 return Load ? X86::MOV64rm : X86::MOV64mr;
4471 if (X86::FR64XRegClass.hasSubClassEq(RC))
4472 return Load ? (HasAVX512 ? X86::VMOVSDZrm_alt
4473 : HasAVX ? X86::VMOVSDrm_alt
4474 : X86::MOVSDrm_alt)
4475 : (HasAVX512 ? X86::VMOVSDZmr
4476 : HasAVX ? X86::VMOVSDmr
4477 : X86::MOVSDmr);
4478 if (X86::VR64RegClass.hasSubClassEq(RC))
4479 return Load ? X86::MMX_MOVQ64rm : X86::MMX_MOVQ64mr;
4480 if (X86::RFP64RegClass.hasSubClassEq(RC))
4481 return Load ? X86::LD_Fp64m : X86::ST_Fp64m;
4482 if (X86::VK64RegClass.hasSubClassEq(RC)) {
4483 assert(STI.hasBWI() && "KMOVQ requires BWI");
4484 return Load ? (HasEGPR ? X86::KMOVQkm_EVEX : X86::KMOVQkm)
4485 : (HasEGPR ? X86::KMOVQmk_EVEX : X86::KMOVQmk);
4486 }
4487 llvm_unreachable("Unknown 8-byte regclass");
4488 case 10:
4489 assert(X86::RFP80RegClass.hasSubClassEq(RC) && "Unknown 10-byte regclass");
4490 return Load ? X86::LD_Fp80m : X86::ST_FpP80m;
4491 case 16: {
4492 if (X86::VR128XRegClass.hasSubClassEq(RC)) {
4493 // If stack is realigned we can use aligned stores.
4494 if (IsStackAligned)
4495 return Load ? (HasVLX ? X86::VMOVAPSZ128rm
4496 : HasAVX512 ? X86::VMOVAPSZ128rm_NOVLX
4497 : HasAVX ? X86::VMOVAPSrm
4498 : X86::MOVAPSrm)
4499 : (HasVLX ? X86::VMOVAPSZ128mr
4500 : HasAVX512 ? X86::VMOVAPSZ128mr_NOVLX
4501 : HasAVX ? X86::VMOVAPSmr
4502 : X86::MOVAPSmr);
4503 else
4504 return Load ? (HasVLX ? X86::VMOVUPSZ128rm
4505 : HasAVX512 ? X86::VMOVUPSZ128rm_NOVLX
4506 : HasAVX ? X86::VMOVUPSrm
4507 : X86::MOVUPSrm)
4508 : (HasVLX ? X86::VMOVUPSZ128mr
4509 : HasAVX512 ? X86::VMOVUPSZ128mr_NOVLX
4510 : HasAVX ? X86::VMOVUPSmr
4511 : X86::MOVUPSmr);
4512 }
4513 llvm_unreachable("Unknown 16-byte regclass");
4514 }
4515 case 32:
4516 assert(X86::VR256XRegClass.hasSubClassEq(RC) && "Unknown 32-byte regclass");
4517 // If stack is realigned we can use aligned stores.
4518 if (IsStackAligned)
4519 return Load ? (HasVLX ? X86::VMOVAPSZ256rm
4520 : HasAVX512 ? X86::VMOVAPSZ256rm_NOVLX
4521 : X86::VMOVAPSYrm)
4522 : (HasVLX ? X86::VMOVAPSZ256mr
4523 : HasAVX512 ? X86::VMOVAPSZ256mr_NOVLX
4524 : X86::VMOVAPSYmr);
4525 else
4526 return Load ? (HasVLX ? X86::VMOVUPSZ256rm
4527 : HasAVX512 ? X86::VMOVUPSZ256rm_NOVLX
4528 : X86::VMOVUPSYrm)
4529 : (HasVLX ? X86::VMOVUPSZ256mr
4530 : HasAVX512 ? X86::VMOVUPSZ256mr_NOVLX
4531 : X86::VMOVUPSYmr);
4532 case 64:
4533 assert(X86::VR512RegClass.hasSubClassEq(RC) && "Unknown 64-byte regclass");
4534 assert(STI.hasAVX512() && "Using 512-bit register requires AVX512");
4535 if (IsStackAligned)
4536 return Load ? X86::VMOVAPSZrm : X86::VMOVAPSZmr;
4537 else
4538 return Load ? X86::VMOVUPSZrm : X86::VMOVUPSZmr;
4539 case 1024:
4540 assert(X86::TILERegClass.hasSubClassEq(RC) && "Unknown 1024-byte regclass");
4541 assert(STI.hasAMXTILE() && "Using 8*1024-bit register requires AMX-TILE");
4542#define GET_EGPR_IF_ENABLED(OPC) (STI.hasEGPR() ? OPC##_EVEX : OPC)
4543 return Load ? GET_EGPR_IF_ENABLED(X86::TILELOADD)
4544 : GET_EGPR_IF_ENABLED(X86::TILESTORED);
4545#undef GET_EGPR_IF_ENABLED
4546 case 2048:
4547 assert(X86::TILEPAIRRegClass.hasSubClassEq(RC) &&
4548 "Unknown 2048-byte regclass");
4549 assert(STI.hasAMXTILE() && "Using 2048-bit register requires AMX-TILE");
4550 return Load ? X86::PTILEPAIRLOAD : X86::PTILEPAIRSTORE;
4551 }
4552}
4553
4554std::optional<ExtAddrMode>
4555X86InstrInfo::getAddrModeFromMemoryOp(const MachineInstr &MemI,
4556 const TargetRegisterInfo *TRI) const {
4557 const MCInstrDesc &Desc = MemI.getDesc();
4558 int MemRefBegin = X86II::getMemoryOperandNo(Desc.TSFlags);
4559 if (MemRefBegin < 0)
4560 return std::nullopt;
4561
4562 MemRefBegin += X86II::getOperandBias(Desc);
4563
4564 auto &BaseOp = MemI.getOperand(MemRefBegin + X86::AddrBaseReg);
4565 if (!BaseOp.isReg()) // Can be an MO_FrameIndex
4566 return std::nullopt;
4567
4568 const MachineOperand &DispMO = MemI.getOperand(MemRefBegin + X86::AddrDisp);
4569 // Displacement can be symbolic
4570 if (!DispMO.isImm())
4571 return std::nullopt;
4572
4573 ExtAddrMode AM;
4574 AM.BaseReg = BaseOp.getReg();
4575 AM.ScaledReg = MemI.getOperand(MemRefBegin + X86::AddrIndexReg).getReg();
4576 AM.Scale = MemI.getOperand(MemRefBegin + X86::AddrScaleAmt).getImm();
4577 AM.Displacement = DispMO.getImm();
4578 return AM;
4579}
4580
4581bool X86InstrInfo::verifyInstruction(const MachineInstr &MI,
4582 StringRef &ErrInfo) const {
4583 std::optional<ExtAddrMode> AMOrNone = getAddrModeFromMemoryOp(MI, nullptr);
4584 if (!AMOrNone)
4585 return true;
4586
4587 ExtAddrMode AM = *AMOrNone;
4589 if (AM.ScaledReg != X86::NoRegister) {
4590 switch (AM.Scale) {
4591 case 1:
4592 case 2:
4593 case 4:
4594 case 8:
4595 break;
4596 default:
4597 ErrInfo = "Scale factor in address must be 1, 2, 4 or 8";
4598 return false;
4599 }
4600 }
4601 if (!isInt<32>(AM.Displacement)) {
4602 ErrInfo = "Displacement in address must fit into 32-bit signed "
4603 "integer";
4604 return false;
4605 }
4606
4607 return true;
4608}
4609
4610bool X86InstrInfo::getConstValDefinedInReg(const MachineInstr &MI,
4611 const Register Reg,
4612 int64_t &ImmVal) const {
4613 Register MovReg = Reg;
4614 const MachineInstr *MovMI = &MI;
4615
4616 // Follow use-def for SUBREG_TO_REG to find the real move immediate
4617 // instruction. It is quite common for x86-64.
4618 if (MI.isSubregToReg()) {
4619 // We use the following pattern to set up a 64-bit immediate.
4620 // %8:gr32 = MOV32r0 implicit-def dead $eflags
4621 // %6:gr64 = SUBREG_TO_REG 0, killed %8:gr32, %subreg.sub_32bit
4622 if (!MI.getOperand(1).isImm())
4623 return false;
4624 unsigned FillBits = MI.getOperand(1).getImm();
4625 unsigned SubIdx = MI.getOperand(3).getImm();
4626 MovReg = MI.getOperand(2).getReg();
4627 if (SubIdx != X86::sub_32bit || FillBits != 0)
4628 return false;
4629 const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
4630 MovMI = MRI.getUniqueVRegDef(MovReg);
4631 if (!MovMI)
4632 return false;
4633 }
4634
4635 if (MovMI->getOpcode() == X86::MOV32r0 &&
4636 MovMI->getOperand(0).getReg() == MovReg) {
4637 ImmVal = 0;
4638 return true;
4639 }
4640
4641 if (MovMI->getOpcode() != X86::MOV32ri &&
4642 MovMI->getOpcode() != X86::MOV64ri &&
4643 MovMI->getOpcode() != X86::MOV32ri64 && MovMI->getOpcode() != X86::MOV8ri)
4644 return false;
4645 // The MOV source operand can be a global address rather than an immediate.
4646 if (!MovMI->getOperand(1).isImm() || MovMI->getOperand(0).getReg() != MovReg)
4647 return false;
4648 ImmVal = MovMI->getOperand(1).getImm();
4649 return true;
4650}
4651
4652bool X86InstrInfo::preservesZeroValueInReg(
4653 const MachineInstr *MI, const Register NullValueReg,
4654 const TargetRegisterInfo *TRI) const {
4655 if (!MI->modifiesRegister(NullValueReg, TRI))
4656 return true;
4657 switch (MI->getOpcode()) {
4658 // Shifting a null register right/left by an immediate leaves it null, e.g.
4659 // rax = shl rax, X.
4660 case X86::SHR64ri:
4661 case X86::SHR32ri:
4662 case X86::SHL64ri:
4663 case X86::SHL32ri:
4664 assert(MI->getOperand(0).isDef() && MI->getOperand(1).isUse() &&
4665 "expected for shift opcode!");
4666 return MI->getOperand(0).getReg() == NullValueReg &&
4667 MI->getOperand(1).getReg() == NullValueReg;
4668 // Zero extend of a sub-reg of NullValueReg into itself does not change the
4669 // null value.
4670 case X86::MOV32rr:
4671 return llvm::all_of(MI->operands(), [&](const MachineOperand &MO) {
4672 return TRI->isSubRegisterEq(NullValueReg, MO.getReg());
4673 });
4674 default:
4675 return false;
4676 }
4677 llvm_unreachable("Should be handled above!");
4678}
4679
4680bool X86InstrInfo::getMemOperandsWithOffsetWidth(
4681 const MachineInstr &MemOp, SmallVectorImpl<const MachineOperand *> &BaseOps,
4682 int64_t &Offset, bool &OffsetIsScalable, LocationSize &Width,
4683 const TargetRegisterInfo *TRI) const {
4684 const MCInstrDesc &Desc = MemOp.getDesc();
4685 int MemRefBegin = X86II::getMemoryOperandNo(Desc.TSFlags);
4686 if (MemRefBegin < 0)
4687 return false;
4688
4689 MemRefBegin += X86II::getOperandBias(Desc);
4690
4691 const MachineOperand *BaseOp =
4692 &MemOp.getOperand(MemRefBegin + X86::AddrBaseReg);
4693 if (!BaseOp->isReg()) // Can be an MO_FrameIndex
4694 return false;
4695
4696 if (MemOp.getOperand(MemRefBegin + X86::AddrScaleAmt).getImm() != 1)
4697 return false;
4698
4699 if (MemOp.getOperand(MemRefBegin + X86::AddrIndexReg).getReg() !=
4700 X86::NoRegister)
4701 return false;
4702
4703 const MachineOperand &DispMO = MemOp.getOperand(MemRefBegin + X86::AddrDisp);
4704
4705 // Displacement can be symbolic
4706 if (!DispMO.isImm())
4707 return false;
4708
4709 Offset = DispMO.getImm();
4710
4711 if (!BaseOp->isReg())
4712 return false;
4713
4714 OffsetIsScalable = false;
4715 // FIXME: Relying on memoperands() may not be the right thing to do here. Check
4716 // with X86 maintainers, and fix it accordingly. For now, it is ok, since
4717 // there is no use of `Width` for X86 back-end at the moment.
4718 Width = !MemOp.memoperands_empty() ? MemOp.memoperands().front()->getSize()
4719 : LocationSize::beforeOrAfterPointer();
4720 BaseOps.push_back(BaseOp);
4721 return true;
4722}
4723
4724static unsigned getStoreRegOpcode(Register SrcReg,
4725 const TargetRegisterClass *RC,
4726 bool IsStackAligned,
4727 const X86Subtarget &STI) {
4728 return getLoadStoreRegOpcode(SrcReg, RC, IsStackAligned, STI, false);
4729}
4730
4731static unsigned getLoadRegOpcode(Register DestReg,
4732 const TargetRegisterClass *RC,
4733 bool IsStackAligned, const X86Subtarget &STI) {
4734 return getLoadStoreRegOpcode(DestReg, RC, IsStackAligned, STI, true);
4735}
4736
4737static bool isAMXOpcode(unsigned Opc) {
4738 switch (Opc) {
4739 default:
4740 return false;
4741 case X86::TILELOADD:
4742 case X86::TILESTORED:
4743 case X86::TILELOADD_EVEX:
4744 case X86::TILESTORED_EVEX:
4745 case X86::PTILEPAIRLOAD:
4746 case X86::PTILEPAIRSTORE:
4747 return true;
4748 }
4749}
4750
4751void X86InstrInfo::loadStoreTileReg(MachineBasicBlock &MBB,
4752 MachineBasicBlock::iterator MI,
4753 unsigned Opc, Register Reg, int FrameIdx,
4754 bool isKill) const {
4755 switch (Opc) {
4756 default:
4757 llvm_unreachable("Unexpected special opcode!");
4758 case X86::TILESTORED:
4759 case X86::TILESTORED_EVEX:
4760 case X86::PTILEPAIRSTORE: {
4761 // tilestored %tmm, (%sp, %idx)
4762 MachineRegisterInfo &RegInfo = MBB.getParent()->getRegInfo();
4763 Register VirtReg = RegInfo.createVirtualRegister(&X86::GR64_NOSPRegClass);
4764 BuildMI(MBB, MI, DebugLoc(), get(X86::MOV64ri), VirtReg).addImm(64);
4765 MachineInstr *NewMI =
4766 addFrameReference(BuildMI(MBB, MI, DebugLoc(), get(Opc)), FrameIdx)
4767 .addReg(Reg, getKillRegState(isKill));
4769 MO.setReg(VirtReg);
4770 MO.setIsKill(true);
4771 break;
4772 }
4773 case X86::TILELOADD:
4774 case X86::TILELOADD_EVEX:
4775 case X86::PTILEPAIRLOAD: {
4776 // tileloadd (%sp, %idx), %tmm
4777 MachineRegisterInfo &RegInfo = MBB.getParent()->getRegInfo();
4778 Register VirtReg = RegInfo.createVirtualRegister(&X86::GR64_NOSPRegClass);
4779 BuildMI(MBB, MI, DebugLoc(), get(X86::MOV64ri), VirtReg).addImm(64);
4781 BuildMI(MBB, MI, DebugLoc(), get(Opc), Reg), FrameIdx);
4783 MO.setReg(VirtReg);
4784 MO.setIsKill(true);
4785 break;
4786 }
4787 }
4788}
4789
4790void X86InstrInfo::storeRegToStackSlot(
4791 MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register SrcReg,
4792 bool isKill, int FrameIdx, const TargetRegisterClass *RC,
4793 const TargetRegisterInfo *TRI, Register VReg,
4794 MachineInstr::MIFlag Flags) const {
4795 const MachineFunction &MF = *MBB.getParent();
4796 const MachineFrameInfo &MFI = MF.getFrameInfo();
4797 assert(MFI.getObjectSize(FrameIdx) >= TRI->getSpillSize(*RC) &&
4798 "Stack slot too small for store");
4799
4800 unsigned Alignment = std::max<uint32_t>(TRI->getSpillSize(*RC), 16);
4801 bool isAligned =
4802 (Subtarget.getFrameLowering()->getStackAlign() >= Alignment) ||
4803 (RI.canRealignStack(MF) && !MFI.isFixedObjectIndex(FrameIdx));
4804
4805 unsigned Opc = getStoreRegOpcode(SrcReg, RC, isAligned, Subtarget);
4806 if (isAMXOpcode(Opc))
4807 loadStoreTileReg(MBB, MI, Opc, SrcReg, FrameIdx, isKill);
4808 else
4809 addFrameReference(BuildMI(MBB, MI, DebugLoc(), get(Opc)), FrameIdx)
4810 .addReg(SrcReg, getKillRegState(isKill))
4811 .setMIFlag(Flags);
4812}
4813
4814void X86InstrInfo::loadRegFromStackSlot(
4815 MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg,
4816 int FrameIdx, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI,
4817 Register VReg, MachineInstr::MIFlag Flags) const {
4818 const MachineFunction &MF = *MBB.getParent();
4819 const MachineFrameInfo &MFI = MF.getFrameInfo();
4820 assert(MFI.getObjectSize(FrameIdx) >= TRI->getSpillSize(*RC) &&
4821 "Load size exceeds stack slot");
4822 unsigned Alignment = std::max<uint32_t>(TRI->getSpillSize(*RC), 16);
4823 bool isAligned =
4824 (Subtarget.getFrameLowering()->getStackAlign() >= Alignment) ||
4825 (RI.canRealignStack(MF) && !MFI.isFixedObjectIndex(FrameIdx));
4826
4827 unsigned Opc = getLoadRegOpcode(DestReg, RC, isAligned, Subtarget);
4828 if (isAMXOpcode(Opc))
4829 loadStoreTileReg(MBB, MI, Opc, DestReg, FrameIdx);
4830 else
4831 addFrameReference(BuildMI(MBB, MI, DebugLoc(), get(Opc), DestReg), FrameIdx)
4832 .setMIFlag(Flags);
4833}
4834
4835bool X86InstrInfo::analyzeCompare(const MachineInstr &MI, Register &SrcReg,
4836 Register &SrcReg2, int64_t &CmpMask,
4837 int64_t &CmpValue) const {
4838 switch (MI.getOpcode()) {
4839 default:
4840 break;
4841 case X86::CMP64ri32:
4842 case X86::CMP32ri:
4843 case X86::CMP16ri:
4844 case X86::CMP8ri:
4845 SrcReg = MI.getOperand(0).getReg();
4846 SrcReg2 = 0;
4847 if (MI.getOperand(1).isImm()) {
4848 CmpMask = ~0;
4849 CmpValue = MI.getOperand(1).getImm();
4850 } else {
4851 CmpMask = CmpValue = 0;
4852 }
4853 return true;
4854 // A SUB can be used to perform comparison.
4855 CASE_ND(SUB64rm)
4856 CASE_ND(SUB32rm)
4857 CASE_ND(SUB16rm)
4858 CASE_ND(SUB8rm)
4859 SrcReg = MI.getOperand(1).getReg();
4860 SrcReg2 = 0;
4861 CmpMask = 0;
4862 CmpValue = 0;
4863 return true;
4864 CASE_ND(SUB64rr)
4865 CASE_ND(SUB32rr)
4866 CASE_ND(SUB16rr)
4867 CASE_ND(SUB8rr)
4868 SrcReg = MI.getOperand(1).getReg();
4869 SrcReg2 = MI.getOperand(2).getReg();
4870 CmpMask = 0;
4871 CmpValue = 0;
4872 return true;
4873 CASE_ND(SUB64ri32)
4874 CASE_ND(SUB32ri)
4875 CASE_ND(SUB16ri)
4876 CASE_ND(SUB8ri)
4877 SrcReg = MI.getOperand(1).getReg();
4878 SrcReg2 = 0;
4879 if (MI.getOperand(2).isImm()) {
4880 CmpMask = ~0;
4881 CmpValue = MI.getOperand(2).getImm();
4882 } else {
4883 CmpMask = CmpValue = 0;
4884 }
4885 return true;
4886 case X86::CMP64rr:
4887 case X86::CMP32rr:
4888 case X86::CMP16rr:
4889 case X86::CMP8rr:
4890 SrcReg = MI.getOperand(0).getReg();
4891 SrcReg2 = MI.getOperand(1).getReg();
4892 CmpMask = 0;
4893 CmpValue = 0;
4894 return true;
4895 case X86::TEST8rr:
4896 case X86::TEST16rr:
4897 case X86::TEST32rr:
4898 case X86::TEST64rr:
4899 SrcReg = MI.getOperand(0).getReg();
4900 if (MI.getOperand(1).getReg() != SrcReg)
4901 return false;
4902 // Compare against zero.
4903 SrcReg2 = 0;
4904 CmpMask = ~0;
4905 CmpValue = 0;
4906 return true;
4907 case X86::TEST64ri32:
4908 case X86::TEST32ri:
4909 case X86::TEST16ri:
4910 case X86::TEST8ri:
4911 SrcReg = MI.getOperand(0).getReg();
4912 SrcReg2 = 0;
4913 // Force identical compare.
4914 CmpMask = 0;
4915 CmpValue = 0;
4916 return true;
4917 }
4918 return false;
4919}
4920
4921bool X86InstrInfo::isRedundantFlagInstr(const MachineInstr &FlagI,
4922 Register SrcReg, Register SrcReg2,
4923 int64_t ImmMask, int64_t ImmValue,
4924 const MachineInstr &OI, bool *IsSwapped,
4925 int64_t *ImmDelta) const {
4926 switch (OI.getOpcode()) {
4927 case X86::CMP64rr:
4928 case X86::CMP32rr:
4929 case X86::CMP16rr:
4930 case X86::CMP8rr:
4931 CASE_ND(SUB64rr)
4932 CASE_ND(SUB32rr)
4933 CASE_ND(SUB16rr)
4934 CASE_ND(SUB8rr) {
4935 Register OISrcReg;
4936 Register OISrcReg2;
4937 int64_t OIMask;
4938 int64_t OIValue;
4939 if (!analyzeCompare(OI, OISrcReg, OISrcReg2, OIMask, OIValue) ||
4940 OIMask != ImmMask || OIValue != ImmValue)
4941 return false;
4942 if (SrcReg == OISrcReg && SrcReg2 == OISrcReg2) {
4943 *IsSwapped = false;
4944 return true;
4945 }
4946 if (SrcReg == OISrcReg2 && SrcReg2 == OISrcReg) {
4947 *IsSwapped = true;
4948 return true;
4949 }
4950 return false;
4951 }
4952 case X86::CMP64ri32:
4953 case X86::CMP32ri:
4954 case X86::CMP16ri:
4955 case X86::CMP8ri:
4956 case X86::TEST64ri32:
4957 case X86::TEST32ri:
4958 case X86::TEST16ri:
4959 case X86::TEST8ri:
4960 CASE_ND(SUB64ri32)
4961 CASE_ND(SUB32ri)
4962 CASE_ND(SUB16ri)
4963 CASE_ND(SUB8ri)
4964 case X86::TEST64rr:
4965 case X86::TEST32rr:
4966 case X86::TEST16rr:
4967 case X86::TEST8rr: {
4968 if (ImmMask != 0) {
4969 Register OISrcReg;
4970 Register OISrcReg2;
4971 int64_t OIMask;
4972 int64_t OIValue;
4973 if (analyzeCompare(OI, OISrcReg, OISrcReg2, OIMask, OIValue) &&
4974 SrcReg == OISrcReg && ImmMask == OIMask) {
4975 if (OIValue == ImmValue) {
4976 *ImmDelta = 0;
4977 return true;
4978 } else if (static_cast<uint64_t>(ImmValue) ==
4979 static_cast<uint64_t>(OIValue) - 1) {
4980 *ImmDelta = -1;
4981 return true;
4982 } else if (static_cast<uint64_t>(ImmValue) ==
4983 static_cast<uint64_t>(OIValue) + 1) {
4984 *ImmDelta = 1;
4985 return true;
4986 } else {
4987 return false;
4988 }
4989 }
4990 }
4991 return FlagI.isIdenticalTo(OI);
4992 }
4993 default:
4994 return false;
4995 }
4996}
4997
4998/// Check whether the definition can be converted
4999/// to remove a comparison against zero.
5000inline static bool isDefConvertible(const MachineInstr &MI, bool &NoSignFlag,
5001 bool &ClearsOverflowFlag) {
5002 NoSignFlag = false;
5003 ClearsOverflowFlag = false;
5004
5005 // "ELF Handling for Thread-Local Storage" specifies that x86-64 GOTTPOFF and
5006 // i386 GOTNTPOFF/INDNTPOFF relocations can convert an ADD to a LEA during
5007 // Initial Exec to Local Exec relaxation. In these cases, we must not depend
5008 // on the EFLAGS modification of ADD actually happening in the final binary.
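 // For example, the linker may turn "addq x@GOTTPOFF(%rip), %reg" into
 // "leaq x@tpoff(%reg), %reg", and LEA does not set EFLAGS.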
5009 if (MI.getOpcode() == X86::ADD64rm || MI.getOpcode() == X86::ADD32rm) {
5010 unsigned Flags = MI.getOperand(5).getTargetFlags();
5011 if (Flags == X86II::MO_GOTTPOFF || Flags == X86II::MO_INDNTPOFF ||
5012 Flags == X86II::MO_GOTNTPOFF)
5013 return false;
5014 }
5015
5016 switch (MI.getOpcode()) {
5017 default:
5018 return false;
5019
5020 // The shift instructions only modify ZF if their shift count is non-zero.
5021 // N.B.: The processor truncates the shift count depending on the encoding.
5022 CASE_ND(SAR8ri)
5023 CASE_ND(SAR16ri)
5024 CASE_ND(SAR32ri)
5025 CASE_ND(SAR64ri)
5026 CASE_ND(SHR8ri)
5027 CASE_ND(SHR16ri)
5028 CASE_ND(SHR32ri)
5029 CASE_ND(SHR64ri)
5030 return getTruncatedShiftCount(MI, 2) != 0;
5031
5032 // Some left shift instructions can be turned into LEA instructions but only
5033 // if their flags aren't used. Avoid transforming such instructions.
5034 CASE_ND(SHL8ri)
5035 CASE_ND(SHL16ri)
5036 CASE_ND(SHL32ri)
5037 CASE_ND(SHL64ri) {
5038 unsigned ShAmt = getTruncatedShiftCount(MI, 2);
5039 if (isTruncatedShiftCountForLEA(ShAmt))
5040 return false;
5041 return ShAmt != 0;
5042 }
5043
5044 CASE_ND(SHRD16rri8)
5045 CASE_ND(SHRD32rri8)
5046 CASE_ND(SHRD64rri8)
5047 CASE_ND(SHLD16rri8)
5048 CASE_ND(SHLD32rri8)
5049 CASE_ND(SHLD64rri8)
5050 return getTruncatedShiftCount(MI, 3) != 0;
5051
5052 CASE_ND(SUB64ri32)
5053 CASE_ND(SUB32ri)
5054 CASE_ND(SUB16ri)
5055 CASE_ND(SUB8ri)
5056 CASE_ND(SUB64rr)
5057 CASE_ND(SUB32rr)
5058 CASE_ND(SUB16rr)
5059 CASE_ND(SUB8rr)
5060 CASE_ND(SUB64rm)
5061 CASE_ND(SUB32rm)
5062 CASE_ND(SUB16rm)
5063 CASE_ND(SUB8rm)
5064 CASE_ND(DEC64r)
5065 CASE_ND(DEC32r)
5066 CASE_ND(DEC16r)
5067 CASE_ND(DEC8r)
5068 CASE_ND(ADD64ri32)
5069 CASE_ND(ADD32ri)
5070 CASE_ND(ADD16ri)
5071 CASE_ND(ADD8ri)
5072 CASE_ND(ADD64rr)
5073 CASE_ND(ADD32rr)
5074 CASE_ND(ADD16rr)
5075 CASE_ND(ADD8rr)
5076 CASE_ND(ADD64rm)
5077 CASE_ND(ADD32rm)
5078 CASE_ND(ADD16rm)
5079 CASE_ND(ADD8rm)
5080 CASE_ND(INC64r)
5081 CASE_ND(INC32r)
5082 CASE_ND(INC16r)
5083 CASE_ND(INC8r)
5084 CASE_ND(ADC64ri32)
5085 CASE_ND(ADC32ri)
5086 CASE_ND(ADC16ri)
5087 CASE_ND(ADC8ri)
5088 CASE_ND(ADC64rr)
5089 CASE_ND(ADC32rr)
5090 CASE_ND(ADC16rr)
5091 CASE_ND(ADC8rr)
5092 CASE_ND(ADC64rm)
5093 CASE_ND(ADC32rm)
5094 CASE_ND(ADC16rm)
5095 CASE_ND(ADC8rm)
5096 CASE_ND(SBB64ri32)
5097 CASE_ND(SBB32ri)
5098 CASE_ND(SBB16ri)
5099 CASE_ND(SBB8ri)
5100 CASE_ND(SBB64rr)
5101 CASE_ND(SBB32rr)
5102 CASE_ND(SBB16rr)
5103 CASE_ND(SBB8rr)
5104 CASE_ND(SBB64rm)
5105 CASE_ND(SBB32rm)
5106 CASE_ND(SBB16rm)
5107 CASE_ND(SBB8rm)
5108 CASE_ND(NEG8r)
5109 CASE_ND(NEG16r)
5110 CASE_ND(NEG32r)
5111 CASE_ND(NEG64r)
5112 case X86::LZCNT16rr:
5113 case X86::LZCNT16rm:
5114 case X86::LZCNT32rr:
5115 case X86::LZCNT32rm:
5116 case X86::LZCNT64rr:
5117 case X86::LZCNT64rm:
5118 case X86::POPCNT16rr:
5119 case X86::POPCNT16rm:
5120 case X86::POPCNT32rr:
5121 case X86::POPCNT32rm:
5122 case X86::POPCNT64rr:
5123 case X86::POPCNT64rm:
5124 case X86::TZCNT16rr:
5125 case X86::TZCNT16rm:
5126 case X86::TZCNT32rr:
5127 case X86::TZCNT32rm:
5128 case X86::TZCNT64rr:
5129 case X86::TZCNT64rm:
5130 return true;
5131 CASE_ND(AND64ri32)
5132 CASE_ND(AND32ri)
5133 CASE_ND(AND16ri)
5134 CASE_ND(AND8ri)
5135 CASE_ND(AND64rr)
5136 CASE_ND(AND32rr)
5137 CASE_ND(AND16rr)
5138 CASE_ND(AND8rr)
5139 CASE_ND(AND64rm)
5140 CASE_ND(AND32rm)
5141 CASE_ND(AND16rm)
5142 CASE_ND(AND8rm)
5143 CASE_ND(XOR64ri32)
5144 CASE_ND(XOR32ri)
5145 CASE_ND(XOR16ri)
5146 CASE_ND(XOR8ri)
5147 CASE_ND(XOR64rr)
5148 CASE_ND(XOR32rr)
5149 CASE_ND(XOR16rr)
5150 CASE_ND(XOR8rr)
5151 CASE_ND(XOR64rm)
5152 CASE_ND(XOR32rm)
5153 CASE_ND(XOR16rm)
5154 CASE_ND(XOR8rm)
5155 CASE_ND(OR64ri32)
5156 CASE_ND(OR32ri)
5157 CASE_ND(OR16ri)
5158 CASE_ND(OR8ri)
5159 CASE_ND(OR64rr)
5160 CASE_ND(OR32rr)
5161 CASE_ND(OR16rr)
5162 CASE_ND(OR8rr)
5163 CASE_ND(OR64rm)
5164 CASE_ND(OR32rm)
5165 CASE_ND(OR16rm)
5166 CASE_ND(OR8rm)
5167 case X86::ANDN32rr:
5168 case X86::ANDN32rm:
5169 case X86::ANDN64rr:
5170 case X86::ANDN64rm:
5171 case X86::BLSI32rr:
5172 case X86::BLSI32rm:
5173 case X86::BLSI64rr:
5174 case X86::BLSI64rm:
5175 case X86::BLSMSK32rr:
5176 case X86::BLSMSK32rm:
5177 case X86::BLSMSK64rr:
5178 case X86::BLSMSK64rm:
5179 case X86::BLSR32rr:
5180 case X86::BLSR32rm:
5181 case X86::BLSR64rr:
5182 case X86::BLSR64rm:
5183 case X86::BLCFILL32rr:
5184 case X86::BLCFILL32rm:
5185 case X86::BLCFILL64rr:
5186 case X86::BLCFILL64rm:
5187 case X86::BLCI32rr:
5188 case X86::BLCI32rm:
5189 case X86::BLCI64rr:
5190 case X86::BLCI64rm:
5191 case X86::BLCIC32rr:
5192 case X86::BLCIC32rm:
5193 case X86::BLCIC64rr:
5194 case X86::BLCIC64rm:
5195 case X86::BLCMSK32rr:
5196 case X86::BLCMSK32rm:
5197 case X86::BLCMSK64rr:
5198 case X86::BLCMSK64rm:
5199 case X86::BLCS32rr:
5200 case X86::BLCS32rm:
5201 case X86::BLCS64rr:
5202 case X86::BLCS64rm:
5203 case X86::BLSFILL32rr:
5204 case X86::BLSFILL32rm:
5205 case X86::BLSFILL64rr:
5206 case X86::BLSFILL64rm:
5207 case X86::BLSIC32rr:
5208 case X86::BLSIC32rm:
5209 case X86::BLSIC64rr:
5210 case X86::BLSIC64rm:
5211 case X86::BZHI32rr:
5212 case X86::BZHI32rm:
5213 case X86::BZHI64rr:
5214 case X86::BZHI64rm:
5215 case X86::T1MSKC32rr:
5216 case X86::T1MSKC32rm:
5217 case X86::T1MSKC64rr:
5218 case X86::T1MSKC64rm:
5219 case X86::TZMSK32rr:
5220 case X86::TZMSK32rm:
5221 case X86::TZMSK64rr:
5222 case X86::TZMSK64rm:
5223 // These instructions clear the overflow flag just like TEST.
5224 // FIXME: These are not the only instructions in this switch that clear the
5225 // overflow flag.
5226 ClearsOverflowFlag = true;
5227 return true;
5228 case X86::BEXTR32rr:
5229 case X86::BEXTR64rr:
5230 case X86::BEXTR32rm:
5231 case X86::BEXTR64rm:
5232 case X86::BEXTRI32ri:
5233 case X86::BEXTRI32mi:
5234 case X86::BEXTRI64ri:
5235 case X86::BEXTRI64mi:
5236 // BEXTR doesn't update the sign flag so we can't use it. It does clear
5237 // the overflow flag, but that's not useful without the sign flag.
5238 NoSignFlag = true;
5239 return true;
5240 }
5241}
5242
5243/// Check whether the use can be converted to remove a comparison against zero.
5244/// Returns the EFLAGS condition and the operand that we are comparing against zero.
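/// For example, NEG sets CF to 0 exactly when its operand was zero, so a
/// following "test %reg, %reg; je" can check COND_AE on the NEG's flags
/// instead.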
5245static std::pair<X86::CondCode, unsigned> isUseDefConvertible(const MachineInstr &MI) {
5246 switch (MI.getOpcode()) {
5247 default:
5248 return std::make_pair(X86::COND_INVALID, ~0U);
5249 CASE_ND(NEG8r)
5250 CASE_ND(NEG16r)
5251 CASE_ND(NEG32r)
5252 CASE_ND(NEG64r)
5253 return std::make_pair(X86::COND_AE, 1U);
5254 case X86::LZCNT16rr:
5255 case X86::LZCNT32rr:
5256 case X86::LZCNT64rr:
5257 return std::make_pair(X86::COND_B, 1U);
5258 case X86::POPCNT16rr:
5259 case X86::POPCNT32rr:
5260 case X86::POPCNT64rr:
5261 return std::make_pair(X86::COND_E, 1U);
5262 case X86::TZCNT16rr:
5263 case X86::TZCNT32rr:
5264 case X86::TZCNT64rr:
5265 return std::make_pair(X86::COND_B, 1U);
5266 case X86::BSF16rr:
5267 case X86::BSF32rr:
5268 case X86::BSF64rr:
5269 case X86::BSR16rr:
5270 case X86::BSR32rr:
5271 case X86::BSR64rr:
5272 return std::make_pair(X86::COND_E, 2U);
5273 case X86::BLSI32rr:
5274 case X86::BLSI64rr:
5275 return std::make_pair(X86::COND_AE, 1U);
5276 case X86::BLSR32rr:
5277 case X86::BLSR64rr:
5278 case X86::BLSMSK32rr:
5279 case X86::BLSMSK64rr:
5280 return std::make_pair(X86::COND_B, 1U);
5281 // TODO: TBM instructions.
5282 }
5283}
5284
5285/// Check if there exists an earlier instruction that
5286/// operates on the same source operands and sets flags in the same way as
5287/// Compare; remove Compare if possible.
5288bool X86InstrInfo::optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg,
5289 Register SrcReg2, int64_t CmpMask,
5290 int64_t CmpValue,
5291 const MachineRegisterInfo *MRI) const {
5292 // Check whether we can replace SUB with CMP.
5293 switch (CmpInstr.getOpcode()) {
5294 default:
5295 break;
5296 CASE_ND(SUB64ri32)
5297 CASE_ND(SUB32ri)
5298 CASE_ND(SUB16ri)
5299 CASE_ND(SUB8ri)
5300 CASE_ND(SUB64rm)
5301 CASE_ND(SUB32rm)
5302 CASE_ND(SUB16rm)
5303 CASE_ND(SUB8rm)
5304 CASE_ND(SUB64rr)
5305 CASE_ND(SUB32rr)
5306 CASE_ND(SUB16rr)
5307 CASE_ND(SUB8rr) {
5308 if (!MRI->use_nodbg_empty(CmpInstr.getOperand(0).getReg()))
5309 return false;
5310 // There is no use of the destination register; we can replace SUB with CMP.
5311 unsigned NewOpcode = 0;
5312#define FROM_TO(A, B) \
5313 CASE_ND(A) NewOpcode = X86::B; \
5314 break;
5315 switch (CmpInstr.getOpcode()) {
5316 default:
5317 llvm_unreachable("Unreachable!");
5318 FROM_TO(SUB64rm, CMP64rm)
5319 FROM_TO(SUB32rm, CMP32rm)
5320 FROM_TO(SUB16rm, CMP16rm)
5321 FROM_TO(SUB8rm, CMP8rm)
5322 FROM_TO(SUB64rr, CMP64rr)
5323 FROM_TO(SUB32rr, CMP32rr)
5324 FROM_TO(SUB16rr, CMP16rr)
5325 FROM_TO(SUB8rr, CMP8rr)
5326 FROM_TO(SUB64ri32, CMP64ri32)
5327 FROM_TO(SUB32ri, CMP32ri)
5328 FROM_TO(SUB16ri, CMP16ri)
5329 FROM_TO(SUB8ri, CMP8ri)
5330 }
5331#undef FROM_TO
5332 CmpInstr.setDesc(get(NewOpcode));
5333 CmpInstr.removeOperand(0);
5334 // Mutating this instruction invalidates any debug data associated with it.
5335 CmpInstr.dropDebugNumber();
5336 // Fall through to optimize Cmp if Cmp is CMPrr or CMPri.
5337 if (NewOpcode == X86::CMP64rm || NewOpcode == X86::CMP32rm ||
5338 NewOpcode == X86::CMP16rm || NewOpcode == X86::CMP8rm)
5339 return false;
5340 }
5341 }
5342
5343 // The following code tries to remove the comparison by re-using EFLAGS
5344 // from earlier instructions.
5345
5346 bool IsCmpZero = (CmpMask != 0 && CmpValue == 0);
5347
5348 // Transformation currently requires SSA values.
5349 if (SrcReg2.isPhysical())
5350 return false;
5351 MachineInstr *SrcRegDef = MRI->getVRegDef(SrcReg);
5352 assert(SrcRegDef && "Must have a definition (SSA)");
5353
5354 MachineInstr *MI = nullptr;
5355 MachineInstr *Sub = nullptr;
5356 MachineInstr *Movr0Inst = nullptr;
5358 bool NoSignFlag = false;
5359 bool ClearsOverflowFlag = false;
5360 bool ShouldUpdateCC = false;
5361 bool IsSwapped = false;
5362 bool HasNF = Subtarget.hasNF();
5363 unsigned OpNo = 0;
5364 X86::CondCode NewCC = X86::COND_INVALID;
5365 int64_t ImmDelta = 0;
5366
5367 // Search backward from CmpInstr for the next instruction defining EFLAGS.
5368 const TargetRegisterInfo *TRI = &getRegisterInfo();
5369 MachineBasicBlock &CmpMBB = *CmpInstr.getParent();
5370 MachineBasicBlock::reverse_iterator From =
5371 std::next(MachineBasicBlock::reverse_iterator(CmpInstr));
5372 for (MachineBasicBlock *MBB = &CmpMBB;;) {
5373 for (MachineInstr &Inst : make_range(From, MBB->rend())) {
5374 // Try to use EFLAGS from the instruction defining %SrcReg. Example:
5375 // %eax = addl ...
5376 // ... // EFLAGS not changed
5377 // testl %eax, %eax // <-- can be removed
5378 if (&Inst == SrcRegDef) {
5379 if (IsCmpZero &&
5380 isDefConvertible(Inst, NoSignFlag, ClearsOverflowFlag)) {
5381 MI = &Inst;
5382 break;
5383 }
5384
5385 // Look back for the following pattern, in which case the
5386 // test16rr/test64rr instruction could be erased.
5387 //
5388 // Example for test16rr:
5389 // %reg = and32ri %in_reg, 5
5390 // ... // EFLAGS not changed.
5391 // %src_reg = copy %reg.sub_16bit:gr32
5392 // test16rr %src_reg, %src_reg, implicit-def $eflags
5393 // Example for test64rr:
5394 // %reg = and32ri %in_reg, 5
5395 // ... // EFLAGS not changed.
5396 // %src_reg = subreg_to_reg 0, %reg, %subreg.sub_index
5397 // test64rr %src_reg, %src_reg, implicit-def $eflags
5398 MachineInstr *AndInstr = nullptr;
5399 if (IsCmpZero &&
5400 findRedundantFlagInstr(CmpInstr, Inst, MRI, &AndInstr, TRI,
5401 Subtarget, NoSignFlag, ClearsOverflowFlag)) {
5402 assert(AndInstr != nullptr && X86::isAND(AndInstr->getOpcode()));
5403 MI = AndInstr;
5404 break;
5405 }
5406 // Cannot find other candidates before definition of SrcReg.
5407 return false;
5408 }
5409
5410 if (Inst.modifiesRegister(X86::EFLAGS, TRI)) {
5411 // Try to use EFLAGS produced by an instruction reading %SrcReg.
5412 // Example:
5413 // %eax = ...
5414 // ...
5415 // popcntl %eax
5416 // ... // EFLAGS not changed
5417 // testl %eax, %eax // <-- can be removed
5418 if (IsCmpZero) {
5419 std::tie(NewCC, OpNo) = isUseDefConvertible(Inst);
5420 if (NewCC != X86::COND_INVALID && Inst.getOperand(OpNo).isReg() &&
5421 Inst.getOperand(OpNo).getReg() == SrcReg) {
5422 ShouldUpdateCC = true;
5423 MI = &Inst;
5424 break;
5425 }
5426 }
5427
5428 // Try to use EFLAGS from an instruction with similar flag results.
5429 // Example:
5430 // sub x, y or cmp x, y
5431 // ... // EFLAGS not changed
5432 // cmp x, y // <-- can be removed
5433 if (isRedundantFlagInstr(CmpInstr, SrcReg, SrcReg2, CmpMask, CmpValue,
5434 Inst, &IsSwapped, &ImmDelta)) {
5435 Sub = &Inst;
5436 break;
5437 }
5438
5439 // MOV32r0 is implemented with XOR, which clobbers the condition codes. It is
5440 // safe to move it up if its definition of EFLAGS is dead and earlier
5441 // instructions do not read or write EFLAGS.
5442 if (!Movr0Inst && Inst.getOpcode() == X86::MOV32r0 &&
5443 Inst.registerDefIsDead(X86::EFLAGS, TRI)) {
5444 Movr0Inst = &Inst;
5445 continue;
5446 }
5447
5448 // For ADDrm/ADDmr instructions with a relocation, skip the optimization of
5449 // replacing non-NF with NF instructions. This keeps backward
5450 // compatibility with old linker versions that lack APX relocation type
5451 // support on Linux.
5452 bool IsWithReloc = X86EnableAPXForRelocation
5453 ? false
5455
5456 // Try to replace non-NF with NF instructions.
5457 if (HasNF && Inst.registerDefIsDead(X86::EFLAGS, TRI) && !IsWithReloc) {
5458 unsigned NewOp = X86::getNFVariant(Inst.getOpcode());
5459 if (!NewOp)
5460 return false;
5461
5462 InstsToUpdate.push_back(std::make_pair(&Inst, NewOp));
5463 continue;
5464 }
5465
5466 // Cannot do anything for any other EFLAG changes.
5467 return false;
5468 }
5469 }
5470
5471 if (MI || Sub)
5472 break;
5473
5474 // Reached begin of basic block. Continue in predecessor if there is
5475 // exactly one.
5476 if (MBB->pred_size() != 1)
5477 return false;
5478 MBB = *MBB->pred_begin();
5479 From = MBB->rbegin();
5480 }
5481
5482 // Scan forward from the instruction after CmpInstr for uses of EFLAGS.
5483 // It is safe to remove CmpInstr if EFLAGS is redefined or killed.
5484 // If we are done with the basic block, we need to check whether EFLAGS is
5485 // live-out.
5486 bool FlagsMayLiveOut = true;
5487 SmallVector<std::pair<MachineInstr *, X86::CondCode>, 4> OpsToUpdate;
5488 MachineBasicBlock::iterator AfterCmpInstr =
5489 std::next(MachineBasicBlock::iterator(CmpInstr));
5490 for (MachineInstr &Instr : make_range(AfterCmpInstr, CmpMBB.end())) {
5491 bool ModifyEFLAGS = Instr.modifiesRegister(X86::EFLAGS, TRI);
5492 bool UseEFLAGS = Instr.readsRegister(X86::EFLAGS, TRI);
5493 // Check the usage if this instruction both uses and updates EFLAGS.
5494 if (!UseEFLAGS && ModifyEFLAGS) {
5495 // It is safe to remove CmpInstr if EFLAGS is updated again.
5496 FlagsMayLiveOut = false;
5497 break;
5498 }
5499 if (!UseEFLAGS && !ModifyEFLAGS)
5500 continue;
5501
5502 // EFLAGS is used by this instruction.
5503 X86::CondCode OldCC = X86::getCondFromMI(Instr);
5504 if ((MI || IsSwapped || ImmDelta != 0) && OldCC == X86::COND_INVALID)
5505 return false;
5506
5507 X86::CondCode ReplacementCC = X86::COND_INVALID;
5508 if (MI) {
5509 switch (OldCC) {
5510 default:
5511 break;
5512 case X86::COND_A:
5513 case X86::COND_AE:
5514 case X86::COND_B:
5515 case X86::COND_BE:
5516 // CF is used, we can't perform this optimization.
5517 return false;
5518 case X86::COND_G:
5519 case X86::COND_GE:
5520 case X86::COND_L:
5521 case X86::COND_LE:
5522 // If SF is used, but the instruction doesn't update the SF, then we
5523 // can't do the optimization.
5524 if (NoSignFlag)
5525 return false;
5526 [[fallthrough]];
5527 case X86::COND_O:
5528 case X86::COND_NO:
5529 // If OF is used, the instruction needs to clear it like CmpZero does.
5530 if (!ClearsOverflowFlag)
5531 return false;
5532 break;
5533 case X86::COND_S:
5534 case X86::COND_NS:
5535 // If SF is used, but the instruction doesn't update the SF, then we
5536 // can't do the optimization.
5537 if (NoSignFlag)
5538 return false;
5539 break;
5540 }
5541
5542 // If we're updating the condition code, check if we have to reverse the
5543 // condition.
5544 if (ShouldUpdateCC)
5545 switch (OldCC) {
5546 default:
5547 return false;
5548 case X86::COND_E:
5549 ReplacementCC = NewCC;
5550 break;
5551 case X86::COND_NE:
5552 ReplacementCC = GetOppositeBranchCondition(NewCC);
5553 break;
5554 }
5555 } else if (IsSwapped) {
5556 // If we have SUB(r1, r2) and CMP(r2, r1), the condition code needs
5557 // to be changed from r2 > r1 to r1 < r2, from r2 < r1 to r1 > r2, etc.
5558 // We swap the condition code and synthesize the new opcode.
5559 ReplacementCC = getSwappedCondition(OldCC);
5560 if (ReplacementCC == X86::COND_INVALID)
5561 return false;
5562 ShouldUpdateCC = true;
5563 } else if (ImmDelta != 0) {
5564 unsigned BitWidth = TRI->getRegSizeInBits(*MRI->getRegClass(SrcReg));
5565 // Shift amount for min/max constants to adjust for 8/16/32 instruction
5566 // sizes.
5567 switch (OldCC) {
5568 case X86::COND_L: // x <s (C + 1) --> x <=s C
5569 if (ImmDelta != 1 || APInt::getSignedMinValue(BitWidth) == CmpValue)
5570 return false;
5571 ReplacementCC = X86::COND_LE;
5572 break;
5573 case X86::COND_B: // x <u (C + 1) --> x <=u C
5574 if (ImmDelta != 1 || CmpValue == 0)
5575 return false;
5576 ReplacementCC = X86::COND_BE;
5577 break;
5578 case X86::COND_GE: // x >=s (C + 1) --> x >s C
5579 if (ImmDelta != 1 || APInt::getSignedMinValue(BitWidth) == CmpValue)
5580 return false;
5581 ReplacementCC = X86::COND_G;
5582 break;
5583 case X86::COND_AE: // x >=u (C + 1) --> x >u C
5584 if (ImmDelta != 1 || CmpValue == 0)
5585 return false;
5586 ReplacementCC = X86::COND_A;
5587 break;
5588 case X86::COND_G: // x >s (C - 1) --> x >=s C
5589 if (ImmDelta != -1 || APInt::getSignedMaxValue(BitWidth) == CmpValue)
5590 return false;
5591 ReplacementCC = X86::COND_GE;
5592 break;
5593 case X86::COND_A: // x >u (C - 1) --> x >=u C
5594 if (ImmDelta != -1 || APInt::getMaxValue(BitWidth) == CmpValue)
5595 return false;
5596 ReplacementCC = X86::COND_AE;
5597 break;
5598 case X86::COND_LE: // x <=s (C - 1) --> x <s C
5599 if (ImmDelta != -1 || APInt::getSignedMaxValue(BitWidth) == CmpValue)
5600 return false;
5601 ReplacementCC = X86::COND_L;
5602 break;
5603 case X86::COND_BE: // x <=u (C - 1) --> x <u C
5604 if (ImmDelta != -1 || APInt::getMaxValue(BitWidth) == CmpValue)
5605 return false;
5606 ReplacementCC = X86::COND_B;
5607 break;
5608 default:
5609 return false;
5610 }
5611 ShouldUpdateCC = true;
5612 }
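// Worked example for the COND_L case above (values are illustrative): if the
// flags in EFLAGS come from 'cmpl $4, %eax' and CmpInstr is 'cmpl $5, %eax'
// followed by 'jl', the compare can be removed and 'jl' rewritten to 'jle',
// since x <s 5 is equivalent to x <=s 4.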
5613
5614 if (ShouldUpdateCC && ReplacementCC != OldCC) {
5615 // Push the MachineInstr to OpsToUpdate.
5616 // If it is safe to remove CmpInstr, the condition code of these
5617 // instructions will be modified.
5618 OpsToUpdate.push_back(std::make_pair(&Instr, ReplacementCC));
5619 }
5620 if (ModifyEFLAGS || Instr.killsRegister(X86::EFLAGS, TRI)) {
5621 // It is safe to remove CmpInstr if EFLAGS is updated again or killed.
5622 FlagsMayLiveOut = false;
5623 break;
5624 }
5625 }
5626
5627 // If we have to update users but EFLAGS is live-out, abort, since we
5628 // cannot easily find all of the users.
5629 if ((MI != nullptr || ShouldUpdateCC) && FlagsMayLiveOut) {
5630 for (MachineBasicBlock *Successor : CmpMBB.successors())
5631 if (Successor->isLiveIn(X86::EFLAGS))
5632 return false;
5633 }
5634
5635 // The instruction to be updated is either Sub or MI.
5636 assert((MI == nullptr || Sub == nullptr) && "Should not have Sub and MI set");
5637 Sub = MI != nullptr ? MI : Sub;
5638 MachineBasicBlock *SubBB = Sub->getParent();
5639 // Move Movr0Inst to the appropriate place before Sub.
5640 if (Movr0Inst) {
5641 // Only move within the same block so we don't accidentally move to a
5642 // block with higher execution frequency.
5643 if (&CmpMBB != SubBB)
5644 return false;
5645 // Look backwards until we find a def that doesn't use the current EFLAGS.
5646 MachineBasicBlock::reverse_iterator InsertI = Sub->getReverseIterator(),
5647 InsertE = Sub->getParent()->rend();
5648 for (; InsertI != InsertE; ++InsertI) {
5649 MachineInstr *Instr = &*InsertI;
5650 if (!Instr->readsRegister(X86::EFLAGS, TRI) &&
5651 Instr->modifiesRegister(X86::EFLAGS, TRI)) {
5652 Movr0Inst->getParent()->remove(Movr0Inst);
5653 Instr->getParent()->insert(MachineBasicBlock::iterator(Instr),
5654 Movr0Inst);
5655 break;
5656 }
5657 }
5658 if (InsertI == InsertE)
5659 return false;
5660 }
5661
5662 // Replace non-NF with NF instructions.
5663 for (auto &Inst : InstsToUpdate) {
5664 Inst.first->setDesc(get(Inst.second));
5665 Inst.first->removeOperand(
5666 Inst.first->findRegisterDefOperandIdx(X86::EFLAGS, /*TRI=*/nullptr));
5667 }
5668
5669 // Make sure Sub instruction defines EFLAGS and mark the def live.
5670 MachineOperand *FlagDef =
5671 Sub->findRegisterDefOperand(X86::EFLAGS, /*TRI=*/nullptr);
5672 assert(FlagDef && "Unable to locate a def EFLAGS operand");
5673 FlagDef->setIsDead(false);
5674
5675 CmpInstr.eraseFromParent();
5676
5677 // Modify the condition code of instructions in OpsToUpdate.
5678 for (auto &Op : OpsToUpdate) {
5679 Op.first->getOperand(Op.first->getDesc().getNumOperands() - 1)
5680 .setImm(Op.second);
5681 }
5682 // Add EFLAGS to block live-ins between CmpBB and block of flags producer.
5683 for (MachineBasicBlock *MBB = &CmpMBB; MBB != SubBB;
5684 MBB = *MBB->pred_begin()) {
5685 assert(MBB->pred_size() == 1 && "Expected exactly one predecessor");
5686 if (!MBB->isLiveIn(X86::EFLAGS))
5687 MBB->addLiveIn(X86::EFLAGS);
5688 }
5689 return true;
5690}
5691
5692/// \returns true if the instruction can be changed to COPY when imm is 0.
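/// e.g. '%1 = OR32ri %2, 0' whose EFLAGS def is dead computes the same value
/// as '%1 = COPY %2'.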
5693static bool canConvert2Copy(unsigned Opc) {
5694 switch (Opc) {
5695 default:
5696 return false;
5697 CASE_ND(ADD64ri32)
5698 CASE_ND(SUB64ri32)
5699 CASE_ND(OR64ri32)
5700 CASE_ND(XOR64ri32)
5701 CASE_ND(ADD32ri)
5702 CASE_ND(SUB32ri)
5703 CASE_ND(OR32ri)
5704 CASE_ND(XOR32ri)
5705 return true;
5706 }
5707}
5708
5709 /// Convert an ALUrr opcode to the corresponding ALUri opcode, e.g.
5710 /// ADD32rr ==> ADD32ri
5711static unsigned convertALUrr2ALUri(unsigned Opc) {
5712 switch (Opc) {
5713 default:
5714 return 0;
5715#define FROM_TO(FROM, TO) \
5716 case X86::FROM: \
5717 return X86::TO; \
5718 case X86::FROM##_ND: \
5719 return X86::TO##_ND;
5720 FROM_TO(ADD64rr, ADD64ri32)
5721 FROM_TO(ADC64rr, ADC64ri32)
5722 FROM_TO(SUB64rr, SUB64ri32)
5723 FROM_TO(SBB64rr, SBB64ri32)
5724 FROM_TO(AND64rr, AND64ri32)
5725 FROM_TO(OR64rr, OR64ri32)
5726 FROM_TO(XOR64rr, XOR64ri32)
5727 FROM_TO(SHR64rCL, SHR64ri)
5728 FROM_TO(SHL64rCL, SHL64ri)
5729 FROM_TO(SAR64rCL, SAR64ri)
5730 FROM_TO(ROL64rCL, ROL64ri)
5731 FROM_TO(ROR64rCL, ROR64ri)
5732 FROM_TO(RCL64rCL, RCL64ri)
5733 FROM_TO(RCR64rCL, RCR64ri)
5734 FROM_TO(ADD32rr, ADD32ri)
5735 FROM_TO(ADC32rr, ADC32ri)
5736 FROM_TO(SUB32rr, SUB32ri)
5737 FROM_TO(SBB32rr, SBB32ri)
5738 FROM_TO(AND32rr, AND32ri)
5739 FROM_TO(OR32rr, OR32ri)
5740 FROM_TO(XOR32rr, XOR32ri)
5741 FROM_TO(SHR32rCL, SHR32ri)
5742 FROM_TO(SHL32rCL, SHL32ri)
5743 FROM_TO(SAR32rCL, SAR32ri)
5744 FROM_TO(ROL32rCL, ROL32ri)
5745 FROM_TO(ROR32rCL, ROR32ri)
5746 FROM_TO(RCL32rCL, RCL32ri)
5747 FROM_TO(RCR32rCL, RCR32ri)
5748#undef FROM_TO
5749#define FROM_TO(FROM, TO) \
5750 case X86::FROM: \
5751 return X86::TO;
5752 FROM_TO(TEST64rr, TEST64ri32)
5753 FROM_TO(CTEST64rr, CTEST64ri32)
5754 FROM_TO(CMP64rr, CMP64ri32)
5755 FROM_TO(CCMP64rr, CCMP64ri32)
5756 FROM_TO(TEST32rr, TEST32ri)
5757 FROM_TO(CTEST32rr, CTEST32ri)
5758 FROM_TO(CMP32rr, CMP32ri)
5759 FROM_TO(CCMP32rr, CCMP32ri)
5760#undef FROM_TO
5761 }
5762}
5763
5764/// Reg is assigned ImmVal in DefMI, and is used in UseMI.
5765/// If MakeChange is true, this function tries to replace Reg by ImmVal in
5766/// UseMI. If MakeChange is false, just check if folding is possible.
5767//
5768/// \returns true if folding is successful or possible.
5769bool X86InstrInfo::foldImmediateImpl(MachineInstr &UseMI, MachineInstr *DefMI,
5770 Register Reg, int64_t ImmVal,
5771 MachineRegisterInfo *MRI,
5772 bool MakeChange) const {
5773 bool Modified = false;
5774
5775 // 64 bit operations accept sign extended 32 bit immediates.
5776 // 32 bit operations accept all 32 bit immediates, so we don't need to check
5777 // them.
5778 const TargetRegisterClass *RC = nullptr;
5779 if (Reg.isVirtual())
5780 RC = MRI->getRegClass(Reg);
5781 if ((Reg.isPhysical() && X86::GR64RegClass.contains(Reg)) ||
5782 (Reg.isVirtual() && X86::GR64RegClass.hasSubClassEq(RC))) {
5783 if (!isInt<32>(ImmVal))
5784 return false;
5785 }
5786
5787 if (UseMI.findRegisterUseOperand(Reg, /*TRI=*/nullptr)->getSubReg())
5788 return false;
5789 // An immediate has a larger code size than a register, so avoid folding
5790 // the immediate if it has more than one use and we are optimizing for size.
5791 if (UseMI.getMF()->getFunction().hasOptSize() && Reg.isVirtual() &&
5792 !MRI->hasOneNonDBGUse(Reg))
5793 return false;
5794
5795 unsigned Opc = UseMI.getOpcode();
5796 unsigned NewOpc;
5797 if (Opc == TargetOpcode::COPY) {
5798 Register ToReg = UseMI.getOperand(0).getReg();
5799 const TargetRegisterClass *RC = nullptr;
5800 if (ToReg.isVirtual())
5801 RC = MRI->getRegClass(ToReg);
5802 bool GR32Reg = (ToReg.isVirtual() && X86::GR32RegClass.hasSubClassEq(RC)) ||
5803 (ToReg.isPhysical() && X86::GR32RegClass.contains(ToReg));
5804 bool GR64Reg = (ToReg.isVirtual() && X86::GR64RegClass.hasSubClassEq(RC)) ||
5805 (ToReg.isPhysical() && X86::GR64RegClass.contains(ToReg));
5806 bool GR8Reg = (ToReg.isVirtual() && X86::GR8RegClass.hasSubClassEq(RC)) ||
5807 (ToReg.isPhysical() && X86::GR8RegClass.contains(ToReg));
5808
5809 if (ImmVal == 0) {
5810 // We have MOV32r0 only.
5811 if (!GR32Reg)
5812 return false;
5813 }
5814
5815 if (GR64Reg) {
5816 if (isUInt<32>(ImmVal))
5817 NewOpc = X86::MOV32ri64;
5818 else
5819 NewOpc = X86::MOV64ri;
5820 } else if (GR32Reg) {
5821 NewOpc = X86::MOV32ri;
5822 if (ImmVal == 0) {
5823 // MOV32r0 clobbers EFLAGS.
5824 const TargetRegisterInfo *TRI = &getRegisterInfo();
5825 if (UseMI.getParent()->computeRegisterLiveness(
5826 TRI, X86::EFLAGS, UseMI) != MachineBasicBlock::LQR_Dead)
5827 return false;
5828
5829 // MOV32r0 is different from the other cases because it doesn't encode the
5830 // immediate in the instruction. So we directly modify it here.
5831 if (!MakeChange)
5832 return true;
5833 UseMI.setDesc(get(X86::MOV32r0));
5834 UseMI.removeOperand(
5835 UseMI.findRegisterUseOperandIdx(Reg, /*TRI=*/nullptr));
5836 UseMI.addOperand(MachineOperand::CreateReg(X86::EFLAGS, /*isDef=*/true,
5837 /*isImp=*/true,
5838 /*isKill=*/false,
5839 /*isDead=*/true));
5840 Modified = true;
5841 }
5842 } else if (GR8Reg)
5843 NewOpc = X86::MOV8ri;
5844 else
5845 return false;
5846 } else
5847 NewOpc = convertALUrr2ALUri(Opc);
5848
5849 if (!NewOpc)
5850 return false;
5851
5852 // For SUB instructions the immediate can only be the second source operand.
5853 if ((NewOpc == X86::SUB64ri32 || NewOpc == X86::SUB32ri ||
5854 NewOpc == X86::SBB64ri32 || NewOpc == X86::SBB32ri ||
5855 NewOpc == X86::SUB64ri32_ND || NewOpc == X86::SUB32ri_ND ||
5856 NewOpc == X86::SBB64ri32_ND || NewOpc == X86::SBB32ri_ND) &&
5857 UseMI.findRegisterUseOperandIdx(Reg, /*TRI=*/nullptr) != 2)
5858 return false;
5859 // For CMP instructions the immediate can only be at index 1.
5860 if (((NewOpc == X86::CMP64ri32 || NewOpc == X86::CMP32ri) ||
5861 (NewOpc == X86::CCMP64ri32 || NewOpc == X86::CCMP32ri)) &&
5862 UseMI.findRegisterUseOperandIdx(Reg, /*TRI=*/nullptr) != 1)
5863 return false;
5864
5865 using namespace X86;
5866 if (isSHL(Opc) || isSHR(Opc) || isSAR(Opc) || isROL(Opc) || isROR(Opc) ||
5867 isRCL(Opc) || isRCR(Opc)) {
5868 unsigned RegIdx = UseMI.findRegisterUseOperandIdx(Reg, /*TRI=*/nullptr);
5869 if (RegIdx < 2)
5870 return false;
5871 if (!isInt<8>(ImmVal))
5872 return false;
5873 assert(Reg == X86::CL);
5874
5875 if (!MakeChange)
5876 return true;
5877 UseMI.setDesc(get(NewOpc));
5878 UseMI.removeOperand(RegIdx);
5879 UseMI.addOperand(MachineOperand::CreateImm(ImmVal));
5880 // Reg is the physical register $cl, so we can't tell through MRI whether
5881 // DefMI is dead. Let the caller handle it, or the dead-mi-elimination pass
5882 // can delete the instruction that defines the dead physical register.
5883 return true;
5884 }
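// For example, if $cl is known to hold the constant 3 (ImmVal == 3), a use
// 'SHL64rCL %1' can be rewritten here to 'SHL64ri %1, 3'.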
5885
5886 if (!MakeChange)
5887 return true;
5888
5889 if (!Modified) {
5890 // Modify the instruction.
5891 if (ImmVal == 0 && canConvert2Copy(NewOpc) &&
5892 UseMI.registerDefIsDead(X86::EFLAGS, /*TRI=*/nullptr)) {
5893 // %100 = add %101, 0
5894 // ==>
5895 // %100 = COPY %101
5896 UseMI.setDesc(get(TargetOpcode::COPY));
5897 UseMI.removeOperand(
5898 UseMI.findRegisterUseOperandIdx(Reg, /*TRI=*/nullptr));
5899 UseMI.removeOperand(
5900 UseMI.findRegisterDefOperandIdx(X86::EFLAGS, /*TRI=*/nullptr));
5901 UseMI.untieRegOperand(0);
5904 } else {
5905 unsigned Op1 = 1, Op2 = CommuteAnyOperandIndex;
5906 unsigned ImmOpNum = 2;
5907 if (!UseMI.getOperand(0).isDef()) {
5908 Op1 = 0; // TEST, CMP, CTEST, CCMP
5909 ImmOpNum = 1;
5910 }
5911 if (Opc == TargetOpcode::COPY)
5912 ImmOpNum = 1;
5913 if (findCommutedOpIndices(UseMI, Op1, Op2) &&
5914 UseMI.getOperand(Op1).getReg() == Reg)
5915 commuteInstruction(UseMI);
5916
5917 assert(UseMI.getOperand(ImmOpNum).getReg() == Reg);
5918 UseMI.setDesc(get(NewOpc));
5919 UseMI.getOperand(ImmOpNum).ChangeToImmediate(ImmVal);
5920 }
5921 }
5922
5923 if (Reg.isVirtual() && MRI->use_nodbg_empty(Reg))
5924 DefMI->eraseFromBundle();
5925
5926 return true;
5927}
5928
5929 /// foldImmediate - 'Reg' is known to be defined by a move immediate
5930 /// instruction; try to fold the immediate into the use instruction.
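/// For example, given '%1 = MOV32ri 7' and a use '%2 = ADD32rr %3, %1', the
/// use can be rewritten to '%2 = ADD32ri %3, 7'.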
5931 bool X86InstrInfo::foldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
5932 Register Reg, MachineRegisterInfo *MRI) const {
5933 int64_t ImmVal;
5934 if (!getConstValDefinedInReg(DefMI, Reg, ImmVal))
5935 return false;
5936
5937 return foldImmediateImpl(UseMI, &DefMI, Reg, ImmVal, MRI, true);
5938}
5939
5940/// Expand a single-def pseudo instruction to a two-addr
5941/// instruction with two undef reads of the register being defined.
5942/// This is used for mapping:
5943/// %xmm4 = V_SET0
5944/// to:
5945/// %xmm4 = PXORrr undef %xmm4, undef %xmm4
5946///
5947 static bool Expand2AddrUndef(MachineInstrBuilder &MIB,
5948 const MCInstrDesc &Desc) {
5949 assert(Desc.getNumOperands() == 3 && "Expected two-addr instruction.");
5950 Register Reg = MIB.getReg(0);
5951 MIB->setDesc(Desc);
5952
5953 // MachineInstr::addOperand() will insert explicit operands before any
5954 // implicit operands.
5955 MIB.addReg(Reg, RegState::Undef).addReg(Reg, RegState::Undef);
5956 // But we don't trust that.
5957 assert(MIB.getReg(1) == Reg && MIB.getReg(2) == Reg && "Misplaced operand");
5958 return true;
5959}
5960
5961/// Expand a single-def pseudo instruction to a two-addr
5962/// instruction with two %k0 reads.
5963/// This is used for mapping:
5964/// %k4 = K_SET1
5965/// to:
5966/// %k4 = KXNORrr %k0, %k0
5967 static bool Expand2AddrKreg(MachineInstrBuilder &MIB, const MCInstrDesc &Desc,
5968 Register Reg) {
5969 assert(Desc.getNumOperands() == 3 && "Expected two-addr instruction.");
5970 MIB->setDesc(Desc);
5971 MIB.addReg(Reg, RegState::Undef).addReg(Reg, RegState::Undef);
5972 return true;
5973}
5974
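// Lower the MOV32r1/MOV32r_1 pseudos to an EFLAGS-clobbering sequence, e.g.
// MOV32r1 %eax becomes 'xorl %eax, %eax; incl %eax', and MOV32r_1 uses DEC
// instead of INC.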
5975 static bool expandMOV32r1(MachineInstrBuilder &MIB, const TargetInstrInfo &TII,
5976 bool MinusOne) {
5977 MachineBasicBlock &MBB = *MIB->getParent();
5978 const DebugLoc &DL = MIB->getDebugLoc();
5979 Register Reg = MIB.getReg(0);
5980
5981 // Insert the XOR.
5982 BuildMI(MBB, MIB.getInstr(), DL, TII.get(X86::XOR32rr), Reg)
5983 .addReg(Reg, RegState::Undef)
5984 .addReg(Reg, RegState::Undef);
5985
5986 // Turn the pseudo into an INC or DEC.
5987 MIB->setDesc(TII.get(MinusOne ? X86::DEC32r : X86::INC32r));
5988 MIB.addReg(Reg);
5989
5990 return true;
5991}
5992
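// Lower MOV32ImmSExti8/MOV64ImmSExti8 to a push/pop pair when profitable,
// e.g. 'movq $-1, %rax' can be emitted as 'pushq $-1; popq %rax', with CFI
// adjustments for the temporary stack use when needed.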
5993 static bool ExpandMOVImmSExti8(MachineInstrBuilder &MIB,
5994 const TargetInstrInfo &TII,
5995 const X86Subtarget &Subtarget) {
5996 MachineBasicBlock &MBB = *MIB->getParent();
5997 const DebugLoc &DL = MIB->getDebugLoc();
5998 int64_t Imm = MIB->getOperand(1).getImm();
5999 assert(Imm != 0 && "Using push/pop for 0 is not efficient.");
6000 MachineBasicBlock::iterator I = MIB.getInstr();
6001
6002 int StackAdjustment;
6003
6004 if (Subtarget.is64Bit()) {
6005 assert(MIB->getOpcode() == X86::MOV64ImmSExti8 ||
6006 MIB->getOpcode() == X86::MOV32ImmSExti8);
6007
6008 // Can't use push/pop lowering if the function might write to the red zone.
6009 X86MachineFunctionInfo *X86FI =
6010 MBB.getParent()->getInfo<X86MachineFunctionInfo>();
6011 if (X86FI->getUsesRedZone()) {
6012 MIB->setDesc(TII.get(MIB->getOpcode() == X86::MOV32ImmSExti8
6013 ? X86::MOV32ri
6014 : X86::MOV64ri));
6015 return true;
6016 }
6017
6018 // 64-bit mode doesn't have 32-bit push/pop, so use 64-bit operations and
6019 // widen the register if necessary.
6020 StackAdjustment = 8;
6021 BuildMI(MBB, I, DL, TII.get(X86::PUSH64i32)).addImm(Imm);
6022 MIB->setDesc(TII.get(X86::POP64r));
6023 MIB->getOperand(0).setReg(getX86SubSuperRegister(MIB.getReg(0), 64));
6024 } else {
6025 assert(MIB->getOpcode() == X86::MOV32ImmSExti8);
6026 StackAdjustment = 4;
6027 BuildMI(MBB, I, DL, TII.get(X86::PUSH32i)).addImm(Imm);
6028 MIB->setDesc(TII.get(X86::POP32r));
6029 }
6030 MIB->removeOperand(1);
6031 MIB->addImplicitDefUseOperands(*MBB.getParent());
6032
6033 // Build CFI if necessary.
6034 MachineFunction &MF = *MBB.getParent();
6035 const X86FrameLowering *TFL = Subtarget.getFrameLowering();
6036 bool IsWin64Prologue = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
6037 bool NeedsDwarfCFI = !IsWin64Prologue && MF.needsFrameMoves();
6038 bool EmitCFI = !TFL->hasFP(MF) && NeedsDwarfCFI;
6039 if (EmitCFI) {
6040 TFL->BuildCFI(
6041 MBB, I, DL,
6042 MCCFIInstruction::createAdjustCfaOffset(nullptr, StackAdjustment));
6043 TFL->BuildCFI(
6044 MBB, std::next(I), DL,
6045 MCCFIInstruction::createAdjustCfaOffset(nullptr, -StackAdjustment));
6046 }
6047
6048 return true;
6049}
6050
6051 // LoadStackGuard has so far only been implemented for 64-bit MachO. A
6052 // different code sequence is needed for other targets.
6053 static void expandLoadStackGuard(MachineInstrBuilder &MIB,
6054 const TargetInstrInfo &TII) {
6055 MachineBasicBlock &MBB = *MIB->getParent();
6056 const DebugLoc &DL = MIB->getDebugLoc();
6057 Register Reg = MIB.getReg(0);
6058 const GlobalValue *GV =
6059 cast<GlobalValue>((*MIB->memoperands_begin())->getValue());
6060 auto Flags = MachineMemOperand::MOLoad |
6061 MachineMemOperand::MODereferenceable |
6062 MachineMemOperand::MOInvariant;
6063 MachineMemOperand *MMO = MBB.getParent()->getMachineMemOperand(
6064 MachinePointerInfo::getGOT(*MBB.getParent()), Flags, 8, Align(8));
6065 MachineBasicBlock::iterator I = MIB.getInstr();
6066
6067 BuildMI(MBB, I, DL, TII.get(X86::MOV64rm), Reg)
6068 .addReg(X86::RIP)
6069 .addImm(1)
6070 .addReg(0)
6071 .addGlobalAddress(GV, 0, X86II::MO_GOTPCREL)
6072 .addReg(0)
6073 .addMemOperand(MMO);
6074 MIB->setDebugLoc(DL);
6075 MIB->setDesc(TII.get(X86::MOV64rm));
6076 MIB.addReg(Reg, RegState::Kill).addImm(1).addReg(0).addImm(0).addReg(0);
6077}
6078
6079 static bool expandXorFP(MachineInstrBuilder &MIB, const TargetInstrInfo &TII) {
6080 MachineBasicBlock &MBB = *MIB->getParent();
6081 MachineFunction &MF = *MBB.getParent();
6082 const X86Subtarget &Subtarget = MF.getSubtarget<X86Subtarget>();
6083 const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
6084 unsigned XorOp =
6085 MIB->getOpcode() == X86::XOR64_FP ? X86::XOR64rr : X86::XOR32rr;
6086 MIB->setDesc(TII.get(XorOp));
6087 MIB.addReg(TRI->getFrameRegister(MF), RegState::Undef);
6088 return true;
6089}
6090
6091// This is used to handle spills for 128/256-bit registers when we have AVX512,
6092// but not VLX. If it uses an extended register we need to use an instruction
6093// that loads the lower 128/256-bit, but is available with only AVX512F.
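// For example, a VMOVAPSZ128rm_NOVLX that defines %xmm3 can use a plain VEX
// VMOVAPSrm, while the same load into %xmm20 (not encodable with VEX) is
// rewritten to VBROADCASTF32X4Zrm into %zmm20, whose low 128 bits hold the
// loaded value.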
6094 static bool expandNOVLXLoad(MachineInstrBuilder &MIB,
6095 const TargetRegisterInfo *TRI,
6096 const MCInstrDesc &LoadDesc,
6097 const MCInstrDesc &BroadcastDesc, unsigned SubIdx) {
6098 Register DestReg = MIB.getReg(0);
6099 // Check if DestReg is XMM16-31 or YMM16-31.
6100 if (TRI->getEncodingValue(DestReg) < 16) {
6101 // We can use a normal VEX encoded load.
6102 MIB->setDesc(LoadDesc);
6103 } else {
6104 // Use a 128/256-bit VBROADCAST instruction.
6105 MIB->setDesc(BroadcastDesc);
6106 // Change the destination to a 512-bit register.
6107 DestReg = TRI->getMatchingSuperReg(DestReg, SubIdx, &X86::VR512RegClass);
6108 MIB->getOperand(0).setReg(DestReg);
6109 }
6110 return true;
6111}
6112
6113// This is used to handle spills for 128/256-bit registers when we have AVX512,
6114// but not VLX. If it uses an extended register we need to use an instruction
6115// that stores the lower 128/256-bit, but is available with only AVX512F.
6116 static bool expandNOVLXStore(MachineInstrBuilder &MIB,
6117 const TargetRegisterInfo *TRI,
6118 const MCInstrDesc &StoreDesc,
6119 const MCInstrDesc &ExtractDesc, unsigned SubIdx) {
6120 Register SrcReg = MIB.getReg(X86::AddrNumOperands);
6121 // Check if SrcReg is XMM16-31 or YMM16-31.
6122 if (TRI->getEncodingValue(SrcReg) < 16) {
6123 // We can use a normal VEX encoded store.
6124 MIB->setDesc(StoreDesc);
6125 } else {
6126 // Use a VEXTRACTF instruction.
6127 MIB->setDesc(ExtractDesc);
6128 // Change the source to a 512-bit register.
6129 SrcReg = TRI->getMatchingSuperReg(SrcReg, SubIdx, &X86::VR512RegClass);
6130 MIB->getOperand(X86::AddrNumOperands).setReg(SrcReg);
6131 MIB.addImm(0x0); // Append immediate to extract from the lower bits.
6132 }
6133
6134 return true;
6135}
6136
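// expandSHXDROT (below) turns the SHLDROT/SHRDROT pseudos into double-shift
// instructions that use the same register for both inputs, e.g.
// 'SHLDROT32ri %1, 5' becomes 'SHLD32rri8 %1, %1, 5', i.e. a rotate by 5.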
6137 static bool expandSHXDROT(MachineInstrBuilder &MIB, const MCInstrDesc &Desc) {
6138 MIB->setDesc(Desc);
6139 int64_t ShiftAmt = MIB->getOperand(2).getImm();
6140 // Temporarily remove the immediate so we can add another source register.
6141 MIB->removeOperand(2);
6142 // Add the register. Don't copy the kill flag if there is one.
6143 MIB.addReg(MIB.getReg(1), getUndefRegState(MIB->getOperand(1).isUndef()));
6144 // Add back the immediate.
6145 MIB.addImm(ShiftAmt);
6146 return true;
6147}
6148
6149 static bool expandMOVSHP(MachineInstrBuilder &MIB, MachineInstr &MI,
6150 const TargetInstrInfo &TII, bool HasAVX) {
6151 unsigned NewOpc;
6152 if (MI.getOpcode() == X86::MOVSHPrm) {
6153 NewOpc = HasAVX ? X86::VMOVSSrm : X86::MOVSSrm;
6154 Register Reg = MI.getOperand(0).getReg();
6155 if (Reg > X86::XMM15)
6156 NewOpc = X86::VMOVSSZrm;
6157 } else {
6158 NewOpc = HasAVX ? X86::VMOVSSmr : X86::MOVSSmr;
6159 Register Reg = MI.getOperand(5).getReg();
6160 if (Reg > X86::XMM15)
6161 NewOpc = X86::VMOVSSZmr;
6162 }
6163
6164 MIB->setDesc(TII.get(NewOpc));
6165 return true;
6166}
6167
6168 bool X86InstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
6169 bool HasAVX = Subtarget.hasAVX();
6170 MachineInstrBuilder MIB(*MI.getParent()->getParent(), MI);
6171 switch (MI.getOpcode()) {
6172 case X86::MOV32r0:
6173 return Expand2AddrUndef(MIB, get(X86::XOR32rr));
6174 case X86::MOV32r1:
6175 return expandMOV32r1(MIB, *this, /*MinusOne=*/false);
6176 case X86::MOV32r_1:
6177 return expandMOV32r1(MIB, *this, /*MinusOne=*/true);
6178 case X86::MOV32ImmSExti8:
6179 case X86::MOV64ImmSExti8:
6180 return ExpandMOVImmSExti8(MIB, *this, Subtarget);
6181 case X86::SETB_C32r:
6182 return Expand2AddrUndef(MIB, get(X86::SBB32rr));
6183 case X86::SETB_C64r:
6184 return Expand2AddrUndef(MIB, get(X86::SBB64rr));
6185 case X86::MMX_SET0:
6186 return Expand2AddrUndef(MIB, get(X86::MMX_PXORrr));
6187 case X86::V_SET0:
6188 case X86::FsFLD0SS:
6189 case X86::FsFLD0SD:
6190 case X86::FsFLD0SH:
6191 case X86::FsFLD0F128:
6192 return Expand2AddrUndef(MIB, get(HasAVX ? X86::VXORPSrr : X86::XORPSrr));
6193 case X86::AVX_SET0: {
6194 assert(HasAVX && "AVX not supported");
6195 const TargetRegisterInfo *TRI = &getRegisterInfo();
6196 Register SrcReg = MIB.getReg(0);
6197 Register XReg = TRI->getSubReg(SrcReg, X86::sub_xmm);
6198 MIB->getOperand(0).setReg(XReg);
6199 Expand2AddrUndef(MIB, get(X86::VXORPSrr));
6200 MIB.addReg(SrcReg, RegState::ImplicitDefine);
6201 return true;
6202 }
6203 case X86::AVX512_128_SET0:
6204 case X86::AVX512_FsFLD0SH:
6205 case X86::AVX512_FsFLD0SS:
6206 case X86::AVX512_FsFLD0SD:
6207 case X86::AVX512_FsFLD0F128: {
6208 bool HasVLX = Subtarget.hasVLX();
6209 Register SrcReg = MIB.getReg(0);
6210 const TargetRegisterInfo *TRI = &getRegisterInfo();
6211 if (HasVLX || TRI->getEncodingValue(SrcReg) < 16)
6212 return Expand2AddrUndef(MIB,
6213 get(HasVLX ? X86::VPXORDZ128rr : X86::VXORPSrr));
6214 // Extended register without VLX. Use a larger XOR.
6215 SrcReg =
6216 TRI->getMatchingSuperReg(SrcReg, X86::sub_xmm, &X86::VR512RegClass);
6217 MIB->getOperand(0).setReg(SrcReg);
6218 return Expand2AddrUndef(MIB, get(X86::VPXORDZrr));
6219 }
6220 case X86::AVX512_256_SET0:
6221 case X86::AVX512_512_SET0: {
6222 bool HasVLX = Subtarget.hasVLX();
6223 Register SrcReg = MIB.getReg(0);
6224 const TargetRegisterInfo *TRI = &getRegisterInfo();
6225 if (HasVLX || TRI->getEncodingValue(SrcReg) < 16) {
6226 Register XReg = TRI->getSubReg(SrcReg, X86::sub_xmm);
6227 MIB->getOperand(0).setReg(XReg);
6228 Expand2AddrUndef(MIB, get(HasVLX ? X86::VPXORDZ128rr : X86::VXORPSrr));
6229 MIB.addReg(SrcReg, RegState::ImplicitDefine);
6230 return true;
6231 }
6232 if (MI.getOpcode() == X86::AVX512_256_SET0) {
6233 // No VLX so we must reference a zmm.
6234 MCRegister ZReg =
6235 TRI->getMatchingSuperReg(SrcReg, X86::sub_ymm, &X86::VR512RegClass);
6236 MIB->getOperand(0).setReg(ZReg);
6237 }
6238 return Expand2AddrUndef(MIB, get(X86::VPXORDZrr));
6239 }
6240 case X86::MOVSHPmr:
6241 case X86::MOVSHPrm:
6242 return expandMOVSHP(MIB, MI, *this, Subtarget.hasAVX());
6243 case X86::V_SETALLONES:
6244 return Expand2AddrUndef(MIB,
6245 get(HasAVX ? X86::VPCMPEQDrr : X86::PCMPEQDrr));
6246 case X86::AVX2_SETALLONES:
6247 return Expand2AddrUndef(MIB, get(X86::VPCMPEQDYrr));
6248 case X86::AVX1_SETALLONES: {
6249 Register Reg = MIB.getReg(0);
6250 // VCMPPSYrri with an immediate 0xf should produce VCMPTRUEPS.
6251 MIB->setDesc(get(X86::VCMPPSYrri));
6252 MIB.addReg(Reg, RegState::Undef).addReg(Reg, RegState::Undef).addImm(0xf);
6253 return true;
6254 }
6255 case X86::AVX512_512_SETALLONES: {
6256 Register Reg = MIB.getReg(0);
6257 MIB->setDesc(get(X86::VPTERNLOGDZrri));
6258 // VPTERNLOGD needs 3 register inputs and an immediate.
6259 // 0xff will return 1s for any input.
6260 MIB.addReg(Reg, RegState::Undef)
6261 .addReg(Reg, RegState::Undef)
6262 .addReg(Reg, RegState::Undef)
6263 .addImm(0xff);
6264 return true;
6265 }
6266 case X86::AVX512_512_SEXT_MASK_32:
6267 case X86::AVX512_512_SEXT_MASK_64: {
6268 Register Reg = MIB.getReg(0);
6269 Register MaskReg = MIB.getReg(1);
6270 unsigned MaskState = getRegState(MIB->getOperand(1));
6271 unsigned Opc = (MI.getOpcode() == X86::AVX512_512_SEXT_MASK_64)
6272 ? X86::VPTERNLOGQZrrikz
6273 : X86::VPTERNLOGDZrrikz;
6274 MI.removeOperand(1);
6275 MIB->setDesc(get(Opc));
6276 // VPTERNLOG needs 3 register inputs and an immediate.
6277 // 0xff will return 1s for any input.
6278 MIB.addReg(Reg, RegState::Undef)
6279 .addReg(MaskReg, MaskState)
6280 .addReg(Reg, RegState::Undef)
6281 .addReg(Reg, RegState::Undef)
6282 .addImm(0xff);
6283 return true;
6284 }
6285 case X86::VMOVAPSZ128rm_NOVLX:
6286 return expandNOVLXLoad(MIB, &getRegisterInfo(), get(X86::VMOVAPSrm),
6287 get(X86::VBROADCASTF32X4Zrm), X86::sub_xmm);
6288 case X86::VMOVUPSZ128rm_NOVLX:
6289 return expandNOVLXLoad(MIB, &getRegisterInfo(), get(X86::VMOVUPSrm),
6290 get(X86::VBROADCASTF32X4Zrm), X86::sub_xmm);
6291 case X86::VMOVAPSZ256rm_NOVLX:
6292 return expandNOVLXLoad(MIB, &getRegisterInfo(), get(X86::VMOVAPSYrm),
6293 get(X86::VBROADCASTF64X4Zrm), X86::sub_ymm);
6294 case X86::VMOVUPSZ256rm_NOVLX:
6295 return expandNOVLXLoad(MIB, &getRegisterInfo(), get(X86::VMOVUPSYrm),
6296 get(X86::VBROADCASTF64X4Zrm), X86::sub_ymm);
6297 case X86::VMOVAPSZ128mr_NOVLX:
6298 return expandNOVLXStore(MIB, &getRegisterInfo(), get(X86::VMOVAPSmr),
6299 get(X86::VEXTRACTF32X4Zmri), X86::sub_xmm);
6300 case X86::VMOVUPSZ128mr_NOVLX:
6301 return expandNOVLXStore(MIB, &getRegisterInfo(), get(X86::VMOVUPSmr),
6302 get(X86::VEXTRACTF32X4Zmri), X86::sub_xmm);
6303 case X86::VMOVAPSZ256mr_NOVLX:
6304 return expandNOVLXStore(MIB, &getRegisterInfo(), get(X86::VMOVAPSYmr),
6305 get(X86::VEXTRACTF64X4Zmri), X86::sub_ymm);
6306 case X86::VMOVUPSZ256mr_NOVLX:
6307 return expandNOVLXStore(MIB, &getRegisterInfo(), get(X86::VMOVUPSYmr),
6308 get(X86::VEXTRACTF64X4Zmri), X86::sub_ymm);
6309 case X86::MOV32ri64: {
6310 Register Reg = MIB.getReg(0);
6311 Register Reg32 = RI.getSubReg(Reg, X86::sub_32bit);
6312 MI.setDesc(get(X86::MOV32ri));
6313 MIB->getOperand(0).setReg(Reg32);
6314 MIB.addReg(Reg, RegState::ImplicitDefine);
6315 return true;
6316 }
6317
6318 case X86::RDFLAGS32:
6319 case X86::RDFLAGS64: {
6320 unsigned Is64Bit = MI.getOpcode() == X86::RDFLAGS64;
6321 MachineBasicBlock &MBB = *MIB->getParent();
6322
6323 MachineInstr *NewMI = BuildMI(MBB, MI, MIB->getDebugLoc(),
6324 get(Is64Bit ? X86::PUSHF64 : X86::PUSHF32))
6325 .getInstr();
6326
6327 // Permit reads of the EFLAGS and DF registers without them being defined.
6328 // This intrinsic exists to read external processor state in flags, such as
6329 // the trap flag, interrupt flag, and direction flag, none of which are
6330 // modeled by the backend.
6331 assert(NewMI->getOperand(2).getReg() == X86::EFLAGS &&
6332 "Unexpected register in operand! Should be EFLAGS.");
6333 NewMI->getOperand(2).setIsUndef();
6334 assert(NewMI->getOperand(3).getReg() == X86::DF &&
6335 "Unexpected register in operand! Should be DF.");
6336 NewMI->getOperand(3).setIsUndef();
6337
6338 MIB->setDesc(get(Is64Bit ? X86::POP64r : X86::POP32r));
6339 return true;
6340 }
6341
6342 case X86::WRFLAGS32:
6343 case X86::WRFLAGS64: {
6344 unsigned Is64Bit = MI.getOpcode() == X86::WRFLAGS64;
6345 MachineBasicBlock &MBB = *MIB->getParent();
6346
6347 BuildMI(MBB, MI, MIB->getDebugLoc(),
6348 get(Is64Bit ? X86::PUSH64r : X86::PUSH32r))
6349 .addReg(MI.getOperand(0).getReg());
6350 BuildMI(MBB, MI, MIB->getDebugLoc(),
6351 get(Is64Bit ? X86::POPF64 : X86::POPF32));
6352 MI.eraseFromParent();
6353 return true;
6354 }
6355
6356 // KNL does not recognize dependency-breaking idioms for mask registers,
6357 // so kxnor %k1, %k1, %k2 has a RAW dependence on %k1.
6358 // Using %k0 as the undef input register is a performance heuristic based
6359 // on the assumption that %k0 is used less frequently than the other mask
6360 // registers, since it is not usable as a write mask.
6361 // FIXME: A more advanced approach would be to choose the best input mask
6362 // register based on context.
6363 case X86::KSET0W:
6364 return Expand2AddrKreg(MIB, get(X86::KXORWkk), X86::K0);
6365 case X86::KSET0D:
6366 return Expand2AddrKreg(MIB, get(X86::KXORDkk), X86::K0);
6367 case X86::KSET0Q:
6368 return Expand2AddrKreg(MIB, get(X86::KXORQkk), X86::K0);
6369 case X86::KSET1W:
6370 return Expand2AddrKreg(MIB, get(X86::KXNORWkk), X86::K0);
6371 case X86::KSET1D:
6372 return Expand2AddrKreg(MIB, get(X86::KXNORDkk), X86::K0);
6373 case X86::KSET1Q:
6374 return Expand2AddrKreg(MIB, get(X86::KXNORQkk), X86::K0);
6375 case TargetOpcode::LOAD_STACK_GUARD:
6376 expandLoadStackGuard(MIB, *this);
6377 return true;
6378 case X86::XOR64_FP:
6379 case X86::XOR32_FP:
6380 return expandXorFP(MIB, *this);
6381 case X86::SHLDROT32ri:
6382 return expandSHXDROT(MIB, get(X86::SHLD32rri8));
6383 case X86::SHLDROT64ri:
6384 return expandSHXDROT(MIB, get(X86::SHLD64rri8));
6385 case X86::SHRDROT32ri:
6386 return expandSHXDROT(MIB, get(X86::SHRD32rri8));
6387 case X86::SHRDROT64ri:
6388 return expandSHXDROT(MIB, get(X86::SHRD64rri8));
6389 case X86::ADD8rr_DB:
6390 MIB->setDesc(get(X86::OR8rr));
6391 break;
6392 case X86::ADD16rr_DB:
6393 MIB->setDesc(get(X86::OR16rr));
6394 break;
6395 case X86::ADD32rr_DB:
6396 MIB->setDesc(get(X86::OR32rr));
6397 break;
6398 case X86::ADD64rr_DB:
6399 MIB->setDesc(get(X86::OR64rr));
6400 break;
6401 case X86::ADD8ri_DB:
6402 MIB->setDesc(get(X86::OR8ri));
6403 break;
6404 case X86::ADD16ri_DB:
6405 MIB->setDesc(get(X86::OR16ri));
6406 break;
6407 case X86::ADD32ri_DB:
6408 MIB->setDesc(get(X86::OR32ri));
6409 break;
6410 case X86::ADD64ri32_DB:
6411 MIB->setDesc(get(X86::OR64ri32));
6412 break;
6413 }
6414 return false;
6415}
6416
6417 /// Return true for all instructions that only update
6418 /// the first 32 or 64 bits of the destination register and leave the rest
6419 /// unmodified. This can be used to avoid folding loads if the instructions
6420 /// only update part of the destination register, and the non-updated part is
6421 /// not needed, e.g. cvtss2sd, sqrtss. Unfolding the load from these
6422 /// instructions breaks the partial register dependency and can improve
6423 /// performance, e.g.:
6424///
6425/// movss (%rdi), %xmm0
6426/// cvtss2sd %xmm0, %xmm0
6427///
6428/// Instead of
6429/// cvtss2sd (%rdi), %xmm0
6430///
6431/// FIXME: This should be turned into a TSFlags.
6432///
6433static bool hasPartialRegUpdate(unsigned Opcode, const X86Subtarget &Subtarget,
6434 bool ForLoadFold = false) {
6435 switch (Opcode) {
6436 case X86::CVTSI2SSrr:
6437 case X86::CVTSI2SSrm:
6438 case X86::CVTSI642SSrr:
6439 case X86::CVTSI642SSrm:
6440 case X86::CVTSI2SDrr:
6441 case X86::CVTSI2SDrm:
6442 case X86::CVTSI642SDrr:
6443 case X86::CVTSI642SDrm:
6444 // Load folding won't affect the undef register update since the input is
6445 // a GPR.
6446 return !ForLoadFold;
6447 case X86::CVTSD2SSrr:
6448 case X86::CVTSD2SSrm:
6449 case X86::CVTSS2SDrr:
6450 case X86::CVTSS2SDrm:
6451 case X86::MOVHPDrm:
6452 case X86::MOVHPSrm:
6453 case X86::MOVLPDrm:
6454 case X86::MOVLPSrm:
6455 case X86::RCPSSr:
6456 case X86::RCPSSm:
6457 case X86::RCPSSr_Int:
6458 case X86::RCPSSm_Int:
6459 case X86::ROUNDSDri:
6460 case X86::ROUNDSDmi:
6461 case X86::ROUNDSSri:
6462 case X86::ROUNDSSmi:
6463 case X86::RSQRTSSr:
6464 case X86::RSQRTSSm:
6465 case X86::RSQRTSSr_Int:
6466 case X86::RSQRTSSm_Int:
6467 case X86::SQRTSSr:
6468 case X86::SQRTSSm:
6469 case X86::SQRTSSr_Int:
6470 case X86::SQRTSSm_Int:
6471 case X86::SQRTSDr:
6472 case X86::SQRTSDm:
6473 case X86::SQRTSDr_Int:
6474 case X86::SQRTSDm_Int:
6475 return true;
6476 case X86::VFCMULCPHZ128rm:
6477 case X86::VFCMULCPHZ128rmb:
6478 case X86::VFCMULCPHZ128rmbkz:
6479 case X86::VFCMULCPHZ128rmkz:
6480 case X86::VFCMULCPHZ128rr:
6481 case X86::VFCMULCPHZ128rrkz:
6482 case X86::VFCMULCPHZ256rm:
6483 case X86::VFCMULCPHZ256rmb:
6484 case X86::VFCMULCPHZ256rmbkz:
6485 case X86::VFCMULCPHZ256rmkz:
6486 case X86::VFCMULCPHZ256rr:
6487 case X86::VFCMULCPHZ256rrkz:
6488 case X86::VFCMULCPHZrm:
6489 case X86::VFCMULCPHZrmb:
6490 case X86::VFCMULCPHZrmbkz:
6491 case X86::VFCMULCPHZrmkz:
6492 case X86::VFCMULCPHZrr:
6493 case X86::VFCMULCPHZrrb:
6494 case X86::VFCMULCPHZrrbkz:
6495 case X86::VFCMULCPHZrrkz:
6496 case X86::VFMULCPHZ128rm:
6497 case X86::VFMULCPHZ128rmb:
6498 case X86::VFMULCPHZ128rmbkz:
6499 case X86::VFMULCPHZ128rmkz:
6500 case X86::VFMULCPHZ128rr:
6501 case X86::VFMULCPHZ128rrkz:
6502 case X86::VFMULCPHZ256rm:
6503 case X86::VFMULCPHZ256rmb:
6504 case X86::VFMULCPHZ256rmbkz:
6505 case X86::VFMULCPHZ256rmkz:
6506 case X86::VFMULCPHZ256rr:
6507 case X86::VFMULCPHZ256rrkz:
6508 case X86::VFMULCPHZrm:
6509 case X86::VFMULCPHZrmb:
6510 case X86::VFMULCPHZrmbkz:
6511 case X86::VFMULCPHZrmkz:
6512 case X86::VFMULCPHZrr:
6513 case X86::VFMULCPHZrrb:
6514 case X86::VFMULCPHZrrbkz:
6515 case X86::VFMULCPHZrrkz:
6516 case X86::VFCMULCSHZrm:
6517 case X86::VFCMULCSHZrmkz:
6518 case X86::VFCMULCSHZrr:
6519 case X86::VFCMULCSHZrrb:
6520 case X86::VFCMULCSHZrrbkz:
6521 case X86::VFCMULCSHZrrkz:
6522 case X86::VFMULCSHZrm:
6523 case X86::VFMULCSHZrmkz:
6524 case X86::VFMULCSHZrr:
6525 case X86::VFMULCSHZrrb:
6526 case X86::VFMULCSHZrrbkz:
6527 case X86::VFMULCSHZrrkz:
6528 return Subtarget.hasMULCFalseDeps();
6529 case X86::VPERMDYrm:
6530 case X86::VPERMDYrr:
6531 case X86::VPERMQYmi:
6532 case X86::VPERMQYri:
6533 case X86::VPERMPSYrm:
6534 case X86::VPERMPSYrr:
6535 case X86::VPERMPDYmi:
6536 case X86::VPERMPDYri:
6537 case X86::VPERMDZ256rm:
6538 case X86::VPERMDZ256rmb:
6539 case X86::VPERMDZ256rmbkz:
6540 case X86::VPERMDZ256rmkz:
6541 case X86::VPERMDZ256rr:
6542 case X86::VPERMDZ256rrkz:
6543 case X86::VPERMDZrm:
6544 case X86::VPERMDZrmb:
6545 case X86::VPERMDZrmbkz:
6546 case X86::VPERMDZrmkz:
6547 case X86::VPERMDZrr:
6548 case X86::VPERMDZrrkz:
6549 case X86::VPERMQZ256mbi:
6550 case X86::VPERMQZ256mbikz:
6551 case X86::VPERMQZ256mi:
6552 case X86::VPERMQZ256mikz:
6553 case X86::VPERMQZ256ri:
6554 case X86::VPERMQZ256rikz:
6555 case X86::VPERMQZ256rm:
6556 case X86::VPERMQZ256rmb:
6557 case X86::VPERMQZ256rmbkz:
6558 case X86::VPERMQZ256rmkz:
6559 case X86::VPERMQZ256rr:
6560 case X86::VPERMQZ256rrkz:
6561 case X86::VPERMQZmbi:
6562 case X86::VPERMQZmbikz:
6563 case X86::VPERMQZmi:
6564 case X86::VPERMQZmikz:
6565 case X86::VPERMQZri:
6566 case X86::VPERMQZrikz:
6567 case X86::VPERMQZrm:
6568 case X86::VPERMQZrmb:
6569 case X86::VPERMQZrmbkz:
6570 case X86::VPERMQZrmkz:
6571 case X86::VPERMQZrr:
6572 case X86::VPERMQZrrkz:
6573 case X86::VPERMPSZ256rm:
6574 case X86::VPERMPSZ256rmb:
6575 case X86::VPERMPSZ256rmbkz:
6576 case X86::VPERMPSZ256rmkz:
6577 case X86::VPERMPSZ256rr:
6578 case X86::VPERMPSZ256rrkz:
6579 case X86::VPERMPSZrm:
6580 case X86::VPERMPSZrmb:
6581 case X86::VPERMPSZrmbkz:
6582 case X86::VPERMPSZrmkz:
6583 case X86::VPERMPSZrr:
6584 case X86::VPERMPSZrrkz:
6585 case X86::VPERMPDZ256mbi:
6586 case X86::VPERMPDZ256mbikz:
6587 case X86::VPERMPDZ256mi:
6588 case X86::VPERMPDZ256mikz:
6589 case X86::VPERMPDZ256ri:
6590 case X86::VPERMPDZ256rikz:
6591 case X86::VPERMPDZ256rm:
6592 case X86::VPERMPDZ256rmb:
6593 case X86::VPERMPDZ256rmbkz:
6594 case X86::VPERMPDZ256rmkz:
6595 case X86::VPERMPDZ256rr:
6596 case X86::VPERMPDZ256rrkz:
6597 case X86::VPERMPDZmbi:
6598 case X86::VPERMPDZmbikz:
6599 case X86::VPERMPDZmi:
6600 case X86::VPERMPDZmikz:
6601 case X86::VPERMPDZri:
6602 case X86::VPERMPDZrikz:
6603 case X86::VPERMPDZrm:
6604 case X86::VPERMPDZrmb:
6605 case X86::VPERMPDZrmbkz:
6606 case X86::VPERMPDZrmkz:
6607 case X86::VPERMPDZrr:
6608 case X86::VPERMPDZrrkz:
6609 return Subtarget.hasPERMFalseDeps();
6610 case X86::VRANGEPDZ128rmbi:
6611 case X86::VRANGEPDZ128rmbikz:
6612 case X86::VRANGEPDZ128rmi:
6613 case X86::VRANGEPDZ128rmikz:
6614 case X86::VRANGEPDZ128rri:
6615 case X86::VRANGEPDZ128rrikz:
6616 case X86::VRANGEPDZ256rmbi:
6617 case X86::VRANGEPDZ256rmbikz:
6618 case X86::VRANGEPDZ256rmi:
6619 case X86::VRANGEPDZ256rmikz:
6620 case X86::VRANGEPDZ256rri:
6621 case X86::VRANGEPDZ256rrikz:
6622 case X86::VRANGEPDZrmbi:
6623 case X86::VRANGEPDZrmbikz:
6624 case X86::VRANGEPDZrmi:
6625 case X86::VRANGEPDZrmikz:
6626 case X86::VRANGEPDZrri:
6627 case X86::VRANGEPDZrrib:
6628 case X86::VRANGEPDZrribkz:
6629 case X86::VRANGEPDZrrikz:
6630 case X86::VRANGEPSZ128rmbi:
6631 case X86::VRANGEPSZ128rmbikz:
6632 case X86::VRANGEPSZ128rmi:
6633 case X86::VRANGEPSZ128rmikz:
6634 case X86::VRANGEPSZ128rri:
6635 case X86::VRANGEPSZ128rrikz:
6636 case X86::VRANGEPSZ256rmbi:
6637 case X86::VRANGEPSZ256rmbikz:
6638 case X86::VRANGEPSZ256rmi:
6639 case X86::VRANGEPSZ256rmikz:
6640 case X86::VRANGEPSZ256rri:
6641 case X86::VRANGEPSZ256rrikz:
6642 case X86::VRANGEPSZrmbi:
6643 case X86::VRANGEPSZrmbikz:
6644 case X86::VRANGEPSZrmi:
6645 case X86::VRANGEPSZrmikz:
6646 case X86::VRANGEPSZrri:
6647 case X86::VRANGEPSZrrib:
6648 case X86::VRANGEPSZrribkz:
6649 case X86::VRANGEPSZrrikz:
6650 case X86::VRANGESDZrmi:
6651 case X86::VRANGESDZrmikz:
6652 case X86::VRANGESDZrri:
6653 case X86::VRANGESDZrrib:
6654 case X86::VRANGESDZrribkz:
6655 case X86::VRANGESDZrrikz:
6656 case X86::VRANGESSZrmi:
6657 case X86::VRANGESSZrmikz:
6658 case X86::VRANGESSZrri:
6659 case X86::VRANGESSZrrib:
6660 case X86::VRANGESSZrribkz:
6661 case X86::VRANGESSZrrikz:
6662 return Subtarget.hasRANGEFalseDeps();
6663 case X86::VGETMANTSSZrmi:
6664 case X86::VGETMANTSSZrmikz:
6665 case X86::VGETMANTSSZrri:
6666 case X86::VGETMANTSSZrrib:
6667 case X86::VGETMANTSSZrribkz:
6668 case X86::VGETMANTSSZrrikz:
6669 case X86::VGETMANTSDZrmi:
6670 case X86::VGETMANTSDZrmikz:
6671 case X86::VGETMANTSDZrri:
6672 case X86::VGETMANTSDZrrib:
6673 case X86::VGETMANTSDZrribkz:
6674 case X86::VGETMANTSDZrrikz:
6675 case X86::VGETMANTSHZrmi:
6676 case X86::VGETMANTSHZrmikz:
6677 case X86::VGETMANTSHZrri:
6678 case X86::VGETMANTSHZrrib:
6679 case X86::VGETMANTSHZrribkz:
6680 case X86::VGETMANTSHZrrikz:
6681 case X86::VGETMANTPSZ128rmbi:
6682 case X86::VGETMANTPSZ128rmbikz:
6683 case X86::VGETMANTPSZ128rmi:
6684 case X86::VGETMANTPSZ128rmikz:
6685 case X86::VGETMANTPSZ256rmbi:
6686 case X86::VGETMANTPSZ256rmbikz:
6687 case X86::VGETMANTPSZ256rmi:
6688 case X86::VGETMANTPSZ256rmikz:
6689 case X86::VGETMANTPSZrmbi:
6690 case X86::VGETMANTPSZrmbikz:
6691 case X86::VGETMANTPSZrmi:
6692 case X86::VGETMANTPSZrmikz:
6693 case X86::VGETMANTPDZ128rmbi:
6694 case X86::VGETMANTPDZ128rmbikz:
6695 case X86::VGETMANTPDZ128rmi:
6696 case X86::VGETMANTPDZ128rmikz:
6697 case X86::VGETMANTPDZ256rmbi:
6698 case X86::VGETMANTPDZ256rmbikz:
6699 case X86::VGETMANTPDZ256rmi:
6700 case X86::VGETMANTPDZ256rmikz:
6701 case X86::VGETMANTPDZrmbi:
6702 case X86::VGETMANTPDZrmbikz:
6703 case X86::VGETMANTPDZrmi:
6704 case X86::VGETMANTPDZrmikz:
6705 return Subtarget.hasGETMANTFalseDeps();
6706 case X86::VPMULLQZ128rm:
6707 case X86::VPMULLQZ128rmb:
6708 case X86::VPMULLQZ128rmbkz:
6709 case X86::VPMULLQZ128rmkz:
6710 case X86::VPMULLQZ128rr:
6711 case X86::VPMULLQZ128rrkz:
6712 case X86::VPMULLQZ256rm:
6713 case X86::VPMULLQZ256rmb:
6714 case X86::VPMULLQZ256rmbkz:
6715 case X86::VPMULLQZ256rmkz:
6716 case X86::VPMULLQZ256rr:
6717 case X86::VPMULLQZ256rrkz:
6718 case X86::VPMULLQZrm:
6719 case X86::VPMULLQZrmb:
6720 case X86::VPMULLQZrmbkz:
6721 case X86::VPMULLQZrmkz:
6722 case X86::VPMULLQZrr:
6723 case X86::VPMULLQZrrkz:
6724 return Subtarget.hasMULLQFalseDeps();
6725 // GPR
6726 case X86::POPCNT32rm:
6727 case X86::POPCNT32rr:
6728 case X86::POPCNT64rm:
6729 case X86::POPCNT64rr:
6730 return Subtarget.hasPOPCNTFalseDeps();
6731 case X86::LZCNT32rm:
6732 case X86::LZCNT32rr:
6733 case X86::LZCNT64rm:
6734 case X86::LZCNT64rr:
6735 case X86::TZCNT32rm:
6736 case X86::TZCNT32rr:
6737 case X86::TZCNT64rm:
6738 case X86::TZCNT64rr:
6739 return Subtarget.hasLZCNTFalseDeps();
6740 }
6741
6742 return false;
6743}
6744
6745/// Inform the BreakFalseDeps pass how many idle
6746/// instructions we would like before a partial register update.
6747 unsigned X86InstrInfo::getPartialRegUpdateClearance(
6748 const MachineInstr &MI, unsigned OpNum,
6749 const TargetRegisterInfo *TRI) const {
6750
6751 if (OpNum != 0)
6752 return 0;
6753
6754 // NDD ops with 8/16b results may appear to be partial register
6755 // updates after register allocation.
6756 bool HasNDDPartialWrite = false;
6757 if (X86II::hasNewDataDest(MI.getDesc().TSFlags)) {
6758 Register Reg = MI.getOperand(0).getReg();
6759 if (!Reg.isVirtual())
6760 HasNDDPartialWrite =
6761 X86::GR8RegClass.contains(Reg) || X86::GR16RegClass.contains(Reg);
6762 }
6763
6764 if (!(HasNDDPartialWrite || hasPartialRegUpdate(MI.getOpcode(), Subtarget)))
6765 return 0;
6766
6767 // Check if the result register is also used as a source.
6768 // For non-NDD ops, this means a partial update is wanted, hence we return 0.
6769 // For NDD ops, this means it is possible to compress the instruction
6770 // to a legacy form in CompressEVEX, which would create an unwanted partial
6771 // update, so we return the clearance.
6772 const MachineOperand &MO = MI.getOperand(0);
6773 Register Reg = MO.getReg();
6774 bool ReadsReg = false;
6775 if (Reg.isVirtual())
6776 ReadsReg = (MO.readsReg() || MI.readsVirtualRegister(Reg));
6777 else
6778 ReadsReg = MI.readsRegister(Reg, TRI);
6779 if (ReadsReg != HasNDDPartialWrite)
6780 return 0;
6781
6782 // If any instructions in the clearance range are reading Reg, insert a
6783 // dependency-breaking instruction, which is inexpensive and is likely to
6784 // be hidden in other instructions' cycles.
6785 return PartialRegUpdateClearance;
6786}
6787
6788 // Return true for any instruction that copies the high bits of the first source
6789// operand into the unused high bits of the destination operand.
6790// Also returns true for instructions that have two inputs where one may
6791// be undef and we want it to use the same register as the other input.
6792static bool hasUndefRegUpdate(unsigned Opcode, unsigned OpNum,
6793 bool ForLoadFold = false) {
6794 // Set the OpNum parameter to the first source operand.
6795 switch (Opcode) {
6796 case X86::MMX_PUNPCKHBWrr:
6797 case X86::MMX_PUNPCKHWDrr:
6798 case X86::MMX_PUNPCKHDQrr:
6799 case X86::MMX_PUNPCKLBWrr:
6800 case X86::MMX_PUNPCKLWDrr:
6801 case X86::MMX_PUNPCKLDQrr:
6802 case X86::MOVHLPSrr:
6803 case X86::PACKSSWBrr:
6804 case X86::PACKUSWBrr:
6805 case X86::PACKSSDWrr:
6806 case X86::PACKUSDWrr:
6807 case X86::PUNPCKHBWrr:
6808 case X86::PUNPCKLBWrr:
6809 case X86::PUNPCKHWDrr:
6810 case X86::PUNPCKLWDrr:
6811 case X86::PUNPCKHDQrr:
6812 case X86::PUNPCKLDQrr:
6813 case X86::PUNPCKHQDQrr:
6814 case X86::PUNPCKLQDQrr:
6815 case X86::SHUFPDrri:
6816 case X86::SHUFPSrri:
6817 // These instructions are sometimes used with an undef first or second
6818 // source. Return true here so BreakFalseDeps will assign this source to the
6819 // same register as the first source to avoid a false dependency.
6820 // Operand 1 of these instructions is tied so they're separate from their
6821 // VEX counterparts.
6822 return OpNum == 2 && !ForLoadFold;
6823
6824 case X86::VMOVLHPSrr:
6825 case X86::VMOVLHPSZrr:
6826 case X86::VPACKSSWBrr:
6827 case X86::VPACKUSWBrr:
6828 case X86::VPACKSSDWrr:
6829 case X86::VPACKUSDWrr:
6830 case X86::VPACKSSWBZ128rr:
6831 case X86::VPACKUSWBZ128rr:
6832 case X86::VPACKSSDWZ128rr:
6833 case X86::VPACKUSDWZ128rr:
6834 case X86::VPERM2F128rri:
6835 case X86::VPERM2I128rri:
6836 case X86::VSHUFF32X4Z256rri:
6837 case X86::VSHUFF32X4Zrri:
6838 case X86::VSHUFF64X2Z256rri:
6839 case X86::VSHUFF64X2Zrri:
6840 case X86::VSHUFI32X4Z256rri:
6841 case X86::VSHUFI32X4Zrri:
6842 case X86::VSHUFI64X2Z256rri:
6843 case X86::VSHUFI64X2Zrri:
6844 case X86::VPUNPCKHBWrr:
6845 case X86::VPUNPCKLBWrr:
6846 case X86::VPUNPCKHBWYrr:
6847 case X86::VPUNPCKLBWYrr:
6848 case X86::VPUNPCKHBWZ128rr:
6849 case X86::VPUNPCKLBWZ128rr:
6850 case X86::VPUNPCKHBWZ256rr:
6851 case X86::VPUNPCKLBWZ256rr:
6852 case X86::VPUNPCKHBWZrr:
6853 case X86::VPUNPCKLBWZrr:
6854 case X86::VPUNPCKHWDrr:
6855 case X86::VPUNPCKLWDrr:
6856 case X86::VPUNPCKHWDYrr:
6857 case X86::VPUNPCKLWDYrr:
6858 case X86::VPUNPCKHWDZ128rr:
6859 case X86::VPUNPCKLWDZ128rr:
6860 case X86::VPUNPCKHWDZ256rr:
6861 case X86::VPUNPCKLWDZ256rr:
6862 case X86::VPUNPCKHWDZrr:
6863 case X86::VPUNPCKLWDZrr:
6864 case X86::VPUNPCKHDQrr:
6865 case X86::VPUNPCKLDQrr:
6866 case X86::VPUNPCKHDQYrr:
6867 case X86::VPUNPCKLDQYrr:
6868 case X86::VPUNPCKHDQZ128rr:
6869 case X86::VPUNPCKLDQZ128rr:
6870 case X86::VPUNPCKHDQZ256rr:
6871 case X86::VPUNPCKLDQZ256rr:
6872 case X86::VPUNPCKHDQZrr:
6873 case X86::VPUNPCKLDQZrr:
6874 case X86::VPUNPCKHQDQrr:
6875 case X86::VPUNPCKLQDQrr:
6876 case X86::VPUNPCKHQDQYrr:
6877 case X86::VPUNPCKLQDQYrr:
6878 case X86::VPUNPCKHQDQZ128rr:
6879 case X86::VPUNPCKLQDQZ128rr:
6880 case X86::VPUNPCKHQDQZ256rr:
6881 case X86::VPUNPCKLQDQZ256rr:
6882 case X86::VPUNPCKHQDQZrr:
6883 case X86::VPUNPCKLQDQZrr:
6884 // These instructions are sometimes used with an undef first or second
6885 // source. Return true here so BreakFalseDeps will assign this source to the
6886 // same register as the first source to avoid a false dependency.
6887 return (OpNum == 1 || OpNum == 2) && !ForLoadFold;
6888
6889 case X86::VCVTSI2SSrr:
6890 case X86::VCVTSI2SSrm:
6891 case X86::VCVTSI2SSrr_Int:
6892 case X86::VCVTSI2SSrm_Int:
6893 case X86::VCVTSI642SSrr:
6894 case X86::VCVTSI642SSrm:
6895 case X86::VCVTSI642SSrr_Int:
6896 case X86::VCVTSI642SSrm_Int:
6897 case X86::VCVTSI2SDrr:
6898 case X86::VCVTSI2SDrm:
6899 case X86::VCVTSI2SDrr_Int:
6900 case X86::VCVTSI2SDrm_Int:
6901 case X86::VCVTSI642SDrr:
6902 case X86::VCVTSI642SDrm:
6903 case X86::VCVTSI642SDrr_Int:
6904 case X86::VCVTSI642SDrm_Int:
6905 // AVX-512
6906 case X86::VCVTSI2SSZrr:
6907 case X86::VCVTSI2SSZrm:
6908 case X86::VCVTSI2SSZrr_Int:
6909 case X86::VCVTSI2SSZrrb_Int:
6910 case X86::VCVTSI2SSZrm_Int:
6911 case X86::VCVTSI642SSZrr:
6912 case X86::VCVTSI642SSZrm:
6913 case X86::VCVTSI642SSZrr_Int:
6914 case X86::VCVTSI642SSZrrb_Int:
6915 case X86::VCVTSI642SSZrm_Int:
6916 case X86::VCVTSI2SDZrr:
6917 case X86::VCVTSI2SDZrm:
6918 case X86::VCVTSI2SDZrr_Int:
6919 case X86::VCVTSI2SDZrm_Int:
6920 case X86::VCVTSI642SDZrr:
6921 case X86::VCVTSI642SDZrm:
6922 case X86::VCVTSI642SDZrr_Int:
6923 case X86::VCVTSI642SDZrrb_Int:
6924 case X86::VCVTSI642SDZrm_Int:
6925 case X86::VCVTUSI2SSZrr:
6926 case X86::VCVTUSI2SSZrm:
6927 case X86::VCVTUSI2SSZrr_Int:
6928 case X86::VCVTUSI2SSZrrb_Int:
6929 case X86::VCVTUSI2SSZrm_Int:
6930 case X86::VCVTUSI642SSZrr:
6931 case X86::VCVTUSI642SSZrm:
6932 case X86::VCVTUSI642SSZrr_Int:
6933 case X86::VCVTUSI642SSZrrb_Int:
6934 case X86::VCVTUSI642SSZrm_Int:
6935 case X86::VCVTUSI2SDZrr:
6936 case X86::VCVTUSI2SDZrm:
6937 case X86::VCVTUSI2SDZrr_Int:
6938 case X86::VCVTUSI2SDZrm_Int:
6939 case X86::VCVTUSI642SDZrr:
6940 case X86::VCVTUSI642SDZrm:
6941 case X86::VCVTUSI642SDZrr_Int:
6942 case X86::VCVTUSI642SDZrrb_Int:
6943 case X86::VCVTUSI642SDZrm_Int:
6944 case X86::VCVTSI2SHZrr:
6945 case X86::VCVTSI2SHZrm:
6946 case X86::VCVTSI2SHZrr_Int:
6947 case X86::VCVTSI2SHZrrb_Int:
6948 case X86::VCVTSI2SHZrm_Int:
6949 case X86::VCVTSI642SHZrr:
6950 case X86::VCVTSI642SHZrm:
6951 case X86::VCVTSI642SHZrr_Int:
6952 case X86::VCVTSI642SHZrrb_Int:
6953 case X86::VCVTSI642SHZrm_Int:
6954 case X86::VCVTUSI2SHZrr:
6955 case X86::VCVTUSI2SHZrm:
6956 case X86::VCVTUSI2SHZrr_Int:
6957 case X86::VCVTUSI2SHZrrb_Int:
6958 case X86::VCVTUSI2SHZrm_Int:
6959 case X86::VCVTUSI642SHZrr:
6960 case X86::VCVTUSI642SHZrm:
6961 case X86::VCVTUSI642SHZrr_Int:
6962 case X86::VCVTUSI642SHZrrb_Int:
6963 case X86::VCVTUSI642SHZrm_Int:
6964 // Load folding won't affect the undef register update since the input is
6965 // a GPR.
6966 return OpNum == 1 && !ForLoadFold;
6967 case X86::VCVTSD2SSrr:
6968 case X86::VCVTSD2SSrm:
6969 case X86::VCVTSD2SSrr_Int:
6970 case X86::VCVTSD2SSrm_Int:
6971 case X86::VCVTSS2SDrr:
6972 case X86::VCVTSS2SDrm:
6973 case X86::VCVTSS2SDrr_Int:
6974 case X86::VCVTSS2SDrm_Int:
6975 case X86::VRCPSSr:
6976 case X86::VRCPSSr_Int:
6977 case X86::VRCPSSm:
6978 case X86::VRCPSSm_Int:
6979 case X86::VROUNDSDri:
6980 case X86::VROUNDSDmi:
6981 case X86::VROUNDSDri_Int:
6982 case X86::VROUNDSDmi_Int:
6983 case X86::VROUNDSSri:
6984 case X86::VROUNDSSmi:
6985 case X86::VROUNDSSri_Int:
6986 case X86::VROUNDSSmi_Int:
6987 case X86::VRSQRTSSr:
6988 case X86::VRSQRTSSr_Int:
6989 case X86::VRSQRTSSm:
6990 case X86::VRSQRTSSm_Int:
6991 case X86::VSQRTSSr:
6992 case X86::VSQRTSSr_Int:
6993 case X86::VSQRTSSm:
6994 case X86::VSQRTSSm_Int:
6995 case X86::VSQRTSDr:
6996 case X86::VSQRTSDr_Int:
6997 case X86::VSQRTSDm:
6998 case X86::VSQRTSDm_Int:
6999 // AVX-512
7000 case X86::VCVTSD2SSZrr:
7001 case X86::VCVTSD2SSZrr_Int:
7002 case X86::VCVTSD2SSZrrb_Int:
7003 case X86::VCVTSD2SSZrm:
7004 case X86::VCVTSD2SSZrm_Int:
7005 case X86::VCVTSS2SDZrr:
7006 case X86::VCVTSS2SDZrr_Int:
7007 case X86::VCVTSS2SDZrrb_Int:
7008 case X86::VCVTSS2SDZrm:
7009 case X86::VCVTSS2SDZrm_Int:
7010 case X86::VGETEXPSDZr:
7011 case X86::VGETEXPSDZrb:
7012 case X86::VGETEXPSDZm:
7013 case X86::VGETEXPSSZr:
7014 case X86::VGETEXPSSZrb:
7015 case X86::VGETEXPSSZm:
7016 case X86::VGETMANTSDZrri:
7017 case X86::VGETMANTSDZrrib:
7018 case X86::VGETMANTSDZrmi:
7019 case X86::VGETMANTSSZrri:
7020 case X86::VGETMANTSSZrrib:
7021 case X86::VGETMANTSSZrmi:
7022 case X86::VRNDSCALESDZrri:
7023 case X86::VRNDSCALESDZrri_Int:
7024 case X86::VRNDSCALESDZrrib_Int:
7025 case X86::VRNDSCALESDZrmi:
7026 case X86::VRNDSCALESDZrmi_Int:
7027 case X86::VRNDSCALESSZrri:
7028 case X86::VRNDSCALESSZrri_Int:
7029 case X86::VRNDSCALESSZrrib_Int:
7030 case X86::VRNDSCALESSZrmi:
7031 case X86::VRNDSCALESSZrmi_Int:
7032 case X86::VRCP14SDZrr:
7033 case X86::VRCP14SDZrm:
7034 case X86::VRCP14SSZrr:
7035 case X86::VRCP14SSZrm:
7036 case X86::VRCPSHZrr:
7037 case X86::VRCPSHZrm:
7038 case X86::VRSQRTSHZrr:
7039 case X86::VRSQRTSHZrm:
7040 case X86::VREDUCESHZrmi:
7041 case X86::VREDUCESHZrri:
7042 case X86::VREDUCESHZrrib:
7043 case X86::VGETEXPSHZr:
7044 case X86::VGETEXPSHZrb:
7045 case X86::VGETEXPSHZm:
7046 case X86::VGETMANTSHZrri:
7047 case X86::VGETMANTSHZrrib:
7048 case X86::VGETMANTSHZrmi:
7049 case X86::VRNDSCALESHZrri:
7050 case X86::VRNDSCALESHZrri_Int:
7051 case X86::VRNDSCALESHZrrib_Int:
7052 case X86::VRNDSCALESHZrmi:
7053 case X86::VRNDSCALESHZrmi_Int:
7054 case X86::VSQRTSHZr:
7055 case X86::VSQRTSHZr_Int:
7056 case X86::VSQRTSHZrb_Int:
7057 case X86::VSQRTSHZm:
7058 case X86::VSQRTSHZm_Int:
7059 case X86::VRCP28SDZr:
7060 case X86::VRCP28SDZrb:
7061 case X86::VRCP28SDZm:
7062 case X86::VRCP28SSZr:
7063 case X86::VRCP28SSZrb:
7064 case X86::VRCP28SSZm:
7065 case X86::VREDUCESSZrmi:
7066 case X86::VREDUCESSZrri:
7067 case X86::VREDUCESSZrrib:
7068 case X86::VRSQRT14SDZrr:
7069 case X86::VRSQRT14SDZrm:
7070 case X86::VRSQRT14SSZrr:
7071 case X86::VRSQRT14SSZrm:
7072 case X86::VRSQRT28SDZr:
7073 case X86::VRSQRT28SDZrb:
7074 case X86::VRSQRT28SDZm:
7075 case X86::VRSQRT28SSZr:
7076 case X86::VRSQRT28SSZrb:
7077 case X86::VRSQRT28SSZm:
7078 case X86::VSQRTSSZr:
7079 case X86::VSQRTSSZr_Int:
7080 case X86::VSQRTSSZrb_Int:
7081 case X86::VSQRTSSZm:
7082 case X86::VSQRTSSZm_Int:
7083 case X86::VSQRTSDZr:
7084 case X86::VSQRTSDZr_Int:
7085 case X86::VSQRTSDZrb_Int:
7086 case X86::VSQRTSDZm:
7087 case X86::VSQRTSDZm_Int:
7088 case X86::VCVTSD2SHZrr:
7089 case X86::VCVTSD2SHZrr_Int:
7090 case X86::VCVTSD2SHZrrb_Int:
7091 case X86::VCVTSD2SHZrm:
7092 case X86::VCVTSD2SHZrm_Int:
7093 case X86::VCVTSS2SHZrr:
7094 case X86::VCVTSS2SHZrr_Int:
7095 case X86::VCVTSS2SHZrrb_Int:
7096 case X86::VCVTSS2SHZrm:
7097 case X86::VCVTSS2SHZrm_Int:
7098 case X86::VCVTSH2SDZrr:
7099 case X86::VCVTSH2SDZrr_Int:
7100 case X86::VCVTSH2SDZrrb_Int:
7101 case X86::VCVTSH2SDZrm:
7102 case X86::VCVTSH2SDZrm_Int:
7103 case X86::VCVTSH2SSZrr:
7104 case X86::VCVTSH2SSZrr_Int:
7105 case X86::VCVTSH2SSZrrb_Int:
7106 case X86::VCVTSH2SSZrm:
7107 case X86::VCVTSH2SSZrm_Int:
7108 return OpNum == 1;
7109 case X86::VMOVSSZrrk:
7110 case X86::VMOVSDZrrk:
7111 return OpNum == 3 && !ForLoadFold;
7112 case X86::VMOVSSZrrkz:
7113 case X86::VMOVSDZrrkz:
7114 return OpNum == 2 && !ForLoadFold;
7115 }
7116
7117 return false;
7118}
7119
7120/// Inform the BreakFalseDeps pass how many idle instructions we would like
7121/// before certain undef register reads.
7122///
7123/// This catches the VCVTSI2SD family of instructions:
7124///
7125/// vcvtsi2sdq %rax, undef %xmm0, %xmm14
7126///
7127 /// We should be careful *not* to catch VXOR idioms which are presumably
7128/// handled specially in the pipeline:
7129///
7130/// vxorps undef %xmm1, undef %xmm1, %xmm1
7131///
7132/// Like getPartialRegUpdateClearance, this makes a strong assumption that the
7133/// high bits that are passed-through are not live.
7134unsigned
7135 X86InstrInfo::getUndefRegClearance(const MachineInstr &MI, unsigned OpNum,
7136 const TargetRegisterInfo *TRI) const {
7137 const MachineOperand &MO = MI.getOperand(OpNum);
7138 if (MO.getReg().isPhysical() && hasUndefRegUpdate(MI.getOpcode(), OpNum))
7139 return UndefRegClearance;
7140
7141 return 0;
7142}
7143
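// Insert a dependency-breaking zero idiom before MI, e.g. an unwanted
// dependence on %xmm7 can be broken by inserting 'xorps %xmm7, %xmm7' (with
// undef reads of %xmm7) right before the instruction.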
7144 void X86InstrInfo::breakPartialRegDependency(
7145 MachineInstr &MI, unsigned OpNum, const TargetRegisterInfo *TRI) const {
7146 Register Reg = MI.getOperand(OpNum).getReg();
7147 // If MI kills this register, the false dependence is already broken.
7148 if (MI.killsRegister(Reg, TRI))
7149 return;
7150
7151 if (X86::VR128RegClass.contains(Reg)) {
7152 // These instructions are all floating point domain, so xorps is the best
7153 // choice.
7154 unsigned Opc = Subtarget.hasAVX() ? X86::VXORPSrr : X86::XORPSrr;
7155 BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(Opc), Reg)
7156 .addReg(Reg, RegState::Undef)
7157 .addReg(Reg, RegState::Undef);
7158 MI.addRegisterKilled(Reg, TRI, true);
7159 } else if (X86::VR256RegClass.contains(Reg)) {
7160 // Use vxorps to clear the full ymm register.
7161 // It wants to read and write the xmm sub-register.
7162 Register XReg = TRI->getSubReg(Reg, X86::sub_xmm);
7163 BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(X86::VXORPSrr), XReg)
7164 .addReg(XReg, RegState::Undef)
7165 .addReg(XReg, RegState::Undef)
7166 .addReg(Reg, RegState::ImplicitDefine);
7167 MI.addRegisterKilled(Reg, TRI, true);
7168 } else if (X86::VR128XRegClass.contains(Reg)) {
7169 // Only handle VLX targets.
7170 if (!Subtarget.hasVLX())
7171 return;
7172 // Since vxorps requires AVX512DQ, vpxord should be the best choice.
7173 BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(X86::VPXORDZ128rr), Reg)
7174 .addReg(Reg, RegState::Undef)
7175 .addReg(Reg, RegState::Undef);
7176 MI.addRegisterKilled(Reg, TRI, true);
7177 } else if (X86::VR256XRegClass.contains(Reg) ||
7178 X86::VR512RegClass.contains(Reg)) {
7179 // Only handle VLX targets.
7180 if (!Subtarget.hasVLX())
7181 return;
7182 // Use vpxord to clear the full ymm/zmm register.
7183 // It wants to read and write the xmm sub-register.
7184 Register XReg = TRI->getSubReg(Reg, X86::sub_xmm);
7185 BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(X86::VPXORDZ128rr), XReg)
7186 .addReg(XReg, RegState::Undef)
7187 .addReg(XReg, RegState::Undef)
7188 .addReg(Reg, RegState::ImplicitDefine);
7189 MI.addRegisterKilled(Reg, TRI, true);
7190 } else if (X86::GR64RegClass.contains(Reg)) {
7191 // Use XOR32rr because it has a shorter encoding and zeros the upper bits
7192 // as well.
7193 Register XReg = TRI->getSubReg(Reg, X86::sub_32bit);
7194 BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(X86::XOR32rr), XReg)
7195 .addReg(XReg, RegState::Undef)
7196 .addReg(XReg, RegState::Undef)
7197 .addReg(Reg, RegState::ImplicitDefine);
7198 MI.addRegisterKilled(Reg, TRI, true);
7199 } else if (X86::GR32RegClass.contains(Reg)) {
7200 BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(X86::XOR32rr), Reg)
7201 .addReg(Reg, RegState::Undef)
7202 .addReg(Reg, RegState::Undef);
7203 MI.addRegisterKilled(Reg, TRI, true);
7204 } else if ((X86::GR16RegClass.contains(Reg) ||
7205 X86::GR8RegClass.contains(Reg)) &&
7206 X86II::hasNewDataDest(MI.getDesc().TSFlags)) {
7207 // This case is only expected for NDD ops, which appear to be partial
7208 // writes but are not, due to the zeroing of the upper part. Here
7209 // we add an implicit def of the super-register, which prevents
7210 // CompressEVEX from converting this to a legacy form.
7211 Register SuperReg = getX86SubSuperRegister(Reg, 64);
7212 MachineInstrBuilder BuildMI(*MI.getParent()->getParent(), &MI);
7213 if (!MI.definesRegister(SuperReg, /*TRI=*/nullptr))
7214 BuildMI.addReg(SuperReg, RegState::ImplicitDefine);
7215 }
7216}
7217
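/// Append the address operands in \p MOs to \p MIB. For a bare FrameIndex the
/// offset \p PtrOffset is added as an extra immediate; for a full five-operand
/// memory reference it is folded into the existing displacement.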
7218 static void addOperands(MachineInstrBuilder &MIB, ArrayRef<MachineOperand> MOs,
7219 int PtrOffset = 0) {
7220 unsigned NumAddrOps = MOs.size();
7221
7222 if (NumAddrOps < 4) {
7223 // FrameIndex only - add an immediate offset (whether it's zero or not).
7224 for (unsigned i = 0; i != NumAddrOps; ++i)
7225 MIB.add(MOs[i]);
7226 addOffset(MIB, PtrOffset);
7227 } else {
7228 // General Memory Addressing - we need to add any offset to an existing
7229 // offset.
7230 assert(MOs.size() == 5 && "Unexpected memory operand list length");
7231 for (unsigned i = 0; i != NumAddrOps; ++i) {
7232 const MachineOperand &MO = MOs[i];
7233 if (i == 3 && PtrOffset != 0) {
7234 MIB.addDisp(MO, PtrOffset);
7235 } else {
7236 MIB.add(MO);
7237 }
7238 }
7239 }
7240}
7241
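/// Constrain the register classes of \p NewMI's virtual register operands to
/// the classes required by its new (folded) opcode, emitting a debug warning
/// when a constraint cannot be satisfied.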
7242 static void updateOperandRegConstraints(MachineFunction &MF,
7243 MachineInstr &NewMI,
7244 const TargetInstrInfo &TII) {
7245 MachineRegisterInfo &MRI = MF.getRegInfo();
7246 const TargetRegisterInfo &TRI = *MRI.getTargetRegisterInfo();
7247
7248 for (int Idx : llvm::seq<int>(0, NewMI.getNumOperands())) {
7249 MachineOperand &MO = NewMI.getOperand(Idx);
7250 // We only need to update constraints on virtual register operands.
7251 if (!MO.isReg())
7252 continue;
7253 Register Reg = MO.getReg();
7254 if (!Reg.isVirtual())
7255 continue;
7256
7257 auto *NewRC =
7258 MRI.constrainRegClass(Reg, TII.getRegClass(NewMI.getDesc(), Idx, &TRI));
7259 if (!NewRC) {
7260 LLVM_DEBUG(
7261 dbgs() << "WARNING: Unable to update register constraint for operand "
7262 << Idx << " of instruction:\n";
7263 NewMI.dump(); dbgs() << "\n");
7264 }
7265 }
7266}
7267
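/// Create the memory form of the two-address instruction \p MI: the address
/// operands in \p MOs replace the tied def/use register pair, the remaining
/// operands are copied over, and the new instruction is inserted before
/// \p InsertPt.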
7268static MachineInstr *fuseTwoAddrInst(MachineFunction &MF, unsigned Opcode,
7269 ArrayRef<MachineOperand> MOs,
7270 MachineBasicBlock::iterator InsertPt,
7271 MachineInstr &MI,
7272 const TargetInstrInfo &TII) {
7273 // Create the base instruction with the memory operand as the first part.
7274 // Omit the implicit operands, something BuildMI can't do.
7275 MachineInstr *NewMI =
7276 MF.CreateMachineInstr(TII.get(Opcode), MI.getDebugLoc(), true);
7277 MachineInstrBuilder MIB(MF, NewMI);
7278 addOperands(MIB, MOs);
7279
7280 // Loop over the rest of the ri operands, converting them over.
7281 unsigned NumOps = MI.getDesc().getNumOperands() - 2;
7282 for (unsigned i = 0; i != NumOps; ++i) {
7283 MachineOperand &MO = MI.getOperand(i + 2);
7284 MIB.add(MO);
7285 }
7286 for (const MachineOperand &MO : llvm::drop_begin(MI.operands(), NumOps + 2))
7287 MIB.add(MO);
7288
7289 updateOperandRegConstraints(MF, *NewMI, TII);
7290
7291 MachineBasicBlock *MBB = InsertPt->getParent();
7292 MBB->insert(InsertPt, NewMI);
7293
7294 return MIB;
7295}
7296
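/// Create the memory form of \p MI in which operand \p OpNo is replaced by the
/// address operands in \p MOs, optionally displaced by \p PtrOffset, and insert
/// the new instruction before \p InsertPt.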
7297static MachineInstr *fuseInst(MachineFunction &MF, unsigned Opcode,
7298 unsigned OpNo, ArrayRef<MachineOperand> MOs,
7299 MachineBasicBlock::iterator InsertPt,
7300 MachineInstr &MI, const TargetInstrInfo &TII,
7301 int PtrOffset = 0) {
7302 // Omit the implicit operands, something BuildMI can't do.
7303 MachineInstr *NewMI =
7304 MF.CreateMachineInstr(TII.get(Opcode), MI.getDebugLoc(), true);
7305 MachineInstrBuilder MIB(MF, NewMI);
7306
7307 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
7308 MachineOperand &MO = MI.getOperand(i);
7309 if (i == OpNo) {
7310 assert(MO.isReg() && "Expected to fold into reg operand!");
7311 addOperands(MIB, MOs, PtrOffset);
7312 } else {
7313 MIB.add(MO);
7314 }
7315 }
7316
7317 updateOperandRegConstraints(MF, *NewMI, TII);
7318
7319 // Copy the NoFPExcept flag from the instruction we're fusing.
7320 if (MI.getFlag(MachineInstr::MIFlag::NoFPExcept))
7321 NewMI->setFlag(MachineInstr::MIFlag::NoFPExcept);
7322 
7323 MachineBasicBlock *MBB = InsertPt->getParent();
7324 MBB->insert(InsertPt, NewMI);
7325
7326 return MIB;
7327}
7328
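/// Emit \p Opcode with the address operands in \p MOs and an immediate operand
/// of 0; used below to fold MOV32r0 into a store of an immediate zero.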
7329static MachineInstr *makeM0Inst(const TargetInstrInfo &TII, unsigned Opcode,
7330 ArrayRef<MachineOperand> MOs,
7331 MachineBasicBlock::iterator InsertPt,
7332 MachineInstr &MI) {
7333 MachineInstrBuilder MIB = BuildMI(*InsertPt->getParent(), InsertPt,
7334 MI.getDebugLoc(), TII.get(Opcode));
7335 addOperands(MIB, MOs);
7336 return MIB.addImm(0);
7337}
7338
7339MachineInstr *X86InstrInfo::foldMemoryOperandCustom(
7340 MachineFunction &MF, MachineInstr &MI, unsigned OpNum,
7341 ArrayRef<MachineOperand> MOs, MachineBasicBlock::iterator InsertPt,
7342 unsigned Size, Align Alignment) const {
7343 switch (MI.getOpcode()) {
7344 case X86::INSERTPSrri:
7345 case X86::VINSERTPSrri:
7346 case X86::VINSERTPSZrri:
7347 // Attempt to convert the load of the inserted vector into a folded load
7348 // of a single float.
7349 if (OpNum == 2) {
7350 unsigned Imm = MI.getOperand(MI.getNumOperands() - 1).getImm();
7351 unsigned ZMask = Imm & 15;
7352 unsigned DstIdx = (Imm >> 4) & 3;
7353 unsigned SrcIdx = (Imm >> 6) & 3;
7354
7355 const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
7356 const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum, &RI);
7357 unsigned RCSize = TRI.getRegSizeInBits(*RC) / 8;
7358 if ((Size == 0 || Size >= 16) && RCSize >= 16 &&
7359 (MI.getOpcode() != X86::INSERTPSrri || Alignment >= Align(4))) {
7360 int PtrOffset = SrcIdx * 4;
7361 unsigned NewImm = (DstIdx << 4) | ZMask;
7362 unsigned NewOpCode =
7363 (MI.getOpcode() == X86::VINSERTPSZrri) ? X86::VINSERTPSZrmi
7364 : (MI.getOpcode() == X86::VINSERTPSrri) ? X86::VINSERTPSrmi
7365 : X86::INSERTPSrmi;
7366 MachineInstr *NewMI =
7367 fuseInst(MF, NewOpCode, OpNum, MOs, InsertPt, MI, *this, PtrOffset);
7368 NewMI->getOperand(NewMI->getNumOperands() - 1).setImm(NewImm);
7369 return NewMI;
7370 }
7371 }
7372 break;
7373 case X86::MOVHLPSrr:
7374 case X86::VMOVHLPSrr:
7375 case X86::VMOVHLPSZrr:
7376 // Move the upper 64 bits of the second operand to the lower 64 bits.
7377 // To fold the load, adjust the pointer to the upper half and use (V)MOVLPS.
7378 // TODO: In most cases AVX doesn't have an 8-byte alignment requirement.
7379 if (OpNum == 2) {
7380 const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
7381 const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum, &RI);
7382 unsigned RCSize = TRI.getRegSizeInBits(*RC) / 8;
7383 if ((Size == 0 || Size >= 16) && RCSize >= 16 && Alignment >= Align(8)) {
7384 unsigned NewOpCode =
7385 (MI.getOpcode() == X86::VMOVHLPSZrr) ? X86::VMOVLPSZ128rm
7386 : (MI.getOpcode() == X86::VMOVHLPSrr) ? X86::VMOVLPSrm
7387 : X86::MOVLPSrm;
7388 MachineInstr *NewMI =
7389 fuseInst(MF, NewOpCode, OpNum, MOs, InsertPt, MI, *this, 8);
7390 return NewMI;
7391 }
7392 }
7393 break;
7394 case X86::UNPCKLPDrr:
7395 // If we won't be able to fold this to the memory form of UNPCKL, use
7396 // MOVHPD instead. Done as custom because we can't have this in the load
7397 // table twice.
7398 if (OpNum == 2) {
7399 const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
7400 const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum, &RI);
7401 unsigned RCSize = TRI.getRegSizeInBits(*RC) / 8;
7402 if ((Size == 0 || Size >= 16) && RCSize >= 16 && Alignment < Align(16)) {
7403 MachineInstr *NewMI =
7404 fuseInst(MF, X86::MOVHPDrm, OpNum, MOs, InsertPt, MI, *this);
7405 return NewMI;
7406 }
7407 }
7408 break;
7409 case X86::MOV32r0:
7410 if (auto *NewMI =
7411 makeM0Inst(*this, (Size == 4) ? X86::MOV32mi : X86::MOV64mi32, MOs,
7412 InsertPt, MI))
7413 return NewMI;
7414 break;
7415 }
7416
7417 return nullptr;
7418}
7419
7420 static bool shouldPreventUndefRegUpdateMemFold(MachineFunction &MF,
7421 MachineInstr &MI) {
7422 if (!hasUndefRegUpdate(MI.getOpcode(), 1, /*ForLoadFold*/ true) ||
7423 !MI.getOperand(1).isReg())
7424 return false;
7425
7426 // There are two cases we need to handle depending on where in the pipeline
7427 // the folding attempt is being made.
7428 // -Register has the undef flag set.
7429 // -Register is produced by the IMPLICIT_DEF instruction.
7430
7431 if (MI.getOperand(1).isUndef())
7432 return true;
7433
7434 MachineRegisterInfo &RegInfo = MF.getRegInfo();
7435 MachineInstr *VRegDef = RegInfo.getUniqueVRegDef(MI.getOperand(1).getReg());
7436 return VRegDef && VRegDef->isImplicitDef();
7437}
7438
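/// Try to commute \p MI so that the operand at index \p Idx1 becomes foldable.
/// Returns the index the operand ended up at, or \p Idx1 unchanged if the
/// instruction could not (or should not) be commuted.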
7439unsigned X86InstrInfo::commuteOperandsForFold(MachineInstr &MI,
7440 unsigned Idx1) const {
7441 unsigned Idx2 = CommuteAnyOperandIndex;
7442 if (!findCommutedOpIndices(MI, Idx1, Idx2))
7443 return Idx1;
7444
7445 bool HasDef = MI.getDesc().getNumDefs();
7446 Register Reg0 = HasDef ? MI.getOperand(0).getReg() : Register();
7447 Register Reg1 = MI.getOperand(Idx1).getReg();
7448 Register Reg2 = MI.getOperand(Idx2).getReg();
7449 bool Tied1 = 0 == MI.getDesc().getOperandConstraint(Idx1, MCOI::TIED_TO);
7450 bool Tied2 = 0 == MI.getDesc().getOperandConstraint(Idx2, MCOI::TIED_TO);
7451
7452 // If either of the commutable operands are tied to the destination
7453 // then we can not commute + fold.
7454 if ((HasDef && Reg0 == Reg1 && Tied1) || (HasDef && Reg0 == Reg2 && Tied2))
7455 return Idx1;
7456
7457 return commuteInstruction(MI, false, Idx1, Idx2) ? Idx2 : Idx1;
7458}
7459
7460static void printFailMsgforFold(const MachineInstr &MI, unsigned Idx) {
7461 if (PrintFailedFusing && !MI.isCopy())
7462 dbgs() << "We failed to fuse operand " << Idx << " in " << MI;
7463}
7464
7465 MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
7466 MachineFunction &MF, MachineInstr &MI, unsigned OpNum,
7467 ArrayRef<MachineOperand> MOs, MachineBasicBlock::iterator InsertPt,
7468 unsigned Size, Align Alignment, bool AllowCommute) const {
7469 bool isSlowTwoMemOps = Subtarget.slowTwoMemOps();
7470 unsigned Opc = MI.getOpcode();
7471
7472 // For CPUs that favor the register form of a call or push,
7473 // do not fold loads into calls or pushes, unless optimizing for size
7474 // aggressively.
7475 if (isSlowTwoMemOps && !MF.getFunction().hasMinSize() &&
7476 (Opc == X86::CALL32r || Opc == X86::CALL64r ||
7477 Opc == X86::CALL64r_ImpCall || Opc == X86::PUSH16r ||
7478 Opc == X86::PUSH32r || Opc == X86::PUSH64r))
7479 return nullptr;
7480
7481 // Avoid partial and undef register update stalls unless optimizing for size.
7482 if (!MF.getFunction().hasOptSize() &&
7483 (hasPartialRegUpdate(Opc, Subtarget, /*ForLoadFold*/ true) ||
7484 shouldPreventUndefRegUpdateMemFold(MF, MI)))
7485 return nullptr;
7486
7487 unsigned NumOps = MI.getDesc().getNumOperands();
7488 bool IsTwoAddr = NumOps > 1 && OpNum < 2 && MI.getOperand(0).isReg() &&
7489 MI.getOperand(1).isReg() &&
7490 MI.getOperand(0).getReg() == MI.getOperand(1).getReg();
7491
7492 // FIXME: AsmPrinter doesn't know how to handle
7493 // X86II::MO_GOT_ABSOLUTE_ADDRESS after folding.
7494 if (Opc == X86::ADD32ri &&
7495 MI.getOperand(2).getTargetFlags() == X86II::MO_GOT_ABSOLUTE_ADDRESS)
7496 return nullptr;
7497
7498 // GOTTPOFF relocation loads can only be folded into add instructions.
7499 // FIXME: Need to exclude other relocations that only support specific
7500 // instructions.
7501 if (MOs.size() == X86::AddrNumOperands &&
7502 MOs[X86::AddrDisp].getTargetFlags() == X86II::MO_GOTTPOFF &&
7503 Opc != X86::ADD64rr)
7504 return nullptr;
7505
7506 // Don't fold loads into indirect calls that need a KCFI check as we'll
7507 // have to unfold these in X86TargetLowering::EmitKCFICheck anyway.
7508 if (MI.isCall() && MI.getCFIType())
7509 return nullptr;
7510
7511 // Attempt to fold any custom cases we have.
7512 if (auto *CustomMI = foldMemoryOperandCustom(MF, MI, OpNum, MOs, InsertPt,
7513 Size, Alignment))
7514 return CustomMI;
7515
7516 // Folding a memory location into the two-address part of a two-address
7517 // instruction is different from folding it elsewhere. It requires
7518 // replacing the *two* registers with the memory location.
7519 //
7520 // Utilize the mapping NonNDD -> RMW for the NDD variant.
7521 unsigned NonNDOpc = Subtarget.hasNDD() ? X86::getNonNDVariant(Opc) : 0U;
7522 const X86FoldTableEntry *I =
7523 IsTwoAddr ? lookupTwoAddrFoldTable(NonNDOpc ? NonNDOpc : Opc)
7524 : lookupFoldTable(Opc, OpNum);
7525
7526 MachineInstr *NewMI = nullptr;
7527 if (I) {
7528 unsigned Opcode = I->DstOp;
7529 if (Alignment <
7530 Align(1ULL << ((I->Flags & TB_ALIGN_MASK) >> TB_ALIGN_SHIFT)))
7531 return nullptr;
7532 bool NarrowToMOV32rm = false;
7533 if (Size) {
7534 const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
7535 const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum, &RI);
7536 unsigned RCSize = TRI.getRegSizeInBits(*RC) / 8;
7537 // Check if it's safe to fold the load. If the size of the object is
7538 // narrower than the load width, then it's not.
7539 // FIXME: Allow scalar intrinsic instructions like ADDSSrm_Int.
7540 if ((I->Flags & TB_FOLDED_LOAD) && Size < RCSize) {
7541 // If this is a 64-bit load, but the spill slot is 32, then we can do
7542 // a 32-bit load which is implicitly zero-extended. This likely is
7543 // due to live interval analysis remat'ing a load from stack slot.
7544 if (Opcode != X86::MOV64rm || RCSize != 8 || Size != 4)
7545 return nullptr;
7546 if (MI.getOperand(0).getSubReg() || MI.getOperand(1).getSubReg())
7547 return nullptr;
7548 Opcode = X86::MOV32rm;
7549 NarrowToMOV32rm = true;
7550 }
7551 // For stores, make sure the size of the object is equal to the size of
7552 // the store. If the object is larger, the extra bits would be garbage. If
7553 // the object is smaller we might overwrite another object or fault.
7554 if ((I->Flags & TB_FOLDED_STORE) && Size != RCSize)
7555 return nullptr;
7556 }
7557
7558 NewMI = IsTwoAddr ? fuseTwoAddrInst(MF, Opcode, MOs, InsertPt, MI, *this)
7559 : fuseInst(MF, Opcode, OpNum, MOs, InsertPt, MI, *this);
7560
7561 if (NarrowToMOV32rm) {
7562 // This is the special case where we use a MOV32rm to load a 32-bit
7563 // value and zero-extend the top bits. Change the destination register
7564 // to a 32-bit one.
7565 Register DstReg = NewMI->getOperand(0).getReg();
7566 if (DstReg.isPhysical())
7567 NewMI->getOperand(0).setReg(RI.getSubReg(DstReg, X86::sub_32bit));
7568 else
7569 NewMI->getOperand(0).setSubReg(X86::sub_32bit);
7570 }
7571 return NewMI;
7572 }
7573
7574 if (AllowCommute) {
7575 // If the instruction and target operand are commutable, commute the
7576 // instruction and try again.
7577 unsigned CommuteOpIdx2 = commuteOperandsForFold(MI, OpNum);
7578 if (CommuteOpIdx2 == OpNum) {
7579 printFailMsgforFold(MI, OpNum);
7580 return nullptr;
7581 }
7582 // Attempt to fold with the commuted version of the instruction.
7583 NewMI = foldMemoryOperandImpl(MF, MI, CommuteOpIdx2, MOs, InsertPt, Size,
7584 Alignment, /*AllowCommute=*/false);
7585 if (NewMI)
7586 return NewMI;
7587 // Folding failed again - undo the commute before returning.
7588 commuteInstruction(MI, false, OpNum, CommuteOpIdx2);
7589 }
7590
7591 printFailMsgforFold(MI, OpNum);
7592 return nullptr;
7593}
7594
7595 MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
7596 MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
7597 MachineBasicBlock::iterator InsertPt, int FrameIndex, LiveIntervals *LIS,
7598 VirtRegMap *VRM) const {
7599 // Check switch flag
7600 if (NoFusing)
7601 return nullptr;
7602
7603 // Avoid partial and undef register update stalls unless optimizing for size.
7604 if (!MF.getFunction().hasOptSize() &&
7605 (hasPartialRegUpdate(MI.getOpcode(), Subtarget, /*ForLoadFold*/ true) ||
7606 shouldPreventUndefRegUpdateMemFold(MF, MI)))
7607 return nullptr;
7608
7609 // Don't fold subreg spills, or reloads that use a high subreg.
7610 for (auto Op : Ops) {
7611 MachineOperand &MO = MI.getOperand(Op);
7612 auto SubReg = MO.getSubReg();
7613 // MOV32r0 is special b/c it's used to clear a 64-bit register too.
7614 // (See patterns for MOV32r0 in TD files).
7615 if (MI.getOpcode() == X86::MOV32r0 && SubReg == X86::sub_32bit)
7616 continue;
7617 if (SubReg && (MO.isDef() || SubReg == X86::sub_8bit_hi))
7618 return nullptr;
7619 }
7620
7621 const MachineFrameInfo &MFI = MF.getFrameInfo();
7622 unsigned Size = MFI.getObjectSize(FrameIndex);
7623 Align Alignment = MFI.getObjectAlign(FrameIndex);
7624 // If the function stack isn't realigned we don't want to fold instructions
7625 // that need increased alignment.
7626 if (!RI.hasStackRealignment(MF))
7627 Alignment =
7628 std::min(Alignment, Subtarget.getFrameLowering()->getStackAlign());
7629
7630 auto Impl = [&]() {
7631 return foldMemoryOperandImpl(MF, MI, Ops[0],
7632 MachineOperand::CreateFI(FrameIndex), InsertPt,
7633 Size, Alignment, /*AllowCommute=*/true);
7634 };
7635 if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
7636 unsigned NewOpc = 0;
7637 unsigned RCSize = 0;
7638 unsigned Opc = MI.getOpcode();
7639 switch (Opc) {
7640 default:
7641 // NDD can be folded into RMW even though its Op0 and Op1 are not tied.
7642 return (Subtarget.hasNDD() ? X86::getNonNDVariant(Opc) : 0U) ? Impl()
7643 : nullptr;
7644 case X86::TEST8rr:
7645 NewOpc = X86::CMP8ri;
7646 RCSize = 1;
7647 break;
7648 case X86::TEST16rr:
7649 NewOpc = X86::CMP16ri;
7650 RCSize = 2;
7651 break;
7652 case X86::TEST32rr:
7653 NewOpc = X86::CMP32ri;
7654 RCSize = 4;
7655 break;
7656 case X86::TEST64rr:
7657 NewOpc = X86::CMP64ri32;
7658 RCSize = 8;
7659 break;
7660 }
7661 // Check if it's safe to fold the load. If the size of the object is
7662 // narrower than the load width, then it's not.
7663 if (Size < RCSize)
7664 return nullptr;
7665 // Change to CMPXXri r, 0 first.
7666 MI.setDesc(get(NewOpc));
7667 MI.getOperand(1).ChangeToImmediate(0);
7668 } else if (Ops.size() != 1)
7669 return nullptr;
7670
7671 return Impl();
7672}
7673
7674/// Check if \p LoadMI is a partial register load that we can't fold into \p MI
7675/// because the latter uses contents that wouldn't be defined in the folded
7676/// version. For instance, this transformation isn't legal:
7677/// movss (%rdi), %xmm0
7678/// addps %xmm0, %xmm0
7679/// ->
7680/// addps (%rdi), %xmm0
7681///
7682/// But this one is:
7683/// movss (%rdi), %xmm0
7684/// addss %xmm0, %xmm0
7685/// ->
7686/// addss (%rdi), %xmm0
7687///
7688 static bool isNonFoldablePartialRegisterLoad(const MachineInstr &LoadMI,
7689 const MachineInstr &UserMI,
7690 const MachineFunction &MF) {
7691 unsigned Opc = LoadMI.getOpcode();
7692 unsigned UserOpc = UserMI.getOpcode();
7693 const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
7694 const TargetRegisterClass *RC =
7695 MF.getRegInfo().getRegClass(LoadMI.getOperand(0).getReg());
7696 unsigned RegSize = TRI.getRegSizeInBits(*RC);
7697
7698 if ((Opc == X86::MOVSSrm || Opc == X86::VMOVSSrm || Opc == X86::VMOVSSZrm ||
7699 Opc == X86::MOVSSrm_alt || Opc == X86::VMOVSSrm_alt ||
7700 Opc == X86::VMOVSSZrm_alt) &&
7701 RegSize > 32) {
7702 // These instructions only load 32 bits; we can't fold them if the
7703 // destination register is wider than 32 bits (4 bytes) and the user
7704 // instruction isn't scalar (SS).
7705 switch (UserOpc) {
7706 case X86::CVTSS2SDrr_Int:
7707 case X86::VCVTSS2SDrr_Int:
7708 case X86::VCVTSS2SDZrr_Int:
7709 case X86::VCVTSS2SDZrrk_Int:
7710 case X86::VCVTSS2SDZrrkz_Int:
7711 case X86::CVTSS2SIrr_Int:
7712 case X86::CVTSS2SI64rr_Int:
7713 case X86::VCVTSS2SIrr_Int:
7714 case X86::VCVTSS2SI64rr_Int:
7715 case X86::VCVTSS2SIZrr_Int:
7716 case X86::VCVTSS2SI64Zrr_Int:
7717 case X86::CVTTSS2SIrr_Int:
7718 case X86::CVTTSS2SI64rr_Int:
7719 case X86::VCVTTSS2SIrr_Int:
7720 case X86::VCVTTSS2SI64rr_Int:
7721 case X86::VCVTTSS2SIZrr_Int:
7722 case X86::VCVTTSS2SI64Zrr_Int:
7723 case X86::VCVTSS2USIZrr_Int:
7724 case X86::VCVTSS2USI64Zrr_Int:
7725 case X86::VCVTTSS2USIZrr_Int:
7726 case X86::VCVTTSS2USI64Zrr_Int:
7727 case X86::RCPSSr_Int:
7728 case X86::VRCPSSr_Int:
7729 case X86::RSQRTSSr_Int:
7730 case X86::VRSQRTSSr_Int:
7731 case X86::ROUNDSSri_Int:
7732 case X86::VROUNDSSri_Int:
7733 case X86::COMISSrr_Int:
7734 case X86::VCOMISSrr_Int:
7735 case X86::VCOMISSZrr_Int:
7736 case X86::UCOMISSrr_Int:
7737 case X86::VUCOMISSrr_Int:
7738 case X86::VUCOMISSZrr_Int:
7739 case X86::ADDSSrr_Int:
7740 case X86::VADDSSrr_Int:
7741 case X86::VADDSSZrr_Int:
7742 case X86::CMPSSrri_Int:
7743 case X86::VCMPSSrri_Int:
7744 case X86::VCMPSSZrri_Int:
7745 case X86::DIVSSrr_Int:
7746 case X86::VDIVSSrr_Int:
7747 case X86::VDIVSSZrr_Int:
7748 case X86::MAXSSrr_Int:
7749 case X86::VMAXSSrr_Int:
7750 case X86::VMAXSSZrr_Int:
7751 case X86::MINSSrr_Int:
7752 case X86::VMINSSrr_Int:
7753 case X86::VMINSSZrr_Int:
7754 case X86::MULSSrr_Int:
7755 case X86::VMULSSrr_Int:
7756 case X86::VMULSSZrr_Int:
7757 case X86::SQRTSSr_Int:
7758 case X86::VSQRTSSr_Int:
7759 case X86::VSQRTSSZr_Int:
7760 case X86::SUBSSrr_Int:
7761 case X86::VSUBSSrr_Int:
7762 case X86::VSUBSSZrr_Int:
7763 case X86::VADDSSZrrk_Int:
7764 case X86::VADDSSZrrkz_Int:
7765 case X86::VCMPSSZrrik_Int:
7766 case X86::VDIVSSZrrk_Int:
7767 case X86::VDIVSSZrrkz_Int:
7768 case X86::VMAXSSZrrk_Int:
7769 case X86::VMAXSSZrrkz_Int:
7770 case X86::VMINSSZrrk_Int:
7771 case X86::VMINSSZrrkz_Int:
7772 case X86::VMULSSZrrk_Int:
7773 case X86::VMULSSZrrkz_Int:
7774 case X86::VSQRTSSZrk_Int:
7775 case X86::VSQRTSSZrkz_Int:
7776 case X86::VSUBSSZrrk_Int:
7777 case X86::VSUBSSZrrkz_Int:
7778 case X86::VFMADDSS4rr_Int:
7779 case X86::VFNMADDSS4rr_Int:
7780 case X86::VFMSUBSS4rr_Int:
7781 case X86::VFNMSUBSS4rr_Int:
7782 case X86::VFMADD132SSr_Int:
7783 case X86::VFNMADD132SSr_Int:
7784 case X86::VFMADD213SSr_Int:
7785 case X86::VFNMADD213SSr_Int:
7786 case X86::VFMADD231SSr_Int:
7787 case X86::VFNMADD231SSr_Int:
7788 case X86::VFMSUB132SSr_Int:
7789 case X86::VFNMSUB132SSr_Int:
7790 case X86::VFMSUB213SSr_Int:
7791 case X86::VFNMSUB213SSr_Int:
7792 case X86::VFMSUB231SSr_Int:
7793 case X86::VFNMSUB231SSr_Int:
7794 case X86::VFMADD132SSZr_Int:
7795 case X86::VFNMADD132SSZr_Int:
7796 case X86::VFMADD213SSZr_Int:
7797 case X86::VFNMADD213SSZr_Int:
7798 case X86::VFMADD231SSZr_Int:
7799 case X86::VFNMADD231SSZr_Int:
7800 case X86::VFMSUB132SSZr_Int:
7801 case X86::VFNMSUB132SSZr_Int:
7802 case X86::VFMSUB213SSZr_Int:
7803 case X86::VFNMSUB213SSZr_Int:
7804 case X86::VFMSUB231SSZr_Int:
7805 case X86::VFNMSUB231SSZr_Int:
7806 case X86::VFMADD132SSZrk_Int:
7807 case X86::VFNMADD132SSZrk_Int:
7808 case X86::VFMADD213SSZrk_Int:
7809 case X86::VFNMADD213SSZrk_Int:
7810 case X86::VFMADD231SSZrk_Int:
7811 case X86::VFNMADD231SSZrk_Int:
7812 case X86::VFMSUB132SSZrk_Int:
7813 case X86::VFNMSUB132SSZrk_Int:
7814 case X86::VFMSUB213SSZrk_Int:
7815 case X86::VFNMSUB213SSZrk_Int:
7816 case X86::VFMSUB231SSZrk_Int:
7817 case X86::VFNMSUB231SSZrk_Int:
7818 case X86::VFMADD132SSZrkz_Int:
7819 case X86::VFNMADD132SSZrkz_Int:
7820 case X86::VFMADD213SSZrkz_Int:
7821 case X86::VFNMADD213SSZrkz_Int:
7822 case X86::VFMADD231SSZrkz_Int:
7823 case X86::VFNMADD231SSZrkz_Int:
7824 case X86::VFMSUB132SSZrkz_Int:
7825 case X86::VFNMSUB132SSZrkz_Int:
7826 case X86::VFMSUB213SSZrkz_Int:
7827 case X86::VFNMSUB213SSZrkz_Int:
7828 case X86::VFMSUB231SSZrkz_Int:
7829 case X86::VFNMSUB231SSZrkz_Int:
7830 case X86::VFIXUPIMMSSZrri:
7831 case X86::VFIXUPIMMSSZrrik:
7832 case X86::VFIXUPIMMSSZrrikz:
7833 case X86::VFPCLASSSSZri:
7834 case X86::VFPCLASSSSZrik:
7835 case X86::VGETEXPSSZr:
7836 case X86::VGETEXPSSZrk:
7837 case X86::VGETEXPSSZrkz:
7838 case X86::VGETMANTSSZrri:
7839 case X86::VGETMANTSSZrrik:
7840 case X86::VGETMANTSSZrrikz:
7841 case X86::VRANGESSZrri:
7842 case X86::VRANGESSZrrik:
7843 case X86::VRANGESSZrrikz:
7844 case X86::VRCP14SSZrr:
7845 case X86::VRCP14SSZrrk:
7846 case X86::VRCP14SSZrrkz:
7847 case X86::VRCP28SSZr:
7848 case X86::VRCP28SSZrk:
7849 case X86::VRCP28SSZrkz:
7850 case X86::VREDUCESSZrri:
7851 case X86::VREDUCESSZrrik:
7852 case X86::VREDUCESSZrrikz:
7853 case X86::VRNDSCALESSZrri_Int:
7854 case X86::VRNDSCALESSZrrik_Int:
7855 case X86::VRNDSCALESSZrrikz_Int:
7856 case X86::VRSQRT14SSZrr:
7857 case X86::VRSQRT14SSZrrk:
7858 case X86::VRSQRT14SSZrrkz:
7859 case X86::VRSQRT28SSZr:
7860 case X86::VRSQRT28SSZrk:
7861 case X86::VRSQRT28SSZrkz:
7862 case X86::VSCALEFSSZrr:
7863 case X86::VSCALEFSSZrrk:
7864 case X86::VSCALEFSSZrrkz:
7865 return false;
7866 default:
7867 return true;
7868 }
7869 }
7870
7871 if ((Opc == X86::MOVSDrm || Opc == X86::VMOVSDrm || Opc == X86::VMOVSDZrm ||
7872 Opc == X86::MOVSDrm_alt || Opc == X86::VMOVSDrm_alt ||
7873 Opc == X86::VMOVSDZrm_alt) &&
7874 RegSize > 64) {
7875 // These instructions only load 64 bits; we can't fold them if the
7876 // destination register is wider than 64 bits (8 bytes) and the user
7877 // instruction isn't scalar (SD).
7878 switch (UserOpc) {
7879 case X86::CVTSD2SSrr_Int:
7880 case X86::VCVTSD2SSrr_Int:
7881 case X86::VCVTSD2SSZrr_Int:
7882 case X86::VCVTSD2SSZrrk_Int:
7883 case X86::VCVTSD2SSZrrkz_Int:
7884 case X86::CVTSD2SIrr_Int:
7885 case X86::CVTSD2SI64rr_Int:
7886 case X86::VCVTSD2SIrr_Int:
7887 case X86::VCVTSD2SI64rr_Int:
7888 case X86::VCVTSD2SIZrr_Int:
7889 case X86::VCVTSD2SI64Zrr_Int:
7890 case X86::CVTTSD2SIrr_Int:
7891 case X86::CVTTSD2SI64rr_Int:
7892 case X86::VCVTTSD2SIrr_Int:
7893 case X86::VCVTTSD2SI64rr_Int:
7894 case X86::VCVTTSD2SIZrr_Int:
7895 case X86::VCVTTSD2SI64Zrr_Int:
7896 case X86::VCVTSD2USIZrr_Int:
7897 case X86::VCVTSD2USI64Zrr_Int:
7898 case X86::VCVTTSD2USIZrr_Int:
7899 case X86::VCVTTSD2USI64Zrr_Int:
7900 case X86::ROUNDSDri_Int:
7901 case X86::VROUNDSDri_Int:
7902 case X86::COMISDrr_Int:
7903 case X86::VCOMISDrr_Int:
7904 case X86::VCOMISDZrr_Int:
7905 case X86::UCOMISDrr_Int:
7906 case X86::VUCOMISDrr_Int:
7907 case X86::VUCOMISDZrr_Int:
7908 case X86::ADDSDrr_Int:
7909 case X86::VADDSDrr_Int:
7910 case X86::VADDSDZrr_Int:
7911 case X86::CMPSDrri_Int:
7912 case X86::VCMPSDrri_Int:
7913 case X86::VCMPSDZrri_Int:
7914 case X86::DIVSDrr_Int:
7915 case X86::VDIVSDrr_Int:
7916 case X86::VDIVSDZrr_Int:
7917 case X86::MAXSDrr_Int:
7918 case X86::VMAXSDrr_Int:
7919 case X86::VMAXSDZrr_Int:
7920 case X86::MINSDrr_Int:
7921 case X86::VMINSDrr_Int:
7922 case X86::VMINSDZrr_Int:
7923 case X86::MULSDrr_Int:
7924 case X86::VMULSDrr_Int:
7925 case X86::VMULSDZrr_Int:
7926 case X86::SQRTSDr_Int:
7927 case X86::VSQRTSDr_Int:
7928 case X86::VSQRTSDZr_Int:
7929 case X86::SUBSDrr_Int:
7930 case X86::VSUBSDrr_Int:
7931 case X86::VSUBSDZrr_Int:
7932 case X86::VADDSDZrrk_Int:
7933 case X86::VADDSDZrrkz_Int:
7934 case X86::VCMPSDZrrik_Int:
7935 case X86::VDIVSDZrrk_Int:
7936 case X86::VDIVSDZrrkz_Int:
7937 case X86::VMAXSDZrrk_Int:
7938 case X86::VMAXSDZrrkz_Int:
7939 case X86::VMINSDZrrk_Int:
7940 case X86::VMINSDZrrkz_Int:
7941 case X86::VMULSDZrrk_Int:
7942 case X86::VMULSDZrrkz_Int:
7943 case X86::VSQRTSDZrk_Int:
7944 case X86::VSQRTSDZrkz_Int:
7945 case X86::VSUBSDZrrk_Int:
7946 case X86::VSUBSDZrrkz_Int:
7947 case X86::VFMADDSD4rr_Int:
7948 case X86::VFNMADDSD4rr_Int:
7949 case X86::VFMSUBSD4rr_Int:
7950 case X86::VFNMSUBSD4rr_Int:
7951 case X86::VFMADD132SDr_Int:
7952 case X86::VFNMADD132SDr_Int:
7953 case X86::VFMADD213SDr_Int:
7954 case X86::VFNMADD213SDr_Int:
7955 case X86::VFMADD231SDr_Int:
7956 case X86::VFNMADD231SDr_Int:
7957 case X86::VFMSUB132SDr_Int:
7958 case X86::VFNMSUB132SDr_Int:
7959 case X86::VFMSUB213SDr_Int:
7960 case X86::VFNMSUB213SDr_Int:
7961 case X86::VFMSUB231SDr_Int:
7962 case X86::VFNMSUB231SDr_Int:
7963 case X86::VFMADD132SDZr_Int:
7964 case X86::VFNMADD132SDZr_Int:
7965 case X86::VFMADD213SDZr_Int:
7966 case X86::VFNMADD213SDZr_Int:
7967 case X86::VFMADD231SDZr_Int:
7968 case X86::VFNMADD231SDZr_Int:
7969 case X86::VFMSUB132SDZr_Int:
7970 case X86::VFNMSUB132SDZr_Int:
7971 case X86::VFMSUB213SDZr_Int:
7972 case X86::VFNMSUB213SDZr_Int:
7973 case X86::VFMSUB231SDZr_Int:
7974 case X86::VFNMSUB231SDZr_Int:
7975 case X86::VFMADD132SDZrk_Int:
7976 case X86::VFNMADD132SDZrk_Int:
7977 case X86::VFMADD213SDZrk_Int:
7978 case X86::VFNMADD213SDZrk_Int:
7979 case X86::VFMADD231SDZrk_Int:
7980 case X86::VFNMADD231SDZrk_Int:
7981 case X86::VFMSUB132SDZrk_Int:
7982 case X86::VFNMSUB132SDZrk_Int:
7983 case X86::VFMSUB213SDZrk_Int:
7984 case X86::VFNMSUB213SDZrk_Int:
7985 case X86::VFMSUB231SDZrk_Int:
7986 case X86::VFNMSUB231SDZrk_Int:
7987 case X86::VFMADD132SDZrkz_Int:
7988 case X86::VFNMADD132SDZrkz_Int:
7989 case X86::VFMADD213SDZrkz_Int:
7990 case X86::VFNMADD213SDZrkz_Int:
7991 case X86::VFMADD231SDZrkz_Int:
7992 case X86::VFNMADD231SDZrkz_Int:
7993 case X86::VFMSUB132SDZrkz_Int:
7994 case X86::VFNMSUB132SDZrkz_Int:
7995 case X86::VFMSUB213SDZrkz_Int:
7996 case X86::VFNMSUB213SDZrkz_Int:
7997 case X86::VFMSUB231SDZrkz_Int:
7998 case X86::VFNMSUB231SDZrkz_Int:
7999 case X86::VFIXUPIMMSDZrri:
8000 case X86::VFIXUPIMMSDZrrik:
8001 case X86::VFIXUPIMMSDZrrikz:
8002 case X86::VFPCLASSSDZri:
8003 case X86::VFPCLASSSDZrik:
8004 case X86::VGETEXPSDZr:
8005 case X86::VGETEXPSDZrk:
8006 case X86::VGETEXPSDZrkz:
8007 case X86::VGETMANTSDZrri:
8008 case X86::VGETMANTSDZrrik:
8009 case X86::VGETMANTSDZrrikz:
8010 case X86::VRANGESDZrri:
8011 case X86::VRANGESDZrrik:
8012 case X86::VRANGESDZrrikz:
8013 case X86::VRCP14SDZrr:
8014 case X86::VRCP14SDZrrk:
8015 case X86::VRCP14SDZrrkz:
8016 case X86::VRCP28SDZr:
8017 case X86::VRCP28SDZrk:
8018 case X86::VRCP28SDZrkz:
8019 case X86::VREDUCESDZrri:
8020 case X86::VREDUCESDZrrik:
8021 case X86::VREDUCESDZrrikz:
8022 case X86::VRNDSCALESDZrri_Int:
8023 case X86::VRNDSCALESDZrrik_Int:
8024 case X86::VRNDSCALESDZrrikz_Int:
8025 case X86::VRSQRT14SDZrr:
8026 case X86::VRSQRT14SDZrrk:
8027 case X86::VRSQRT14SDZrrkz:
8028 case X86::VRSQRT28SDZr:
8029 case X86::VRSQRT28SDZrk:
8030 case X86::VRSQRT28SDZrkz:
8031 case X86::VSCALEFSDZrr:
8032 case X86::VSCALEFSDZrrk:
8033 case X86::VSCALEFSDZrrkz:
8034 return false;
8035 default:
8036 return true;
8037 }
8038 }
8039
8040 if ((Opc == X86::VMOVSHZrm || Opc == X86::VMOVSHZrm_alt) && RegSize > 16) {
8041 // These instructions only load 16 bits; we can't fold them if the
8042 // destination register is wider than 16 bits (2 bytes) and the user
8043 // instruction isn't scalar (SH).
8044 switch (UserOpc) {
8045 case X86::VADDSHZrr_Int:
8046 case X86::VCMPSHZrri_Int:
8047 case X86::VDIVSHZrr_Int:
8048 case X86::VMAXSHZrr_Int:
8049 case X86::VMINSHZrr_Int:
8050 case X86::VMULSHZrr_Int:
8051 case X86::VSUBSHZrr_Int:
8052 case X86::VADDSHZrrk_Int:
8053 case X86::VADDSHZrrkz_Int:
8054 case X86::VCMPSHZrrik_Int:
8055 case X86::VDIVSHZrrk_Int:
8056 case X86::VDIVSHZrrkz_Int:
8057 case X86::VMAXSHZrrk_Int:
8058 case X86::VMAXSHZrrkz_Int:
8059 case X86::VMINSHZrrk_Int:
8060 case X86::VMINSHZrrkz_Int:
8061 case X86::VMULSHZrrk_Int:
8062 case X86::VMULSHZrrkz_Int:
8063 case X86::VSUBSHZrrk_Int:
8064 case X86::VSUBSHZrrkz_Int:
8065 case X86::VFMADD132SHZr_Int:
8066 case X86::VFNMADD132SHZr_Int:
8067 case X86::VFMADD213SHZr_Int:
8068 case X86::VFNMADD213SHZr_Int:
8069 case X86::VFMADD231SHZr_Int:
8070 case X86::VFNMADD231SHZr_Int:
8071 case X86::VFMSUB132SHZr_Int:
8072 case X86::VFNMSUB132SHZr_Int:
8073 case X86::VFMSUB213SHZr_Int:
8074 case X86::VFNMSUB213SHZr_Int:
8075 case X86::VFMSUB231SHZr_Int:
8076 case X86::VFNMSUB231SHZr_Int:
8077 case X86::VFMADD132SHZrk_Int:
8078 case X86::VFNMADD132SHZrk_Int:
8079 case X86::VFMADD213SHZrk_Int:
8080 case X86::VFNMADD213SHZrk_Int:
8081 case X86::VFMADD231SHZrk_Int:
8082 case X86::VFNMADD231SHZrk_Int:
8083 case X86::VFMSUB132SHZrk_Int:
8084 case X86::VFNMSUB132SHZrk_Int:
8085 case X86::VFMSUB213SHZrk_Int:
8086 case X86::VFNMSUB213SHZrk_Int:
8087 case X86::VFMSUB231SHZrk_Int:
8088 case X86::VFNMSUB231SHZrk_Int:
8089 case X86::VFMADD132SHZrkz_Int:
8090 case X86::VFNMADD132SHZrkz_Int:
8091 case X86::VFMADD213SHZrkz_Int:
8092 case X86::VFNMADD213SHZrkz_Int:
8093 case X86::VFMADD231SHZrkz_Int:
8094 case X86::VFNMADD231SHZrkz_Int:
8095 case X86::VFMSUB132SHZrkz_Int:
8096 case X86::VFNMSUB132SHZrkz_Int:
8097 case X86::VFMSUB213SHZrkz_Int:
8098 case X86::VFNMSUB213SHZrkz_Int:
8099 case X86::VFMSUB231SHZrkz_Int:
8100 case X86::VFNMSUB231SHZrkz_Int:
8101 return false;
8102 default:
8103 return true;
8104 }
8105 }
8106
8107 return false;
8108}
8109
8110 MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
8111 MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
8112 MachineBasicBlock::iterator InsertPt, MachineInstr &LoadMI,
8113 LiveIntervals *LIS) const {
8114
8115 // TODO: Support the case where LoadMI loads a wide register, but MI
8116 // only uses a subreg.
8117 for (auto Op : Ops) {
8118 if (MI.getOperand(Op).getSubReg())
8119 return nullptr;
8120 }
8121
8122 // If loading from a FrameIndex, fold directly from the FrameIndex.
8123 unsigned NumOps = LoadMI.getDesc().getNumOperands();
8124 int FrameIndex;
8125 if (isLoadFromStackSlot(LoadMI, FrameIndex)) {
8126 if (isNonFoldablePartialRegisterLoad(LoadMI, MI, MF))
8127 return nullptr;
8128 return foldMemoryOperandImpl(MF, MI, Ops, InsertPt, FrameIndex, LIS);
8129 }
8130
8131 // Check switch flag
8132 if (NoFusing)
8133 return nullptr;
8134
8135 // Avoid partial and undef register update stalls unless optimizing for size.
8136 if (!MF.getFunction().hasOptSize() &&
8137 (hasPartialRegUpdate(MI.getOpcode(), Subtarget, /*ForLoadFold*/ true) ||
8138 shouldPreventUndefRegUpdateMemFold(MF, MI)))
8139 return nullptr;
8140
8141 // Do not fold an NDD instruction with a memory instruction carrying a
8142 // relocation, to avoid emitting an APX relocation when the flag is disabled
8143 // for backward compatibility.
8144 uint64_t TSFlags = MI.getDesc().TSFlags;
8146 X86II::hasNewDataDest(TSFlags))
8147 return nullptr;
8148
8149 // Determine the alignment of the load.
8150 Align Alignment;
8151 unsigned LoadOpc = LoadMI.getOpcode();
8152 if (LoadMI.hasOneMemOperand())
8153 Alignment = (*LoadMI.memoperands_begin())->getAlign();
8154 else
8155 switch (LoadOpc) {
8156 case X86::AVX512_512_SET0:
8157 case X86::AVX512_512_SETALLONES:
8158 Alignment = Align(64);
8159 break;
8160 case X86::AVX2_SETALLONES:
8161 case X86::AVX1_SETALLONES:
8162 case X86::AVX_SET0:
8163 case X86::AVX512_256_SET0:
8164 Alignment = Align(32);
8165 break;
8166 case X86::V_SET0:
8167 case X86::V_SETALLONES:
8168 case X86::AVX512_128_SET0:
8169 case X86::FsFLD0F128:
8170 case X86::AVX512_FsFLD0F128:
8171 Alignment = Align(16);
8172 break;
8173 case X86::MMX_SET0:
8174 case X86::FsFLD0SD:
8175 case X86::AVX512_FsFLD0SD:
8176 Alignment = Align(8);
8177 break;
8178 case X86::FsFLD0SS:
8179 case X86::AVX512_FsFLD0SS:
8180 Alignment = Align(4);
8181 break;
8182 case X86::FsFLD0SH:
8183 case X86::AVX512_FsFLD0SH:
8184 Alignment = Align(2);
8185 break;
8186 default:
8187 return nullptr;
8188 }
8189 if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
8190 unsigned NewOpc = 0;
8191 switch (MI.getOpcode()) {
8192 default:
8193 return nullptr;
8194 case X86::TEST8rr:
8195 NewOpc = X86::CMP8ri;
8196 break;
8197 case X86::TEST16rr:
8198 NewOpc = X86::CMP16ri;
8199 break;
8200 case X86::TEST32rr:
8201 NewOpc = X86::CMP32ri;
8202 break;
8203 case X86::TEST64rr:
8204 NewOpc = X86::CMP64ri32;
8205 break;
8206 }
8207 // Change to CMPXXri r, 0 first.
8208 MI.setDesc(get(NewOpc));
8209 MI.getOperand(1).ChangeToImmediate(0);
8210 } else if (Ops.size() != 1)
8211 return nullptr;
8212
8213 // Make sure the subregisters match.
8214 // Otherwise we risk changing the size of the load.
8215 if (LoadMI.getOperand(0).getSubReg() != MI.getOperand(Ops[0]).getSubReg())
8216 return nullptr;
8217
8218 SmallVector<MachineOperand, X86::AddrNumOperands> MOs;
8219 switch (LoadOpc) {
8220 case X86::MMX_SET0:
8221 case X86::V_SET0:
8222 case X86::V_SETALLONES:
8223 case X86::AVX2_SETALLONES:
8224 case X86::AVX1_SETALLONES:
8225 case X86::AVX_SET0:
8226 case X86::AVX512_128_SET0:
8227 case X86::AVX512_256_SET0:
8228 case X86::AVX512_512_SET0:
8229 case X86::AVX512_512_SETALLONES:
8230 case X86::FsFLD0SH:
8231 case X86::AVX512_FsFLD0SH:
8232 case X86::FsFLD0SD:
8233 case X86::AVX512_FsFLD0SD:
8234 case X86::FsFLD0SS:
8235 case X86::AVX512_FsFLD0SS:
8236 case X86::FsFLD0F128:
8237 case X86::AVX512_FsFLD0F128: {
8238 // Folding a V_SET0 or V_SETALLONES as a load, to ease register pressure.
8239 // Create a constant-pool entry and operands to load from it.
8240
8241 // Large code model can't fold loads this way.
8242 if (MF.getTarget().getCodeModel() == CodeModel::Large)
8243 return nullptr;
8244
8245 // x86-32 PIC requires a PIC base register for constant pools.
8246 unsigned PICBase = 0;
8247 // Since we're using Small or Kernel code model, we can always use
8248 // RIP-relative addressing for a smaller encoding.
8249 if (Subtarget.is64Bit()) {
8250 PICBase = X86::RIP;
8251 } else if (MF.getTarget().isPositionIndependent()) {
8252 // FIXME: PICBase = getGlobalBaseReg(&MF);
8253 // This doesn't work for several reasons.
8254 // 1. GlobalBaseReg may have been spilled.
8255 // 2. It may not be live at MI.
8256 return nullptr;
8257 }
8258
8259 // Create a constant-pool entry.
8260 MachineConstantPool &MCP = *MF.getConstantPool();
8261 Type *Ty;
8262 bool IsAllOnes = false;
8263 switch (LoadOpc) {
8264 case X86::FsFLD0SS:
8265 case X86::AVX512_FsFLD0SS:
8266 Ty = Type::getFloatTy(MF.getFunction().getContext());
8267 break;
8268 case X86::FsFLD0SD:
8269 case X86::AVX512_FsFLD0SD:
8270 Ty = Type::getDoubleTy(MF.getFunction().getContext());
8271 break;
8272 case X86::FsFLD0F128:
8273 case X86::AVX512_FsFLD0F128:
8274 Ty = Type::getFP128Ty(MF.getFunction().getContext());
8275 break;
8276 case X86::FsFLD0SH:
8277 case X86::AVX512_FsFLD0SH:
8278 Ty = Type::getHalfTy(MF.getFunction().getContext());
8279 break;
8280 case X86::AVX512_512_SETALLONES:
8281 IsAllOnes = true;
8282 [[fallthrough]];
8283 case X86::AVX512_512_SET0:
8284 Ty = FixedVectorType::get(Type::getInt32Ty(MF.getFunction().getContext()),
8285 16);
8286 break;
8287 case X86::AVX1_SETALLONES:
8288 case X86::AVX2_SETALLONES:
8289 IsAllOnes = true;
8290 [[fallthrough]];
8291 case X86::AVX512_256_SET0:
8292 case X86::AVX_SET0:
8293 Ty = FixedVectorType::get(Type::getInt32Ty(MF.getFunction().getContext()),
8294 8);
8295
8296 break;
8297 case X86::MMX_SET0:
8298 Ty = FixedVectorType::get(Type::getInt32Ty(MF.getFunction().getContext()),
8299 2);
8300 break;
8301 case X86::V_SETALLONES:
8302 IsAllOnes = true;
8303 [[fallthrough]];
8304 case X86::V_SET0:
8305 case X86::AVX512_128_SET0:
8306 Ty = FixedVectorType::get(Type::getInt32Ty(MF.getFunction().getContext()),
8307 4);
8308 break;
8309 }
8310
8311 const Constant *C =
8312 IsAllOnes ? Constant::getAllOnesValue(Ty) : Constant::getNullValue(Ty);
8313 unsigned CPI = MCP.getConstantPoolIndex(C, Alignment);
8314
8315 // Create operands to load from the constant pool entry.
8316 MOs.push_back(MachineOperand::CreateReg(PICBase, false));
8317 MOs.push_back(MachineOperand::CreateImm(1));
8318 MOs.push_back(MachineOperand::CreateReg(0, false));
8319 MOs.push_back(MachineOperand::CreateCPI(CPI, 0));
8320 MOs.push_back(MachineOperand::CreateReg(0, false));
8321 break;
8322 }
8323 case X86::VPBROADCASTBZ128rm:
8324 case X86::VPBROADCASTBZ256rm:
8325 case X86::VPBROADCASTBZrm:
8326 case X86::VBROADCASTF32X2Z256rm:
8327 case X86::VBROADCASTF32X2Zrm:
8328 case X86::VBROADCASTI32X2Z128rm:
8329 case X86::VBROADCASTI32X2Z256rm:
8330 case X86::VBROADCASTI32X2Zrm:
8331 // No instructions currently fuse with 8-bit or 32-bit x 2 broadcasts.
8332 return nullptr;
8333
8334#define FOLD_BROADCAST(SIZE) \
8335 MOs.append(LoadMI.operands_begin() + NumOps - X86::AddrNumOperands, \
8336 LoadMI.operands_begin() + NumOps); \
8337 return foldMemoryBroadcast(MF, MI, Ops[0], MOs, InsertPt, /*Size=*/SIZE, \
8338 /*AllowCommute=*/true);
8339 case X86::VPBROADCASTWZ128rm:
8340 case X86::VPBROADCASTWZ256rm:
8341 case X86::VPBROADCASTWZrm:
8342 FOLD_BROADCAST(16);
8343 case X86::VPBROADCASTDZ128rm:
8344 case X86::VPBROADCASTDZ256rm:
8345 case X86::VPBROADCASTDZrm:
8346 case X86::VBROADCASTSSZ128rm:
8347 case X86::VBROADCASTSSZ256rm:
8348 case X86::VBROADCASTSSZrm:
8349 FOLD_BROADCAST(32);
8350 case X86::VPBROADCASTQZ128rm:
8351 case X86::VPBROADCASTQZ256rm:
8352 case X86::VPBROADCASTQZrm:
8353 case X86::VBROADCASTSDZ256rm:
8354 case X86::VBROADCASTSDZrm:
8355 FOLD_BROADCAST(64);
8356 default: {
8357 if (isNonFoldablePartialRegisterLoad(LoadMI, MI, MF))
8358 return nullptr;
8359
8360 // Folding a normal load. Just copy the load's address operands.
8361 MOs.append(LoadMI.operands_begin() + NumOps - X86::AddrNumOperands,
8362 LoadMI.operands_begin() + NumOps);
8363 break;
8364 }
8365 }
8366 return foldMemoryOperandImpl(MF, MI, Ops[0], MOs, InsertPt,
8367 /*Size=*/0, Alignment, /*AllowCommute=*/true);
8368}
8369
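/// Attempt to fold a \p BitsSize-bit broadcast load described by \p MOs into
/// operand \p OpNum of \p MI via the broadcast fold table, commuting the
/// instruction first when that is the only way to make the fold legal.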
8370 MachineInstr *
8371 X86InstrInfo::foldMemoryBroadcast(MachineFunction &MF, MachineInstr &MI,
8372 unsigned OpNum, ArrayRef<MachineOperand> MOs,
8373 MachineBasicBlock::iterator InsertPt,
8374 unsigned BitsSize, bool AllowCommute) const {
8375
8376 if (auto *I = lookupBroadcastFoldTable(MI.getOpcode(), OpNum))
8377 return matchBroadcastSize(*I, BitsSize)
8378 ? fuseInst(MF, I->DstOp, OpNum, MOs, InsertPt, MI, *this)
8379 : nullptr;
8380
8381 if (AllowCommute) {
8382 // If the instruction and target operand are commutable, commute the
8383 // instruction and try again.
8384 unsigned CommuteOpIdx2 = commuteOperandsForFold(MI, OpNum);
8385 if (CommuteOpIdx2 == OpNum) {
8386 printFailMsgforFold(MI, OpNum);
8387 return nullptr;
8388 }
8389 MachineInstr *NewMI =
8390 foldMemoryBroadcast(MF, MI, CommuteOpIdx2, MOs, InsertPt, BitsSize,
8391 /*AllowCommute=*/false);
8392 if (NewMI)
8393 return NewMI;
8394 // Folding failed again - undo the commute before returning.
8395 commuteInstruction(MI, false, OpNum, CommuteOpIdx2);
8396 }
8397
8398 printFailMsgforFold(MI, OpNum);
8399 return nullptr;
8400}
8401
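/// Return the subset of \p MMOs that describe loads; memory operands that are
/// both load and store are cloned with the store flag cleared.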
8402 static SmallVector<MachineMemOperand *, 2>
8403 extractLoadMMOs(ArrayRef<MachineMemOperand *> MMOs, MachineFunction &MF) {
8404 SmallVector<MachineMemOperand *, 2> LoadMMOs;
8405 
8406 for (MachineMemOperand *MMO : MMOs) {
8407 if (!MMO->isLoad())
8408 continue;
8409
8410 if (!MMO->isStore()) {
8411 // Reuse the MMO.
8412 LoadMMOs.push_back(MMO);
8413 } else {
8414 // Clone the MMO and unset the store flag.
8415 LoadMMOs.push_back(MF.getMachineMemOperand(
8416 MMO, MMO->getFlags() & ~MachineMemOperand::MOStore));
8417 }
8418 }
8419
8420 return LoadMMOs;
8421}
8422
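/// Return the subset of \p MMOs that describe stores; memory operands that are
/// both load and store are cloned with the load flag cleared.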
8423 static SmallVector<MachineMemOperand *, 2>
8424 extractStoreMMOs(ArrayRef<MachineMemOperand *> MMOs, MachineFunction &MF) {
8425 SmallVector<MachineMemOperand *, 2> StoreMMOs;
8426 
8427 for (MachineMemOperand *MMO : MMOs) {
8428 if (!MMO->isStore())
8429 continue;
8430
8431 if (!MMO->isLoad()) {
8432 // Reuse the MMO.
8433 StoreMMOs.push_back(MMO);
8434 } else {
8435 // Clone the MMO and unset the load flag.
8436 StoreMMOs.push_back(MF.getMachineMemOperand(
8437 MMO, MMO->getFlags() & ~MachineMemOperand::MOLoad));
8438 }
8439 }
8440
8441 return StoreMMOs;
8442}
8443
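/// Select the AVX-512 broadcast-load opcode matching the fold-table entry's
/// broadcast element type and the spill size of \p RC.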
8444 static unsigned getBroadcastOpcode(const X86FoldTableEntry *I,
8445 const TargetRegisterClass *RC,
8446 const X86Subtarget &STI) {
8447 assert(STI.hasAVX512() && "Expected at least AVX512!");
8448 unsigned SpillSize = STI.getRegisterInfo()->getSpillSize(*RC);
8449 assert((SpillSize == 64 || STI.hasVLX()) &&
8450 "Can't broadcast less than 64 bytes without AVX512VL!");
8451
8452#define CASE_BCAST_TYPE_OPC(TYPE, OP16, OP32, OP64) \
8453 case TYPE: \
8454 switch (SpillSize) { \
8455 default: \
8456 llvm_unreachable("Unknown spill size"); \
8457 case 16: \
8458 return X86::OP16; \
8459 case 32: \
8460 return X86::OP32; \
8461 case 64: \
8462 return X86::OP64; \
8463 } \
8464 break;
8465
8466 switch (I->Flags & TB_BCAST_MASK) {
8467 default:
8468 llvm_unreachable("Unexpected broadcast type!");
8469 CASE_BCAST_TYPE_OPC(TB_BCAST_W, VPBROADCASTWZ128rm, VPBROADCASTWZ256rm,
8470 VPBROADCASTWZrm)
8471 CASE_BCAST_TYPE_OPC(TB_BCAST_D, VPBROADCASTDZ128rm, VPBROADCASTDZ256rm,
8472 VPBROADCASTDZrm)
8473 CASE_BCAST_TYPE_OPC(TB_BCAST_Q, VPBROADCASTQZ128rm, VPBROADCASTQZ256rm,
8474 VPBROADCASTQZrm)
8475 CASE_BCAST_TYPE_OPC(TB_BCAST_SH, VPBROADCASTWZ128rm, VPBROADCASTWZ256rm,
8476 VPBROADCASTWZrm)
8477 CASE_BCAST_TYPE_OPC(TB_BCAST_SS, VBROADCASTSSZ128rm, VBROADCASTSSZ256rm,
8478 VBROADCASTSSZrm)
8479 CASE_BCAST_TYPE_OPC(TB_BCAST_SD, VMOVDDUPZ128rm, VBROADCASTSDZ256rm,
8480 VBROADCASTSDZrm)
8481 }
8482}
8483
8485 MachineFunction &MF, MachineInstr &MI, Register Reg, bool UnfoldLoad,
8486 bool UnfoldStore, SmallVectorImpl<MachineInstr *> &NewMIs) const {
8487 const X86FoldTableEntry *I = lookupUnfoldTable(MI.getOpcode());
8488 if (I == nullptr)
8489 return false;
8490 unsigned Opc = I->DstOp;
8491 unsigned Index = I->Flags & TB_INDEX_MASK;
8492 bool FoldedLoad = I->Flags & TB_FOLDED_LOAD;
8493 bool FoldedStore = I->Flags & TB_FOLDED_STORE;
8494 if (UnfoldLoad && !FoldedLoad)
8495 return false;
8496 UnfoldLoad &= FoldedLoad;
8497 if (UnfoldStore && !FoldedStore)
8498 return false;
8499 UnfoldStore &= FoldedStore;
8500
8501 const MCInstrDesc &MCID = get(Opc);
8502
8503 const TargetRegisterClass *RC = getRegClass(MCID, Index, &RI);
8504 const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
8505 // TODO: Check if 32-byte or greater accesses are slow too?
8506 if (!MI.hasOneMemOperand() && RC == &X86::VR128RegClass &&
8507 Subtarget.isUnalignedMem16Slow())
8508 // Without memoperands, loadRegFromAddr and storeRegToStackSlot will
8509 // conservatively assume the address is unaligned. That's bad for
8510 // performance.
8511 return false;
8512 SmallVector<MachineOperand, X86::AddrNumOperands> AddrOps;
8513 SmallVector<MachineOperand, 2> BeforeOps;
8514 SmallVector<MachineOperand, 2> AfterOps;
8515 SmallVector<MachineOperand, 4> ImpOps;
8516 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
8517 MachineOperand &Op = MI.getOperand(i);
8518 if (i >= Index && i < Index + X86::AddrNumOperands)
8519 AddrOps.push_back(Op);
8520 else if (Op.isReg() && Op.isImplicit())
8521 ImpOps.push_back(Op);
8522 else if (i < Index)
8523 BeforeOps.push_back(Op);
8524 else if (i > Index)
8525 AfterOps.push_back(Op);
8526 }
8527
8528 // Emit the load or broadcast instruction.
8529 if (UnfoldLoad) {
8530 auto MMOs = extractLoadMMOs(MI.memoperands(), MF);
8531
8532 unsigned Opc;
8533 if (I->Flags & TB_BCAST_MASK) {
8534 Opc = getBroadcastOpcode(I, RC, Subtarget);
8535 } else {
8536 unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*RC), 16);
8537 bool isAligned = !MMOs.empty() && MMOs.front()->getAlign() >= Alignment;
8538 Opc = getLoadRegOpcode(Reg, RC, isAligned, Subtarget);
8539 }
8540
8541 DebugLoc DL;
8542 MachineInstrBuilder MIB = BuildMI(MF, DL, get(Opc), Reg);
8543 for (const MachineOperand &AddrOp : AddrOps)
8544 MIB.add(AddrOp);
8545 MIB.setMemRefs(MMOs);
8546 NewMIs.push_back(MIB);
8547
8548 if (UnfoldStore) {
8549 // Address operands cannot be marked isKill.
8550 for (unsigned i = 1; i != 1 + X86::AddrNumOperands; ++i) {
8551 MachineOperand &MO = NewMIs[0]->getOperand(i);
8552 if (MO.isReg())
8553 MO.setIsKill(false);
8554 }
8555 }
8556 }
8557
8558 // Emit the data processing instruction.
8559 MachineInstr *DataMI = MF.CreateMachineInstr(MCID, MI.getDebugLoc(), true);
8560 MachineInstrBuilder MIB(MF, DataMI);
8561
8562 if (FoldedStore)
8563 MIB.addReg(Reg, RegState::Define);
8564 for (MachineOperand &BeforeOp : BeforeOps)
8565 MIB.add(BeforeOp);
8566 if (FoldedLoad)
8567 MIB.addReg(Reg);
8568 for (MachineOperand &AfterOp : AfterOps)
8569 MIB.add(AfterOp);
8570 for (MachineOperand &ImpOp : ImpOps) {
8571 MIB.addReg(ImpOp.getReg(), getDefRegState(ImpOp.isDef()) |
8572 RegState::Implicit |
8573 getKillRegState(ImpOp.isKill()) |
8574 getDeadRegState(ImpOp.isDead()) |
8575 getUndefRegState(ImpOp.isUndef()));
8576 }
8577 // Change CMP32ri r, 0 back to TEST32rr r, r, etc.
8578 switch (DataMI->getOpcode()) {
8579 default:
8580 break;
8581 case X86::CMP64ri32:
8582 case X86::CMP32ri:
8583 case X86::CMP16ri:
8584 case X86::CMP8ri: {
8585 MachineOperand &MO0 = DataMI->getOperand(0);
8586 MachineOperand &MO1 = DataMI->getOperand(1);
8587 if (MO1.isImm() && MO1.getImm() == 0) {
8588 unsigned NewOpc;
8589 switch (DataMI->getOpcode()) {
8590 default:
8591 llvm_unreachable("Unreachable!");
8592 case X86::CMP64ri32:
8593 NewOpc = X86::TEST64rr;
8594 break;
8595 case X86::CMP32ri:
8596 NewOpc = X86::TEST32rr;
8597 break;
8598 case X86::CMP16ri:
8599 NewOpc = X86::TEST16rr;
8600 break;
8601 case X86::CMP8ri:
8602 NewOpc = X86::TEST8rr;
8603 break;
8604 }
8605 DataMI->setDesc(get(NewOpc));
8606 MO1.ChangeToRegister(MO0.getReg(), false);
8607 }
8608 }
8609 }
8610 NewMIs.push_back(DataMI);
8611
8612 // Emit the store instruction.
8613 if (UnfoldStore) {
8614 const TargetRegisterClass *DstRC = getRegClass(MCID, 0, &RI);
8615 auto MMOs = extractStoreMMOs(MI.memoperands(), MF);
8616 unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*DstRC), 16);
8617 bool isAligned = !MMOs.empty() && MMOs.front()->getAlign() >= Alignment;
8618 unsigned Opc = getStoreRegOpcode(Reg, DstRC, isAligned, Subtarget);
8619 DebugLoc DL;
8620 MachineInstrBuilder MIB = BuildMI(MF, DL, get(Opc));
8621 for (const MachineOperand &AddrOp : AddrOps)
8622 MIB.add(AddrOp);
8623 MIB.addReg(Reg, RegState::Kill);
8624 MIB.setMemRefs(MMOs);
8625 NewMIs.push_back(MIB);
8626 }
8627
8628 return true;
8629}
8630
8631 bool X86InstrInfo::unfoldMemoryOperand(
8632 SelectionDAG &DAG, SDNode *N, SmallVectorImpl<SDNode *> &NewNodes) const {
8633 if (!N->isMachineOpcode())
8634 return false;
8635
8636 const X86FoldTableEntry *I = lookupUnfoldTable(N->getMachineOpcode());
8637 if (I == nullptr)
8638 return false;
8639 unsigned Opc = I->DstOp;
8640 unsigned Index = I->Flags & TB_INDEX_MASK;
8641 bool FoldedLoad = I->Flags & TB_FOLDED_LOAD;
8642 bool FoldedStore = I->Flags & TB_FOLDED_STORE;
8643 const MCInstrDesc &MCID = get(Opc);
8644 MachineFunction &MF = DAG.getMachineFunction();
8645 const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
8646 const TargetRegisterClass *RC = getRegClass(MCID, Index, &RI);
8647 unsigned NumDefs = MCID.NumDefs;
8648 std::vector<SDValue> AddrOps;
8649 std::vector<SDValue> BeforeOps;
8650 std::vector<SDValue> AfterOps;
8651 SDLoc dl(N);
8652 unsigned NumOps = N->getNumOperands();
8653 for (unsigned i = 0; i != NumOps - 1; ++i) {
8654 SDValue Op = N->getOperand(i);
8655 if (i >= Index - NumDefs && i < Index - NumDefs + X86::AddrNumOperands)
8656 AddrOps.push_back(Op);
8657 else if (i < Index - NumDefs)
8658 BeforeOps.push_back(Op);
8659 else if (i > Index - NumDefs)
8660 AfterOps.push_back(Op);
8661 }
8662 SDValue Chain = N->getOperand(NumOps - 1);
8663 AddrOps.push_back(Chain);
8664
8665 // Emit the load instruction.
8666 SDNode *Load = nullptr;
8667 if (FoldedLoad) {
8668 EVT VT = *TRI.legalclasstypes_begin(*RC);
8669 auto MMOs = extractLoadMMOs(cast<MachineSDNode>(N)->memoperands(), MF);
8670 if (MMOs.empty() && RC == &X86::VR128RegClass &&
8671 Subtarget.isUnalignedMem16Slow())
8672 // Do not introduce a slow unaligned load.
8673 return false;
8674 // FIXME: If a VR128 can have size 32, we should be checking if a 32-byte
8675 // memory access is slow above.
8676
8677 unsigned Opc;
8678 if (I->Flags & TB_BCAST_MASK) {
8679 Opc = getBroadcastOpcode(I, RC, Subtarget);
8680 } else {
8681 unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*RC), 16);
8682 bool isAligned = !MMOs.empty() && MMOs.front()->getAlign() >= Alignment;
8683 Opc = getLoadRegOpcode(0, RC, isAligned, Subtarget);
8684 }
8685
8686 Load = DAG.getMachineNode(Opc, dl, VT, MVT::Other, AddrOps);
8687 NewNodes.push_back(Load);
8688
8689 // Preserve memory reference information.
8690 DAG.setNodeMemRefs(cast<MachineSDNode>(Load), MMOs);
8691 }
8692
8693 // Emit the data processing instruction.
8694 std::vector<EVT> VTs;
8695 const TargetRegisterClass *DstRC = nullptr;
8696 if (MCID.getNumDefs() > 0) {
8697 DstRC = getRegClass(MCID, 0, &RI);
8698 VTs.push_back(*TRI.legalclasstypes_begin(*DstRC));
8699 }
8700 for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
8701 EVT VT = N->getValueType(i);
8702 if (VT != MVT::Other && i >= (unsigned)MCID.getNumDefs())
8703 VTs.push_back(VT);
8704 }
8705 if (Load)
8706 BeforeOps.push_back(SDValue(Load, 0));
8707 llvm::append_range(BeforeOps, AfterOps);
8708 // Change CMP32ri r, 0 back to TEST32rr r, r, etc.
8709 switch (Opc) {
8710 default:
8711 break;
8712 case X86::CMP64ri32:
8713 case X86::CMP32ri:
8714 case X86::CMP16ri:
8715 case X86::CMP8ri:
8716 if (isNullConstant(BeforeOps[1])) {
8717 switch (Opc) {
8718 default:
8719 llvm_unreachable("Unreachable!");
8720 case X86::CMP64ri32:
8721 Opc = X86::TEST64rr;
8722 break;
8723 case X86::CMP32ri:
8724 Opc = X86::TEST32rr;
8725 break;
8726 case X86::CMP16ri:
8727 Opc = X86::TEST16rr;
8728 break;
8729 case X86::CMP8ri:
8730 Opc = X86::TEST8rr;
8731 break;
8732 }
8733 BeforeOps[1] = BeforeOps[0];
8734 }
8735 }
8736 SDNode *NewNode = DAG.getMachineNode(Opc, dl, VTs, BeforeOps);
8737 NewNodes.push_back(NewNode);
8738
8739 // Emit the store instruction.
8740 if (FoldedStore) {
8741 AddrOps.pop_back();
8742 AddrOps.push_back(SDValue(NewNode, 0));
8743 AddrOps.push_back(Chain);
8744 auto MMOs = extractStoreMMOs(cast<MachineSDNode>(N)->memoperands(), MF);
8745 if (MMOs.empty() && RC == &X86::VR128RegClass &&
8746 Subtarget.isUnalignedMem16Slow())
8747 // Do not introduce a slow unaligned store.
8748 return false;
8749 // FIXME: If a VR128 can have size 32, we should be checking if a 32-byte
8750 // memory access is slow above.
8751 unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*RC), 16);
8752 bool isAligned = !MMOs.empty() && MMOs.front()->getAlign() >= Alignment;
8753 SDNode *Store =
8754 DAG.getMachineNode(getStoreRegOpcode(0, DstRC, isAligned, Subtarget),
8755 dl, MVT::Other, AddrOps);
8756 NewNodes.push_back(Store);
8757
8758 // Preserve memory reference information.
8759 DAG.setNodeMemRefs(cast<MachineSDNode>(Store), MMOs);
8760 }
8761
8762 return true;
8763}
8764
8765unsigned
8766 X86InstrInfo::getOpcodeAfterMemoryUnfold(unsigned Opc, bool UnfoldLoad,
8767 bool UnfoldStore,
8768 unsigned *LoadRegIndex) const {
8769 const X86FoldTableEntry *I = lookupUnfoldTable(Opc);
8770 if (I == nullptr)
8771 return 0;
8772 bool FoldedLoad = I->Flags & TB_FOLDED_LOAD;
8773 bool FoldedStore = I->Flags & TB_FOLDED_STORE;
8774 if (UnfoldLoad && !FoldedLoad)
8775 return 0;
8776 if (UnfoldStore && !FoldedStore)
8777 return 0;
8778 if (LoadRegIndex)
8779 *LoadRegIndex = I->Flags & TB_INDEX_MASK;
8780 return I->DstOp;
8781}
8782
8783 bool X86InstrInfo::areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
8784 int64_t &Offset1,
8785 int64_t &Offset2) const {
8786 if (!Load1->isMachineOpcode() || !Load2->isMachineOpcode())
8787 return false;
8788
8789 auto IsLoadOpcode = [&](unsigned Opcode) {
8790 switch (Opcode) {
8791 default:
8792 return false;
8793 case X86::MOV8rm:
8794 case X86::MOV16rm:
8795 case X86::MOV32rm:
8796 case X86::MOV64rm:
8797 case X86::LD_Fp32m:
8798 case X86::LD_Fp64m:
8799 case X86::LD_Fp80m:
8800 case X86::MOVSSrm:
8801 case X86::MOVSSrm_alt:
8802 case X86::MOVSDrm:
8803 case X86::MOVSDrm_alt:
8804 case X86::MMX_MOVD64rm:
8805 case X86::MMX_MOVQ64rm:
8806 case X86::MOVAPSrm:
8807 case X86::MOVUPSrm:
8808 case X86::MOVAPDrm:
8809 case X86::MOVUPDrm:
8810 case X86::MOVDQArm:
8811 case X86::MOVDQUrm:
8812 // AVX load instructions
8813 case X86::VMOVSSrm:
8814 case X86::VMOVSSrm_alt:
8815 case X86::VMOVSDrm:
8816 case X86::VMOVSDrm_alt:
8817 case X86::VMOVAPSrm:
8818 case X86::VMOVUPSrm:
8819 case X86::VMOVAPDrm:
8820 case X86::VMOVUPDrm:
8821 case X86::VMOVDQArm:
8822 case X86::VMOVDQUrm:
8823 case X86::VMOVAPSYrm:
8824 case X86::VMOVUPSYrm:
8825 case X86::VMOVAPDYrm:
8826 case X86::VMOVUPDYrm:
8827 case X86::VMOVDQAYrm:
8828 case X86::VMOVDQUYrm:
8829 // AVX512 load instructions
8830 case X86::VMOVSSZrm:
8831 case X86::VMOVSSZrm_alt:
8832 case X86::VMOVSDZrm:
8833 case X86::VMOVSDZrm_alt:
8834 case X86::VMOVAPSZ128rm:
8835 case X86::VMOVUPSZ128rm:
8836 case X86::VMOVAPSZ128rm_NOVLX:
8837 case X86::VMOVUPSZ128rm_NOVLX:
8838 case X86::VMOVAPDZ128rm:
8839 case X86::VMOVUPDZ128rm:
8840 case X86::VMOVDQU8Z128rm:
8841 case X86::VMOVDQU16Z128rm:
8842 case X86::VMOVDQA32Z128rm:
8843 case X86::VMOVDQU32Z128rm:
8844 case X86::VMOVDQA64Z128rm:
8845 case X86::VMOVDQU64Z128rm:
8846 case X86::VMOVAPSZ256rm:
8847 case X86::VMOVUPSZ256rm:
8848 case X86::VMOVAPSZ256rm_NOVLX:
8849 case X86::VMOVUPSZ256rm_NOVLX:
8850 case X86::VMOVAPDZ256rm:
8851 case X86::VMOVUPDZ256rm:
8852 case X86::VMOVDQU8Z256rm:
8853 case X86::VMOVDQU16Z256rm:
8854 case X86::VMOVDQA32Z256rm:
8855 case X86::VMOVDQU32Z256rm:
8856 case X86::VMOVDQA64Z256rm:
8857 case X86::VMOVDQU64Z256rm:
8858 case X86::VMOVAPSZrm:
8859 case X86::VMOVUPSZrm:
8860 case X86::VMOVAPDZrm:
8861 case X86::VMOVUPDZrm:
8862 case X86::VMOVDQU8Zrm:
8863 case X86::VMOVDQU16Zrm:
8864 case X86::VMOVDQA32Zrm:
8865 case X86::VMOVDQU32Zrm:
8866 case X86::VMOVDQA64Zrm:
8867 case X86::VMOVDQU64Zrm:
8868 case X86::KMOVBkm:
8869 case X86::KMOVBkm_EVEX:
8870 case X86::KMOVWkm:
8871 case X86::KMOVWkm_EVEX:
8872 case X86::KMOVDkm:
8873 case X86::KMOVDkm_EVEX:
8874 case X86::KMOVQkm:
8875 case X86::KMOVQkm_EVEX:
8876 return true;
8877 }
8878 };
8879
8880 if (!IsLoadOpcode(Load1->getMachineOpcode()) ||
8881 !IsLoadOpcode(Load2->getMachineOpcode()))
8882 return false;
8883
8884 // Lambda to check if both the loads have the same value for an operand index.
8885 auto HasSameOp = [&](int I) {
8886 return Load1->getOperand(I) == Load2->getOperand(I);
8887 };
8888
8889 // All operands except the displacement should match.
8890 if (!HasSameOp(X86::AddrBaseReg) || !HasSameOp(X86::AddrScaleAmt) ||
8891 !HasSameOp(X86::AddrIndexReg) || !HasSameOp(X86::AddrSegmentReg))
8892 return false;
8893
8894 // Chain Operand must be the same.
8895 if (!HasSameOp(5))
8896 return false;
8897
8898 // Now let's examine if the displacements are constants.
8899 auto Disp1 = dyn_cast<ConstantSDNode>(Load1->getOperand(X86::AddrDisp));
8900 auto Disp2 = dyn_cast<ConstantSDNode>(Load2->getOperand(X86::AddrDisp));
8901 if (!Disp1 || !Disp2)
8902 return false;
8903
8904 Offset1 = Disp1->getSExtValue();
8905 Offset2 = Disp2->getSExtValue();
8906 return true;
8907}
8908
8909 bool X86InstrInfo::shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
8910 int64_t Offset1, int64_t Offset2,
8911 unsigned NumLoads) const {
8912 assert(Offset2 > Offset1);
8913 if ((Offset2 - Offset1) / 8 > 64)
8914 return false;
8915
8916 unsigned Opc1 = Load1->getMachineOpcode();
8917 unsigned Opc2 = Load2->getMachineOpcode();
8918 if (Opc1 != Opc2)
8919 return false; // FIXME: overly conservative?
8920
8921 switch (Opc1) {
8922 default:
8923 break;
8924 case X86::LD_Fp32m:
8925 case X86::LD_Fp64m:
8926 case X86::LD_Fp80m:
8927 case X86::MMX_MOVD64rm:
8928 case X86::MMX_MOVQ64rm:
8929 return false;
8930 }
8931
8932 EVT VT = Load1->getValueType(0);
8933 switch (VT.getSimpleVT().SimpleTy) {
8934 default:
8935 // XMM registers. In 64-bit mode we can be a bit more aggressive since we
8936 // have 16 of them to play with.
8937 if (Subtarget.is64Bit()) {
8938 if (NumLoads >= 3)
8939 return false;
8940 } else if (NumLoads) {
8941 return false;
8942 }
8943 break;
8944 case MVT::i8:
8945 case MVT::i16:
8946 case MVT::i32:
8947 case MVT::i64:
8948 case MVT::f32:
8949 case MVT::f64:
8950 if (NumLoads)
8951 return false;
8952 break;
8953 }
8954
8955 return true;
8956}
8957
8958bool X86InstrInfo::isSchedulingBoundary(const MachineInstr &MI,
8959 const MachineBasicBlock *MBB,
8960 const MachineFunction &MF) const {
8961
8962 // ENDBR and PLDTILECFGV instructions should not be scheduled around.
8963 unsigned Opcode = MI.getOpcode();
8964 if (Opcode == X86::ENDBR64 || Opcode == X86::ENDBR32 ||
8965 Opcode == X86::PLDTILECFGV)
8966 return true;
8967
8968 // Frame setup and destroy can't be scheduled around.
8969 if (MI.getFlag(MachineInstr::FrameSetup) ||
8970 MI.getFlag(MachineInstr::FrameDestroy))
8971 return true;
8972
8973 return TargetInstrInfo::isSchedulingBoundary(MI, MBB, MF);
8974}
8975
8976bool X86InstrInfo::reverseBranchCondition(
8977 SmallVectorImpl<MachineOperand> &Cond) const {
8978 assert(Cond.size() == 1 && "Invalid X86 branch condition!");
8979 X86::CondCode CC = static_cast<X86::CondCode>(Cond[0].getImm());
8980 Cond[0].setImm(GetOppositeBranchCondition(CC));
8981 return false;
8982}
8983
8984bool X86InstrInfo::isSafeToMoveRegClassDefs(
8985 const TargetRegisterClass *RC) const {
8986 // FIXME: Return false for x87 stack register classes for now. We can't
8987 // allow any loads of these registers before FpGet_ST0_80.
8988 return !(RC == &X86::CCRRegClass || RC == &X86::DFCCRRegClass ||
8989 RC == &X86::RFP32RegClass || RC == &X86::RFP64RegClass ||
8990 RC == &X86::RFP80RegClass);
8991}
8992
8993/// Return a virtual register initialized with the
8994/// global base register value. Output instructions required to
8995/// initialize the register in the function entry block, if necessary.
8996///
8997/// TODO: Eliminate this and move the code to X86MachineFunctionInfo.
8998///
8999Register X86InstrInfo::getGlobalBaseReg(MachineFunction *MF) const {
9000 X86MachineFunctionInfo *X86FI = MF->getInfo<X86MachineFunctionInfo>();
9001 Register GlobalBaseReg = X86FI->getGlobalBaseReg();
9002 if (GlobalBaseReg)
9003 return GlobalBaseReg;
9004
9005 // Create the register. The code to initialize it is inserted
9006 // later, by the CGBR pass (below).
9007 MachineRegisterInfo &RegInfo = MF->getRegInfo();
9008 GlobalBaseReg = RegInfo.createVirtualRegister(
9009 Subtarget.is64Bit() ? &X86::GR64_NOSPRegClass : &X86::GR32_NOSPRegClass);
9010 X86FI->setGlobalBaseReg(GlobalBaseReg);
9011 return GlobalBaseReg;
9012}
9013
9014// FIXME: Some shuffle and unpack instructions have equivalents in different
9015// domains, but they require a bit more work than just switching opcodes.
9016
9017static const uint16_t *lookup(unsigned opcode, unsigned domain,
9018 ArrayRef<uint16_t[3]> Table) {
9019 for (const uint16_t(&Row)[3] : Table)
9020 if (Row[domain - 1] == opcode)
9021 return Row;
9022 return nullptr;
9023}
9024
9025static const uint16_t *lookupAVX512(unsigned opcode, unsigned domain,
9026 ArrayRef<uint16_t[4]> Table) {
9027 // If this is the integer domain make sure to check both integer columns.
9028 for (const uint16_t(&Row)[4] : Table)
9029 if (Row[domain - 1] == opcode || (domain == 3 && Row[3] == opcode))
9030 return Row;
9031 return nullptr;
9032}
9033
9034// Helper to attempt to widen/narrow blend masks.
9035static bool AdjustBlendMask(unsigned OldMask, unsigned OldWidth,
9036 unsigned NewWidth, unsigned *pNewMask = nullptr) {
9037 assert(((OldWidth % NewWidth) == 0 || (NewWidth % OldWidth) == 0) &&
9038 "Illegal blend mask scale");
9039 unsigned NewMask = 0;
9040
9041 if ((OldWidth % NewWidth) == 0) {
9042 unsigned Scale = OldWidth / NewWidth;
9043 unsigned SubMask = (1u << Scale) - 1;
9044 for (unsigned i = 0; i != NewWidth; ++i) {
9045 unsigned Sub = (OldMask >> (i * Scale)) & SubMask;
9046 if (Sub == SubMask)
9047 NewMask |= (1u << i);
9048 else if (Sub != 0x0)
9049 return false;
9050 }
9051 } else {
9052 unsigned Scale = NewWidth / OldWidth;
9053 unsigned SubMask = (1u << Scale) - 1;
9054 for (unsigned i = 0; i != OldWidth; ++i) {
9055 if (OldMask & (1 << i)) {
9056 NewMask |= (SubMask << (i * Scale));
9057 }
9058 }
9059 }
9060
9061 if (pNewMask)
9062 *pNewMask = NewMask;
9063 return true;
9064}
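// Illustrative note (not part of the original file): a mask only survives
// narrowing when each group of adjacent lanes is selected uniformly. For
// example, the v8i16 mask 0b11001111 narrows to the v4i32 mask 0b1011, while
// 0b11001101 cannot be narrowed because lanes 0-1 are split. Widening always
// succeeds: the v2f64 mask 0b10 widens to the v4f32 mask 0b1100.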
9065
9066uint16_t X86InstrInfo::getExecutionDomainCustom(const MachineInstr &MI) const {
9067 unsigned Opcode = MI.getOpcode();
9068 unsigned NumOperands = MI.getDesc().getNumOperands();
9069
9070 auto GetBlendDomains = [&](unsigned ImmWidth, bool Is256) {
9071 uint16_t validDomains = 0;
9072 if (MI.getOperand(NumOperands - 1).isImm()) {
9073 unsigned Imm = MI.getOperand(NumOperands - 1).getImm();
9074 if (AdjustBlendMask(Imm, ImmWidth, Is256 ? 8 : 4))
9075 validDomains |= 0x2; // PackedSingle
9076 if (AdjustBlendMask(Imm, ImmWidth, Is256 ? 4 : 2))
9077 validDomains |= 0x4; // PackedDouble
9078 if (!Is256 || Subtarget.hasAVX2())
9079 validDomains |= 0x8; // PackedInt
9080 }
9081 return validDomains;
9082 };
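// Illustrative note (not part of the original file): for BLENDPSrri the
// PackedDouble bit is only reported when the 4-bit mask narrows cleanly,
// e.g. imm 0b0011 yields 0xe while imm 0b0010 yields 0xa (single + integer).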
9083
9084 switch (Opcode) {
9085 case X86::BLENDPDrmi:
9086 case X86::BLENDPDrri:
9087 case X86::VBLENDPDrmi:
9088 case X86::VBLENDPDrri:
9089 return GetBlendDomains(2, false);
9090 case X86::VBLENDPDYrmi:
9091 case X86::VBLENDPDYrri:
9092 return GetBlendDomains(4, true);
9093 case X86::BLENDPSrmi:
9094 case X86::BLENDPSrri:
9095 case X86::VBLENDPSrmi:
9096 case X86::VBLENDPSrri:
9097 case X86::VPBLENDDrmi:
9098 case X86::VPBLENDDrri:
9099 return GetBlendDomains(4, false);
9100 case X86::VBLENDPSYrmi:
9101 case X86::VBLENDPSYrri:
9102 case X86::VPBLENDDYrmi:
9103 case X86::VPBLENDDYrri:
9104 return GetBlendDomains(8, true);
9105 case X86::PBLENDWrmi:
9106 case X86::PBLENDWrri:
9107 case X86::VPBLENDWrmi:
9108 case X86::VPBLENDWrri:
9109 // Treat VPBLENDWY as a 128-bit vector as it repeats the lo/hi masks.
9110 case X86::VPBLENDWYrmi:
9111 case X86::VPBLENDWYrri:
9112 return GetBlendDomains(8, false);
9113 case X86::VPANDDZ128rr:
9114 case X86::VPANDDZ128rm:
9115 case X86::VPANDDZ256rr:
9116 case X86::VPANDDZ256rm:
9117 case X86::VPANDQZ128rr:
9118 case X86::VPANDQZ128rm:
9119 case X86::VPANDQZ256rr:
9120 case X86::VPANDQZ256rm:
9121 case X86::VPANDNDZ128rr:
9122 case X86::VPANDNDZ128rm:
9123 case X86::VPANDNDZ256rr:
9124 case X86::VPANDNDZ256rm:
9125 case X86::VPANDNQZ128rr:
9126 case X86::VPANDNQZ128rm:
9127 case X86::VPANDNQZ256rr:
9128 case X86::VPANDNQZ256rm:
9129 case X86::VPORDZ128rr:
9130 case X86::VPORDZ128rm:
9131 case X86::VPORDZ256rr:
9132 case X86::VPORDZ256rm:
9133 case X86::VPORQZ128rr:
9134 case X86::VPORQZ128rm:
9135 case X86::VPORQZ256rr:
9136 case X86::VPORQZ256rm:
9137 case X86::VPXORDZ128rr:
9138 case X86::VPXORDZ128rm:
9139 case X86::VPXORDZ256rr:
9140 case X86::VPXORDZ256rm:
9141 case X86::VPXORQZ128rr:
9142 case X86::VPXORQZ128rm:
9143 case X86::VPXORQZ256rr:
9144 case X86::VPXORQZ256rm:
9145 // If we don't have DQI see if we can still switch from an EVEX integer
9146 // instruction to a VEX floating point instruction.
9147 if (Subtarget.hasDQI())
9148 return 0;
9149
9150 if (RI.getEncodingValue(MI.getOperand(0).getReg()) >= 16)
9151 return 0;
9152 if (RI.getEncodingValue(MI.getOperand(1).getReg()) >= 16)
9153 return 0;
9154 // Register forms will have 3 operands. Memory form will have more.
9155 if (NumOperands == 3 &&
9156 RI.getEncodingValue(MI.getOperand(2).getReg()) >= 16)
9157 return 0;
9158
9159 // All domains are valid.
9160 return 0xe;
9161 case X86::MOVHLPSrr:
9162 // We can swap domains when both inputs are the same register.
9163 // FIXME: This doesn't catch all the cases we would like. If the input
9164 // register isn't KILLed by the instruction, the two-address instruction
9165 // pass puts a COPY on one input. The other input uses the original
9166 // register. This prevents the same physical register from being used by
9167 // both inputs.
9168 if (MI.getOperand(1).getReg() == MI.getOperand(2).getReg() &&
9169 MI.getOperand(0).getSubReg() == 0 &&
9170 MI.getOperand(1).getSubReg() == 0 && MI.getOperand(2).getSubReg() == 0)
9171 return 0x6;
9172 return 0;
9173 case X86::SHUFPDrri:
9174 return 0x6;
9175 }
9176 return 0;
9177}
9178
9179#include "X86ReplaceableInstrs.def"
9180
9181bool X86InstrInfo::setExecutionDomainCustom(MachineInstr &MI,
9182 unsigned Domain) const {
9183 assert(Domain > 0 && Domain < 4 && "Invalid execution domain");
9184 uint16_t dom = (MI.getDesc().TSFlags >> X86II::SSEDomainShift) & 3;
9185 assert(dom && "Not an SSE instruction");
9186
9187 unsigned Opcode = MI.getOpcode();
9188 unsigned NumOperands = MI.getDesc().getNumOperands();
9189
9190 auto SetBlendDomain = [&](unsigned ImmWidth, bool Is256) {
9191 if (MI.getOperand(NumOperands - 1).isImm()) {
9192 unsigned Imm = MI.getOperand(NumOperands - 1).getImm() & 255;
9193 Imm = (ImmWidth == 16 ? ((Imm << 8) | Imm) : Imm);
9194 unsigned NewImm = Imm;
9195
9196 const uint16_t *table = lookup(Opcode, dom, ReplaceableBlendInstrs);
9197 if (!table)
9198 table = lookup(Opcode, dom, ReplaceableBlendAVX2Instrs);
9199
9200 if (Domain == 1) { // PackedSingle
9201 AdjustBlendMask(Imm, ImmWidth, Is256 ? 8 : 4, &NewImm);
9202 } else if (Domain == 2) { // PackedDouble
9203 AdjustBlendMask(Imm, ImmWidth, Is256 ? 4 : 2, &NewImm);
9204 } else if (Domain == 3) { // PackedInt
9205 if (Subtarget.hasAVX2()) {
9206 // If we are already VPBLENDW use that, else use VPBLENDD.
9207 if ((ImmWidth / (Is256 ? 2 : 1)) != 8) {
9208 table = lookup(Opcode, dom, ReplaceableBlendAVX2Instrs);
9209 AdjustBlendMask(Imm, ImmWidth, Is256 ? 8 : 4, &NewImm);
9210 }
9211 } else {
9212 assert(!Is256 && "128-bit vector expected");
9213 AdjustBlendMask(Imm, ImmWidth, 8, &NewImm);
9214 }
9215 }
9216
9217 assert(table && table[Domain - 1] && "Unknown domain op");
9218 MI.setDesc(get(table[Domain - 1]));
9219 MI.getOperand(NumOperands - 1).setImm(NewImm & 255);
9220 }
9221 return true;
9222 };
9223
9224 switch (Opcode) {
9225 case X86::BLENDPDrmi:
9226 case X86::BLENDPDrri:
9227 case X86::VBLENDPDrmi:
9228 case X86::VBLENDPDrri:
9229 return SetBlendDomain(2, false);
9230 case X86::VBLENDPDYrmi:
9231 case X86::VBLENDPDYrri:
9232 return SetBlendDomain(4, true);
9233 case X86::BLENDPSrmi:
9234 case X86::BLENDPSrri:
9235 case X86::VBLENDPSrmi:
9236 case X86::VBLENDPSrri:
9237 case X86::VPBLENDDrmi:
9238 case X86::VPBLENDDrri:
9239 return SetBlendDomain(4, false);
9240 case X86::VBLENDPSYrmi:
9241 case X86::VBLENDPSYrri:
9242 case X86::VPBLENDDYrmi:
9243 case X86::VPBLENDDYrri:
9244 return SetBlendDomain(8, true);
9245 case X86::PBLENDWrmi:
9246 case X86::PBLENDWrri:
9247 case X86::VPBLENDWrmi:
9248 case X86::VPBLENDWrri:
9249 return SetBlendDomain(8, false);
9250 case X86::VPBLENDWYrmi:
9251 case X86::VPBLENDWYrri:
9252 return SetBlendDomain(16, true);
9253 case X86::VPANDDZ128rr:
9254 case X86::VPANDDZ128rm:
9255 case X86::VPANDDZ256rr:
9256 case X86::VPANDDZ256rm:
9257 case X86::VPANDQZ128rr:
9258 case X86::VPANDQZ128rm:
9259 case X86::VPANDQZ256rr:
9260 case X86::VPANDQZ256rm:
9261 case X86::VPANDNDZ128rr:
9262 case X86::VPANDNDZ128rm:
9263 case X86::VPANDNDZ256rr:
9264 case X86::VPANDNDZ256rm:
9265 case X86::VPANDNQZ128rr:
9266 case X86::VPANDNQZ128rm:
9267 case X86::VPANDNQZ256rr:
9268 case X86::VPANDNQZ256rm:
9269 case X86::VPORDZ128rr:
9270 case X86::VPORDZ128rm:
9271 case X86::VPORDZ256rr:
9272 case X86::VPORDZ256rm:
9273 case X86::VPORQZ128rr:
9274 case X86::VPORQZ128rm:
9275 case X86::VPORQZ256rr:
9276 case X86::VPORQZ256rm:
9277 case X86::VPXORDZ128rr:
9278 case X86::VPXORDZ128rm:
9279 case X86::VPXORDZ256rr:
9280 case X86::VPXORDZ256rm:
9281 case X86::VPXORQZ128rr:
9282 case X86::VPXORQZ128rm:
9283 case X86::VPXORQZ256rr:
9284 case X86::VPXORQZ256rm: {
9285 // Without DQI, convert EVEX instructions to VEX instructions.
9286 if (Subtarget.hasDQI())
9287 return false;
9288
9289 const uint16_t *table =
9290 lookupAVX512(MI.getOpcode(), dom, ReplaceableCustomAVX512LogicInstrs);
9291 assert(table && "Instruction not found in table?");
9292 // Don't change integer Q instructions to D instructions and
9293 // use D instructions if we started with a PS instruction.
9294 if (Domain == 3 && (dom == 1 || table[3] == MI.getOpcode()))
9295 Domain = 4;
9296 MI.setDesc(get(table[Domain - 1]));
9297 return true;
9298 }
9299 case X86::UNPCKHPDrr:
9300 case X86::MOVHLPSrr:
9301 // We just need to commute the instruction which will switch the domains.
9302 if (Domain != dom && Domain != 3 &&
9303 MI.getOperand(1).getReg() == MI.getOperand(2).getReg() &&
9304 MI.getOperand(0).getSubReg() == 0 &&
9305 MI.getOperand(1).getSubReg() == 0 &&
9306 MI.getOperand(2).getSubReg() == 0) {
9307 commuteInstruction(MI, false);
9308 return true;
9309 }
9310 // We must always return true for MOVHLPSrr.
9311 if (Opcode == X86::MOVHLPSrr)
9312 return true;
9313 break;
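// Illustrative note (not part of the original file): SHUFPDrri selects one
// f64 element per source using immediate bits 0 and 1. The PackedSingle
// conversion below rebuilds an equivalent SHUFPS immediate: 0x44 picks the
// low f64 of each source, and OR-ing in 0x0a (or 0xa0) switches the first
// (or second) source to its high f64, so SHUFPD imm=1 becomes SHUFPS imm=0x4e.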
9314 case X86::SHUFPDrri: {
9315 if (Domain == 1) {
9316 unsigned Imm = MI.getOperand(3).getImm();
9317 unsigned NewImm = 0x44;
9318 if (Imm & 1)
9319 NewImm |= 0x0a;
9320 if (Imm & 2)
9321 NewImm |= 0xa0;
9322 MI.getOperand(3).setImm(NewImm);
9323 MI.setDesc(get(X86::SHUFPSrri));
9324 }
9325 return true;
9326 }
9327 }
9328 return false;
9329}
9330
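// Illustrative note (not part of the original file): getExecutionDomain
// returns (current domain, bitmask of legal domains), where bit 1 means
// PackedSingle, bit 2 PackedDouble and bit 3 PackedInt. A mask of 0xe means
// the instruction can be re-encoded in any of the three domains, while 0x6
// restricts it to the two floating-point domains.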
9331std::pair<uint16_t, uint16_t>
9332X86InstrInfo::getExecutionDomain(const MachineInstr &MI) const {
9333 uint16_t domain = (MI.getDesc().TSFlags >> X86II::SSEDomainShift) & 3;
9334 unsigned opcode = MI.getOpcode();
9335 uint16_t validDomains = 0;
9336 if (domain) {
9337 // Attempt to match for custom instructions.
9338 validDomains = getExecutionDomainCustom(MI);
9339 if (validDomains)
9340 return std::make_pair(domain, validDomains);
9341
9342 if (lookup(opcode, domain, ReplaceableInstrs)) {
9343 validDomains = 0xe;
9344 } else if (lookup(opcode, domain, ReplaceableInstrsAVX2)) {
9345 validDomains = Subtarget.hasAVX2() ? 0xe : 0x6;
9346 } else if (lookup(opcode, domain, ReplaceableInstrsFP)) {
9347 validDomains = 0x6;
9348 } else if (lookup(opcode, domain, ReplaceableInstrsAVX2InsertExtract)) {
9349 // Insert/extract instructions should only affect the domain if AVX2
9350 // is enabled.
9351 if (!Subtarget.hasAVX2())
9352 return std::make_pair(0, 0);
9353 validDomains = 0xe;
9354 } else if (lookupAVX512(opcode, domain, ReplaceableInstrsAVX512)) {
9355 validDomains = 0xe;
9356 } else if (Subtarget.hasDQI() &&
9357 lookupAVX512(opcode, domain, ReplaceableInstrsAVX512DQ)) {
9358 validDomains = 0xe;
9359 } else if (Subtarget.hasDQI()) {
9360 if (const uint16_t *table =
9361 lookupAVX512(opcode, domain, ReplaceableInstrsAVX512DQMasked)) {
9362 if (domain == 1 || (domain == 3 && table[3] == opcode))
9363 validDomains = 0xa;
9364 else
9365 validDomains = 0xc;
9366 }
9367 }
9368 }
9369 return std::make_pair(domain, validDomains);
9370}
9371
9372void X86InstrInfo::setExecutionDomain(MachineInstr &MI, unsigned Domain) const {
9373 assert(Domain > 0 && Domain < 4 && "Invalid execution domain");
9374 uint16_t dom = (MI.getDesc().TSFlags >> X86II::SSEDomainShift) & 3;
9375 assert(dom && "Not an SSE instruction");
9376
9377 // Attempt to match for custom instructions.
9378 if (setExecutionDomainCustom(MI, Domain))
9379 return;
9380
9381 const uint16_t *table = lookup(MI.getOpcode(), dom, ReplaceableInstrs);
9382 if (!table) { // try the other table
9383 assert((Subtarget.hasAVX2() || Domain < 3) &&
9384 "256-bit vector operations only available in AVX2");
9385 table = lookup(MI.getOpcode(), dom, ReplaceableInstrsAVX2);
9386 }
9387 if (!table) { // try the FP table
9388 table = lookup(MI.getOpcode(), dom, ReplaceableInstrsFP);
9389 assert((!table || Domain < 3) &&
9390 "Can only select PackedSingle or PackedDouble");
9391 }
9392 if (!table) { // try the other table
9393 assert(Subtarget.hasAVX2() &&
9394 "256-bit insert/extract only available in AVX2");
9395 table = lookup(MI.getOpcode(), dom, ReplaceableInstrsAVX2InsertExtract);
9396 }
9397 if (!table) { // try the AVX512 table
9398 assert(Subtarget.hasAVX512() && "Requires AVX-512");
9399 table = lookupAVX512(MI.getOpcode(), dom, ReplaceableInstrsAVX512);
9400 // Don't change integer Q instructions to D instructions.
9401 if (table && Domain == 3 && table[3] == MI.getOpcode())
9402 Domain = 4;
9403 }
9404 if (!table) { // try the AVX512DQ table
9405 assert((Subtarget.hasDQI() || Domain >= 3) && "Requires AVX-512DQ");
9406 table = lookupAVX512(MI.getOpcode(), dom, ReplaceableInstrsAVX512DQ);
9407 // Don't change integer Q instructions to D instructions and
9408 // use D instructions if we started with a PS instruction.
9409 if (table && Domain == 3 && (dom == 1 || table[3] == MI.getOpcode()))
9410 Domain = 4;
9411 }
9412 if (!table) { // try the AVX512DQMasked table
9413 assert((Subtarget.hasDQI() || Domain >= 3) && "Requires AVX-512DQ");
9414 table = lookupAVX512(MI.getOpcode(), dom, ReplaceableInstrsAVX512DQMasked);
9415 if (table && Domain == 3 && (dom == 1 || table[3] == MI.getOpcode()))
9416 Domain = 4;
9417 }
9418 assert(table && "Cannot change domain");
9419 MI.setDesc(get(table[Domain - 1]));
9420}
9421
9427
9428/// Return the noop instruction to use for a noop.
9429MCInst X86InstrInfo::getNop() const {
9430 MCInst Nop;
9431 Nop.setOpcode(X86::NOOP);
9432 return Nop;
9433}
9434
9435bool X86InstrInfo::isHighLatencyDef(int opc) const {
9436 switch (opc) {
9437 default:
9438 return false;
9439 case X86::DIVPDrm:
9440 case X86::DIVPDrr:
9441 case X86::DIVPSrm:
9442 case X86::DIVPSrr:
9443 case X86::DIVSDrm:
9444 case X86::DIVSDrm_Int:
9445 case X86::DIVSDrr:
9446 case X86::DIVSDrr_Int:
9447 case X86::DIVSSrm:
9448 case X86::DIVSSrm_Int:
9449 case X86::DIVSSrr:
9450 case X86::DIVSSrr_Int:
9451 case X86::SQRTPDm:
9452 case X86::SQRTPDr:
9453 case X86::SQRTPSm:
9454 case X86::SQRTPSr:
9455 case X86::SQRTSDm:
9456 case X86::SQRTSDm_Int:
9457 case X86::SQRTSDr:
9458 case X86::SQRTSDr_Int:
9459 case X86::SQRTSSm:
9460 case X86::SQRTSSm_Int:
9461 case X86::SQRTSSr:
9462 case X86::SQRTSSr_Int:
9463 // AVX instructions with high latency
9464 case X86::VDIVPDrm:
9465 case X86::VDIVPDrr:
9466 case X86::VDIVPDYrm:
9467 case X86::VDIVPDYrr:
9468 case X86::VDIVPSrm:
9469 case X86::VDIVPSrr:
9470 case X86::VDIVPSYrm:
9471 case X86::VDIVPSYrr:
9472 case X86::VDIVSDrm:
9473 case X86::VDIVSDrm_Int:
9474 case X86::VDIVSDrr:
9475 case X86::VDIVSDrr_Int:
9476 case X86::VDIVSSrm:
9477 case X86::VDIVSSrm_Int:
9478 case X86::VDIVSSrr:
9479 case X86::VDIVSSrr_Int:
9480 case X86::VSQRTPDm:
9481 case X86::VSQRTPDr:
9482 case X86::VSQRTPDYm:
9483 case X86::VSQRTPDYr:
9484 case X86::VSQRTPSm:
9485 case X86::VSQRTPSr:
9486 case X86::VSQRTPSYm:
9487 case X86::VSQRTPSYr:
9488 case X86::VSQRTSDm:
9489 case X86::VSQRTSDm_Int:
9490 case X86::VSQRTSDr:
9491 case X86::VSQRTSDr_Int:
9492 case X86::VSQRTSSm:
9493 case X86::VSQRTSSm_Int:
9494 case X86::VSQRTSSr:
9495 case X86::VSQRTSSr_Int:
9496 // AVX512 instructions with high latency
9497 case X86::VDIVPDZ128rm:
9498 case X86::VDIVPDZ128rmb:
9499 case X86::VDIVPDZ128rmbk:
9500 case X86::VDIVPDZ128rmbkz:
9501 case X86::VDIVPDZ128rmk:
9502 case X86::VDIVPDZ128rmkz:
9503 case X86::VDIVPDZ128rr:
9504 case X86::VDIVPDZ128rrk:
9505 case X86::VDIVPDZ128rrkz:
9506 case X86::VDIVPDZ256rm:
9507 case X86::VDIVPDZ256rmb:
9508 case X86::VDIVPDZ256rmbk:
9509 case X86::VDIVPDZ256rmbkz:
9510 case X86::VDIVPDZ256rmk:
9511 case X86::VDIVPDZ256rmkz:
9512 case X86::VDIVPDZ256rr:
9513 case X86::VDIVPDZ256rrk:
9514 case X86::VDIVPDZ256rrkz:
9515 case X86::VDIVPDZrrb:
9516 case X86::VDIVPDZrrbk:
9517 case X86::VDIVPDZrrbkz:
9518 case X86::VDIVPDZrm:
9519 case X86::VDIVPDZrmb:
9520 case X86::VDIVPDZrmbk:
9521 case X86::VDIVPDZrmbkz:
9522 case X86::VDIVPDZrmk:
9523 case X86::VDIVPDZrmkz:
9524 case X86::VDIVPDZrr:
9525 case X86::VDIVPDZrrk:
9526 case X86::VDIVPDZrrkz:
9527 case X86::VDIVPSZ128rm:
9528 case X86::VDIVPSZ128rmb:
9529 case X86::VDIVPSZ128rmbk:
9530 case X86::VDIVPSZ128rmbkz:
9531 case X86::VDIVPSZ128rmk:
9532 case X86::VDIVPSZ128rmkz:
9533 case X86::VDIVPSZ128rr:
9534 case X86::VDIVPSZ128rrk:
9535 case X86::VDIVPSZ128rrkz:
9536 case X86::VDIVPSZ256rm:
9537 case X86::VDIVPSZ256rmb:
9538 case X86::VDIVPSZ256rmbk:
9539 case X86::VDIVPSZ256rmbkz:
9540 case X86::VDIVPSZ256rmk:
9541 case X86::VDIVPSZ256rmkz:
9542 case X86::VDIVPSZ256rr:
9543 case X86::VDIVPSZ256rrk:
9544 case X86::VDIVPSZ256rrkz:
9545 case X86::VDIVPSZrrb:
9546 case X86::VDIVPSZrrbk:
9547 case X86::VDIVPSZrrbkz:
9548 case X86::VDIVPSZrm:
9549 case X86::VDIVPSZrmb:
9550 case X86::VDIVPSZrmbk:
9551 case X86::VDIVPSZrmbkz:
9552 case X86::VDIVPSZrmk:
9553 case X86::VDIVPSZrmkz:
9554 case X86::VDIVPSZrr:
9555 case X86::VDIVPSZrrk:
9556 case X86::VDIVPSZrrkz:
9557 case X86::VDIVSDZrm:
9558 case X86::VDIVSDZrr:
9559 case X86::VDIVSDZrm_Int:
9560 case X86::VDIVSDZrmk_Int:
9561 case X86::VDIVSDZrmkz_Int:
9562 case X86::VDIVSDZrr_Int:
9563 case X86::VDIVSDZrrk_Int:
9564 case X86::VDIVSDZrrkz_Int:
9565 case X86::VDIVSDZrrb_Int:
9566 case X86::VDIVSDZrrbk_Int:
9567 case X86::VDIVSDZrrbkz_Int:
9568 case X86::VDIVSSZrm:
9569 case X86::VDIVSSZrr:
9570 case X86::VDIVSSZrm_Int:
9571 case X86::VDIVSSZrmk_Int:
9572 case X86::VDIVSSZrmkz_Int:
9573 case X86::VDIVSSZrr_Int:
9574 case X86::VDIVSSZrrk_Int:
9575 case X86::VDIVSSZrrkz_Int:
9576 case X86::VDIVSSZrrb_Int:
9577 case X86::VDIVSSZrrbk_Int:
9578 case X86::VDIVSSZrrbkz_Int:
9579 case X86::VSQRTPDZ128m:
9580 case X86::VSQRTPDZ128mb:
9581 case X86::VSQRTPDZ128mbk:
9582 case X86::VSQRTPDZ128mbkz:
9583 case X86::VSQRTPDZ128mk:
9584 case X86::VSQRTPDZ128mkz:
9585 case X86::VSQRTPDZ128r:
9586 case X86::VSQRTPDZ128rk:
9587 case X86::VSQRTPDZ128rkz:
9588 case X86::VSQRTPDZ256m:
9589 case X86::VSQRTPDZ256mb:
9590 case X86::VSQRTPDZ256mbk:
9591 case X86::VSQRTPDZ256mbkz:
9592 case X86::VSQRTPDZ256mk:
9593 case X86::VSQRTPDZ256mkz:
9594 case X86::VSQRTPDZ256r:
9595 case X86::VSQRTPDZ256rk:
9596 case X86::VSQRTPDZ256rkz:
9597 case X86::VSQRTPDZm:
9598 case X86::VSQRTPDZmb:
9599 case X86::VSQRTPDZmbk:
9600 case X86::VSQRTPDZmbkz:
9601 case X86::VSQRTPDZmk:
9602 case X86::VSQRTPDZmkz:
9603 case X86::VSQRTPDZr:
9604 case X86::VSQRTPDZrb:
9605 case X86::VSQRTPDZrbk:
9606 case X86::VSQRTPDZrbkz:
9607 case X86::VSQRTPDZrk:
9608 case X86::VSQRTPDZrkz:
9609 case X86::VSQRTPSZ128m:
9610 case X86::VSQRTPSZ128mb:
9611 case X86::VSQRTPSZ128mbk:
9612 case X86::VSQRTPSZ128mbkz:
9613 case X86::VSQRTPSZ128mk:
9614 case X86::VSQRTPSZ128mkz:
9615 case X86::VSQRTPSZ128r:
9616 case X86::VSQRTPSZ128rk:
9617 case X86::VSQRTPSZ128rkz:
9618 case X86::VSQRTPSZ256m:
9619 case X86::VSQRTPSZ256mb:
9620 case X86::VSQRTPSZ256mbk:
9621 case X86::VSQRTPSZ256mbkz:
9622 case X86::VSQRTPSZ256mk:
9623 case X86::VSQRTPSZ256mkz:
9624 case X86::VSQRTPSZ256r:
9625 case X86::VSQRTPSZ256rk:
9626 case X86::VSQRTPSZ256rkz:
9627 case X86::VSQRTPSZm:
9628 case X86::VSQRTPSZmb:
9629 case X86::VSQRTPSZmbk:
9630 case X86::VSQRTPSZmbkz:
9631 case X86::VSQRTPSZmk:
9632 case X86::VSQRTPSZmkz:
9633 case X86::VSQRTPSZr:
9634 case X86::VSQRTPSZrb:
9635 case X86::VSQRTPSZrbk:
9636 case X86::VSQRTPSZrbkz:
9637 case X86::VSQRTPSZrk:
9638 case X86::VSQRTPSZrkz:
9639 case X86::VSQRTSDZm:
9640 case X86::VSQRTSDZm_Int:
9641 case X86::VSQRTSDZmk_Int:
9642 case X86::VSQRTSDZmkz_Int:
9643 case X86::VSQRTSDZr:
9644 case X86::VSQRTSDZr_Int:
9645 case X86::VSQRTSDZrk_Int:
9646 case X86::VSQRTSDZrkz_Int:
9647 case X86::VSQRTSDZrb_Int:
9648 case X86::VSQRTSDZrbk_Int:
9649 case X86::VSQRTSDZrbkz_Int:
9650 case X86::VSQRTSSZm:
9651 case X86::VSQRTSSZm_Int:
9652 case X86::VSQRTSSZmk_Int:
9653 case X86::VSQRTSSZmkz_Int:
9654 case X86::VSQRTSSZr:
9655 case X86::VSQRTSSZr_Int:
9656 case X86::VSQRTSSZrk_Int:
9657 case X86::VSQRTSSZrkz_Int:
9658 case X86::VSQRTSSZrb_Int:
9659 case X86::VSQRTSSZrbk_Int:
9660 case X86::VSQRTSSZrbkz_Int:
9661
9662 case X86::VGATHERDPDYrm:
9663 case X86::VGATHERDPDZ128rm:
9664 case X86::VGATHERDPDZ256rm:
9665 case X86::VGATHERDPDZrm:
9666 case X86::VGATHERDPDrm:
9667 case X86::VGATHERDPSYrm:
9668 case X86::VGATHERDPSZ128rm:
9669 case X86::VGATHERDPSZ256rm:
9670 case X86::VGATHERDPSZrm:
9671 case X86::VGATHERDPSrm:
9672 case X86::VGATHERPF0DPDm:
9673 case X86::VGATHERPF0DPSm:
9674 case X86::VGATHERPF0QPDm:
9675 case X86::VGATHERPF0QPSm:
9676 case X86::VGATHERPF1DPDm:
9677 case X86::VGATHERPF1DPSm:
9678 case X86::VGATHERPF1QPDm:
9679 case X86::VGATHERPF1QPSm:
9680 case X86::VGATHERQPDYrm:
9681 case X86::VGATHERQPDZ128rm:
9682 case X86::VGATHERQPDZ256rm:
9683 case X86::VGATHERQPDZrm:
9684 case X86::VGATHERQPDrm:
9685 case X86::VGATHERQPSYrm:
9686 case X86::VGATHERQPSZ128rm:
9687 case X86::VGATHERQPSZ256rm:
9688 case X86::VGATHERQPSZrm:
9689 case X86::VGATHERQPSrm:
9690 case X86::VPGATHERDDYrm:
9691 case X86::VPGATHERDDZ128rm:
9692 case X86::VPGATHERDDZ256rm:
9693 case X86::VPGATHERDDZrm:
9694 case X86::VPGATHERDDrm:
9695 case X86::VPGATHERDQYrm:
9696 case X86::VPGATHERDQZ128rm:
9697 case X86::VPGATHERDQZ256rm:
9698 case X86::VPGATHERDQZrm:
9699 case X86::VPGATHERDQrm:
9700 case X86::VPGATHERQDYrm:
9701 case X86::VPGATHERQDZ128rm:
9702 case X86::VPGATHERQDZ256rm:
9703 case X86::VPGATHERQDZrm:
9704 case X86::VPGATHERQDrm:
9705 case X86::VPGATHERQQYrm:
9706 case X86::VPGATHERQQZ128rm:
9707 case X86::VPGATHERQQZ256rm:
9708 case X86::VPGATHERQQZrm:
9709 case X86::VPGATHERQQrm:
9710 case X86::VSCATTERDPDZ128mr:
9711 case X86::VSCATTERDPDZ256mr:
9712 case X86::VSCATTERDPDZmr:
9713 case X86::VSCATTERDPSZ128mr:
9714 case X86::VSCATTERDPSZ256mr:
9715 case X86::VSCATTERDPSZmr:
9716 case X86::VSCATTERPF0DPDm:
9717 case X86::VSCATTERPF0DPSm:
9718 case X86::VSCATTERPF0QPDm:
9719 case X86::VSCATTERPF0QPSm:
9720 case X86::VSCATTERPF1DPDm:
9721 case X86::VSCATTERPF1DPSm:
9722 case X86::VSCATTERPF1QPDm:
9723 case X86::VSCATTERPF1QPSm:
9724 case X86::VSCATTERQPDZ128mr:
9725 case X86::VSCATTERQPDZ256mr:
9726 case X86::VSCATTERQPDZmr:
9727 case X86::VSCATTERQPSZ128mr:
9728 case X86::VSCATTERQPSZ256mr:
9729 case X86::VSCATTERQPSZmr:
9730 case X86::VPSCATTERDDZ128mr:
9731 case X86::VPSCATTERDDZ256mr:
9732 case X86::VPSCATTERDDZmr:
9733 case X86::VPSCATTERDQZ128mr:
9734 case X86::VPSCATTERDQZ256mr:
9735 case X86::VPSCATTERDQZmr:
9736 case X86::VPSCATTERQDZ128mr:
9737 case X86::VPSCATTERQDZ256mr:
9738 case X86::VPSCATTERQDZmr:
9739 case X86::VPSCATTERQQZ128mr:
9740 case X86::VPSCATTERQQZ256mr:
9741 case X86::VPSCATTERQQZmr:
9742 return true;
9743 }
9744}
9745
9746bool X86InstrInfo::hasHighOperandLatency(const TargetSchedModel &SchedModel,
9747 const MachineRegisterInfo *MRI,
9748 const MachineInstr &DefMI,
9749 unsigned DefIdx,
9750 const MachineInstr &UseMI,
9751 unsigned UseIdx) const {
9752 return isHighLatencyDef(DefMI.getOpcode());
9753}
9754
9755bool X86InstrInfo::hasReassociableOperands(const MachineInstr &Inst,
9756 const MachineBasicBlock *MBB) const {
9757 assert(Inst.getNumExplicitOperands() == 3 && Inst.getNumExplicitDefs() == 1 &&
9758 Inst.getNumDefs() <= 2 && "Reassociation needs binary operators");
9759
9760 // Integer binary math/logic instructions have a third source operand:
9761 // the EFLAGS register. That operand must be both defined here and never
9762 // used; ie, it must be dead. If the EFLAGS operand is live, then we can
9763 // not change anything because rearranging the operands could affect other
9764 // instructions that depend on the exact status flags (zero, sign, etc.)
9765 // that are set by using these particular operands with this operation.
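// Illustrative note (not part of the original file): in a sequence such as
// %2 = ADD32rr %0, %1, implicit-def $eflags followed by a JCC that reads
// $eflags, the flags are live, and reassociating the adds would change which
// intermediate sum sets them; only a dead $eflags def is safe to rearrange.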
9766 const MachineOperand *FlagDef =
9767 Inst.findRegisterDefOperand(X86::EFLAGS, /*TRI=*/nullptr);
9768 assert((Inst.getNumDefs() == 1 || FlagDef) && "Implicit def isn't flags?");
9769 if (FlagDef && !FlagDef->isDead())
9770 return false;
9771
9772 return TargetInstrInfo::hasReassociableOperands(Inst, MBB);
9773}
9774
9775// TODO: There are many more machine instruction opcodes to match:
9776// 1. Other data types (integer, vectors)
9777// 2. Other math / logic operations (xor, or)
9778// 3. Other forms of the same operation (intrinsics and other variants)
9779bool X86InstrInfo::isAssociativeAndCommutative(const MachineInstr &Inst,
9780 bool Invert) const {
9781 if (Invert)
9782 return false;
9783 switch (Inst.getOpcode()) {
9784 CASE_ND(ADD8rr)
9785 CASE_ND(ADD16rr)
9786 CASE_ND(ADD32rr)
9787 CASE_ND(ADD64rr)
9788 CASE_ND(AND8rr)
9789 CASE_ND(AND16rr)
9790 CASE_ND(AND32rr)
9791 CASE_ND(AND64rr)
9792 CASE_ND(OR8rr)
9793 CASE_ND(OR16rr)
9794 CASE_ND(OR32rr)
9795 CASE_ND(OR64rr)
9796 CASE_ND(XOR8rr)
9797 CASE_ND(XOR16rr)
9798 CASE_ND(XOR32rr)
9799 CASE_ND(XOR64rr)
9800 CASE_ND(IMUL16rr)
9801 CASE_ND(IMUL32rr)
9802 CASE_ND(IMUL64rr)
9803 case X86::PANDrr:
9804 case X86::PORrr:
9805 case X86::PXORrr:
9806 case X86::ANDPDrr:
9807 case X86::ANDPSrr:
9808 case X86::ORPDrr:
9809 case X86::ORPSrr:
9810 case X86::XORPDrr:
9811 case X86::XORPSrr:
9812 case X86::PADDBrr:
9813 case X86::PADDWrr:
9814 case X86::PADDDrr:
9815 case X86::PADDQrr:
9816 case X86::PMULLWrr:
9817 case X86::PMULLDrr:
9818 case X86::PMAXSBrr:
9819 case X86::PMAXSDrr:
9820 case X86::PMAXSWrr:
9821 case X86::PMAXUBrr:
9822 case X86::PMAXUDrr:
9823 case X86::PMAXUWrr:
9824 case X86::PMINSBrr:
9825 case X86::PMINSDrr:
9826 case X86::PMINSWrr:
9827 case X86::PMINUBrr:
9828 case X86::PMINUDrr:
9829 case X86::PMINUWrr:
9830 case X86::VPANDrr:
9831 case X86::VPANDYrr:
9832 case X86::VPANDDZ128rr:
9833 case X86::VPANDDZ256rr:
9834 case X86::VPANDDZrr:
9835 case X86::VPANDQZ128rr:
9836 case X86::VPANDQZ256rr:
9837 case X86::VPANDQZrr:
9838 case X86::VPORrr:
9839 case X86::VPORYrr:
9840 case X86::VPORDZ128rr:
9841 case X86::VPORDZ256rr:
9842 case X86::VPORDZrr:
9843 case X86::VPORQZ128rr:
9844 case X86::VPORQZ256rr:
9845 case X86::VPORQZrr:
9846 case X86::VPXORrr:
9847 case X86::VPXORYrr:
9848 case X86::VPXORDZ128rr:
9849 case X86::VPXORDZ256rr:
9850 case X86::VPXORDZrr:
9851 case X86::VPXORQZ128rr:
9852 case X86::VPXORQZ256rr:
9853 case X86::VPXORQZrr:
9854 case X86::VANDPDrr:
9855 case X86::VANDPSrr:
9856 case X86::VANDPDYrr:
9857 case X86::VANDPSYrr:
9858 case X86::VANDPDZ128rr:
9859 case X86::VANDPSZ128rr:
9860 case X86::VANDPDZ256rr:
9861 case X86::VANDPSZ256rr:
9862 case X86::VANDPDZrr:
9863 case X86::VANDPSZrr:
9864 case X86::VORPDrr:
9865 case X86::VORPSrr:
9866 case X86::VORPDYrr:
9867 case X86::VORPSYrr:
9868 case X86::VORPDZ128rr:
9869 case X86::VORPSZ128rr:
9870 case X86::VORPDZ256rr:
9871 case X86::VORPSZ256rr:
9872 case X86::VORPDZrr:
9873 case X86::VORPSZrr:
9874 case X86::VXORPDrr:
9875 case X86::VXORPSrr:
9876 case X86::VXORPDYrr:
9877 case X86::VXORPSYrr:
9878 case X86::VXORPDZ128rr:
9879 case X86::VXORPSZ128rr:
9880 case X86::VXORPDZ256rr:
9881 case X86::VXORPSZ256rr:
9882 case X86::VXORPDZrr:
9883 case X86::VXORPSZrr:
9884 case X86::KADDBkk:
9885 case X86::KADDWkk:
9886 case X86::KADDDkk:
9887 case X86::KADDQkk:
9888 case X86::KANDBkk:
9889 case X86::KANDWkk:
9890 case X86::KANDDkk:
9891 case X86::KANDQkk:
9892 case X86::KORBkk:
9893 case X86::KORWkk:
9894 case X86::KORDkk:
9895 case X86::KORQkk:
9896 case X86::KXORBkk:
9897 case X86::KXORWkk:
9898 case X86::KXORDkk:
9899 case X86::KXORQkk:
9900 case X86::VPADDBrr:
9901 case X86::VPADDWrr:
9902 case X86::VPADDDrr:
9903 case X86::VPADDQrr:
9904 case X86::VPADDBYrr:
9905 case X86::VPADDWYrr:
9906 case X86::VPADDDYrr:
9907 case X86::VPADDQYrr:
9908 case X86::VPADDBZ128rr:
9909 case X86::VPADDWZ128rr:
9910 case X86::VPADDDZ128rr:
9911 case X86::VPADDQZ128rr:
9912 case X86::VPADDBZ256rr:
9913 case X86::VPADDWZ256rr:
9914 case X86::VPADDDZ256rr:
9915 case X86::VPADDQZ256rr:
9916 case X86::VPADDBZrr:
9917 case X86::VPADDWZrr:
9918 case X86::VPADDDZrr:
9919 case X86::VPADDQZrr:
9920 case X86::VPMULLWrr:
9921 case X86::VPMULLWYrr:
9922 case X86::VPMULLWZ128rr:
9923 case X86::VPMULLWZ256rr:
9924 case X86::VPMULLWZrr:
9925 case X86::VPMULLDrr:
9926 case X86::VPMULLDYrr:
9927 case X86::VPMULLDZ128rr:
9928 case X86::VPMULLDZ256rr:
9929 case X86::VPMULLDZrr:
9930 case X86::VPMULLQZ128rr:
9931 case X86::VPMULLQZ256rr:
9932 case X86::VPMULLQZrr:
9933 case X86::VPMAXSBrr:
9934 case X86::VPMAXSBYrr:
9935 case X86::VPMAXSBZ128rr:
9936 case X86::VPMAXSBZ256rr:
9937 case X86::VPMAXSBZrr:
9938 case X86::VPMAXSDrr:
9939 case X86::VPMAXSDYrr:
9940 case X86::VPMAXSDZ128rr:
9941 case X86::VPMAXSDZ256rr:
9942 case X86::VPMAXSDZrr:
9943 case X86::VPMAXSQZ128rr:
9944 case X86::VPMAXSQZ256rr:
9945 case X86::VPMAXSQZrr:
9946 case X86::VPMAXSWrr:
9947 case X86::VPMAXSWYrr:
9948 case X86::VPMAXSWZ128rr:
9949 case X86::VPMAXSWZ256rr:
9950 case X86::VPMAXSWZrr:
9951 case X86::VPMAXUBrr:
9952 case X86::VPMAXUBYrr:
9953 case X86::VPMAXUBZ128rr:
9954 case X86::VPMAXUBZ256rr:
9955 case X86::VPMAXUBZrr:
9956 case X86::VPMAXUDrr:
9957 case X86::VPMAXUDYrr:
9958 case X86::VPMAXUDZ128rr:
9959 case X86::VPMAXUDZ256rr:
9960 case X86::VPMAXUDZrr:
9961 case X86::VPMAXUQZ128rr:
9962 case X86::VPMAXUQZ256rr:
9963 case X86::VPMAXUQZrr:
9964 case X86::VPMAXUWrr:
9965 case X86::VPMAXUWYrr:
9966 case X86::VPMAXUWZ128rr:
9967 case X86::VPMAXUWZ256rr:
9968 case X86::VPMAXUWZrr:
9969 case X86::VPMINSBrr:
9970 case X86::VPMINSBYrr:
9971 case X86::VPMINSBZ128rr:
9972 case X86::VPMINSBZ256rr:
9973 case X86::VPMINSBZrr:
9974 case X86::VPMINSDrr:
9975 case X86::VPMINSDYrr:
9976 case X86::VPMINSDZ128rr:
9977 case X86::VPMINSDZ256rr:
9978 case X86::VPMINSDZrr:
9979 case X86::VPMINSQZ128rr:
9980 case X86::VPMINSQZ256rr:
9981 case X86::VPMINSQZrr:
9982 case X86::VPMINSWrr:
9983 case X86::VPMINSWYrr:
9984 case X86::VPMINSWZ128rr:
9985 case X86::VPMINSWZ256rr:
9986 case X86::VPMINSWZrr:
9987 case X86::VPMINUBrr:
9988 case X86::VPMINUBYrr:
9989 case X86::VPMINUBZ128rr:
9990 case X86::VPMINUBZ256rr:
9991 case X86::VPMINUBZrr:
9992 case X86::VPMINUDrr:
9993 case X86::VPMINUDYrr:
9994 case X86::VPMINUDZ128rr:
9995 case X86::VPMINUDZ256rr:
9996 case X86::VPMINUDZrr:
9997 case X86::VPMINUQZ128rr:
9998 case X86::VPMINUQZ256rr:
9999 case X86::VPMINUQZrr:
10000 case X86::VPMINUWrr:
10001 case X86::VPMINUWYrr:
10002 case X86::VPMINUWZ128rr:
10003 case X86::VPMINUWZ256rr:
10004 case X86::VPMINUWZrr:
10005 // Normal min/max instructions are not commutative because of NaN and signed
10006 // zero semantics, but these are. Thus, there's no need to check for global
10007 // relaxed math; the instructions themselves have the properties we need.
10008 case X86::MAXCPDrr:
10009 case X86::MAXCPSrr:
10010 case X86::MAXCSDrr:
10011 case X86::MAXCSSrr:
10012 case X86::MINCPDrr:
10013 case X86::MINCPSrr:
10014 case X86::MINCSDrr:
10015 case X86::MINCSSrr:
10016 case X86::VMAXCPDrr:
10017 case X86::VMAXCPSrr:
10018 case X86::VMAXCPDYrr:
10019 case X86::VMAXCPSYrr:
10020 case X86::VMAXCPDZ128rr:
10021 case X86::VMAXCPSZ128rr:
10022 case X86::VMAXCPDZ256rr:
10023 case X86::VMAXCPSZ256rr:
10024 case X86::VMAXCPDZrr:
10025 case X86::VMAXCPSZrr:
10026 case X86::VMAXCSDrr:
10027 case X86::VMAXCSSrr:
10028 case X86::VMAXCSDZrr:
10029 case X86::VMAXCSSZrr:
10030 case X86::VMINCPDrr:
10031 case X86::VMINCPSrr:
10032 case X86::VMINCPDYrr:
10033 case X86::VMINCPSYrr:
10034 case X86::VMINCPDZ128rr:
10035 case X86::VMINCPSZ128rr:
10036 case X86::VMINCPDZ256rr:
10037 case X86::VMINCPSZ256rr:
10038 case X86::VMINCPDZrr:
10039 case X86::VMINCPSZrr:
10040 case X86::VMINCSDrr:
10041 case X86::VMINCSSrr:
10042 case X86::VMINCSDZrr:
10043 case X86::VMINCSSZrr:
10044 case X86::VMAXCPHZ128rr:
10045 case X86::VMAXCPHZ256rr:
10046 case X86::VMAXCPHZrr:
10047 case X86::VMAXCSHZrr:
10048 case X86::VMINCPHZ128rr:
10049 case X86::VMINCPHZ256rr:
10050 case X86::VMINCPHZrr:
10051 case X86::VMINCSHZrr:
10052 return true;
10053 case X86::ADDPDrr:
10054 case X86::ADDPSrr:
10055 case X86::ADDSDrr:
10056 case X86::ADDSSrr:
10057 case X86::MULPDrr:
10058 case X86::MULPSrr:
10059 case X86::MULSDrr:
10060 case X86::MULSSrr:
10061 case X86::VADDPDrr:
10062 case X86::VADDPSrr:
10063 case X86::VADDPDYrr:
10064 case X86::VADDPSYrr:
10065 case X86::VADDPDZ128rr:
10066 case X86::VADDPSZ128rr:
10067 case X86::VADDPDZ256rr:
10068 case X86::VADDPSZ256rr:
10069 case X86::VADDPDZrr:
10070 case X86::VADDPSZrr:
10071 case X86::VADDSDrr:
10072 case X86::VADDSSrr:
10073 case X86::VADDSDZrr:
10074 case X86::VADDSSZrr:
10075 case X86::VMULPDrr:
10076 case X86::VMULPSrr:
10077 case X86::VMULPDYrr:
10078 case X86::VMULPSYrr:
10079 case X86::VMULPDZ128rr:
10080 case X86::VMULPSZ128rr:
10081 case X86::VMULPDZ256rr:
10082 case X86::VMULPSZ256rr:
10083 case X86::VMULPDZrr:
10084 case X86::VMULPSZrr:
10085 case X86::VMULSDrr:
10086 case X86::VMULSSrr:
10087 case X86::VMULSDZrr:
10088 case X86::VMULSSZrr:
10089 case X86::VADDPHZ128rr:
10090 case X86::VADDPHZ256rr:
10091 case X86::VADDPHZrr:
10092 case X86::VADDSHZrr:
10093 case X86::VMULPHZ128rr:
10094 case X86::VMULPHZ256rr:
10095 case X86::VMULPHZrr:
10096 case X86::VMULSHZrr:
10097 return Inst.getFlag(MachineInstr::MIFlag::FmReassoc) &&
10098 Inst.getFlag(MachineInstr::MIFlag::FmNsz);
10099 default:
10100 return false;
10101 }
10102}
10103
10104/// If \p DescribedReg overlaps with the MOVrr instruction's destination
10105/// register then, if possible, describe the value in terms of the source
10106/// register.
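/// For example (illustrative, not part of the original file): given
/// $rax = MOV64rr $rbx, describing $rax yields $rbx directly, and describing
/// $eax yields $ebx through the matching sub-register of the source.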
10107static std::optional<ParamLoadedValue>
10108describeMOVrrLoadedValue(const MachineInstr &MI, Register DescribedReg,
10109 const TargetRegisterInfo *TRI) {
10110 Register DestReg = MI.getOperand(0).getReg();
10111 Register SrcReg = MI.getOperand(1).getReg();
10112
10113 auto Expr = DIExpression::get(MI.getMF()->getFunction().getContext(), {});
10114
10115 // If the described register is the destination, just return the source.
10116 if (DestReg == DescribedReg)
10117 return ParamLoadedValue(MachineOperand::CreateReg(SrcReg, false), Expr);
10118
10119 // If the described register is a sub-register of the destination register,
10120 // then pick out the source register's corresponding sub-register.
10121 if (unsigned SubRegIdx = TRI->getSubRegIndex(DestReg, DescribedReg)) {
10122 Register SrcSubReg = TRI->getSubReg(SrcReg, SubRegIdx);
10123 return ParamLoadedValue(MachineOperand::CreateReg(SrcSubReg, false), Expr);
10124 }
10125
10126 // The remaining case to consider is when the described register is a
10127 // super-register of the destination register. MOV8rr and MOV16rr do not
10128 // write to any of the other bytes in the register, meaning that we'd have to
10129 // describe the value using a combination of the source register and the
10130 // non-overlapping bits in the described register, which is not currently
10131 // possible.
10132 if (MI.getOpcode() == X86::MOV8rr || MI.getOpcode() == X86::MOV16rr ||
10133 !TRI->isSuperRegister(DestReg, DescribedReg))
10134 return std::nullopt;
10135
10136 assert(MI.getOpcode() == X86::MOV32rr && "Unexpected super-register case");
10137 return ParamLoadedValue(MachineOperand::CreateReg(SrcReg, false), Expr);
10138}
10139
10140std::optional<ParamLoadedValue>
10141X86InstrInfo::describeLoadedValue(const MachineInstr &MI, Register Reg) const {
10142 const MachineOperand *Op = nullptr;
10143 DIExpression *Expr = nullptr;
10144
10145 const TargetRegisterInfo *TRI = &getRegisterInfo();
10146
10147 switch (MI.getOpcode()) {
10148 case X86::LEA32r:
10149 case X86::LEA64r:
10150 case X86::LEA64_32r: {
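// Illustrative note (not part of the original file): for
// $rdi = LEA64r $rbx, 2, $rcx, 16, $noreg the code below returns the base
// register $rbx together with the DWARF expression
// {DW_OP_breg2 ($rcx) 0, DW_OP_constu 2, DW_OP_mul, DW_OP_plus,
// DW_OP_plus_uconst 16}, i.e. the loaded value is $rbx + $rcx * 2 + 16.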
10151 // We may need to describe a 64-bit parameter with a 32-bit LEA.
10152 if (!TRI->isSuperRegisterEq(MI.getOperand(0).getReg(), Reg))
10153 return std::nullopt;
10154
10155 // Operand 4 could be a global address. For now we do not support
10156 // that situation.
10157 if (!MI.getOperand(4).isImm() || !MI.getOperand(2).isImm())
10158 return std::nullopt;
10159
10160 const MachineOperand &Op1 = MI.getOperand(1);
10161 const MachineOperand &Op2 = MI.getOperand(3);
10162 assert(Op2.isReg() &&
10163 (Op2.getReg() == X86::NoRegister || Op2.getReg().isPhysical()));
10164
10165 // Omit situations like:
10166 // %rsi = lea %rsi, 4, ...
10167 if ((Op1.isReg() && Op1.getReg() == MI.getOperand(0).getReg()) ||
10168 Op2.getReg() == MI.getOperand(0).getReg())
10169 return std::nullopt;
10170 else if ((Op1.isReg() && Op1.getReg() != X86::NoRegister &&
10171 TRI->regsOverlap(Op1.getReg(), MI.getOperand(0).getReg())) ||
10172 (Op2.getReg() != X86::NoRegister &&
10173 TRI->regsOverlap(Op2.getReg(), MI.getOperand(0).getReg())))
10174 return std::nullopt;
10175
10176 int64_t Coef = MI.getOperand(2).getImm();
10177 int64_t Offset = MI.getOperand(4).getImm();
10178 SmallVector<uint64_t, 8> Ops;
10179
10180 if ((Op1.isReg() && Op1.getReg() != X86::NoRegister)) {
10181 Op = &Op1;
10182 } else if (Op1.isFI())
10183 Op = &Op1;
10184
10185 if (Op && Op->isReg() && Op->getReg() == Op2.getReg() && Coef > 0) {
10186 Ops.push_back(dwarf::DW_OP_constu);
10187 Ops.push_back(Coef + 1);
10188 Ops.push_back(dwarf::DW_OP_mul);
10189 } else {
10190 if (Op && Op2.getReg() != X86::NoRegister) {
10191 int dwarfReg = TRI->getDwarfRegNum(Op2.getReg(), false);
10192 if (dwarfReg < 0)
10193 return std::nullopt;
10194 else if (dwarfReg < 32) {
10195 Ops.push_back(dwarf::DW_OP_breg0 + dwarfReg);
10196 Ops.push_back(0);
10197 } else {
10198 Ops.push_back(dwarf::DW_OP_bregx);
10199 Ops.push_back(dwarfReg);
10200 Ops.push_back(0);
10201 }
10202 } else if (!Op) {
10203 assert(Op2.getReg() != X86::NoRegister);
10204 Op = &Op2;
10205 }
10206
10207 if (Coef > 1) {
10208 assert(Op2.getReg() != X86::NoRegister);
10209 Ops.push_back(dwarf::DW_OP_constu);
10210 Ops.push_back(Coef);
10211 Ops.push_back(dwarf::DW_OP_mul);
10212 }
10213
10214 if (((Op1.isReg() && Op1.getReg() != X86::NoRegister) || Op1.isFI()) &&
10215 Op2.getReg() != X86::NoRegister) {
10216 Ops.push_back(dwarf::DW_OP_plus);
10217 }
10218 }
10219
10220 DIExpression::appendOffset(Ops, Offset);
10221 Expr = DIExpression::get(MI.getMF()->getFunction().getContext(), Ops);
10222
10223 return ParamLoadedValue(*Op, Expr);
10224 }
10225 case X86::MOV8ri:
10226 case X86::MOV16ri:
10227 // TODO: Handle MOV8ri and MOV16ri.
10228 return std::nullopt;
10229 case X86::MOV32ri:
10230 case X86::MOV64ri:
10231 case X86::MOV64ri32:
10232 // MOV32ri may be used for producing zero-extended 32-bit immediates in
10233 // 64-bit parameters, so we need to consider super-registers.
10234 if (!TRI->isSuperRegisterEq(MI.getOperand(0).getReg(), Reg))
10235 return std::nullopt;
10236 return ParamLoadedValue(MI.getOperand(1), Expr);
10237 case X86::MOV8rr:
10238 case X86::MOV16rr:
10239 case X86::MOV32rr:
10240 case X86::MOV64rr:
10241 return describeMOVrrLoadedValue(MI, Reg, TRI);
10242 case X86::XOR32rr: {
10243 // 64-bit parameters are zero-materialized using XOR32rr, so also consider
10244 // super-registers.
10245 if (!TRI->isSuperRegisterEq(MI.getOperand(0).getReg(), Reg))
10246 return std::nullopt;
10247 if (MI.getOperand(1).getReg() == MI.getOperand(2).getReg())
10248 return ParamLoadedValue(MachineOperand::CreateImm(0), Expr);
10249 return std::nullopt;
10250 }
10251 case X86::MOVSX64rr32: {
10252 // We may need to describe the lower 32 bits of the MOVSX; for example, in
10253 // cases like this:
10254 //
10255 // $ebx = [...]
10256 // $rdi = MOVSX64rr32 $ebx
10257 // $esi = MOV32rr $edi
10258 if (!TRI->isSubRegisterEq(MI.getOperand(0).getReg(), Reg))
10259 return std::nullopt;
10260
10261 Expr = DIExpression::get(MI.getMF()->getFunction().getContext(), {});
10262
10263 // If the described register is the destination register we need to
10264 // sign-extend the source register from 32 bits. The other case we handle
10265 // is when the described register is the 32-bit sub-register of the
10266 // destination register, in which case we just need to return the source
10267 // register.
10268 if (Reg == MI.getOperand(0).getReg())
10269 Expr = DIExpression::appendExt(Expr, 32, 64, true);
10270 else
10271 assert(X86MCRegisterClasses[X86::GR32RegClassID].contains(Reg) &&
10272 "Unhandled sub-register case for MOVSX64rr32");
10273
10274 return ParamLoadedValue(MI.getOperand(1), Expr);
10275 }
10276 default:
10277 assert(!MI.isMoveImmediate() && "Unexpected MoveImm instruction");
10278 return TargetInstrInfo::describeLoadedValue(MI, Reg);
10279 }
10280}
10281
10282/// This is an architecture-specific helper function of reassociateOps.
10283/// Set special operand attributes for new instructions after reassociation.
10284void X86InstrInfo::setSpecialOperandAttr(MachineInstr &OldMI1,
10285 MachineInstr &OldMI2,
10286 MachineInstr &NewMI1,
10287 MachineInstr &NewMI2) const {
10288 // Integer instructions may define an implicit EFLAGS dest register operand.
10289 MachineOperand *OldFlagDef1 =
10290 OldMI1.findRegisterDefOperand(X86::EFLAGS, /*TRI=*/nullptr);
10291 MachineOperand *OldFlagDef2 =
10292 OldMI2.findRegisterDefOperand(X86::EFLAGS, /*TRI=*/nullptr);
10293
10294 assert(!OldFlagDef1 == !OldFlagDef2 &&
10295 "Unexpected instruction type for reassociation");
10296
10297 if (!OldFlagDef1 || !OldFlagDef2)
10298 return;
10299
10300 assert(OldFlagDef1->isDead() && OldFlagDef2->isDead() &&
10301 "Must have dead EFLAGS operand in reassociable instruction");
10302
10303 MachineOperand *NewFlagDef1 =
10304 NewMI1.findRegisterDefOperand(X86::EFLAGS, /*TRI=*/nullptr);
10305 MachineOperand *NewFlagDef2 =
10306 NewMI2.findRegisterDefOperand(X86::EFLAGS, /*TRI=*/nullptr);
10307
10308 assert(NewFlagDef1 && NewFlagDef2 &&
10309 "Unexpected operand in reassociable instruction");
10310
10311 // Mark the new EFLAGS operands as dead to be helpful to subsequent iterations
10312 // of this pass or other passes. The EFLAGS operands must be dead in these new
10313 // instructions because the EFLAGS operands in the original instructions must
10314 // be dead in order for reassociation to occur.
10315 NewFlagDef1->setIsDead();
10316 NewFlagDef2->setIsDead();
10317}
10318
10319std::pair<unsigned, unsigned>
10320X86InstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const {
10321 return std::make_pair(TF, 0u);
10322}
10323
10324ArrayRef<std::pair<unsigned, const char *>>
10325X86InstrInfo::getSerializableDirectMachineOperandTargetFlags() const {
10326 using namespace X86II;
10327 static const std::pair<unsigned, const char *> TargetFlags[] = {
10328 {MO_GOT_ABSOLUTE_ADDRESS, "x86-got-absolute-address"},
10329 {MO_PIC_BASE_OFFSET, "x86-pic-base-offset"},
10330 {MO_GOT, "x86-got"},
10331 {MO_GOTOFF, "x86-gotoff"},
10332 {MO_GOTPCREL, "x86-gotpcrel"},
10333 {MO_GOTPCREL_NORELAX, "x86-gotpcrel-norelax"},
10334 {MO_PLT, "x86-plt"},
10335 {MO_TLSGD, "x86-tlsgd"},
10336 {MO_TLSLD, "x86-tlsld"},
10337 {MO_TLSLDM, "x86-tlsldm"},
10338 {MO_GOTTPOFF, "x86-gottpoff"},
10339 {MO_INDNTPOFF, "x86-indntpoff"},
10340 {MO_TPOFF, "x86-tpoff"},
10341 {MO_DTPOFF, "x86-dtpoff"},
10342 {MO_NTPOFF, "x86-ntpoff"},
10343 {MO_GOTNTPOFF, "x86-gotntpoff"},
10344 {MO_DLLIMPORT, "x86-dllimport"},
10345 {MO_DARWIN_NONLAZY, "x86-darwin-nonlazy"},
10346 {MO_DARWIN_NONLAZY_PIC_BASE, "x86-darwin-nonlazy-pic-base"},
10347 {MO_TLVP, "x86-tlvp"},
10348 {MO_TLVP_PIC_BASE, "x86-tlvp-pic-base"},
10349 {MO_SECREL, "x86-secrel"},
10350 {MO_COFFSTUB, "x86-coffstub"}};
10351 return ArrayRef(TargetFlags);
10352}
10353
10354namespace {
10355/// Create Global Base Reg pass. This initializes the PIC
10356/// global base register for x86-32.
10357struct CGBR : public MachineFunctionPass {
10358 static char ID;
10359 CGBR() : MachineFunctionPass(ID) {}
10360
10361 bool runOnMachineFunction(MachineFunction &MF) override {
10362 const X86TargetMachine *TM =
10363 static_cast<const X86TargetMachine *>(&MF.getTarget());
10364 const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
10365
10366 // Only emit a global base reg in PIC mode.
10367 if (!TM->isPositionIndependent())
10368 return false;
10369
10370 X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
10371 Register GlobalBaseReg = X86FI->getGlobalBaseReg();
10372
10373 // If we didn't need a GlobalBaseReg, don't insert code.
10374 if (GlobalBaseReg == 0)
10375 return false;
10376
10377 // Insert the set of GlobalBaseReg into the first MBB of the function
10378 MachineBasicBlock &FirstMBB = MF.front();
10379 MachineBasicBlock::iterator MBBI = FirstMBB.begin();
10380 DebugLoc DL = FirstMBB.findDebugLoc(MBBI);
10381 MachineRegisterInfo &RegInfo = MF.getRegInfo();
10382 const X86InstrInfo *TII = STI.getInstrInfo();
10383
10384 Register PC;
10385 if (STI.isPICStyleGOT())
10386 PC = RegInfo.createVirtualRegister(&X86::GR32RegClass);
10387 else
10388 PC = GlobalBaseReg;
10389
10390 if (STI.is64Bit()) {
10391 if (TM->getCodeModel() == CodeModel::Large) {
10392 // In the large code model, we are aiming for this code, though the
10393 // register allocation may vary:
10394 // leaq .LN$pb(%rip), %rax
10395 // movq $_GLOBAL_OFFSET_TABLE_ - .LN$pb, %rcx
10396 // addq %rcx, %rax
10397 // RAX now holds address of _GLOBAL_OFFSET_TABLE_.
10398 Register PBReg = RegInfo.createVirtualRegister(&X86::GR64RegClass);
10399 Register GOTReg = RegInfo.createVirtualRegister(&X86::GR64RegClass);
10400 BuildMI(FirstMBB, MBBI, DL, TII->get(X86::LEA64r), PBReg)
10401 .addReg(X86::RIP)
10402 .addImm(0)
10403 .addReg(0)
10404 .addSym(MF.getPICBaseSymbol())
10405 .addReg(0);
10406 std::prev(MBBI)->setPreInstrSymbol(MF, MF.getPICBaseSymbol());
10407 BuildMI(FirstMBB, MBBI, DL, TII->get(X86::MOV64ri), GOTReg)
10408 .addExternalSymbol("_GLOBAL_OFFSET_TABLE_",
10409 X86II::MO_PIC_BASE_OFFSET);
10410 BuildMI(FirstMBB, MBBI, DL, TII->get(X86::ADD64rr), PC)
10411 .addReg(PBReg, RegState::Kill)
10412 .addReg(GOTReg, RegState::Kill);
10413 } else {
10414 // In other code models, use a RIP-relative LEA to materialize the
10415 // GOT.
10416 BuildMI(FirstMBB, MBBI, DL, TII->get(X86::LEA64r), PC)
10417 .addReg(X86::RIP)
10418 .addImm(0)
10419 .addReg(0)
10420 .addExternalSymbol("_GLOBAL_OFFSET_TABLE_")
10421 .addReg(0);
10422 }
10423 } else {
10424 // The operand of MovePCtoStack is completely ignored by the asm printer. It's
10425 // only used in JIT code emission as a displacement to the PC.
10426 BuildMI(FirstMBB, MBBI, DL, TII->get(X86::MOVPC32r), PC).addImm(0);
10427
10428 // If we're using vanilla 'GOT' PIC style, we should use relative
10429 // addressing not to pc, but to _GLOBAL_OFFSET_TABLE_ external.
10430 if (STI.isPICStyleGOT()) {
10431 // Generate addl $__GLOBAL_OFFSET_TABLE_ + [.-piclabel],
10432 // %some_register
10433 BuildMI(FirstMBB, MBBI, DL, TII->get(X86::ADD32ri), GlobalBaseReg)
10434 .addReg(PC)
10435 .addExternalSymbol("_GLOBAL_OFFSET_TABLE_",
10436 X86II::MO_GOT_ABSOLUTE_ADDRESS);
10437 }
10438 }
10439
10440 return true;
10441 }
10442
10443 StringRef getPassName() const override {
10444 return "X86 PIC Global Base Reg Initialization";
10445 }
10446
10447 void getAnalysisUsage(AnalysisUsage &AU) const override {
10448 AU.setPreservesCFG();
10449 MachineFunctionPass::getAnalysisUsage(AU);
10450 }
10451};
10452} // namespace
10453
10454char CGBR::ID = 0;
10455FunctionPass *llvm::createX86GlobalBaseRegPass() { return new CGBR(); }
10456
10457namespace {
10458struct LDTLSCleanup : public MachineFunctionPass {
10459 static char ID;
10460 LDTLSCleanup() : MachineFunctionPass(ID) {}
10461
10462 bool runOnMachineFunction(MachineFunction &MF) override {
10463 if (skipFunction(MF.getFunction()))
10464 return false;
10465
10466 X86MachineFunctionInfo *MFI = MF.getInfo<X86MachineFunctionInfo>();
10467 if (MFI->getNumLocalDynamicTLSAccesses() < 2) {
10468 // No point folding accesses if there aren't at least two.
10469 return false;
10470 }
10471
10472 MachineDominatorTree *DT =
10473 &getAnalysis<MachineDominatorTreeWrapperPass>().getDomTree();
10474 return VisitNode(DT->getRootNode(), Register());
10475 }
10476
10477 // Visit the dominator subtree rooted at Node in pre-order.
10478 // If TLSBaseAddrReg is non-null, then use that to replace any
10479 // TLS_base_addr instructions. Otherwise, create the register
10480 // when the first such instruction is seen, and then use it
10481 // as we encounter more instructions.
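// Illustrative note (not part of the original file): the first
// TLS_base_addr32/64 seen in a block has its result copied into a virtual
// register, and every later TLS_base_addr in blocks dominated by it is
// rewritten into a COPY from that register, so the underlying __tls_get_addr
// call is not repeated along dominated paths.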
10482 bool VisitNode(MachineDomTreeNode *Node, Register TLSBaseAddrReg) {
10483 MachineBasicBlock *BB = Node->getBlock();
10484 bool Changed = false;
10485
10486 // Traverse the current block.
10487 for (MachineBasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;
10488 ++I) {
10489 switch (I->getOpcode()) {
10490 case X86::TLS_base_addr32:
10491 case X86::TLS_base_addr64:
10492 if (TLSBaseAddrReg)
10493 I = ReplaceTLSBaseAddrCall(*I, TLSBaseAddrReg);
10494 else
10495 I = SetRegister(*I, &TLSBaseAddrReg);
10496 Changed = true;
10497 break;
10498 default:
10499 break;
10500 }
10501 }
10502
10503 // Visit the children of this block in the dominator tree.
10504 for (auto &I : *Node) {
10505 Changed |= VisitNode(I, TLSBaseAddrReg);
10506 }
10507
10508 return Changed;
10509 }
10510
10511 // Replace the TLS_base_addr instruction I with a copy from
10512 // TLSBaseAddrReg, returning the new instruction.
10513 MachineInstr *ReplaceTLSBaseAddrCall(MachineInstr &I,
10514 Register TLSBaseAddrReg) {
10515 MachineFunction *MF = I.getParent()->getParent();
10516 const X86Subtarget &STI = MF->getSubtarget<X86Subtarget>();
10517 const bool is64Bit = STI.is64Bit();
10518 const X86InstrInfo *TII = STI.getInstrInfo();
10519
10520 // Insert a Copy from TLSBaseAddrReg to RAX/EAX.
10521 MachineInstr *Copy =
10522 BuildMI(*I.getParent(), I, I.getDebugLoc(),
10523 TII->get(TargetOpcode::COPY), is64Bit ? X86::RAX : X86::EAX)
10524 .addReg(TLSBaseAddrReg);
10525
10526 // Erase the TLS_base_addr instruction.
10527 I.eraseFromParent();
10528
10529 return Copy;
10530 }
10531
10532 // Create a virtual register in *TLSBaseAddrReg, and populate it by
10533 // inserting a copy instruction after I. Returns the new instruction.
10534 MachineInstr *SetRegister(MachineInstr &I, Register *TLSBaseAddrReg) {
10535 MachineFunction *MF = I.getParent()->getParent();
10536 const X86Subtarget &STI = MF->getSubtarget<X86Subtarget>();
10537 const bool is64Bit = STI.is64Bit();
10538 const X86InstrInfo *TII = STI.getInstrInfo();
10539
10540 // Create a virtual register for the TLS base address.
10541 MachineRegisterInfo &RegInfo = MF->getRegInfo();
10542 *TLSBaseAddrReg = RegInfo.createVirtualRegister(
10543 is64Bit ? &X86::GR64RegClass : &X86::GR32RegClass);
10544
10545 // Insert a copy from RAX/EAX to TLSBaseAddrReg.
10546 MachineInstr *Next = I.getNextNode();
10547 MachineInstr *Copy = BuildMI(*I.getParent(), Next, I.getDebugLoc(),
10548 TII->get(TargetOpcode::COPY), *TLSBaseAddrReg)
10549 .addReg(is64Bit ? X86::RAX : X86::EAX);
10550
10551 return Copy;
10552 }
10553
10554 StringRef getPassName() const override {
10555 return "Local Dynamic TLS Access Clean-up";
10556 }
10557
10558 void getAnalysisUsage(AnalysisUsage &AU) const override {
10559 AU.setPreservesCFG();
10560 AU.addRequired<MachineDominatorTreeWrapperPass>();
10561 MachineFunctionPass::getAnalysisUsage(AU);
10562 }
10563};
10564} // namespace
10565
10566char LDTLSCleanup::ID = 0;
10567FunctionPass *llvm::createCleanupLocalDynamicTLSPass() {
10568 return new LDTLSCleanup();
10569}
10570
10571/// Constants defining how certain sequences should be outlined.
10572///
10573/// \p MachineOutlinerDefault implies that the function is called with a call
10574/// instruction, and a return must be emitted for the outlined function frame.
10575///
10576/// That is,
10577///
10578/// I1 OUTLINED_FUNCTION:
10579/// I2 --> call OUTLINED_FUNCTION I1
10580/// I3 I2
10581/// I3
10582/// ret
10583///
10584/// * Call construction overhead: 1 (call instruction)
10585/// * Frame construction overhead: 1 (return instruction)
10586///
10587/// \p MachineOutlinerTailCall implies that the function is being tail called.
10588/// A jump is emitted instead of a call, and the return is already present in
10589/// the outlined sequence. That is,
10590///
10591/// I1 OUTLINED_FUNCTION:
10592/// I2 --> jmp OUTLINED_FUNCTION I1
10593/// ret I2
10594/// ret
10595///
10596/// * Call construction overhead: 1 (jump instruction)
10597/// * Frame construction overhead: 0 (don't need to return)
10598///
10599enum MachineOutlinerClass { MachineOutlinerDefault, MachineOutlinerTailCall };
10600
10601std::optional<std::unique_ptr<outliner::OutlinedFunction>>
10602X86InstrInfo::getOutliningCandidateInfo(
10603 const MachineModuleInfo &MMI,
10604 std::vector<outliner::Candidate> &RepeatedSequenceLocs,
10605 unsigned MinRepeats) const {
10606 unsigned SequenceSize = 0;
10607 for (auto &MI : RepeatedSequenceLocs[0]) {
10608 // FIXME: x86 doesn't implement getInstSizeInBytes, so
10609 // we can't tell the cost. Just assume each instruction
10610 // is one byte.
10611 if (MI.isDebugInstr() || MI.isKill())
10612 continue;
10613 SequenceSize += 1;
10614 }
10615
10616 // We check to see if CFI instructions are present, and if they are,
10617 // we find the number of CFI instructions in the candidates.
10618 unsigned CFICount = 0;
10619 for (auto &I : RepeatedSequenceLocs[0]) {
10620 if (I.isCFIInstruction())
10621 CFICount++;
10622 }
10623
10624 // We compare the number of found CFI instructions to the number of CFI
10625 // instructions in the parent function for each candidate. We must check this
10626 // since if we outline one of the CFI instructions in a function, we have to
10627 // outline them all for correctness. If we do not, the address offsets will be
10628 // incorrect between the two sections of the program.
10629 for (outliner::Candidate &C : RepeatedSequenceLocs) {
10630 std::vector<MCCFIInstruction> CFIInstructions =
10631 C.getMF()->getFrameInstructions();
10632
10633 if (CFICount > 0 && CFICount != CFIInstructions.size())
10634 return std::nullopt;
10635 }
10636
10637 // FIXME: Use real size in bytes for call and ret instructions.
10638 if (RepeatedSequenceLocs[0].back().isTerminator()) {
10639 for (outliner::Candidate &C : RepeatedSequenceLocs)
10640 C.setCallInfo(MachineOutlinerTailCall, 1);
10641
10642 return std::make_unique<outliner::OutlinedFunction>(
10643 RepeatedSequenceLocs, SequenceSize,
10644 0, // Number of bytes to emit frame.
10645 MachineOutlinerTailCall // Type of frame.
10646 );
10647 }
10648
10649 if (CFICount > 0)
10650 return std::nullopt;
10651
10652 for (outliner::Candidate &C : RepeatedSequenceLocs)
10653 C.setCallInfo(MachineOutlinerDefault, 1);
10654
10655 return std::make_unique<outliner::OutlinedFunction>(
10656 RepeatedSequenceLocs, SequenceSize, 1, MachineOutlinerDefault);
10657}
10658
10659bool X86InstrInfo::isFunctionSafeToOutlineFrom(
10660 MachineFunction &MF, bool OutlineFromLinkOnceODRs) const {
10661 const Function &F = MF.getFunction();
10662
10663 // Does the function use a red zone? If it does, then we can't risk messing
10664 // with the stack.
10665 if (Subtarget.getFrameLowering()->has128ByteRedZone(MF)) {
10666 // It could have a red zone. If it does, then we don't want to touch it.
10667 X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
10668 if (!X86FI || X86FI->getUsesRedZone())
10669 return false;
10670 }
10671
10672 // If we *don't* want to outline from things that could potentially be deduped
10673 // then return false.
10674 if (!OutlineFromLinkOnceODRs && F.hasLinkOnceODRLinkage())
10675 return false;
10676
10677 // This function is viable for outlining, so return true.
10678 return true;
10679}
10680
10681outliner::InstrType
10682X86InstrInfo::getOutliningTypeImpl(const MachineModuleInfo &MMI,
10683 MachineBasicBlock::iterator &MIT,
10684 unsigned Flags) const {
10685 MachineInstr &MI = *MIT;
10686
10687 // Is this a terminator for a basic block?
10688 if (MI.isTerminator())
10689 // TargetInstrInfo::getOutliningType has already filtered out anything
10690 // that would break this, so we can allow it here.
10691 return outliner::InstrType::Legal;
10692
10693 // Don't outline anything that modifies or reads from the stack pointer.
10694 //
10695 // FIXME: There are instructions which are being manually built without
10696 // explicit uses/defs so we also have to check the MCInstrDesc. We should be
10697 // able to remove the extra checks once those are fixed up. For example,
10698 // sometimes we might get something like %rax = POP64r 1. This won't be
10699 // caught by modifiesRegister or readsRegister even though the instruction
10700 // really ought to be formed so that modifiesRegister/readsRegister would
10701 // catch it.
10702 if (MI.modifiesRegister(X86::RSP, &RI) || MI.readsRegister(X86::RSP, &RI) ||
10703 MI.getDesc().hasImplicitUseOfPhysReg(X86::RSP) ||
10704 MI.getDesc().hasImplicitDefOfPhysReg(X86::RSP))
10705 return outliner::InstrType::Illegal;
10706
10707 // Outlined calls change the instruction pointer, so don't read from it.
10708 if (MI.readsRegister(X86::RIP, &RI) ||
10709 MI.getDesc().hasImplicitUseOfPhysReg(X86::RIP) ||
10710 MI.getDesc().hasImplicitDefOfPhysReg(X86::RIP))
10711 return outliner::InstrType::Illegal;
10712
10713 // Don't outline CFI instructions.
10714 if (MI.isCFIInstruction())
10715 return outliner::InstrType::Illegal;
10716
10717 return outliner::InstrType::Legal;
10718}
10719
10720void X86InstrInfo::buildOutlinedFrame(
10721 MachineBasicBlock &MBB, MachineFunction &MF,
10722 const outliner::OutlinedFunction &OF) const {
10723 // If we're a tail call, we already have a return, so don't do anything.
10724 if (OF.FrameConstructionID == MachineOutlinerTailCall)
10725 return;
10726
10727 // We're a normal call, so our sequence doesn't have a return instruction.
10728 // Add it in.
10729 MachineInstr *retq = BuildMI(MF, DebugLoc(), get(X86::RET64));
10730 MBB.insert(MBB.end(), retq);
10731}
10732
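// Replace the candidate's code at the call site with either a tail jump or a
// call to the outlined function, which is looked up by name in the module.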
10733MachineBasicBlock::iterator X86InstrInfo::insertOutlinedCall(
10734 Module &M, MachineBasicBlock &MBB, MachineBasicBlock::iterator &It,
10735 MachineFunction &MF, outliner::Candidate &C) const {
10736 // Is it a tail call?
10737 if (C.CallConstructionID == MachineOutlinerTailCall) {
10738 // Yes, just insert a JMP.
10739 It = MBB.insert(It, BuildMI(MF, DebugLoc(), get(X86::TAILJMPd64))
10740 .addGlobalAddress(M.getNamedValue(MF.getName())));
10741 } else {
10742 // No, insert a call.
10743 It = MBB.insert(It, BuildMI(MF, DebugLoc(), get(X86::CALL64pcrel32))
10744 .addGlobalAddress(M.getNamedValue(MF.getName())));
10745 }
10746
10747 return It;
10748}
10749
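// Zero Reg using an idiom appropriate to its register class. When
// AllowSideEffects is false, EFLAGS must be preserved, so a flag-clobbering
// XOR is avoided for general-purpose registers.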
10750void X86InstrInfo::buildClearRegister(Register Reg, MachineBasicBlock &MBB,
10751 MachineBasicBlock::iterator Iter,
10752 DebugLoc &DL,
10753 bool AllowSideEffects) const {
10754 const MachineFunction &MF = *MBB.getParent();
10755 const X86Subtarget &ST = MF.getSubtarget<X86Subtarget>();
10756 const TargetRegisterInfo &TRI = getRegisterInfo();
10757
10758 if (ST.hasMMX() && X86::VR64RegClass.contains(Reg))
10759 // FIXME: Should we ignore MMX registers?
10760 return;
10761
10762 if (TRI.isGeneralPurposeRegister(MF, Reg)) {
10763 // Convert register to the 32-bit version. Both 'movl' and 'xorl' clear the
10764 // upper bits of a 64-bit register automagically.
10765 Reg = getX86SubSuperRegister(Reg, 32);
10766
10767 if (!AllowSideEffects)
10768 // XOR affects flags, so use a MOV instead.
10769 BuildMI(MBB, Iter, DL, get(X86::MOV32ri), Reg).addImm(0);
10770 else
10771 BuildMI(MBB, Iter, DL, get(X86::XOR32rr), Reg)
10772 .addReg(Reg, RegState::Undef)
10773 .addReg(Reg, RegState::Undef);
10774 } else if (X86::VR128RegClass.contains(Reg)) {
10775 // XMM#
10776 if (!ST.hasSSE1())
10777 return;
10778
10779 // PXOR is safe to use because it doesn't affect flags.
10780 BuildMI(MBB, Iter, DL, get(X86::PXORrr), Reg)
10781 .addReg(Reg, RegState::Undef)
10782 .addReg(Reg, RegState::Undef);
10783 } else if (X86::VR256RegClass.contains(Reg)) {
10784 // YMM#
10785 if (!ST.hasAVX())
10786 return;
10787
10788 // VPXOR is safe to use because it doesn't affect flags.
10789 BuildMI(MBB, Iter, DL, get(X86::VPXORrr), Reg)
10790 .addReg(Reg, RegState::Undef)
10791 .addReg(Reg, RegState::Undef);
10792 } else if (X86::VR512RegClass.contains(Reg)) {
10793 // ZMM#
10794 if (!ST.hasAVX512())
10795 return;
10796
10797 // VPXORY is safe to use because it doesn't affect flags.
10798 BuildMI(MBB, Iter, DL, get(X86::VPXORYrr), Reg)
10799 .addReg(Reg, RegState::Undef)
10800 .addReg(Reg, RegState::Undef);
10801 } else if (X86::VK1RegClass.contains(Reg) || X86::VK2RegClass.contains(Reg) ||
10802 X86::VK4RegClass.contains(Reg) || X86::VK8RegClass.contains(Reg) ||
10803 X86::VK16RegClass.contains(Reg)) {
10804 if (!ST.hasVLX())
10805 return;
10806
10807 // KXOR is safe to use because it doesn't affect flags.
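// KXORQ requires AVX512BW; fall back to the AVX512F 16-bit KXORW otherwise.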
10808 unsigned Op = ST.hasBWI() ? X86::KXORQkk : X86::KXORWkk;
10809 BuildMI(MBB, Iter, DL, get(Op), Reg)
10810 .addReg(Reg, RegState::Undef)
10811 .addReg(Reg, RegState::Undef);
10812 }
10813}
10814
10815bool X86InstrInfo::getMachineCombinerPatterns(
10816 MachineInstr &Root, SmallVectorImpl<unsigned> &Patterns,
10817 bool DoRegPressureReduce) const {
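// When VPDPWSSD is slow on the target, offer the combiner a pattern that
// splits it into VPMADDWD + VPADDD (see genAlternativeDpCodeSequence below).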
10818 unsigned Opc = Root.getOpcode();
10819 switch (Opc) {
10820 case X86::VPDPWSSDrr:
10821 case X86::VPDPWSSDrm:
10822 case X86::VPDPWSSDYrr:
10823 case X86::VPDPWSSDYrm: {
10824 if (!Subtarget.hasFastDPWSSD()) {
10825 Patterns.push_back(X86MachineCombinerPattern::DPWSSD);
10826 return true;
10827 }
10828 break;
10829 }
10830 case X86::VPDPWSSDZ128rr:
10831 case X86::VPDPWSSDZ128rm:
10832 case X86::VPDPWSSDZ256rr:
10833 case X86::VPDPWSSDZ256rm:
10834 case X86::VPDPWSSDZrr:
10835 case X86::VPDPWSSDZrm: {
10836 if (Subtarget.hasBWI() && !Subtarget.hasFastDPWSSD()) {
10837 Patterns.push_back(X86MachineCombinerPattern::DPWSSD);
10838 return true;
10839 }
10840 break;
10841 }
10842 }
10843 return TargetInstrInfo::getMachineCombinerPatterns(Root,
10844 Patterns, DoRegPressureReduce);
10845}
10846
10847static void
10848genAlternativeDpCodeSequence(MachineInstr &Root, const TargetInstrInfo &TII,
10849 SmallVectorImpl<MachineInstr *> &InsInstrs,
10850 SmallVectorImpl<MachineInstr *> &DelInstrs,
10851 DenseMap<Register, unsigned> &InstrIdxForVirtReg) {
10852 MachineFunction *MF = Root.getMF();
10853 MachineRegisterInfo &RegInfo = MF->getRegInfo();
10854
10855 unsigned Opc = Root.getOpcode();
10856 unsigned AddOpc = 0;
10857 unsigned MaddOpc = 0;
10858 switch (Opc) {
10859 default:
10860 assert(false && "It should not reach here");
10861 break;
10862 // vpdpwssd xmm2,xmm3,xmm1
10863 // -->
10864 // vpmaddwd xmm3,xmm3,xmm1
10865 // vpaddd xmm2,xmm2,xmm3
10866 case X86::VPDPWSSDrr:
10867 MaddOpc = X86::VPMADDWDrr;
10868 AddOpc = X86::VPADDDrr;
10869 break;
10870 case X86::VPDPWSSDrm:
10871 MaddOpc = X86::VPMADDWDrm;
10872 AddOpc = X86::VPADDDrr;
10873 break;
10874 case X86::VPDPWSSDZ128rr:
10875 MaddOpc = X86::VPMADDWDZ128rr;
10876 AddOpc = X86::VPADDDZ128rr;
10877 break;
10878 case X86::VPDPWSSDZ128rm:
10879 MaddOpc = X86::VPMADDWDZ128rm;
10880 AddOpc = X86::VPADDDZ128rr;
10881 break;
10882 // vpdpwssd ymm2,ymm3,ymm1
10883 // -->
10884 // vpmaddwd ymm3,ymm3,ymm1
10885 // vpaddd ymm2,ymm2,ymm3
10886 case X86::VPDPWSSDYrr:
10887 MaddOpc = X86::VPMADDWDYrr;
10888 AddOpc = X86::VPADDDYrr;
10889 break;
10890 case X86::VPDPWSSDYrm:
10891 MaddOpc = X86::VPMADDWDYrm;
10892 AddOpc = X86::VPADDDYrr;
10893 break;
10894 case X86::VPDPWSSDZ256rr:
10895 MaddOpc = X86::VPMADDWDZ256rr;
10896 AddOpc = X86::VPADDDZ256rr;
10897 break;
10898 case X86::VPDPWSSDZ256rm:
10899 MaddOpc = X86::VPMADDWDZ256rm;
10900 AddOpc = X86::VPADDDZ256rr;
10901 break;
10902 // vpdpwssd zmm2,zmm3,zmm1
10903 // -->
10904 // vpmaddwd zmm3,zmm3,zmm1
10905 // vpaddd zmm2,zmm2,zmm3
10906 case X86::VPDPWSSDZrr:
10907 MaddOpc = X86::VPMADDWDZrr;
10908 AddOpc = X86::VPADDDZrr;
10909 break;
10910 case X86::VPDPWSSDZrm:
10911 MaddOpc = X86::VPMADDWDZrm;
10912 AddOpc = X86::VPADDDZrr;
10913 break;
10914 }
10915 // Create vpmaddwd.
10916 const TargetRegisterClass *RC =
10917 RegInfo.getRegClass(Root.getOperand(0).getReg());
10918 Register NewReg = RegInfo.createVirtualRegister(RC);
10919 MachineInstr *Madd = Root.getMF()->CloneMachineInstr(&Root);
10920 Madd->setDesc(TII.get(MaddOpc));
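// Drop the tied accumulator operand (operand 1) so the cloned instruction
// matches vpmaddwd's plain two-source form.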
10921 Madd->untieRegOperand(1);
10922 Madd->removeOperand(1);
10923 Madd->getOperand(0).setReg(NewReg);
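// Record that NewReg is defined by InsInstrs[0] (the vpmaddwd) so the
// machine combiner can compute the depth of the new sequence.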
10924 InstrIdxForVirtReg.insert(std::make_pair(NewReg, 0));
10925 // Create vpaddd.
10926 Register DstReg = Root.getOperand(0).getReg();
10927 bool IsKill = Root.getOperand(1).isKill();
10928 MachineInstr *Add =
10929 BuildMI(*MF, MIMetadata(Root), TII.get(AddOpc), DstReg)
10930 .addReg(Root.getOperand(1).getReg(), getKillRegState(IsKill))
10931 .addReg(Madd->getOperand(0).getReg(), getKillRegState(true));
10932 InsInstrs.push_back(Madd);
10933 InsInstrs.push_back(Add);
10934 DelInstrs.push_back(&Root);
10935}
10936
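// Expand the machine-combiner patterns found above: DPWSSD gets the custom
// vpmaddwd + vpaddd expansion, everything else goes through the generic
// reassociation code.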
10937void X86InstrInfo::genAlternativeCodeSequence(
10938 MachineInstr &Root, unsigned Pattern,
10939 SmallVectorImpl<MachineInstr *> &InsInstrs,
10940 SmallVectorImpl<MachineInstr *> &DelInstrs,
10941 DenseMap<Register, unsigned> &InstrIdxForVirtReg) const {
10942 switch (Pattern) {
10943 default:
10944 // Reassociate instructions.
10945 TargetInstrInfo::genAlternativeCodeSequence(Root, Pattern, InsInstrs,
10946 DelInstrs, InstrIdxForVirtReg);
10947 return;
10948 case X86MachineCombinerPattern::DPWSSD:
10949 genAlternativeDpCodeSequence(Root, *this, InsInstrs, DelInstrs,
10950 InstrIdxForVirtReg);
10951 return;
10952 }
10953}
10954
10955// See also: X86DAGToDAGISel::SelectInlineAsmMemoryOperand().
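// getFullAddress appends the five x86 memory-operand components (base,
// scale, index, displacement, segment) referring to the frame index.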
10956void X86InstrInfo::getFrameIndexOperands(SmallVectorImpl<MachineOperand> &Ops,
10957 int FI) const {
10958 X86AddressMode M;
10959 M.BaseType = X86AddressMode::FrameIndexBase;
10960 M.Base.FrameIndex = FI;
10961 M.getFullAddress(Ops);
10962}
10963
10964#define GET_INSTRINFO_HELPERS
10965#include "X86GenInstrInfo.inc"
unsigned SubReg
unsigned const MachineRegisterInfo * MRI
MachineInstrBuilder & UseMI
MachineInstrBuilder MachineInstrBuilder & DefMI
return SDValue()
MachineOutlinerClass
Constants defining how certain sequences should be outlined.
@ MachineOutlinerTailCall
Emit a save, restore, call, and return.
@ MachineOutlinerDefault
unsigned RegSize
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
DXIL Forward Handle Accesses
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
Module.h This file contains the declarations for the Module class.
static bool lookup(const GsymReader &GR, DataExtractor &Data, uint64_t &Offset, uint64_t BaseAddr, uint64_t Addr, SourceLocations &SrcLocs, llvm::Error &Err)
A Lookup helper functions.
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
This file implements the LivePhysRegs utility for tracking liveness of physical registers.
static bool Expand2AddrUndef(MachineInstrBuilder &MIB, const MCInstrDesc &Desc)
Expand a single-def pseudo instruction to a two-addr instruction with two undef reads of the register...
#define F(x, y, z)
Definition MD5.cpp:55
#define I(x, y, z)
Definition MD5.cpp:58
This file declares the MachineConstantPool class which is an abstract constant pool to keep track of ...
Register Reg
Register const TargetRegisterInfo * TRI
Promote Memory to Register
Definition Mem2Reg.cpp:110
const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB
const SmallVectorImpl< MachineOperand > & Cond
bool IsDead
This file contains some templates that are useful if you are working with the STL at all.
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
Definition Value.cpp:480
Provides some synthesis utilities to produce sequences of values.
static SPCC::CondCodes GetOppositeBranchCondition(SPCC::CondCodes CC)
#define LLVM_DEBUG(...)
Definition Debug.h:114
#define FROM_TO(FROM, TO)
cl::opt< bool > X86EnableAPXForRelocation
static bool is64Bit(const char *name)
#define GET_EGPR_IF_ENABLED(OPC)
static bool isLEA(unsigned Opcode)
static void addOperands(MachineInstrBuilder &MIB, ArrayRef< MachineOperand > MOs, int PtrOffset=0)
static std::optional< ParamLoadedValue > describeMOVrrLoadedValue(const MachineInstr &MI, Register DescribedReg, const TargetRegisterInfo *TRI)
If DescribedReg overlaps with the MOVrr instruction's destination register then, if possible,...
static cl::opt< unsigned > PartialRegUpdateClearance("partial-reg-update-clearance", cl::desc("Clearance between two register writes " "for inserting XOR to avoid partial " "register update"), cl::init(64), cl::Hidden)
static bool shouldPreventUndefRegUpdateMemFold(MachineFunction &MF, MachineInstr &MI)
static unsigned CopyToFromAsymmetricReg(Register DestReg, Register SrcReg, const X86Subtarget &Subtarget)
static bool isConvertibleLEA(MachineInstr *MI)
static bool ExpandMOVImmSExti8(MachineInstrBuilder &MIB, const TargetInstrInfo &TII, const X86Subtarget &Subtarget)
static bool isAMXOpcode(unsigned Opc)
static int getJumpTableIndexFromReg(const MachineRegisterInfo &MRI, Register Reg)
static void updateOperandRegConstraints(MachineFunction &MF, MachineInstr &NewMI, const TargetInstrInfo &TII)
static int getJumpTableIndexFromAddr(const MachineInstr &MI)
static bool AdjustBlendMask(unsigned OldMask, unsigned OldWidth, unsigned NewWidth, unsigned *pNewMask=nullptr)
static bool expandMOV32r1(MachineInstrBuilder &MIB, const TargetInstrInfo &TII, bool MinusOne)
static unsigned getNewOpcFromTable(ArrayRef< X86TableEntry > Table, unsigned Opc)
static unsigned getStoreRegOpcode(Register SrcReg, const TargetRegisterClass *RC, bool IsStackAligned, const X86Subtarget &STI)
#define FOLD_BROADCAST(SIZE)
static cl::opt< unsigned > UndefRegClearance("undef-reg-clearance", cl::desc("How many idle instructions we would like before " "certain undef register reads"), cl::init(128), cl::Hidden)
#define CASE_BCAST_TYPE_OPC(TYPE, OP16, OP32, OP64)
static bool isTruncatedShiftCountForLEA(unsigned ShAmt)
Check whether the given shift count is appropriate can be represented by a LEA instruction.
static cl::opt< bool > ReMatPICStubLoad("remat-pic-stub-load", cl::desc("Re-materialize load from stub in PIC mode"), cl::init(false), cl::Hidden)
static SmallVector< MachineMemOperand *, 2 > extractLoadMMOs(ArrayRef< MachineMemOperand * > MMOs, MachineFunction &MF)
static MachineInstr * fuseTwoAddrInst(MachineFunction &MF, unsigned Opcode, ArrayRef< MachineOperand > MOs, MachineBasicBlock::iterator InsertPt, MachineInstr &MI, const TargetInstrInfo &TII)
static void printFailMsgforFold(const MachineInstr &MI, unsigned Idx)
static bool canConvert2Copy(unsigned Opc)
static cl::opt< bool > NoFusing("disable-spill-fusing", cl::desc("Disable fusing of spill code into instructions"), cl::Hidden)
static bool expandNOVLXStore(MachineInstrBuilder &MIB, const TargetRegisterInfo *TRI, const MCInstrDesc &StoreDesc, const MCInstrDesc &ExtractDesc, unsigned SubIdx)
static bool isX87Reg(Register Reg)
Return true if the Reg is X87 register.
static bool Expand2AddrKreg(MachineInstrBuilder &MIB, const MCInstrDesc &Desc, Register Reg)
Expand a single-def pseudo instruction to a two-addr instruction with two k0 reads.
static bool isFrameLoadOpcode(int Opcode, TypeSize &MemBytes)
#define VPERM_CASES_BROADCAST(Suffix)
static std::pair< X86::CondCode, unsigned > isUseDefConvertible(const MachineInstr &MI)
Check whether the use can be converted to remove a comparison against zero.
static bool findRedundantFlagInstr(MachineInstr &CmpInstr, MachineInstr &CmpValDefInstr, const MachineRegisterInfo *MRI, MachineInstr **AndInstr, const TargetRegisterInfo *TRI, const X86Subtarget &ST, bool &NoSignFlag, bool &ClearsOverflowFlag)
static bool expandSHXDROT(MachineInstrBuilder &MIB, const MCInstrDesc &Desc)
static unsigned getLoadRegOpcode(Register DestReg, const TargetRegisterClass *RC, bool IsStackAligned, const X86Subtarget &STI)
static void expandLoadStackGuard(MachineInstrBuilder &MIB, const TargetInstrInfo &TII)
static bool hasUndefRegUpdate(unsigned Opcode, unsigned OpNum, bool ForLoadFold=false)
static MachineInstr * makeM0Inst(const TargetInstrInfo &TII, unsigned Opcode, ArrayRef< MachineOperand > MOs, MachineBasicBlock::iterator InsertPt, MachineInstr &MI)
#define GET_ND_IF_ENABLED(OPC)
static bool expandMOVSHP(MachineInstrBuilder &MIB, MachineInstr &MI, const TargetInstrInfo &TII, bool HasAVX)
static bool hasPartialRegUpdate(unsigned Opcode, const X86Subtarget &Subtarget, bool ForLoadFold=false)
Return true for all instructions that only update the first 32 or 64-bits of the destination register...
#define CASE_NF(OP)
static const uint16_t * lookupAVX512(unsigned opcode, unsigned domain, ArrayRef< uint16_t[4]> Table)
static unsigned getLoadStoreRegOpcode(Register Reg, const TargetRegisterClass *RC, bool IsStackAligned, const X86Subtarget &STI, bool Load)
#define VPERM_CASES(Suffix)
#define FROM_TO_SIZE(A, B, S)
static void commuteVPTERNLOG(MachineInstr &MI, unsigned SrcOpIdx1, unsigned SrcOpIdx2)
static bool isDefConvertible(const MachineInstr &MI, bool &NoSignFlag, bool &ClearsOverflowFlag)
Check whether the definition can be converted to remove a comparison against zero.
static MachineInstr * fuseInst(MachineFunction &MF, unsigned Opcode, unsigned OpNo, ArrayRef< MachineOperand > MOs, MachineBasicBlock::iterator InsertPt, MachineInstr &MI, const TargetInstrInfo &TII, int PtrOffset=0)
static X86::CondCode getSwappedCondition(X86::CondCode CC)
Assuming the flags are set by MI(a,b), return the condition code if we modify the instructions such t...
static unsigned getCommutedVPERMV3Opcode(unsigned Opcode)
static bool expandXorFP(MachineInstrBuilder &MIB, const TargetInstrInfo &TII)
static MachineBasicBlock * getFallThroughMBB(MachineBasicBlock *MBB, MachineBasicBlock *TBB)
static bool isNonFoldablePartialRegisterLoad(const MachineInstr &LoadMI, const MachineInstr &UserMI, const MachineFunction &MF)
Check if LoadMI is a partial register load that we can't fold into MI because the latter uses content...
static unsigned getLoadStoreOpcodeForFP16(bool Load, const X86Subtarget &STI)
static bool isHReg(Register Reg)
Test if the given register is a physical h register.
static cl::opt< bool > PrintFailedFusing("print-failed-fuse-candidates", cl::desc("Print instructions that the allocator wants to" " fuse, but the X86 backend currently can't"), cl::Hidden)
static bool expandNOVLXLoad(MachineInstrBuilder &MIB, const TargetRegisterInfo *TRI, const MCInstrDesc &LoadDesc, const MCInstrDesc &BroadcastDesc, unsigned SubIdx)
static void genAlternativeDpCodeSequence(MachineInstr &Root, const TargetInstrInfo &TII, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs, DenseMap< Register, unsigned > &InstrIdxForVirtReg)
#define CASE_ND(OP)
static unsigned getThreeSrcCommuteCase(uint64_t TSFlags, unsigned SrcOpIdx1, unsigned SrcOpIdx2)
This determines which of three possible cases of a three source commute the source indexes correspond...
static bool isFrameStoreOpcode(int Opcode, TypeSize &MemBytes)
static unsigned getTruncatedShiftCount(const MachineInstr &MI, unsigned ShiftAmtOperandIdx)
Check whether the shift count for a machine operand is non-zero.
static SmallVector< MachineMemOperand *, 2 > extractStoreMMOs(ArrayRef< MachineMemOperand * > MMOs, MachineFunction &MF)
static unsigned getBroadcastOpcode(const X86FoldTableEntry *I, const TargetRegisterClass *RC, const X86Subtarget &STI)
static unsigned convertALUrr2ALUri(unsigned Opc)
Convert an ALUrr opcode to corresponding ALUri opcode.
static bool regIsPICBase(Register BaseReg, const MachineRegisterInfo &MRI)
Return true if register is PIC base; i.e.g defined by X86::MOVPC32r.
static bool isCommutableVPERMV3Instruction(unsigned Opcode)
static APInt getMaxValue(unsigned numBits)
Gets maximum unsigned value of APInt for specific bit width.
Definition APInt.h:206
static APInt getSignedMaxValue(unsigned numBits)
Gets maximum signed value of APInt for a specific bit width.
Definition APInt.h:209
static APInt getSignedMinValue(unsigned numBits)
Gets minimum signed value of APInt for a specific bit width.
Definition APInt.h:219
AnalysisUsage & addRequired()
LLVM_ABI void setPreservesCFG()
This function should be called by the pass, iff they do not:
Definition Pass.cpp:270
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:41
iterator end() const
Definition ArrayRef.h:136
size_t size() const
size - Get the array size.
Definition ArrayRef.h:147
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition InstrTypes.h:678
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
Definition InstrTypes.h:681
@ ICMP_SLT
signed less than
Definition InstrTypes.h:707
@ ICMP_SLE
signed less or equal
Definition InstrTypes.h:708
@ FCMP_OLT
0 1 0 0 True if ordered and less than
Definition InstrTypes.h:684
@ FCMP_ULE
1 1 0 1 True if unordered, less than, or equal
Definition InstrTypes.h:693
@ FCMP_OGT
0 0 1 0 True if ordered and greater than
Definition InstrTypes.h:682
@ FCMP_OGE
0 0 1 1 True if ordered and greater than or equal
Definition InstrTypes.h:683
@ ICMP_UGE
unsigned greater or equal
Definition InstrTypes.h:702
@ ICMP_UGT
unsigned greater than
Definition InstrTypes.h:701
@ ICMP_SGT
signed greater than
Definition InstrTypes.h:705
@ FCMP_ULT
1 1 0 0 True if unordered or less than
Definition InstrTypes.h:692
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
Definition InstrTypes.h:686
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
Definition InstrTypes.h:689
@ ICMP_ULT
unsigned less than
Definition InstrTypes.h:703
@ FCMP_UGT
1 0 1 0 True if unordered or greater than
Definition InstrTypes.h:690
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
Definition InstrTypes.h:685
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
Definition InstrTypes.h:687
@ ICMP_NE
not equal
Definition InstrTypes.h:700
@ ICMP_SGE
signed greater or equal
Definition InstrTypes.h:706
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
Definition InstrTypes.h:694
@ ICMP_ULE
unsigned less or equal
Definition InstrTypes.h:704
@ FCMP_UGE
1 0 1 1 True if unordered, greater than, or equal
Definition InstrTypes.h:691
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
Definition InstrTypes.h:688
This is an important base class in LLVM.
Definition Constant.h:43
static LLVM_ABI Constant * getAllOnesValue(Type *Ty)
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
DWARF expression.
static LLVM_ABI void appendOffset(SmallVectorImpl< uint64_t > &Ops, int64_t Offset)
Append Ops with operations to apply the Offset.
static LLVM_ABI DIExpression * appendExt(const DIExpression *Expr, unsigned FromSize, unsigned ToSize, bool Signed)
Append a zero- or sign-extension to Expr.
A debug info location.
Definition DebugLoc.h:124
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition DenseMap.h:214
DomTreeNodeBase< NodeT > * getRootNode()
getRootNode - This returns the entry node for the CFG of the function.
static LLVM_ABI FixedVectorType * get(Type *ElementType, unsigned NumElts)
Definition Type.cpp:803
FunctionPass class - This class is used to implement most global optimizations.
Definition Pass.h:314
bool hasOptSize() const
Optimize this function for size (-Os) or minimum size (-Oz).
Definition Function.h:706
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
Definition Function.h:703
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition Function.cpp:359
LiveInterval - This class represents the liveness of a register, or stack slot.
SlotIndex InsertMachineInstrInMaps(MachineInstr &MI)
SlotIndex getInstructionIndex(const MachineInstr &Instr) const
Returns the base index of the given instruction.
LiveInterval & getInterval(Register Reg)
SlotIndex ReplaceMachineInstrInMaps(MachineInstr &MI, MachineInstr &NewMI)
A set of physical registers with utility functions to track liveness when walking backward/forward th...
const Segment * getSegmentContaining(SlotIndex Idx) const
Return the segment that contains the specified index, or null if there is none.
LLVM_ABI void replaceKillInstruction(Register Reg, MachineInstr &OldMI, MachineInstr &NewMI)
replaceKillInstruction - Update register kill info by replacing a kill instruction with a new one.
LLVM_ABI VarInfo & getVarInfo(Register Reg)
getVarInfo - Return the VarInfo structure for the specified VIRTUAL register.
static LocationSize precise(uint64_t Value)
bool usesWindowsCFI() const
Definition MCAsmInfo.h:652
static MCCFIInstruction createAdjustCfaOffset(MCSymbol *L, int64_t Adjustment, SMLoc Loc={})
.cfi_adjust_cfa_offset Same as .cfi_def_cfa_offset, but Offset is a relative value that is added/subt...
Definition MCDwarf.h:608
Instances of this class represent a single low-level machine instruction.
Definition MCInst.h:188
void setOpcode(unsigned Op)
Definition MCInst.h:201
Describe properties that are true of each instruction in the target description file.
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
This holds information about one operand of a machine instruction, indicating the register class for ...
Definition MCInstrDesc.h:87
Wrapper class representing physical registers. Should be passed by value.
Definition MCRegister.h:33
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition Metadata.h:1561
Set of metadata that should be preserved when using BuildMI().
SimpleValueType SimpleTy
MachineInstrBundleIterator< const MachineInstr > const_iterator
void push_back(MachineInstr *MI)
MachineInstr * remove(MachineInstr *I)
Remove the unbundled instruction from the instruction list without deleting it.
LLVM_ABI DebugLoc findDebugLoc(instr_iterator MBBI)
Find the next valid DebugLoc starting at MBBI, skipping any debug instructions.
MachineInstrBundleIterator< MachineInstr, true > reverse_iterator
LLVM_ABI bool isLayoutSuccessor(const MachineBasicBlock *MBB) const
Return true if the specified MBB will be emitted immediately after this block, such that if this bloc...
LLVM_ABI void eraseFromParent()
This method unlinks 'this' from the containing function and deletes it.
LLVM_ABI instr_iterator erase(instr_iterator I)
Remove an instruction from the instruction list and delete it.
iterator_range< succ_iterator > successors()
MachineInstrBundleIterator< MachineInstr > iterator
@ LQR_Dead
Register is known to be fully dead.
This class is a data container for one entry in a MachineConstantPool.
union llvm::MachineConstantPoolEntry::@004270020304201266316354007027341142157160323045 Val
The constant itself.
bool isMachineConstantPoolEntry() const
isMachineConstantPoolEntry - Return true if the MachineConstantPoolEntry is indeed a target specific ...
The MachineConstantPool class keeps track of constants referenced by a function which must be spilled...
unsigned getConstantPoolIndex(const Constant *C, Align Alignment)
getConstantPoolIndex - Create a new entry in the constant pool or return an existing one.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
bool isFixedObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to a fixed stack object.
MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of pa...
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.
MCSymbol * getPICBaseSymbol() const
getPICBaseSymbol - Return a function-local symbol to represent the PIC base.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
bool needsFrameMoves() const
True if this function needs frame moves for debug or exceptions.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
MachineConstantPool * getConstantPool()
getConstantPool - Return the constant pool object for the current function.
const MachineBasicBlock & front() const
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
Register getReg(unsigned Idx) const
Get the register for the operand index.
const MachineInstrBuilder & setMemRefs(ArrayRef< MachineMemOperand * > MMOs) const
const MachineInstrBuilder & addExternalSymbol(const char *FnName, unsigned TargetFlags=0) const
const MachineInstrBuilder & setMIFlag(MachineInstr::MIFlag Flag) const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addSym(MCSymbol *Sym, unsigned char TargetFlags=0) const
const MachineInstrBuilder & addGlobalAddress(const GlobalValue *GV, int64_t Offset=0, unsigned TargetFlags=0) const
const MachineInstrBuilder & addDisp(const MachineOperand &Disp, int64_t off, unsigned char TargetFlags=0) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & copyImplicitOps(const MachineInstr &OtherMI) const
Copy all the implicit operands from OtherMI onto this one.
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
MachineInstr * getInstr() const
If conversion operators fail, use this method to get the MachineInstr explicitly.
Representation of each machine instruction.
mop_iterator operands_begin()
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
bool isImplicitDef() const
const MachineBasicBlock * getParent() const
void dropDebugNumber()
Drop any variable location debugging information associated with this instruction.
LLVM_ABI void setPreInstrSymbol(MachineFunction &MF, MCSymbol *Symbol)
Set a symbol that will be emitted just prior to the instruction itself.
LLVM_ABI void addImplicitDefUseOperands(MachineFunction &MF)
Add all implicit def and use operands to this instruction.
bool getFlag(MIFlag Flag) const
Return whether an MI flag is set.
unsigned getNumOperands() const
Retuns the total number of operands.
LLVM_ABI void addOperand(MachineFunction &MF, const MachineOperand &Op)
Add the specified operand to the instruction.
LLVM_ABI unsigned getNumExplicitOperands() const
Returns the number of non-implicit operands.
bool modifiesRegister(Register Reg, const TargetRegisterInfo *TRI) const
Return true if the MachineInstr modifies (fully define or partially define) the specified register.
const MCInstrDesc & getDesc() const
Returns the target instruction descriptor of this MachineInstr.
void untieRegOperand(unsigned OpIdx)
Break any tie involving OpIdx.
LLVM_ABI void setDesc(const MCInstrDesc &TID)
Replace the instruction descriptor (thus opcode) of the current instruction with a new one.
LLVM_ABI unsigned getNumExplicitDefs() const
Returns the number of non-implicit definitions.
LLVM_ABI void eraseFromBundle()
Unlink 'this' from its basic block and delete it.
bool hasOneMemOperand() const
Return true if this instruction has exactly one MachineMemOperand.
LLVM_ABI void substituteRegister(Register FromReg, Register ToReg, unsigned SubIdx, const TargetRegisterInfo &RegInfo)
Replace all occurrences of FromReg with ToReg:SubIdx, properly composing subreg indices where necessa...
mmo_iterator memoperands_begin() const
Access to memory operands of the instruction.
LLVM_ABI bool isIdenticalTo(const MachineInstr &Other, MICheckType Check=CheckDefs) const
Return true if this instruction is identical to Other.
LLVM_ABI const MachineFunction * getMF() const
Return the function that contains the basic block that this instruction belongs to.
void setFlag(MIFlag Flag)
Set a MI flag.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
LLVM_ABI void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
LLVM_ABI void removeOperand(unsigned OpNo)
Erase an operand from an instruction, leaving it with one fewer operand than it started with.
LLVM_ABI void dump() const
const MachineOperand & getOperand(unsigned i) const
unsigned getNumDefs() const
Returns the total number of definitions.
void setDebugLoc(DebugLoc DL)
Replace current source information with new such.
MachineOperand * findRegisterDefOperand(Register Reg, const TargetRegisterInfo *TRI, bool isDead=false, bool Overlap=false)
Wrapper for findRegisterDefOperandIdx, it returns a pointer to the MachineOperand rather than an inde...
A description of a memory reference used in the backend.
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MOInvariant
The memory access always returns the same value (or traps).
@ MOStore
The memory access writes data.
This class contains meta information specific to a module.
MachineOperand class - Representation of each machine instruction operand.
void setSubReg(unsigned subReg)
unsigned getSubReg() const
void setImplicit(bool Val=true)
void setImm(int64_t immVal)
int64_t getImm() const
bool readsReg() const
readsReg - Returns true if this operand reads the previous value of its register.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
MachineBasicBlock * getMBB() const
bool isCPI() const
isCPI - Tests if this is a MO_ConstantPoolIndex operand.
void setIsDead(bool Val=true)
LLVM_ABI void setReg(Register Reg)
Change the register this operand corresponds to.
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
void setIsKill(bool Val=true)
bool isJTI() const
isJTI - Tests if this is a MO_JumpTableIndex operand.
LLVM_ABI void ChangeToRegister(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isDebug=false)
ChangeToRegister - Replace this operand with a new register operand of the specified value.
static MachineOperand CreateImm(int64_t Val)
void setIsUndef(bool Val=true)
Register getReg() const
getReg - Returns the register number.
bool isFI() const
isFI - Tests if this is a MO_FrameIndex operand.
LLVM_ABI bool isIdenticalTo(const MachineOperand &Other) const
Returns true if this operand is identical to the specified operand except for liveness related flags ...
static MachineOperand CreateCPI(unsigned Idx, int Offset, unsigned TargetFlags=0)
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
int64_t getOffset() const
Return the offset from the symbol in this operand.
static MachineOperand CreateFI(int Idx)
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
const TargetRegisterClass * getRegClass(Register Reg) const
Return the register class of the specified virtual register.
LLVM_ABI Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
LLVM_ABI const TargetRegisterClass * constrainRegClass(Register Reg, const TargetRegisterClass *RC, unsigned MinNumRegs=0)
constrainRegClass - Constrain the register class of the specified virtual register to be a common sub...
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
Wrapper class representing virtual and physical registers.
Definition Register.h:19
constexpr bool isValid() const
Definition Register.h:107
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
Definition Register.h:74
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Definition Register.h:78
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
bool isMachineOpcode() const
Test if this node has a post-isel opcode, directly corresponding to a MachineInstr opcode.
unsigned getMachineOpcode() const
This may only be called if isMachineOpcode returns true.
const SDValue & getOperand(unsigned Num) const
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
LLVM_ABI MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s),...
LLVM_ABI void setNodeMemRefs(MachineSDNode *N, ArrayRef< MachineMemOperand * > NewMemRefs)
Mutate the specified machine node's memory references to the provided list.
MachineFunction & getMachineFunction() const
SlotIndex - An opaque wrapper around machine indexes.
Definition SlotIndexes.h:66
SlotIndex getBaseIndex() const
Returns the base index for associated with this index.
SlotIndex getRegSlot(bool EC=false) const
Returns the register use/def slot in the current instruction for a normal or early-clobber def.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
Information about stack frame layout on the target.
bool hasFP(const MachineFunction &MF) const
hasFP - Return true if the specified function should have a dedicated frame pointer register.
Align getStackAlign() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligne...
TargetInstrInfo - Interface to description of machine instruction set.
virtual bool findCommutedOpIndices(const MachineInstr &MI, unsigned &SrcOpIdx1, unsigned &SrcOpIdx2) const
Returns true iff the routine could find two commutable operands in the given machine instruction.
virtual const TargetRegisterClass * getRegClass(const MCInstrDesc &MCID, unsigned OpNum, const TargetRegisterInfo *TRI) const
Given a machine instruction descriptor, returns the register class constraint for OpNum,...
virtual bool hasReassociableOperands(const MachineInstr &Inst, const MachineBasicBlock *MBB) const
Return true when \P Inst has reassociable operands in the same \P MBB.
virtual void genAlternativeCodeSequence(MachineInstr &Root, unsigned Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs, DenseMap< Register, unsigned > &InstIdxForVirtReg) const
When getMachineCombinerPatterns() finds patterns, this function generates the instructions that could...
virtual std::optional< ParamLoadedValue > describeLoadedValue(const MachineInstr &MI, Register Reg) const
Produce the expression describing the MI loading a value into the physical register Reg.
virtual bool getMachineCombinerPatterns(MachineInstr &Root, SmallVectorImpl< unsigned > &Patterns, bool DoRegPressureReduce) const
Return true when there is potentially a faster code sequence for an instruction chain ending in Root.
virtual bool isReMaterializableImpl(const MachineInstr &MI) const
For instructions with opcodes for which the M_REMATERIALIZABLE flag is set, this hook lets the target...
virtual bool isSchedulingBoundary(const MachineInstr &MI, const MachineBasicBlock *MBB, const MachineFunction &MF) const
Test if the given instruction should be considered a scheduling boundary.
virtual MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const
This method commutes the operands of the given machine instruction MI.
bool isPositionIndependent() const
CodeModel::Model getCodeModel() const
Returns the code model.
const MCAsmInfo * getMCAsmInfo() const
Return target specific asm information.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
Provide an instruction scheduling machine model to CodeGen passes.
virtual const TargetFrameLowering * getFrameLowering() const
virtual const TargetRegisterInfo * getRegisterInfo() const =0
Return the target's register information.
Target - Wrapper for Target specific information.
static constexpr TypeSize getFixed(ScalarTy ExactSize)
Definition TypeSize.h:343
static constexpr TypeSize getZero()
Definition TypeSize.h:349
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
Definition Type.cpp:297
static LLVM_ABI Type * getFP128Ty(LLVMContext &C)
Definition Type.cpp:290
static LLVM_ABI Type * getDoubleTy(LLVMContext &C)
Definition Type.cpp:286
static LLVM_ABI Type * getFloatTy(LLVMContext &C)
Definition Type.cpp:285
static LLVM_ABI Type * getHalfTy(LLVMContext &C)
Definition Type.cpp:283
SlotIndex def
The index of the defining instruction.
LLVM Value Representation.
Definition Value.h:75
void BuildCFI(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, const MCCFIInstruction &CFIInst, MachineInstr::MIFlag Flag=MachineInstr::NoFlags) const
Wraps up getting a CFI index and building a MachineInstr for it.
void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI, Register VReg, MachineInstr::MIFlag Flags=MachineInstr::NoFlags) const override
void getFrameIndexOperands(SmallVectorImpl< MachineOperand > &Ops, int FI) const override
bool optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg, Register SrcReg2, int64_t CmpMask, int64_t CmpValue, const MachineRegisterInfo *MRI) const override
Check if there exists an earlier instruction that operates on the same source operands and sets eflag...
bool getMachineCombinerPatterns(MachineInstr &Root, SmallVectorImpl< unsigned > &Patterns, bool DoRegPressureReduce) const override
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, const DebugLoc &DL, Register DestReg, Register SrcReg, bool KillSrc, bool RenamableDest=false, bool RenamableSrc=false) const override
bool isSchedulingBoundary(const MachineInstr &MI, const MachineBasicBlock *MBB, const MachineFunction &MF) const override
Overrides the isSchedulingBoundary from Codegen/TargetInstrInfo.cpp to make it capable of identifying...
MachineBasicBlock::iterator insertOutlinedCall(Module &M, MachineBasicBlock &MBB, MachineBasicBlock::iterator &It, MachineFunction &MF, outliner::Candidate &C) const override
void replaceBranchWithTailCall(MachineBasicBlock &MBB, SmallVectorImpl< MachineOperand > &Cond, const MachineInstr &TailCall) const override
void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg, unsigned SubIdx, const MachineInstr &Orig, const TargetRegisterInfo &TRI) const override
bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, SmallVectorImpl< MachineOperand > &Cond, bool AllowModify) const override
bool canInsertSelect(const MachineBasicBlock &, ArrayRef< MachineOperand > Cond, Register, Register, Register, int &, int &, int &) const override
void insertSelect(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, const DebugLoc &DL, Register DstReg, ArrayRef< MachineOperand > Cond, Register TrueReg, Register FalseReg) const override
unsigned getOpcodeAfterMemoryUnfold(unsigned Opc, bool UnfoldLoad, bool UnfoldStore, unsigned *LoadRegIndex=nullptr) const override
bool findCommutedOpIndices(const MachineInstr &MI, unsigned &SrcOpIdx1, unsigned &SrcOpIdx2) const override
Returns true iff the routine could find two commutable operands in the given machine instruction.
bool areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2, int64_t &Offset1, int64_t &Offset2) const override
X86InstrInfo(const X86Subtarget &STI)
static bool isDataInvariantLoad(MachineInstr &MI)
Returns true if the instruction has no behavior (specified or otherwise) that is based on the value l...
MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned CommuteOpIdx1, unsigned CommuteOpIdx2) const override
bool isFunctionSafeToOutlineFrom(MachineFunction &MF, bool OutlineFromLinkOnceODRs) const override
const X86RegisterInfo & getRegisterInfo() const
getRegisterInfo - TargetInstrInfo is a superset of MRegister info.
bool hasCommutePreference(MachineInstr &MI, bool &Commute) const override
Returns true if we have preference on the operands order in MI, the commute decision is returned in C...
bool hasLiveCondCodeDef(MachineInstr &MI) const
True if MI has a condition code def, e.g.
std::optional< ParamLoadedValue > describeLoadedValue(const MachineInstr &MI, Register Reg) const override
bool canMakeTailCallConditional(SmallVectorImpl< MachineOperand > &Cond, const MachineInstr &TailCall) const override
bool getMemOperandsWithOffsetWidth(const MachineInstr &LdSt, SmallVectorImpl< const MachineOperand * > &BaseOps, int64_t &Offset, bool &OffsetIsScalable, LocationSize &Width, const TargetRegisterInfo *TRI) const override
bool unfoldMemoryOperand(MachineFunction &MF, MachineInstr &MI, Register Reg, bool UnfoldLoad, bool UnfoldStore, SmallVectorImpl< MachineInstr * > &NewMIs) const override
std::optional< DestSourcePair > isCopyInstrImpl(const MachineInstr &MI) const override
MachineInstr * convertToThreeAddress(MachineInstr &MI, LiveVariables *LV, LiveIntervals *LIS) const override
convertToThreeAddress - This method must be implemented by targets that set the M_CONVERTIBLE_TO_3_AD...
std::pair< unsigned, unsigned > decomposeMachineOperandsTargetFlags(unsigned TF) const override
bool expandPostRAPseudo(MachineInstr &MI) const override
bool isAssociativeAndCommutative(const MachineInstr &Inst, bool Invert) const override
MCInst getNop() const override
Return the noop instruction to use for a noop.
outliner::InstrType getOutliningTypeImpl(const MachineModuleInfo &MMI, MachineBasicBlock::iterator &MIT, unsigned Flags) const override
bool shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2, int64_t Offset1, int64_t Offset2, unsigned NumLoads) const override
This is a used by the pre-regalloc scheduler to determine (in conjunction with areLoadsFromSameBasePt...
MachineInstr * foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI, ArrayRef< unsigned > Ops, MachineBasicBlock::iterator InsertPt, int FrameIndex, LiveIntervals *LIS=nullptr, VirtRegMap *VRM=nullptr) const override
Fold a load or store of the specified stack slot into the specified machine instruction for the speci...
bool analyzeCompare(const MachineInstr &MI, Register &SrcReg, Register &SrcReg2, int64_t &CmpMask, int64_t &CmpValue) const override
bool getConstValDefinedInReg(const MachineInstr &MI, const Register Reg, int64_t &ImmVal) const override
std::optional< ExtAddrMode > getAddrModeFromMemoryOp(const MachineInstr &MemI, const TargetRegisterInfo *TRI) const override
Register isStoreToStackSlotPostFE(const MachineInstr &MI, int &FrameIndex) const override
isStoreToStackSlotPostFE - Check for post-frame ptr elimination stack locations as well.
bool isUnconditionalTailCall(const MachineInstr &MI) const override
bool reverseBranchCondition(SmallVectorImpl< MachineOperand > &Cond) const override
std::optional< std::unique_ptr< outliner::OutlinedFunction > > getOutliningCandidateInfo(const MachineModuleInfo &MMI, std::vector< outliner::Candidate > &RepeatedSequenceLocs, unsigned MinRepeats) const override
bool classifyLEAReg(MachineInstr &MI, const MachineOperand &Src, unsigned LEAOpcode, bool AllowSP, Register &NewSrc, unsigned &NewSrcSubReg, bool &isKill, MachineOperand &ImplicitOp, LiveVariables *LV, LiveIntervals *LIS) const
Given an operand within a MachineInstr, insert preceding code to put it into the right format for a p...
Register isLoadFromStackSlotPostFE(const MachineInstr &MI, int &FrameIndex) const override
isLoadFromStackSlotPostFE - Check for post-frame ptr elimination stack locations as well.
void setExecutionDomain(MachineInstr &MI, unsigned Domain) const override
void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register SrcReg, bool isKill, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI, Register VReg, MachineInstr::MIFlag Flags=MachineInstr::NoFlags) const override
const TargetRegisterClass * getRegClass(const MCInstrDesc &MCID, unsigned OpNum, const TargetRegisterInfo *TRI) const override
Given a machine instruction descriptor, returns the register class constraint for OpNum,...
unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, ArrayRef< MachineOperand > Cond, const DebugLoc &DL, int *BytesAdded=nullptr) const override
ArrayRef< std::pair< unsigned, const char * > > getSerializableDirectMachineOperandTargetFlags() const override
Register isStoreToStackSlot(const MachineInstr &MI, int &FrameIndex) const override
bool setExecutionDomainCustom(MachineInstr &MI, unsigned Domain) const
int getSPAdjust(const MachineInstr &MI) const override
getSPAdjust - This returns the stack pointer adjustment made by this instruction.
bool verifyInstruction(const MachineInstr &MI, StringRef &ErrInfo) const override
bool isReMaterializableImpl(const MachineInstr &MI) const override
Register getGlobalBaseReg(MachineFunction *MF) const
getGlobalBaseReg - Return a virtual register initialized with the the global base register value.
int getJumpTableIndex(const MachineInstr &MI) const override
void insertNoop(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI) const override
void setSpecialOperandAttr(MachineInstr &OldMI1, MachineInstr &OldMI2, MachineInstr &NewMI1, MachineInstr &NewMI2) const override
This is an architecture-specific helper function of reassociateOps.
std::pair< uint16_t, uint16_t > getExecutionDomain(const MachineInstr &MI) const override
bool isCoalescableExtInstr(const MachineInstr &MI, Register &SrcReg, Register &DstReg, unsigned &SubIdx) const override
isCoalescableExtInstr - Return true if the instruction is a "coalescable" extension instruction.
void loadStoreTileReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, unsigned Opc, Register Reg, int FrameIdx, bool isKill=false) const
void genAlternativeCodeSequence(MachineInstr &Root, unsigned Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs, DenseMap< Register, unsigned > &InstrIdxForVirtReg) const override
When getMachineCombinerPatterns() finds potential patterns, this function generates the instructions ...
bool hasReassociableOperands(const MachineInstr &Inst, const MachineBasicBlock *MBB) const override
bool analyzeBranchPredicate(MachineBasicBlock &MBB, TargetInstrInfo::MachineBranchPredicate &MBP, bool AllowModify=false) const override
static bool isDataInvariant(MachineInstr &MI)
Returns true if the instruction has no behavior (specified or otherwise) that is based on the value o...
unsigned getUndefRegClearance(const MachineInstr &MI, unsigned OpNum, const TargetRegisterInfo *TRI) const override
Inform the BreakFalseDeps pass how many idle instructions we would like before certain undef register...
void breakPartialRegDependency(MachineInstr &MI, unsigned OpNum, const TargetRegisterInfo *TRI) const override
void buildClearRegister(Register Reg, MachineBasicBlock &MBB, MachineBasicBlock::iterator Iter, DebugLoc &DL, bool AllowSideEffects=true) const override
Register isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex) const override
int64_t getFrameAdjustment(const MachineInstr &I) const
Returns the stack pointer adjustment that happens inside the frame setup..destroy sequence (e....
bool hasHighOperandLatency(const TargetSchedModel &SchedModel, const MachineRegisterInfo *MRI, const MachineInstr &DefMI, unsigned DefIdx, const MachineInstr &UseMI, unsigned UseIdx) const override
bool isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const override
uint16_t getExecutionDomainCustom(const MachineInstr &MI) const
bool isHighLatencyDef(int opc) const override
void buildOutlinedFrame(MachineBasicBlock &MBB, MachineFunction &MF, const outliner::OutlinedFunction &OF) const override
bool foldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, Register Reg, MachineRegisterInfo *MRI) const override
foldImmediate - 'Reg' is known to be defined by a move immediate instruction, try to fold the immedia...
unsigned removeBranch(MachineBasicBlock &MBB, int *BytesRemoved=nullptr) const override
unsigned getFMA3OpcodeToCommuteOperands(const MachineInstr &MI, unsigned SrcOpIdx1, unsigned SrcOpIdx2, const X86InstrFMA3Group &FMA3Group) const
Returns an adjusted FMA opcode that must be used in FMA instruction that performs the same computatio...
bool preservesZeroValueInReg(const MachineInstr *MI, const Register NullValueReg, const TargetRegisterInfo *TRI) const override
unsigned getPartialRegUpdateClearance(const MachineInstr &MI, unsigned OpNum, const TargetRegisterInfo *TRI) const override
Inform the BreakFalseDeps pass how many idle instructions we would like before a partial register upd...
X86MachineFunctionInfo - This class is derived from MachineFunction and contains private X86 target-s...
const TargetRegisterClass * constrainRegClassToNonRex2(const TargetRegisterClass *RC) const
bool isPICStyleGOT() const
const X86InstrInfo * getInstrInfo() const override
bool hasAVX512() const
const X86RegisterInfo * getRegisterInfo() const override
bool hasAVX() const
const X86FrameLowering * getFrameLowering() const override
Changed
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
@ Implicit
Not emitted register (e.g. carry, or temporary result).
@ Define
Register definition.
@ Kill
The last use of a register.
@ Undef
Value of the register doesn't matter.
@ X86
Windows x64, Windows Itanium (IA-64)
Definition MCAsmInfo.h:50
X86II - This namespace holds all of the target specific flags that instruction info tracks.
bool isKMergeMasked(uint64_t TSFlags)
bool hasNewDataDest(uint64_t TSFlags)
@ MO_GOT_ABSOLUTE_ADDRESS
MO_GOT_ABSOLUTE_ADDRESS - On a symbol operand, this represents a relocation of: SYMBOL_LABEL + [.
@ MO_INDNTPOFF
MO_INDNTPOFF - On a symbol operand this indicates that the immediate is the absolute address of the G...
@ MO_GOTNTPOFF
MO_GOTNTPOFF - On a symbol operand this indicates that the immediate is the offset of the GOT entry w...
@ MO_GOTTPOFF
MO_GOTTPOFF - On a symbol operand this indicates that the immediate is the offset of the GOT entry wi...
@ MO_PIC_BASE_OFFSET
MO_PIC_BASE_OFFSET - On a symbol operand this indicates that the immediate should get the value of th...
@ MO_GOTPCREL
MO_GOTPCREL - On a symbol operand this indicates that the immediate is offset to the GOT entry for th...
@ EVEX
EVEX - Specifies that this instruction use EVEX form which provides syntax support up to 32 512-bit r...
@ SSEDomainShift
Execution domain for SSE instructions.
bool canUseApxExtendedReg(const MCInstrDesc &Desc)
bool isPseudo(uint64_t TSFlags)
bool isKMasked(uint64_t TSFlags)
int getMemoryOperandNo(uint64_t TSFlags)
unsigned getOperandBias(const MCInstrDesc &Desc)
Compute whether all of the def operands are repeated in the uses and therefore should be skipped.
Define some predicates that are used for node matching.
CondCode getCondFromBranch(const MachineInstr &MI)
CondCode getCondFromCFCMov(const MachineInstr &MI)
@ LAST_VALID_COND
Definition X86BaseInfo.h:94
CondCode getCondFromMI(const MachineInstr &MI)
Return the condition code of the instruction.
int getFirstAddrOperandIdx(const MachineInstr &MI)
Return the index of the instruction's first address operand, if it has a memory reference,...
@ AddrNumOperands
Definition X86BaseInfo.h:36
unsigned getSwappedVCMPImm(unsigned Imm)
Get the VCMP immediate if the opcodes are swapped.
CondCode GetOppositeBranchCondition(CondCode CC)
GetOppositeBranchCondition - Return the inverse of the specified cond, e.g.
unsigned getSwappedVPCOMImm(unsigned Imm)
Get the VPCOM immediate if the opcodes are swapped.
bool isX87Instruction(MachineInstr &MI)
Check if the instruction is X87 instruction.
unsigned getNonNDVariant(unsigned Opc)
unsigned getVPCMPImmForCond(ISD::CondCode CC)
Get the VPCMP immediate for the given condition.
std::pair< CondCode, bool > getX86ConditionCode(CmpInst::Predicate Predicate)
Return a pair of condition code for the given predicate and whether the instruction operands should b...
CondCode getCondFromSETCC(const MachineInstr &MI)
unsigned getSwappedVPCMPImm(unsigned Imm)
Get the VPCMP immediate if the opcodes are swapped.
CondCode getCondFromCCMP(const MachineInstr &MI)
int getCCMPCondFlagsFromCondCode(CondCode CC)
int getCondSrcNoFromDesc(const MCInstrDesc &MCID)
Return the source operand # for condition code by MCID.
const Constant * getConstantFromPool(const MachineInstr &MI, unsigned OpNo)
Find any constant pool entry associated with a specific instruction operand.
unsigned getCMovOpcode(unsigned RegBytes, bool HasMemoryOperand=false, bool HasNDD=false)
Return a cmov opcode for the given register size in bytes, and operand type.
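A hedged sketch of how getCMovOpcode might be called; the register width and flags are illustrative values supplied by the caller.
// Illustration only: a 4-byte register-register CMOV, then an 8-byte form
// that selects from a memory operand (defaults: no memory operand, no NDD).
unsigned Opc = X86::getCMovOpcode(/*RegBytes=*/4);
unsigned OpcMem = X86::getCMovOpcode(/*RegBytes=*/8, /*HasMemoryOperand=*/true);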
unsigned getNFVariant(unsigned Opc)
unsigned getVectorRegisterWidth(const MCOperandInfo &Info)
Get the width of the vector register operand.
CondCode getCondFromCMov(const MachineInstr &MI)
initializer< Ty > init(const Ty &Val)
InstrType
Represents how an instruction should be mapped by the outliner.
NodeAddr< NodeBase * > Node
Definition RDFGraph.h:381
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition STLExtras.h:318
@ Offset
Definition DWP.cpp:477
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1705
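A small illustration of the range wrappers (together with none_of, listed further below); MI is assumed to be a MachineInstr already in scope.
// Hedged example: scan a MachineInstr's operand range without explicit
// begin()/end() iterator pairs.
bool AllRegs = llvm::all_of(MI.operands(),
                            [](const MachineOperand &MO) { return MO.isReg(); });
bool NoImms = llvm::none_of(MI.operands(),
                            [](const MachineOperand &MO) { return MO.isImm(); });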
static bool isAddMemInstrWithRelocation(const MachineInstr &MI)
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
Definition MathExtras.h:174
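A tiny sketch of the immediate-width check; Imm is a hypothetical int64_t value chosen for illustration.
// Illustration only: decide whether an immediate fits the sign-extended
// 8-bit field before falling back to a wider encoding.
int64_t Imm = -128;
bool FitsImm8 = llvm::isInt<8>(Imm);   // true: range is -128..127
bool FitsImm32 = llvm::isInt<32>(Imm); // true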
LLVM_ABI bool isNullConstant(SDValue V)
Returns true if V is a constant integer zero.
static bool isMem(const MachineInstr &MI, unsigned Op)
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:649
bool isAligned(Align Lhs, uint64_t SizeInBytes)
Checks that SizeInBytes is a multiple of the alignment.
Definition Alignment.h:145
MCRegister getX86SubSuperRegister(MCRegister Reg, unsigned Size, bool High=false)
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
FunctionPass * createX86GlobalBaseRegPass()
This pass initializes a global base register for PIC on x86-32.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition STLExtras.h:2116
static const MachineInstrBuilder & addRegReg(const MachineInstrBuilder &MIB, Register Reg1, bool isKill1, unsigned SubReg1, Register Reg2, bool isKill2, unsigned SubReg2)
addRegReg - This function is used to add a memory reference of the form: [Reg + Reg].
unsigned getDeadRegState(bool B)
static const MachineInstrBuilder & addFrameReference(const MachineInstrBuilder &MIB, int FI, int Offset=0, bool mem=true)
addFrameReference - This function is used to add a reference to the base of an abstract object on the...
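A hedged sketch of the usual pattern with BuildMI (listed above); MBB, InsertPt, DL, TII, DestReg and FrameIdx are assumed to exist in the caller.
// Illustration only: build a 32-bit reload and let addFrameReference append
// the five X86 address operands that point at stack slot FrameIdx.
addFrameReference(
    BuildMI(MBB, InsertPt, DL, TII->get(X86::MOV32rm), DestReg), FrameIdx);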
FunctionPass * createCleanupLocalDynamicTLSPass()
This pass combines multiple accesses to local-dynamic TLS variables so that the TLS base address for ...
Op::Description Desc
const X86FoldTableEntry * lookupBroadcastFoldTable(unsigned RegOp, unsigned OpNum)
int countr_zero(T Val)
Count the number of 0's from the least significant bit to the most significant, stopping at the first 1.
Definition bit.h:186
const X86InstrFMA3Group * getFMA3Group(unsigned Opcode, uint64_t TSFlags)
Returns a reference to the group of FMA3 opcodes that includes the given Opcode.
auto reverse(ContainerTy &&C)
Definition STLExtras.h:408
MachineInstr * getImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1719
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:167
const X86FoldTableEntry * lookupTwoAddrFoldTable(unsigned RegOp)
bool is_sorted(R &&Range, Compare C)
Wrapper function around std::is_sorted to check if elements in a range R are sorted with respect to a...
Definition STLExtras.h:1900
constexpr bool isUInt(uint64_t x)
Checks if an unsigned integer fits into the given bit width.
Definition MathExtras.h:198
DomTreeNodeBase< MachineBasicBlock > MachineDomTreeNode
static bool isMemInstrWithGOTPCREL(const MachineInstr &MI)
static const MachineInstrBuilder & addOffset(const MachineInstrBuilder &MIB, int Offset)
unsigned getUndefRegState(bool B)
unsigned getRegState(const MachineOperand &RegOp)
Get all register state flags from machine operand RegOp.
unsigned getDefRegState(bool B)
auto lower_bound(R &&Range, T &&Value)
Provide wrappers to std::lower_bound which take ranges instead of having to pass begin/end explicitly...
Definition STLExtras.h:1974
@ Sub
Subtraction of integers.
@ Add
Sum of integers.
unsigned getKillRegState(bool B)
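A short, hedged fragment showing how these RegState helpers are typically combined with MachineInstrBuilder::addReg; MIB, SrcReg, IsKill and MI are assumed to be in scope.
// Illustration only: turn a boolean into the RegState::Kill flag, or copy
// every flag from an existing operand with getRegState.
MIB.addReg(SrcReg, getKillRegState(IsKill));
MIB.addReg(MI.getOperand(1).getReg(), getRegState(MI.getOperand(1)));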
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition Alignment.h:155
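A minimal sketch of the alignment helpers (the Align struct itself is described further below).
// Illustration only: round a byte size up to a 16-byte boundary and verify
// the result is a multiple of that alignment.
llvm::Align A(16);
uint64_t Padded = llvm::alignTo(40, A); // 48
bool Ok = llvm::isAligned(A, Padded);   // true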
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
const X86FoldTableEntry * lookupUnfoldTable(unsigned MemOp)
constexpr unsigned BitWidth
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:565
bool matchBroadcastSize(const X86FoldTableEntry &Entry, unsigned BroadcastBits)
std::pair< MachineOperand, DIExpression * > ParamLoadedValue
auto seq(T Begin, T End)
Iterate over an integral type from Begin up to - but not including - End.
Definition Sequence.h:305
const X86FoldTableEntry * lookupFoldTable(unsigned RegOp, unsigned OpNum)
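A hedged sketch of a fold-table query; the opcode and operand number are illustrative, not taken from this file.
// Illustration only: ask whether operand 1 of a register-register ADD has a
// memory-folded form; a null entry means no folding is possible there.
const X86FoldTableEntry *Entry = llvm::lookupFoldTable(X86::ADD32rr, /*OpNum=*/1);
bool CanFold = (Entry != nullptr);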
static const MachineInstrBuilder & addRegOffset(const MachineInstrBuilder &MIB, Register Reg, bool isKill, int Offset)
addRegOffset - This function is used to add a memory reference of the form [Reg + Offset],...
int popcount(T Value) noexcept
Count the number of set bits in a value.
Definition bit.h:154
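A tiny illustration of the two bit utilities (countr_zero is listed earlier in this index).
// Illustration only: the lowest set bit of 0b101000 is at index 3 and the
// value has two bits set in total.
uint32_t Mask = 0b101000;
int LowBit = llvm::countr_zero(Mask); // 3
int Bits = llvm::popcount(Mask);      // 2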
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition BitVector.h:853
#define N
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
Extended Value Type.
Definition ValueTypes.h:35
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
Definition ValueTypes.h:316
Used to describe addressing mode similar to ExtAddrMode in CodeGenPrepare.
This represents a simple continuous liveness interval for a value.
std::vector< MachineInstr * > Kills
Kills - List of MachineInstruction's which are the last use of this virtual register (kill it) in the...
static LLVM_ABI MachinePointerInfo getGOT(MachineFunction &MF)
Return a MachinePointerInfo record that refers to a GOT entry.
X86AddressMode - This struct holds a generalized full x86 address mode.
enum llvm::X86AddressMode::(unnamed enum) BaseType
This class is used to group {132, 213, 231} forms of FMA opcodes together.
unsigned get213Opcode() const
Returns the 213 form of FMA opcode.
unsigned get231Opcode() const
Returns the 231 form of FMA opcode.
bool isIntrinsic() const
Returns true iff the group of FMA opcodes holds intrinsic opcodes.
unsigned get132Opcode() const
Returns the 132 form of FMA opcode.
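A hedged sketch of how the FMA3 group is typically consulted; MI is assumed to be a MachineInstr whose opcode belongs to an FMA3 family.
// Illustration only: look up the {132, 213, 231} group for this opcode and
// pick the 231 form, e.g. when commuting operands.
if (const X86InstrFMA3Group *Group =
        llvm::getFMA3Group(MI.getOpcode(), MI.getDesc().TSFlags)) {
  unsigned Opc231 = Group->get231Opcode();
  (void)Opc231; // new opcode for the commuted operand order
}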
An individual sequence of instructions to be replaced with a call to an outlined function.
The information necessary to create an outlined function for some class of candidate.