void AMDGPUInstPrinter::printU16ImmOperand(const MCInst *MI, unsigned OpNo,
                                           const MCSubtargetInfo &STI,
                                           raw_ostream &O) {
  const MCOperand &Op = MI->getOperand(OpNo);
  // A 32-bit literal may end up on a 16-bit operand with ignored high bits;
  // print it as a 32-bit value in that case.
  int64_t Imm = Op.getImm();
  if (isInt<16>(Imm) || isUInt<16>(Imm))
    O << formatHex(static_cast<uint64_t>(Imm & 0xffff));
  else
    printU32ImmOperand(MI, OpNo, STI, O);
}

void AMDGPUInstPrinter::printU16ImmDecOperand(const MCInst *MI, unsigned OpNo,
                                              raw_ostream &O) {
  O << formatDec(MI->getOperand(OpNo).getImm() & 0xffff);
}
void AMDGPUInstPrinter::printU32ImmOperand(const MCInst *MI, unsigned OpNo,
                                           const MCSubtargetInfo &STI,
                                           raw_ostream &O) {
  O << formatHex(MI->getOperand(OpNo).getImm() & 0xffffffff);
}
void AMDGPUInstPrinter::printFP64ImmOperand(const MCInst *MI, unsigned OpNo,
                                            const MCSubtargetInfo &STI,
                                            raw_ostream &O) {
  const MCInstrDesc &Desc = MII.get(MI->getOpcode());
  uint64_t Imm = MI->getOperand(OpNo).getImm();
  printLiteral64(Desc, Imm, STI, O, /*IsFP=*/true);
}
void AMDGPUInstPrinter::printNamedBit(const MCInst *MI, unsigned OpNo,
                                      raw_ostream &O, StringRef BitName) {
  if (MI->getOperand(OpNo).getImm()) {
    O << ' ' << BitName;
  }
}
void AMDGPUInstPrinter::printOffset(const MCInst *MI, unsigned OpNo,
                                    const MCSubtargetInfo &STI,
                                    raw_ostream &O) {
  uint32_t Imm = MI->getOperand(OpNo).getImm();
  if (Imm != 0) {
    O << " offset:";

    // GFX12 buffer offsets are 24-bit signed values; everything else prints
    // the unsigned 16-bit form.
    const MCInstrDesc &Desc = MII.get(MI->getOpcode());
    if (AMDGPU::isGFX12(STI) &&
        (Desc.TSFlags & (SIInstrFlags::MUBUF | SIInstrFlags::MTBUF)))
      O << formatDec(SignExtend32<24>(Imm));
    else
      printU16ImmDecOperand(MI, OpNo, O);
  }
}
void AMDGPUInstPrinter::printFlatOffset(const MCInst *MI, unsigned OpNo,
                                        const MCSubtargetInfo &STI,
                                        raw_ostream &O) {
  uint32_t Imm = MI->getOperand(OpNo).getImm();
  if (Imm != 0) {
    O << " offset:";

    const MCInstrDesc &Desc = MII.get(MI->getOpcode());
    bool AllowNegative = (Desc.TSFlags & (SIInstrFlags::FlatGlobal |
                                          SIInstrFlags::FlatScratch)) ||
                         AMDGPU::isGFX12(STI);

    if (AllowNegative) // Signed offset
      O << formatDec(SignExtend32(Imm, AMDGPU::getNumFlatOffsetBits(STI)));
    else // Unsigned offset
      printU16ImmDecOperand(MI, OpNo, O);
  }
}
void AMDGPUInstPrinter::printSMRDOffset8(const MCInst *MI, unsigned OpNo,
                                         const MCSubtargetInfo &STI,
                                         raw_ostream &O) {
  printU32ImmOperand(MI, OpNo, STI, O);
}

void AMDGPUInstPrinter::printSMEMOffset(const MCInst *MI, unsigned OpNo,
                                        const MCSubtargetInfo &STI,
                                        raw_ostream &O) {
  O << formatHex(MI->getOperand(OpNo).getImm());
}

void AMDGPUInstPrinter::printSMRDLiteralOffset(const MCInst *MI, unsigned OpNo,
                                               const MCSubtargetInfo &STI,
                                               raw_ostream &O) {
  printU32ImmOperand(MI, OpNo, STI, O);
}
void AMDGPUInstPrinter::printCPol(const MCInst *MI, unsigned OpNo,
                                  const MCSubtargetInfo &STI, raw_ostream &O) {
  auto Imm = MI->getOperand(OpNo).getImm();

  if (AMDGPU::isGFX12Plus(STI)) {
    const int64_t TH = Imm & CPol::TH;
    const int64_t Scope = Imm & CPol::SCOPE;

    if (Imm & CPol::SCAL)
      O << " scale_offset";

    printTH(MI, TH, Scope, O);
    printScope(Scope, O);

    return;
  }

  // ... (pre-GFX12 glc/slc/dlc/scc bit printing elided)
  if (Imm & ~CPol::ALL_pregfx12)
    O << " /* unexpected cache policy bit */";
}
void AMDGPUInstPrinter::printTH(const MCInst *MI, int64_t TH, int64_t Scope,
                                raw_ostream &O) {
  // The default temporal hint (0) is not printed.
  if (TH == 0)
    return;

  const unsigned Opcode = MI->getOpcode();
  const MCInstrDesc &TID = MII.get(Opcode);
  bool IsStore = TID.mayStore();

  O << " th:";
  // ... (atomic temporal hints elided)
  O << (IsStore ? "TH_STORE_" : "TH_LOAD_");
  // ... (hint suffix selection elided; the bypass hint prints "BYPASS" at
  //      system scope and otherwise (IsStore ? "WB" : "LU"))
}

void AMDGPUInstPrinter::printScope(int64_t Scope, raw_ostream &O) {
  // ... (prints " scope:" followed by the SCOPE_SE/SCOPE_DEV/SCOPE_SYS name
  //      for non-default scopes)
}
void AMDGPUInstPrinter::printDim(const MCInst *MI, unsigned OpNo,
                                 const MCSubtargetInfo &STI, raw_ostream &O) {
  unsigned Dim = MI->getOperand(OpNo).getImm();
  O << " dim:SQ_RSRC_IMG_";

  const AMDGPU::MIMGDimInfo *DimInfo = AMDGPU::getMIMGDimInfoByEncoding(Dim);
  if (DimInfo)
    O << DimInfo->AsmSuffix;
  else
    O << Dim;
}
void AMDGPUInstPrinter::printR128A16(const MCInst *MI, unsigned OpNo,
                                     const MCSubtargetInfo &STI,
                                     raw_ostream &O) {
  if (STI.hasFeature(AMDGPU::FeatureR128A16))
    printNamedBit(MI, OpNo, O, "a16");
  else
    printNamedBit(MI, OpNo, O, "r128");
}
void AMDGPUInstPrinter::printFORMAT(const MCInst *MI, unsigned OpNo,
                                    const MCSubtargetInfo &STI,
                                    raw_ostream &O) {
}
void AMDGPUInstPrinter::printSymbolicFormat(const MCInst *MI,
                                            const MCSubtargetInfo &STI,
                                            raw_ostream &O) {
  using namespace llvm::AMDGPU::MTBUFFormat;

  int OpNo =
      AMDGPU::getNamedOperandIdx(MI->getOpcode(), AMDGPU::OpName::format);
  assert(OpNo != -1);

  unsigned Val = MI->getOperand(OpNo).getImm();
  if (AMDGPU::isGFX10Plus(STI)) {
    if (Val == UFMT_DEFAULT)
      return;
    if (isValidUnifiedFormat(Val, STI)) {
      O << " format:[" << getUnifiedFormatName(Val, STI) << ']';
    } else {
      O << " format:" << Val;
    }
  } else {
    if (Val == DFMT_NFMT_DEFAULT)
      return;
    if (isValidDfmtNfmt(Val, STI)) {
      unsigned Dfmt;
      unsigned Nfmt;
      decodeDfmtNfmt(Val, Dfmt, Nfmt);
      O << " format:[";
      if (Dfmt != DFMT_DEFAULT) {
        O << getDfmtName(Dfmt);
        if (Nfmt != NFMT_DEFAULT) {
          O << ',';
        }
      }
      if (Nfmt != NFMT_DEFAULT) {
        O << getNfmtName(Nfmt, STI);
      }
      O << ']';
    } else {
      O << " format:" << Val;
    }
  }
}
static MCPhysReg getRegForPrinting(MCPhysReg Reg, const MCRegisterInfo &MRI) {
  const MCRegisterClass *RC = AMDGPU::getVGPRPhysRegClass(Reg, MRI);
  if (!RC)
    return Reg;

  // Map a VGPR whose hardware encoding lies outside the directly nameable
  // range back onto its low counterpart for printing.
  unsigned Enc = MRI.getEncodingValue(Reg);
  unsigned Idx = Enc & AMDGPU::HWEncoding::REG_IDX_MASK;
  unsigned RegNo = Idx % 0x100;
  if (RC->getID() == AMDGPU::VGPR_16RegClassID) {
    // ... (16-bit VGPR halves are handled separately)
  }
  return RC->getRegister(RegNo);
}

static MCPhysReg getRegFromMIA(MCPhysReg Reg, unsigned OpNo,
                               const MCInstrDesc &Desc,
                               const MCRegisterInfo &MRI,
                               const AMDGPUMCInstrAnalysis &MIA) {
  unsigned VgprMSBs = MIA.getVgprMSBs();
  if (!VgprMSBs)
    return Reg;

  unsigned Enc = MRI.getEncodingValue(Reg);
  // ... (only VGPR operands are affected by the tracked MSB state)

  // Find which of the tracked operand slots this operand occupies.
  auto Ops = AMDGPU::getVGPRLoweringOperandTables(Desc);
  unsigned Opc = Desc.getOpcode();
  unsigned I;
  for (I = 0; I < 4; ++I) {
    if (Ops.first[I] != AMDGPU::OpName::NUM_OPERAND_NAMES &&
        (unsigned)AMDGPU::getNamedOperandIdx(Opc, Ops.first[I]) == OpNo)
      break;
    if (Ops.second && Ops.second[I] != AMDGPU::OpName::NUM_OPERAND_NAMES &&
        (unsigned)AMDGPU::getNamedOperandIdx(Opc, Ops.second[I]) == OpNo)
      break;
  }
  if (I == 4)
    return Reg;

  unsigned OpMSBs = (VgprMSBs >> (I * 2)) & 3;
  return AMDGPU::getVGPRWithMSBs(Reg, OpMSBs, MRI);
}

void AMDGPUInstPrinter::printRegOperand(MCRegister Reg, raw_ostream &O,
                                        const MCRegisterInfo &MRI) {
#if !defined(NDEBUG)
  switch (Reg.id()) {
  case AMDGPU::FP_REG:
  case AMDGPU::SP_REG:
  case AMDGPU::PRIVATE_RSRC_REG:
    llvm_unreachable("pseudo-register should not ever be emitted");
  default:
    break;
  }
#endif

  MCPhysReg PrintReg = getRegForPrinting(Reg, MRI);
  O << getRegisterName(PrintReg);
  // Registers without a distinct assembler name keep the real register in a
  // trailing comment.
  if (PrintReg != Reg.id())
    O << " /*" << getRegisterName(Reg) << "*/";
}
void AMDGPUInstPrinter::printVOPDst(const MCInst *MI, unsigned OpNo,
                                    const MCSubtargetInfo &STI,
                                    raw_ostream &O) {
  auto Opcode = MI->getOpcode();
  // ... (printing of the _e32/_e64/_dpp/_sdwa mnemonic suffix for the first
  //      operand elided)

  printRegularOperand(MI, OpNo, STI, O);

  // Print the implied vcc/vcc_lo carry-out operand of the VOP2 carry forms.
  switch (Opcode) {
  default:
    break;

  case AMDGPU::V_ADD_CO_CI_U32_e32_gfx10:
  case AMDGPU::V_SUB_CO_CI_U32_e32_gfx10:
  case AMDGPU::V_SUBREV_CO_CI_U32_e32_gfx10:
  case AMDGPU::V_ADD_CO_CI_U32_sdwa_gfx10:
  case AMDGPU::V_SUB_CO_CI_U32_sdwa_gfx10:
  case AMDGPU::V_SUBREV_CO_CI_U32_sdwa_gfx10:
  case AMDGPU::V_ADD_CO_CI_U32_dpp_gfx10:
  case AMDGPU::V_SUB_CO_CI_U32_dpp_gfx10:
  case AMDGPU::V_SUBREV_CO_CI_U32_dpp_gfx10:
  case AMDGPU::V_ADD_CO_CI_U32_dpp8_gfx10:
  case AMDGPU::V_SUB_CO_CI_U32_dpp8_gfx10:
  case AMDGPU::V_SUBREV_CO_CI_U32_dpp8_gfx10:
  case AMDGPU::V_ADD_CO_CI_U32_e32_gfx11:
  case AMDGPU::V_SUB_CO_CI_U32_e32_gfx11:
  case AMDGPU::V_SUBREV_CO_CI_U32_e32_gfx11:
  case AMDGPU::V_ADD_CO_CI_U32_dpp_gfx11:
  case AMDGPU::V_SUB_CO_CI_U32_dpp_gfx11:
  case AMDGPU::V_SUBREV_CO_CI_U32_dpp_gfx11:
  case AMDGPU::V_ADD_CO_CI_U32_dpp8_gfx11:
  case AMDGPU::V_SUB_CO_CI_U32_dpp8_gfx11:
  case AMDGPU::V_SUBREV_CO_CI_U32_dpp8_gfx11:
  case AMDGPU::V_ADD_CO_CI_U32_e32_gfx12:
  case AMDGPU::V_SUB_CO_CI_U32_e32_gfx12:
  case AMDGPU::V_SUBREV_CO_CI_U32_e32_gfx12:
  case AMDGPU::V_ADD_CO_CI_U32_dpp_gfx12:
  case AMDGPU::V_SUB_CO_CI_U32_dpp_gfx12:
  case AMDGPU::V_SUBREV_CO_CI_U32_dpp_gfx12:
  case AMDGPU::V_ADD_CO_CI_U32_dpp8_gfx12:
  case AMDGPU::V_SUB_CO_CI_U32_dpp8_gfx12:
  case AMDGPU::V_SUBREV_CO_CI_U32_dpp8_gfx12:
    printDefaultVccOperand(false, STI, O);
    break;
  }
}
void AMDGPUInstPrinter::printVINTRPDst(const MCInst *MI, unsigned OpNo,
                                       const MCSubtargetInfo &STI,
                                       raw_ostream &O) {
  if (AMDGPU::isSI(STI) || AMDGPU::isCI(STI))
    O << " ";
  else
    O << "_e32 ";

  printRegularOperand(MI, OpNo, STI, O);
}
void AMDGPUInstPrinter::printImmediateInt16(uint32_t Imm,
                                            const MCSubtargetInfo &STI,
                                            raw_ostream &O) {
  int32_t SImm = static_cast<int32_t>(Imm);
  if (isInlinableIntLiteral(SImm)) {
    O << SImm;
    return;
  }

  if (printImmediateFloat32(Imm, STI, O))
    return;

  O << formatHex(static_cast<uint64_t>(Imm & 0xffff));
}
static bool printImmediateFP16(uint32_t Imm, const MCSubtargetInfo &STI,
                               raw_ostream &O) {
  if (Imm == 0x3C00)
    O << "1.0";
  else if (Imm == 0xBC00)
    O << "-1.0";
  else if (Imm == 0x3800)
    O << "0.5";
  else if (Imm == 0xB800)
    O << "-0.5";
  else if (Imm == 0x4000)
    O << "2.0";
  else if (Imm == 0xC000)
    O << "-2.0";
  else if (Imm == 0x4400)
    O << "4.0";
  else if (Imm == 0xC400)
    O << "-4.0";
  else if (Imm == 0x3118 && STI.hasFeature(AMDGPU::FeatureInv2PiInlineImm))
    O << "0.15915494";
  else
    return false;

  return true;
}

static bool printImmediateBFloat16(uint32_t Imm, const MCSubtargetInfo &STI,
                                   raw_ostream &O) {
  if (Imm == 0x3F80)
    O << "1.0";
  else if (Imm == 0xBF80)
    O << "-1.0";
  else if (Imm == 0x3F00)
    O << "0.5";
  else if (Imm == 0xBF00)
    O << "-0.5";
  else if (Imm == 0x4000)
    O << "2.0";
  else if (Imm == 0xC000)
    O << "-2.0";
  else if (Imm == 0x4080)
    O << "4.0";
  else if (Imm == 0xC080)
    O << "-4.0";
  else if (Imm == 0x3E22 && STI.hasFeature(AMDGPU::FeatureInv2PiInlineImm))
    O << "0.15915494";
  else
    return false;

  return true;
}
void AMDGPUInstPrinter::printImmediateBF16(uint32_t Imm,
                                           const MCSubtargetInfo &STI,
                                           raw_ostream &O) {
  int16_t SImm = static_cast<int16_t>(Imm);
  if (isInlinableIntLiteral(SImm)) {
    O << SImm;
    return;
  }

  if (printImmediateBFloat16(static_cast<uint16_t>(Imm), STI, O))
    return;

  O << formatHex(static_cast<uint64_t>(Imm));
}

void AMDGPUInstPrinter::printImmediateF16(uint32_t Imm,
                                          const MCSubtargetInfo &STI,
                                          raw_ostream &O) {
  int16_t SImm = static_cast<int16_t>(Imm);
  if (isInlinableIntLiteral(SImm)) {
    O << SImm;
    return;
  }

  uint16_t HImm = static_cast<uint16_t>(Imm);
  if (printImmediateFP16(HImm, STI, O))
    return;

  uint64_t Imm16 = static_cast<uint16_t>(Imm);
  O << formatHex(Imm16);
}
void AMDGPUInstPrinter::printImmediateV216(uint32_t Imm, uint8_t OpType,
                                           const MCSubtargetInfo &STI,
                                           raw_ostream &O) {
  int32_t SImm = static_cast<int32_t>(Imm);
  if (isInlinableIntLiteral(SImm)) {
    O << SImm;
    return;
  }

  switch (OpType) {
  case AMDGPU::OPERAND_REG_IMM_V2INT32:
  case AMDGPU::OPERAND_REG_IMM_V2FP32:
    if (printImmediateFloat32(Imm, STI, O))
      return;
    break;
  // ... (packed 16-bit integer, fp16 and bf16 operand types elided)
  }

  O << formatHex(static_cast<uint64_t>(Imm));
}
bool AMDGPUInstPrinter::printImmediateFloat32(uint32_t Imm,
                                              const MCSubtargetInfo &STI,
                                              raw_ostream &O) {
  if (Imm == llvm::bit_cast<uint32_t>(0.0f))
    O << "0.0";
  // ... (1.0, -1.0, 0.5, -0.5, 2.0, -2.0, 4.0, -4.0 cases elided)
  else if (Imm == 0x3e22f983 &&
           STI.hasFeature(AMDGPU::FeatureInv2PiInlineImm))
    O << "0.15915494";
  else
    return false;

  return true;
}
void AMDGPUInstPrinter::printImmediate32(uint32_t Imm,
                                         const MCSubtargetInfo &STI,
                                         raw_ostream &O) {
  int32_t SImm = static_cast<int32_t>(Imm);
  if (isInlinableIntLiteral(SImm)) {
    O << SImm;
    return;
  }

  if (printImmediateFloat32(Imm, STI, O))
    return;

  O << formatHex(static_cast<uint64_t>(Imm));
}
void AMDGPUInstPrinter::printImmediate64(const MCInstrDesc &Desc, uint64_t Imm,
                                         const MCSubtargetInfo &STI,
                                         raw_ostream &O, bool IsFP) {
  int64_t SImm = static_cast<int64_t>(Imm);
  if (SImm >= -16 && SImm <= 64) {
    O << SImm;
    return;
  }

  if (Imm == llvm::bit_cast<uint64_t>(0.0))
    O << "0.0";
  // ... (1.0, -1.0, 0.5, -0.5, 2.0, -2.0, 4.0, -4.0 cases elided)
  else if (Imm == 0x3fc45f306dc9c882 &&
           STI.hasFeature(AMDGPU::FeatureInv2PiInlineImm))
    O << "0.15915494309189532";
  else
    printLiteral64(Desc, Imm, STI, O, IsFP);
}
void AMDGPUInstPrinter::printLiteral64(const MCInstrDesc &Desc, uint64_t Imm,
                                       const MCSubtargetInfo &STI,
                                       raw_ostream &O, bool IsFP) {
  bool CanUse64BitLiterals =
      STI.hasFeature(AMDGPU::Feature64BitLiterals) &&
      !(Desc.TSFlags & (SIInstrFlags::VOP3 | SIInstrFlags::VOP3P));

  if (IsFP) {
    if (CanUse64BitLiterals && Lo_32(Imm))
      O << "lit64(" << formatHex(static_cast<uint64_t>(Imm)) << ')';
    else
      O << formatHex(static_cast<uint64_t>(Hi_32(Imm)));
  } else {
    if (CanUse64BitLiterals && !isInt<32>(Imm) && !isUInt<32>(Imm))
      O << "lit64(" << formatHex(static_cast<uint64_t>(Imm)) << ')';
    else
      O << formatHex(static_cast<uint64_t>(Imm));
  }
}
void AMDGPUInstPrinter::printBLGP(const MCInst *MI, unsigned OpNo,
                                  const MCSubtargetInfo &STI,
                                  raw_ostream &O) {
  unsigned Imm = MI->getOperand(OpNo).getImm();
  if (!Imm)
    return;

  // On GFX940 the BLGP field of the FP64 MFMAs encodes neg modifiers.
  if (AMDGPU::isGFX940(STI)) {
    switch (MI->getOpcode()) {
    case AMDGPU::V_MFMA_F64_16X16X4F64_gfx940_acd:
    case AMDGPU::V_MFMA_F64_16X16X4F64_gfx940_vcd:
    case AMDGPU::V_MFMA_F64_4X4X4F64_gfx940_acd:
    case AMDGPU::V_MFMA_F64_4X4X4F64_gfx940_vcd:
      O << " neg:[" << (Imm & 1) << ',' << ((Imm >> 1) & 1) << ','
        << ((Imm >> 2) & 1) << ']';
      return;
    }
  }

  O << " blgp:" << Imm;
}
void AMDGPUInstPrinter::printDefaultVccOperand(bool FirstOperand,
                                               const MCSubtargetInfo &STI,
                                               raw_ostream &O) {
  if (!FirstOperand)
    O << ", ";
  printRegOperand(STI.hasFeature(AMDGPU::FeatureWavefrontSize32)
                      ? AMDGPU::VCC_LO
                      : AMDGPU::VCC,
                  O, MRI);
  if (FirstOperand)
    O << ", ";
}

bool AMDGPUInstPrinter::needsImpliedVcc(const MCInstrDesc &Desc,
                                        unsigned OpNo) const {
  return OpNo == 0 && (Desc.TSFlags & SIInstrFlags::DPP) &&
         (Desc.hasImplicitDefOfPhysReg(AMDGPU::VCC) ||
          Desc.hasImplicitDefOfPhysReg(AMDGPU::VCC_LO));
}
void AMDGPUInstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
                                     const MCSubtargetInfo &STI,
                                     raw_ostream &O) {
  unsigned Opc = MI->getOpcode();
  if (AMDGPU::isCvt_F32_Fp8_Bf8_e64(Opc)) {
    int ModIdx =
        AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0_modifiers);
    // ... (special handling of the src0_modifiers operand of the VOP3
    //      v_cvt_f32_{fp8,bf8} forms elided)
  }

  // Print the implied vcc/vcc_lo operand before the first explicit operand
  // of DPP carry instructions.
  const MCInstrDesc &Desc = MII.get(Opc);
  if (OpNo == 0 && (Desc.TSFlags & SIInstrFlags::DPP) &&
      (Desc.hasImplicitDefOfPhysReg(AMDGPU::VCC) ||
       Desc.hasImplicitDefOfPhysReg(AMDGPU::VCC_LO)))
    printDefaultVccOperand(/*FirstOperand=*/true, STI, O);

  printRegularOperand(MI, OpNo, STI, O);
}
void AMDGPUInstPrinter::printRegularOperand(const MCInst *MI, unsigned OpNo,
                                            const MCSubtargetInfo &STI,
                                            raw_ostream &O) {
  const MCInstrDesc &Desc = MII.get(MI->getOpcode());

  if (OpNo >= MI->getNumOperands()) {
    O << "/*Missing OP" << OpNo << "*/";
    return;
  }

  const MCOperand &Op = MI->getOperand(OpNo);
  if (Op.isReg()) {
    printRegOperand(Op.getReg(), O, MRI);

    // Check if operand register class contains register used.
    // Intention: print disassembler message when invalid code is decoded,
    // for example sgpr register used in VReg or VISrc(VReg or imm) operand.
    int RCID = Desc.operands()[OpNo].RegClass;
    if (RCID != -1) {
      const MCRegisterClass RC = MRI.getRegClass(RCID);
      auto Reg = mc2PseudoReg(Op.getReg());
      if (!RC.contains(Reg) && !isInlineValue(Reg)) {
        O << "/*Invalid register, operand has \'" << MRI.getRegClassName(&RC)
          << "\' register class*/";
      }
    }
  } else if (Op.isImm()) {
    const uint8_t OpTy = Desc.operands()[OpNo].OperandType;
    switch (OpTy) {
    case AMDGPU::OPERAND_REG_IMM_INT32:
    case AMDGPU::OPERAND_REG_IMM_FP32:
    case AMDGPU::OPERAND_REG_INLINE_C_INT32:
    case AMDGPU::OPERAND_REG_INLINE_C_FP32:
    case AMDGPU::OPERAND_REG_INLINE_AC_INT32:
    case AMDGPU::OPERAND_REG_INLINE_AC_FP32:
    case AMDGPU::OPERAND_INLINE_SPLIT_BARRIER_INT32:
      printImmediate32(Op.getImm(), STI, O);
      break;
    case AMDGPU::OPERAND_REG_IMM_INT64:
    case AMDGPU::OPERAND_REG_INLINE_C_INT64:
      printImmediate64(Desc, Op.getImm(), STI, O, false);
      break;
    case AMDGPU::OPERAND_REG_IMM_FP64:
    case AMDGPU::OPERAND_REG_INLINE_C_FP64:
    case AMDGPU::OPERAND_REG_INLINE_AC_FP64:
      printImmediate64(Desc, Op.getImm(), STI, O, true);
      break;
    case AMDGPU::OPERAND_REG_INLINE_C_INT16:
    case AMDGPU::OPERAND_REG_IMM_INT16:
      printImmediateInt16(Op.getImm(), STI, O);
      break;
    case AMDGPU::OPERAND_REG_INLINE_C_FP16:
    case AMDGPU::OPERAND_REG_IMM_FP16:
      printImmediateF16(Op.getImm(), STI, O);
      break;
    case AMDGPU::OPERAND_REG_INLINE_C_BF16:
    case AMDGPU::OPERAND_REG_IMM_BF16:
      printImmediateBF16(Op.getImm(), STI, O);
      break;
    case AMDGPU::OPERAND_REG_IMM_V2INT16:
    case AMDGPU::OPERAND_REG_IMM_V2BF16:
    case AMDGPU::OPERAND_REG_IMM_V2FP16:
    case AMDGPU::OPERAND_REG_IMM_NOINLINE_V2FP16:
    case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
    case AMDGPU::OPERAND_REG_INLINE_C_V2BF16:
    case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
    case AMDGPU::OPERAND_REG_IMM_V2INT32:
    case AMDGPU::OPERAND_REG_IMM_V2FP32:
      printImmediateV216(Op.getImm(), OpTy, STI, O);
      break;
    case MCOI::OPERAND_UNKNOWN:
    case MCOI::OPERAND_PCREL:
      O << formatDec(Op.getImm());
      break;
    case MCOI::OPERAND_REGISTER:
      // The disassembler decodes some invalid encodings into a 32-bit
      // immediate on a register-only operand.
      printImmediate32(Op.getImm(), STI, O);
      O << "/*Invalid immediate*/";
      break;
    default:
      llvm_unreachable("unexpected immediate operand type");
    }
  } else if (Op.isDFPImm()) {
    // ... (assembler-created floating-point operands elided)
  } else if (Op.isExpr()) {
    const MCExpr *Exp = Op.getExpr();
    MAI.printExpr(O, *Exp);
  } else {
    O << "/*INV_OP*/";
  }

  // Print the implied vcc/vcc_lo source of v_cndmask and the VOP2 carry
  // forms when this operand is src1.
  switch (MI->getOpcode()) {
  default:
    break;

  case AMDGPU::V_CNDMASK_B32_e32_gfx10:
  case AMDGPU::V_ADD_CO_CI_U32_e32_gfx10:
  case AMDGPU::V_SUB_CO_CI_U32_e32_gfx10:
  case AMDGPU::V_SUBREV_CO_CI_U32_e32_gfx10:
  case AMDGPU::V_ADD_CO_CI_U32_dpp_gfx10:
  case AMDGPU::V_SUB_CO_CI_U32_dpp_gfx10:
  case AMDGPU::V_SUBREV_CO_CI_U32_dpp_gfx10:
  case AMDGPU::V_CNDMASK_B32_dpp8_gfx10:
  case AMDGPU::V_ADD_CO_CI_U32_dpp8_gfx10:
  case AMDGPU::V_SUB_CO_CI_U32_dpp8_gfx10:
  case AMDGPU::V_SUBREV_CO_CI_U32_dpp8_gfx10:
  case AMDGPU::V_CNDMASK_B32_e32_gfx11:
  case AMDGPU::V_ADD_CO_CI_U32_e32_gfx11:
  case AMDGPU::V_SUB_CO_CI_U32_e32_gfx11:
  case AMDGPU::V_SUBREV_CO_CI_U32_e32_gfx11:
  case AMDGPU::V_ADD_CO_CI_U32_dpp_gfx11:
  case AMDGPU::V_SUB_CO_CI_U32_dpp_gfx11:
  case AMDGPU::V_SUBREV_CO_CI_U32_dpp_gfx11:
  case AMDGPU::V_CNDMASK_B32_dpp8_gfx11:
  case AMDGPU::V_ADD_CO_CI_U32_dpp8_gfx11:
  case AMDGPU::V_SUB_CO_CI_U32_dpp8_gfx11:
  case AMDGPU::V_SUBREV_CO_CI_U32_dpp8_gfx11:
  case AMDGPU::V_CNDMASK_B32_e32_gfx12:
  case AMDGPU::V_ADD_CO_CI_U32_e32_gfx12:
  case AMDGPU::V_SUB_CO_CI_U32_e32_gfx12:
  case AMDGPU::V_SUBREV_CO_CI_U32_e32_gfx12:
  case AMDGPU::V_CNDMASK_B32_dpp_gfx12:
  case AMDGPU::V_ADD_CO_CI_U32_dpp_gfx12:
  case AMDGPU::V_SUB_CO_CI_U32_dpp_gfx12:
  case AMDGPU::V_SUBREV_CO_CI_U32_dpp_gfx12:
  case AMDGPU::V_CNDMASK_B32_dpp8_gfx12:
  case AMDGPU::V_ADD_CO_CI_U32_dpp8_gfx12:
  case AMDGPU::V_SUB_CO_CI_U32_dpp8_gfx12:
  case AMDGPU::V_SUBREV_CO_CI_U32_dpp8_gfx12:

  case AMDGPU::V_CNDMASK_B32_e32_gfx6_gfx7:
  case AMDGPU::V_CNDMASK_B32_e32_vi:
    if ((int)OpNo == AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                                AMDGPU::OpName::src1))
      printDefaultVccOperand(OpNo == 0, STI, O);
    break;
  }

  if (Desc.TSFlags & SIInstrFlags::MTBUF) {
    int SOffsetIdx =
        AMDGPU::getNamedOperandIdx(MI->getOpcode(), AMDGPU::OpName::soffset);
    assert(SOffsetIdx != -1);
    if ((int)OpNo == SOffsetIdx)
      printSymbolicFormat(MI, STI, O);
  }
}
void AMDGPUInstPrinter::printOperandAndFPInputMods(const MCInst *MI,
                                                   unsigned OpNo,
                                                   const MCSubtargetInfo &STI,
                                                   raw_ostream &O) {
  const MCInstrDesc &Desc = MII.get(MI->getOpcode());
  if (needsImpliedVcc(Desc, OpNo))
    printDefaultVccOperand(true, STI, O);

  unsigned InputModifiers = MI->getOperand(OpNo).getImm();

  // Use 'neg(...)' instead of '-' to avoid ambiguity: for integer literals
  // -1 is not the same value as neg(1).
  bool NegMnemo = false;

  if (InputModifiers & SISrcMods::NEG) {
    if (OpNo + 1 < MI->getNumOperands() &&
        (InputModifiers & SISrcMods::ABS) == 0) {
      const MCOperand &Op = MI->getOperand(OpNo + 1);
      NegMnemo = Op.isImm();
    }
    if (NegMnemo) {
      O << "neg(";
    } else {
      O << '-';
    }
  }

  if (InputModifiers & SISrcMods::ABS)
    O << '|';
  printRegularOperand(MI, OpNo + 1, STI, O);
  if (InputModifiers & SISrcMods::ABS)
    O << '|';

  if (NegMnemo) {
    O << ')';
  }

  // Print the implied vcc/vcc_lo operand of VOP2b forms.
  switch (MI->getOpcode()) {
  default:
    break;

  case AMDGPU::V_CNDMASK_B32_sdwa_gfx10:
  case AMDGPU::V_CNDMASK_B32_dpp_gfx10:
  case AMDGPU::V_CNDMASK_B32_dpp_gfx11:
    if ((int)OpNo + 1 ==
        AMDGPU::getNamedOperandIdx(MI->getOpcode(), AMDGPU::OpName::src1))
      printDefaultVccOperand(OpNo == 0, STI, O);
    break;
  }
}
void AMDGPUInstPrinter::printOperandAndIntInputMods(const MCInst *MI,
                                                    unsigned OpNo,
                                                    const MCSubtargetInfo &STI,
                                                    raw_ostream &O) {
  const MCInstrDesc &Desc = MII.get(MI->getOpcode());
  if (needsImpliedVcc(Desc, OpNo))
    printDefaultVccOperand(true, STI, O);

  unsigned InputModifiers = MI->getOperand(OpNo).getImm();
  if (InputModifiers & SISrcMods::SEXT)
    O << "sext(";
  printRegularOperand(MI, OpNo + 1, STI, O);
  if (InputModifiers & SISrcMods::SEXT)
    O << ')';

  // Print the implied vcc/vcc_lo operand of VOP2b forms.
  switch (MI->getOpcode()) {
  default:
    break;

  case AMDGPU::V_ADD_CO_CI_U32_sdwa_gfx10:
  case AMDGPU::V_SUB_CO_CI_U32_sdwa_gfx10:
  case AMDGPU::V_SUBREV_CO_CI_U32_sdwa_gfx10:
    if ((int)OpNo + 1 == AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                                    AMDGPU::OpName::src1))
      printDefaultVccOperand(OpNo == 0, STI, O);
    break;
  }
}
void AMDGPUInstPrinter::printDPP8(const MCInst *MI, unsigned OpNo,
                                  const MCSubtargetInfo &STI,
                                  raw_ostream &O) {
  if (!AMDGPU::isGFX10Plus(STI))
    llvm_unreachable("dpp8 is not supported on ASICs earlier than GFX10");

  unsigned Imm = MI->getOperand(OpNo).getImm();
  O << "dpp8:[" << formatDec(Imm & 0x7);
  for (size_t i = 1; i < 8; ++i) {
    O << ',' << formatDec((Imm >> (3 * i)) & 0x7);
  }
  O << ']';
}
void AMDGPUInstPrinter::printDPPCtrl(const MCInst *MI, unsigned OpNo,
                                     const MCSubtargetInfo &STI,
                                     raw_ostream &O) {
  using namespace AMDGPU::DPP;

  unsigned Imm = MI->getOperand(OpNo).getImm();
  const MCInstrDesc &Desc = MII.get(MI->getOpcode());

  if (!AMDGPU::isLegalDPALU_DPPControl(STI, Imm) &&
      AMDGPU::isDPALU_DPP(Desc, STI)) {
    O << " /* DP ALU dpp only supports "
      << (isGFX12(STI) ? "row_share" : "row_newbcast") << " */";
    return;
  }

  if (Imm <= DppCtrl::QUAD_PERM_LAST) {
    O << "quad_perm:[";
    O << formatDec(Imm & 0x3) << ',';
    O << formatDec((Imm & 0xc) >> 2) << ',';
    O << formatDec((Imm & 0x30) >> 4) << ',';
    O << formatDec((Imm & 0xc0) >> 6) << ']';
  } else if ((Imm >= DppCtrl::ROW_SHL_FIRST) &&
             (Imm <= DppCtrl::ROW_SHL_LAST)) {
    O << "row_shl:" << formatDec(Imm - DppCtrl::ROW_SHL0);
  } else if ((Imm >= DppCtrl::ROW_SHR_FIRST) &&
             (Imm <= DppCtrl::ROW_SHR_LAST)) {
    O << "row_shr:" << formatDec(Imm - DppCtrl::ROW_SHR0);
  } else if ((Imm >= DppCtrl::ROW_ROR_FIRST) &&
             (Imm <= DppCtrl::ROW_ROR_LAST)) {
    O << "row_ror:" << formatDec(Imm - DppCtrl::ROW_ROR0);
  } else if (Imm == DppCtrl::WAVE_SHL1) {
    if (AMDGPU::isGFX10Plus(STI)) {
      O << "/* wave_shl is not supported starting from GFX10 */";
      return;
    }
    O << "wave_shl:1";
  } else if (Imm == DppCtrl::WAVE_ROL1) {
    if (AMDGPU::isGFX10Plus(STI)) {
      O << "/* wave_rol is not supported starting from GFX10 */";
      return;
    }
    O << "wave_rol:1";
  } else if (Imm == DppCtrl::WAVE_SHR1) {
    if (AMDGPU::isGFX10Plus(STI)) {
      O << "/* wave_shr is not supported starting from GFX10 */";
      return;
    }
    O << "wave_shr:1";
  } else if (Imm == DppCtrl::WAVE_ROR1) {
    if (AMDGPU::isGFX10Plus(STI)) {
      O << "/* wave_ror is not supported starting from GFX10 */";
      return;
    }
    O << "wave_ror:1";
  } else if (Imm == DppCtrl::ROW_MIRROR) {
    O << "row_mirror";
  } else if (Imm == DppCtrl::ROW_HALF_MIRROR) {
    O << "row_half_mirror";
  } else if (Imm == DppCtrl::BCAST15) {
    if (AMDGPU::isGFX10Plus(STI)) {
      O << "/* row_bcast is not supported starting from GFX10 */";
      return;
    }
    O << "row_bcast:15";
  } else if (Imm == DppCtrl::BCAST31) {
    if (AMDGPU::isGFX10Plus(STI)) {
      O << "/* row_bcast is not supported starting from GFX10 */";
      return;
    }
    O << "row_bcast:31";
  } else if ((Imm >= DppCtrl::ROW_SHARE_FIRST) &&
             (Imm <= DppCtrl::ROW_SHARE_LAST)) {
    if (AMDGPU::isGFX90A(STI)) {
      O << "row_newbcast:";
    } else if (AMDGPU::isGFX10Plus(STI)) {
      O << "row_share:";
    } else {
      O << " /* row_newbcast/row_share is not supported on ASICs earlier "
           "than GFX90A/GFX10 */";
      return;
    }
    O << formatDec(Imm - DppCtrl::ROW_SHARE_FIRST);
  } else if ((Imm >= DppCtrl::ROW_XMASK_FIRST) &&
             (Imm <= DppCtrl::ROW_XMASK_LAST)) {
    if (!AMDGPU::isGFX10Plus(STI)) {
      O << "/* row_xmask is not supported on ASICs earlier than GFX10 */";
      return;
    }
    O << "row_xmask:" << formatDec(Imm - DppCtrl::ROW_XMASK_FIRST);
  } else {
    O << "/* Invalid dpp_ctrl value */";
  }
}
void AMDGPUInstPrinter::printDppBoundCtrl(const MCInst *MI, unsigned OpNo,
                                          const MCSubtargetInfo &STI,
                                          raw_ostream &O) {
  unsigned Imm = MI->getOperand(OpNo).getImm();
  if (Imm) {
    O << " bound_ctrl:1";
  }
}
void AMDGPUInstPrinter::printDppFI(const MCInst *MI, unsigned OpNo,
                                   const MCSubtargetInfo &STI,
                                   raw_ostream &O) {
  using namespace llvm::AMDGPU::DPP;
  unsigned Imm = MI->getOperand(OpNo).getImm();
  if (Imm == DPP_FI_1 || Imm == DPP8_FI_1) {
    O << " fi:1";
  }
}
void AMDGPUInstPrinter::printSDWASel(const MCInst *MI, unsigned OpNo,
                                     raw_ostream &O) {
  using namespace llvm::AMDGPU::SDWA;

  unsigned Imm = MI->getOperand(OpNo).getImm();
  switch (Imm) {
  case SdwaSel::BYTE_0: O << "BYTE_0"; break;
  case SdwaSel::BYTE_1: O << "BYTE_1"; break;
  case SdwaSel::BYTE_2: O << "BYTE_2"; break;
  case SdwaSel::BYTE_3: O << "BYTE_3"; break;
  case SdwaSel::WORD_0: O << "WORD_0"; break;
  case SdwaSel::WORD_1: O << "WORD_1"; break;
  case SdwaSel::DWORD: O << "DWORD"; break;
  default: llvm_unreachable("Invalid SDWA data select operand");
  }
}
void AMDGPUInstPrinter::printSDWADstSel(const MCInst *MI, unsigned OpNo,
                                        const MCSubtargetInfo &STI,
                                        raw_ostream &O) {
  O << "dst_sel:";
  printSDWASel(MI, OpNo, O);
}

void AMDGPUInstPrinter::printSDWASrc0Sel(const MCInst *MI, unsigned OpNo,
                                         const MCSubtargetInfo &STI,
                                         raw_ostream &O) {
  O << "src0_sel:";
  printSDWASel(MI, OpNo, O);
}

void AMDGPUInstPrinter::printSDWASrc1Sel(const MCInst *MI, unsigned OpNo,
                                         const MCSubtargetInfo &STI,
                                         raw_ostream &O) {
  O << "src1_sel:";
  printSDWASel(MI, OpNo, O);
}
void AMDGPUInstPrinter::printSDWADstUnused(const MCInst *MI, unsigned OpNo,
                                           const MCSubtargetInfo &STI,
                                           raw_ostream &O) {
  using namespace llvm::AMDGPU::SDWA;

  O << "dst_unused:";
  unsigned Imm = MI->getOperand(OpNo).getImm();
  switch (Imm) {
  case DstUnused::UNUSED_PAD: O << "UNUSED_PAD"; break;
  case DstUnused::UNUSED_SEXT: O << "UNUSED_SEXT"; break;
  case DstUnused::UNUSED_PRESERVE: O << "UNUSED_PRESERVE"; break;
  default: llvm_unreachable("Invalid SDWA dest_unused operand");
  }
}
void AMDGPUInstPrinter::printExpSrcN(const MCInst *MI, unsigned OpNo,
                                     const MCSubtargetInfo &STI,
                                     raw_ostream &O, unsigned N) {
  unsigned Opc = MI->getOpcode();
  int EnIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::en);
  unsigned En = MI->getOperand(EnIdx).getImm();

  int ComprIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::compr);

  // If compr is set, print as src0, src0, src1, src1.
  if (MI->getOperand(ComprIdx).getImm())
    OpNo = OpNo - N + N / 2;

  if (En & (1 << N))
    printRegOperand(MI->getOperand(OpNo).getReg(), O, MRI);
  else
    O << "off";
}
void AMDGPUInstPrinter::printExpSrc0(const MCInst *MI, unsigned OpNo,
                                     const MCSubtargetInfo &STI,
                                     raw_ostream &O) {
  printExpSrcN(MI, OpNo, STI, O, 0);
}

void AMDGPUInstPrinter::printExpSrc1(const MCInst *MI, unsigned OpNo,
                                     const MCSubtargetInfo &STI,
                                     raw_ostream &O) {
  printExpSrcN(MI, OpNo, STI, O, 1);
}

void AMDGPUInstPrinter::printExpSrc2(const MCInst *MI, unsigned OpNo,
                                     const MCSubtargetInfo &STI,
                                     raw_ostream &O) {
  printExpSrcN(MI, OpNo, STI, O, 2);
}

void AMDGPUInstPrinter::printExpSrc3(const MCInst *MI, unsigned OpNo,
                                     const MCSubtargetInfo &STI,
                                     raw_ostream &O) {
  printExpSrcN(MI, OpNo, STI, O, 3);
}
void AMDGPUInstPrinter::printExpTgt(const MCInst *MI, unsigned OpNo,
                                    const MCSubtargetInfo &STI,
                                    raw_ostream &O) {
  using namespace llvm::AMDGPU::Exp;

  // This is really a 6-bit field.
  unsigned Id = MI->getOperand(OpNo).getImm() & ((1 << 6) - 1);

  int Index;
  StringRef TgtName;
  if (getTgtName(Id, TgtName, Index) && isSupportedTgtId(Id, STI)) {
    O << ' ' << TgtName;
    if (Index >= 0)
      O << Index;
  } else {
    O << " invalid_target_" << Id;
  }
}
static bool allOpsDefaultValue(const int *Ops, int NumOps, int Mod,
                               bool IsPacked, bool HasDstSel) {
  int DefaultValue = IsPacked && (Mod == SISrcMods::OP_SEL_1);

  for (int I = 0; I < NumOps; ++I) {
    if (!!(Ops[I] & Mod) != DefaultValue)
      return false;
  }

  if (HasDstSel && (Ops[0] & SISrcMods::DST_OP_SEL) != 0)
    return false;

  return true;
}
void AMDGPUInstPrinter::printPackedModifier(const MCInst *MI, StringRef Name,
                                            unsigned Mod, raw_ostream &O) {
  unsigned Opc = MI->getOpcode();
  int NumOps = 0;
  int Ops[3];

  std::pair<AMDGPU::OpName, AMDGPU::OpName> MOps[] = {
      {AMDGPU::OpName::src0_modifiers, AMDGPU::OpName::src0},
      {AMDGPU::OpName::src1_modifiers, AMDGPU::OpName::src1},
      {AMDGPU::OpName::src2_modifiers, AMDGPU::OpName::src2}};
  int DefaultValue = (Mod == SISrcMods::OP_SEL_1);

  for (auto [SrcMod, Src] : MOps) {
    if (!AMDGPU::hasNamedOperand(Opc, Src))
      break;

    int ModIdx = AMDGPU::getNamedOperandIdx(Opc, SrcMod);
    Ops[NumOps++] =
        (ModIdx != -1) ? MI->getOperand(ModIdx).getImm() : DefaultValue;
  }

  bool HasDst =
      (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst) != -1) ||
      (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::sdst) != -1);
  // ... (re-collection of the modifier operands for instructions that print
  //      all three values is elided)
  for (AMDGPU::OpName OpName :
       {AMDGPU::OpName::src0_modifiers, AMDGPU::OpName::src1_modifiers,
        AMDGPU::OpName::src2_modifiers}) {
    int Idx = AMDGPU::getNamedOperandIdx(Opc, OpName);
    // ...
  }

  const bool HasDstSel =
      NumOps > 0 && Mod == SISrcMods::OP_SEL_0 &&
      (MII.get(Opc).TSFlags & SIInstrFlags::VOP3_OPSEL);

  const bool IsPacked = MII.get(Opc).TSFlags & SIInstrFlags::IsPacked;

  if (allOpsDefaultValue(Ops, NumOps, Mod, IsPacked, HasDstSel))
    return;

  O << Name;
  for (int I = 0; I < NumOps; ++I) {
    if (I != 0)
      O << ',';
    O << !!(Ops[I] & Mod);
  }
  O << ']';
}
void AMDGPUInstPrinter::printOpSel(const MCInst *MI, unsigned,
                                   const MCSubtargetInfo &STI,
                                   raw_ostream &O) {
  unsigned Opc = MI->getOpcode();
  if (AMDGPU::isCvt_F32_Fp8_Bf8_e64(Opc)) {
    auto SrcMod =
        AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0_modifiers);
    unsigned Mod = MI->getOperand(SrcMod).getImm();
    unsigned Index0 = !!(Mod & SISrcMods::OP_SEL_0);
    unsigned Index1 = !!(Mod & SISrcMods::OP_SEL_1);
    if (Index0 || Index1)
      O << " op_sel:[" << Index0 << ',' << Index1 << ']';
    return;
  }
  if (AMDGPU::isPermlane16(Opc)) {
    auto FIN = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0_modifiers);
    auto BCN = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1_modifiers);
    unsigned FI = !!(MI->getOperand(FIN).getImm() & SISrcMods::OP_SEL_0);
    unsigned BC = !!(MI->getOperand(BCN).getImm() & SISrcMods::OP_SEL_0);
    if (FI || BC)
      O << " op_sel:[" << FI << ',' << BC << ']';
    return;
  }

  printPackedModifier(MI, " op_sel:[", SISrcMods::OP_SEL_0, O);
}
void AMDGPUInstPrinter::printOpSelHi(const MCInst *MI, unsigned OpNo,
                                     const MCSubtargetInfo &STI,
                                     raw_ostream &O) {
  printPackedModifier(MI, " op_sel_hi:[", SISrcMods::OP_SEL_1, O);
}

void AMDGPUInstPrinter::printNegLo(const MCInst *MI, unsigned OpNo,
                                   const MCSubtargetInfo &STI,
                                   raw_ostream &O) {
  printPackedModifier(MI, " neg_lo:[", SISrcMods::NEG, O);
}

void AMDGPUInstPrinter::printNegHi(const MCInst *MI, unsigned OpNo,
                                   const MCSubtargetInfo &STI,
                                   raw_ostream &O) {
  printPackedModifier(MI, " neg_hi:[", SISrcMods::NEG_HI, O);
}
void AMDGPUInstPrinter::printIndexKey8bit(const MCInst *MI, unsigned OpNo,
                                          const MCSubtargetInfo &STI,
                                          raw_ostream &O) {
  auto Imm = MI->getOperand(OpNo).getImm() & 0x7;
  if (Imm == 0)
    return;

  O << " index_key:" << Imm;
}

void AMDGPUInstPrinter::printIndexKey16bit(const MCInst *MI, unsigned OpNo,
                                           const MCSubtargetInfo &STI,
                                           raw_ostream &O) {
  auto Imm = MI->getOperand(OpNo).getImm() & 0x7;
  if (Imm == 0)
    return;

  O << " index_key:" << Imm;
}

void AMDGPUInstPrinter::printIndexKey32bit(const MCInst *MI, unsigned OpNo,
                                           const MCSubtargetInfo &STI,
                                           raw_ostream &O) {
  auto Imm = MI->getOperand(OpNo).getImm() & 0x7;
  if (Imm == 0)
    return;

  O << " index_key:" << Imm;
}
void AMDGPUInstPrinter::printMatrixFMT(const MCInst *MI, unsigned OpNo,
                                       const MCSubtargetInfo &STI,
                                       raw_ostream &O, char AorB) {
  auto Imm = MI->getOperand(OpNo).getImm() & 0x7;
  if (Imm == 0)
    return;

  O << " matrix_" << AorB << "_fmt:";
  switch (Imm) {
  default:
    O << Imm;
    break;
  case WMMA::MatrixFMT::MATRIX_FMT_FP8:
    O << "MATRIX_FMT_FP8";
    break;
  case WMMA::MatrixFMT::MATRIX_FMT_BF8:
    O << "MATRIX_FMT_BF8";
    break;
  case WMMA::MatrixFMT::MATRIX_FMT_FP6:
    O << "MATRIX_FMT_FP6";
    break;
  case WMMA::MatrixFMT::MATRIX_FMT_BF6:
    O << "MATRIX_FMT_BF6";
    break;
  case WMMA::MatrixFMT::MATRIX_FMT_FP4:
    O << "MATRIX_FMT_FP4";
    break;
  }
}
void AMDGPUInstPrinter::printMatrixAFMT(const MCInst *MI, unsigned OpNo,
                                        const MCSubtargetInfo &STI,
                                        raw_ostream &O) {
  printMatrixFMT(MI, OpNo, STI, O, 'a');
}

void AMDGPUInstPrinter::printMatrixBFMT(const MCInst *MI, unsigned OpNo,
                                        const MCSubtargetInfo &STI,
                                        raw_ostream &O) {
  printMatrixFMT(MI, OpNo, STI, O, 'b');
}
void AMDGPUInstPrinter::printMatrixScale(const MCInst *MI, unsigned OpNo,
                                         const MCSubtargetInfo &STI,
                                         raw_ostream &O, char AorB) {
  auto Imm = MI->getOperand(OpNo).getImm() & 1;
  if (Imm == 0)
    return;

  O << " matrix_" << AorB << "_scale:";
  switch (Imm) {
  default:
    O << Imm;
    break;
  case WMMA::MatrixScale::MATRIX_SCALE_ROW0:
    O << "MATRIX_SCALE_ROW0";
    break;
  case WMMA::MatrixScale::MATRIX_SCALE_ROW1:
    O << "MATRIX_SCALE_ROW1";
    break;
  }
}
void AMDGPUInstPrinter::printMatrixAScale(const MCInst *MI, unsigned OpNo,
                                          const MCSubtargetInfo &STI,
                                          raw_ostream &O) {
  printMatrixScale(MI, OpNo, STI, O, 'a');
}

void AMDGPUInstPrinter::printMatrixBScale(const MCInst *MI, unsigned OpNo,
                                          const MCSubtargetInfo &STI,
                                          raw_ostream &O) {
  printMatrixScale(MI, OpNo, STI, O, 'b');
}
void AMDGPUInstPrinter::printMatrixScaleFmt(const MCInst *MI, unsigned OpNo,
                                            const MCSubtargetInfo &STI,
                                            raw_ostream &O, char AorB) {
  auto Imm = MI->getOperand(OpNo).getImm() & 3;
  if (Imm == 0)
    return;

  O << " matrix_" << AorB << "_scale_fmt:";
  switch (Imm) {
  default:
    O << Imm;
    break;
  case WMMA::MatrixScaleFmt::MATRIX_SCALE_FMT_E8:
    O << "MATRIX_SCALE_FMT_E8";
    break;
  case WMMA::MatrixScaleFmt::MATRIX_SCALE_FMT_E5M3:
    O << "MATRIX_SCALE_FMT_E5M3";
    break;
  case WMMA::MatrixScaleFmt::MATRIX_SCALE_FMT_E4M3:
    O << "MATRIX_SCALE_FMT_E4M3";
    break;
  }
}
void AMDGPUInstPrinter::printMatrixAScaleFmt(const MCInst *MI, unsigned OpNo,
                                             const MCSubtargetInfo &STI,
                                             raw_ostream &O) {
  printMatrixScaleFmt(MI, OpNo, STI, O, 'a');
}

void AMDGPUInstPrinter::printMatrixBScaleFmt(const MCInst *MI, unsigned OpNo,
                                             const MCSubtargetInfo &STI,
                                             raw_ostream &O) {
  printMatrixScaleFmt(MI, OpNo, STI, O, 'b');
}
void AMDGPUInstPrinter::printInterpSlot(const MCInst *MI, unsigned OpNum,
                                        const MCSubtargetInfo &STI,
                                        raw_ostream &O) {
  unsigned Imm = MI->getOperand(OpNum).getImm();
  switch (Imm) {
  case 0:
    O << "p10";
    break;
  case 1:
    O << "p20";
    break;
  case 2:
    O << "p0";
    break;
  default:
    O << "invalid_param_" << Imm;
  }
}
void AMDGPUInstPrinter::printInterpAttr(const MCInst *MI, unsigned OpNum,
                                        const MCSubtargetInfo &STI,
                                        raw_ostream &O) {
  unsigned Attr = MI->getOperand(OpNum).getImm();
  O << "attr" << Attr;
}

void AMDGPUInstPrinter::printInterpAttrChan(const MCInst *MI, unsigned OpNum,
                                            const MCSubtargetInfo &STI,
                                            raw_ostream &O) {
  unsigned Chan = MI->getOperand(OpNum).getImm();
  O << '.' << "xyzw"[Chan & 0x3];
}
void AMDGPUInstPrinter::printGPRIdxMode(const MCInst *MI, unsigned OpNo,
                                        const MCSubtargetInfo &STI,
                                        raw_ostream &O) {
  using namespace llvm::AMDGPU::VGPRIndexMode;
  unsigned Val = MI->getOperand(OpNo).getImm();

  if ((Val & ~ENABLE_MASK) != 0) {
    O << formatHex(static_cast<uint64_t>(Val));
  } else {
    O << "gpr_idx(";
    bool NeedComma = false;
    for (unsigned ModeId = ID_MIN; ModeId <= ID_MAX; ++ModeId) {
      if (Val & (1 << ModeId)) {
        if (NeedComma)
          O << ',';
        O << IdSymbolic[ModeId];
        NeedComma = true;
      }
    }
    O << ')';
  }
}
void AMDGPUInstPrinter::printMemOperand(const MCInst *MI, unsigned OpNo,
                                        const MCSubtargetInfo &STI,
                                        raw_ostream &O) {
  printRegularOperand(MI, OpNo, STI, O);
  O << ", ";
  printRegularOperand(MI, OpNo + 1, STI, O);
}
void AMDGPUInstPrinter::printIfSet(const MCInst *MI, unsigned OpNo,
                                   raw_ostream &O, StringRef Asm,
                                   StringRef Default) {
  const MCOperand &Op = MI->getOperand(OpNo);
  if (Op.getImm() == 1) {
    O << Asm;
  } else {
    O << Default;
  }
}

void AMDGPUInstPrinter::printIfSet(const MCInst *MI, unsigned OpNo,
                                   raw_ostream &O, char Asm) {
  const MCOperand &Op = MI->getOperand(OpNo);
  if (Op.getImm() == 1)
    O << Asm;
}

void AMDGPUInstPrinter::printOModSI(const MCInst *MI, unsigned OpNo,
                                    const MCSubtargetInfo &STI,
                                    raw_ostream &O) {
  int Imm = MI->getOperand(OpNo).getImm();
  if (Imm == SIOutMods::MUL2)
    O << " mul:2";
  else if (Imm == SIOutMods::MUL4)
    O << " mul:4";
  else if (Imm == SIOutMods::DIV2)
    O << " div:2";
}
void AMDGPUInstPrinter::printSendMsg(const MCInst *MI, unsigned OpNo,
                                     const MCSubtargetInfo &STI,
                                     raw_ostream &O) {
  using namespace llvm::AMDGPU::SendMsg;

  const unsigned Imm16 = MI->getOperand(OpNo).getImm();

  uint16_t MsgId;
  uint16_t OpId;
  uint16_t StreamId;
  decodeMsg(Imm16, MsgId, OpId, StreamId, STI);

  StringRef MsgName = getMsgName(MsgId, STI);

  if (!MsgName.empty() && isValidMsgOp(MsgId, OpId, STI) &&
      isValidMsgStream(MsgId, OpId, StreamId, STI)) {
    O << "sendmsg(" << MsgName;
    if (msgRequiresOp(MsgId, STI)) {
      O << ", " << getMsgOpName(MsgId, OpId, STI);
      if (msgSupportsStream(MsgId, OpId, STI)) {
        O << ", " << StreamId;
      }
    }
    O << ')';
  } else if (encodeMsg(MsgId, OpId, StreamId) == Imm16) {
    O << "sendmsg(" << MsgId << ", " << OpId << ", " << StreamId << ')';
  } else {
    O << Imm16; // Unknown imm16 code.
  }
}
static void printSwizzleBitmask(const uint16_t AndMask, const uint16_t OrMask,
                                const uint16_t XorMask, raw_ostream &O) {
  using namespace llvm::AMDGPU::Swizzle;

  uint16_t Probe0 = ((0 & AndMask) | OrMask) ^ XorMask;
  uint16_t Probe1 = ((BITMASK_MASK & AndMask) | OrMask) ^ XorMask;

  // Probe a 0-bit and a 1-bit in every position to recover the symbolic
  // per-bit behaviour of the bitmask permute.
  for (unsigned Mask = 1 << (BITMASK_WIDTH - 1); Mask > 0; Mask >>= 1) {
    uint16_t p0 = Probe0 & Mask;
    uint16_t p1 = Probe1 & Mask;

    if (p0 == p1)
      O << (p0 ? '1' : '0');
    else
      O << (p0 ? 'i' : 'p');
  }
}

void AMDGPUInstPrinter::printSwizzle(const MCInst *MI, unsigned OpNo,
                                     const MCSubtargetInfo &STI,
                                     raw_ostream &O) {
  using namespace llvm::AMDGPU::Swizzle;

  uint16_t Imm = MI->getOperand(OpNo).getImm();
  if (Imm == 0)
    return;

  O << " offset:";

  // Decode the value into one of the symbolic swizzle(...) forms where
  // possible: QUAD_PERM, SWAP (AndMask == BITMASK_MAX && OrMask == 0 &&
  // XorMask > 0 && isPowerOf2_64(XorMask + 1)), REVERSE, BROADCAST
  // (GroupSize > 1 && isPowerOf2_64(GroupSize) && OrMask < GroupSize &&
  // XorMask == 0) or BITMASK_PERM; otherwise fall back to the raw value.
  // ...
  printU16ImmDecOperand(MI, OpNo, O);
}
void AMDGPUInstPrinter::printSWaitCnt(const MCInst *MI, unsigned OpNo,
                                      const MCSubtargetInfo &STI,
                                      raw_ostream &O) {
  AMDGPU::IsaVersion ISA = AMDGPU::getIsaVersion(STI.getCPU());

  unsigned SImm16 = MI->getOperand(OpNo).getImm();
  unsigned Vmcnt, Expcnt, Lgkmcnt;
  decodeWaitcnt(ISA, SImm16, Vmcnt, Expcnt, Lgkmcnt);

  bool IsDefaultVmcnt = Vmcnt == getVmcntBitMask(ISA);
  bool IsDefaultExpcnt = Expcnt == getExpcntBitMask(ISA);
  bool IsDefaultLgkmcnt = Lgkmcnt == getLgkmcntBitMask(ISA);
  bool PrintAll = IsDefaultVmcnt && IsDefaultExpcnt && IsDefaultLgkmcnt;

  bool NeedSpace = false;

  if (!IsDefaultVmcnt || PrintAll) {
    O << "vmcnt(" << Vmcnt << ')';
    NeedSpace = true;
  }

  if (!IsDefaultExpcnt || PrintAll) {
    if (NeedSpace)
      O << ' ';
    O << "expcnt(" << Expcnt << ')';
    NeedSpace = true;
  }

  if (!IsDefaultLgkmcnt || PrintAll) {
    if (NeedSpace)
      O << ' ';
    O << "lgkmcnt(" << Lgkmcnt << ')';
  }
}
void AMDGPUInstPrinter::printDepCtr(const MCInst *MI, unsigned OpNo,
                                    const MCSubtargetInfo &STI,
                                    raw_ostream &O) {
  using namespace llvm::AMDGPU::DepCtr;

  uint64_t Imm16 = MI->getOperand(OpNo).getImm() & 0xffff;

  bool HasNonDefaultVal = false;
  if (isSymbolicDepCtrEncoding(Imm16, HasNonDefaultVal, STI)) {
    int Id = 0;
    StringRef Name;
    unsigned Val;
    bool IsDefault;
    bool NeedSpace = false;
    while (decodeDepCtr(Imm16, Id, Name, Val, IsDefault, STI)) {
      if (!IsDefault || !HasNonDefaultVal) {
        if (NeedSpace)
          O << ' ';
        O << Name << '(' << Val << ')';
        NeedSpace = true;
      }
    }
  } else {
    O << formatHex(Imm16);
  }
}
void AMDGPUInstPrinter::printSDelayALU(const MCInst *MI, unsigned OpNo,
                                       const MCSubtargetInfo &STI,
                                       raw_ostream &O) {
  const char *BadInstId = "/* invalid instid value */";
  static const std::array<const char *, 12> InstIds = {
      "NO_DEP",        "VALU_DEP_1",    "VALU_DEP_2",
      "VALU_DEP_3",    "VALU_DEP_4",    "TRANS32_DEP_1",
      "TRANS32_DEP_2", "TRANS32_DEP_3", "FMA_ACCUM_CYCLE_1",
      "SALU_CYCLE_1",  "SALU_CYCLE_2",  "SALU_CYCLE_3"};

  const char *BadInstSkip = "/* invalid instskip value */";
  static const std::array<const char *, 6> InstSkips = {
      "SAME", "NEXT", "SKIP_1", "SKIP_2", "SKIP_3", "SKIP_4"};

  unsigned SImm16 = MI->getOperand(OpNo).getImm();
  const char *Prefix = "";

  unsigned Value = SImm16 & 0xF;
  if (Value) {
    const char *Name = Value < InstIds.size() ? InstIds[Value] : BadInstId;
    O << Prefix << "instid0(" << Name << ')';
    Prefix = " | ";
  }

  Value = (SImm16 >> 4) & 7;
  if (Value) {
    const char *Name =
        Value < InstSkips.size() ? InstSkips[Value] : BadInstSkip;
    O << Prefix << "instskip(" << Name << ')';
    Prefix = " | ";
  }

  Value = (SImm16 >> 7) & 0xF;
  if (Value) {
    const char *Name = Value < InstIds.size() ? InstIds[Value] : BadInstId;
    O << Prefix << "instid1(" << Name << ')';
    Prefix = " | ";
  }

  if (!*Prefix)
    O << "0";
}
void AMDGPUInstPrinter::printHwreg(const MCInst *MI, unsigned OpNo,
                                   const MCSubtargetInfo &STI,
                                   raw_ostream &O) {
  using namespace llvm::AMDGPU::Hwreg;
  unsigned Val = MI->getOperand(OpNo).getImm();
  auto [Id, Offset, Width] = HwregEncoding::decode(Val);
  StringRef HwRegName = getHwreg(Id, STI);

  O << "hwreg(";
  if (!HwRegName.empty()) {
    O << HwRegName;
  } else {
    O << Id;
  }
  if (Width != HwregSize::Default || Offset != HwregOffset::Default)
    O << ", " << Offset << ", " << Width;
  O << ')';
}
void AMDGPUInstPrinter::printNamedInt(const MCInst *MI, unsigned OpNo,
                                      const MCSubtargetInfo &STI,
                                      raw_ostream &O, StringRef Prefix,
                                      bool PrintInHex, bool AlwaysPrint) {
  int64_t V = MI->getOperand(OpNo).getImm();
  if (AlwaysPrint || V != 0)
    O << ' ' << Prefix << ':' << (PrintInHex ? formatHex(V) : formatDec(V));
}

void AMDGPUInstPrinter::printBitOp3(const MCInst *MI, unsigned OpNo,
                                    const MCSubtargetInfo &STI,
                                    raw_ostream &O) {
  uint8_t Imm = MI->getOperand(OpNo).getImm();
  if (!Imm)
    return;

  O << " bitop3:";
  if (Imm <= 10)
    O << formatDec(Imm);
  else
    O << formatHex(static_cast<uint64_t>(Imm));
}

void AMDGPUInstPrinter::printScaleSel(const MCInst *MI, unsigned OpNo,
                                      const MCSubtargetInfo &STI,
                                      raw_ostream &O) {
  uint8_t Imm = MI->getOperand(OpNo).getImm();
  if (!Imm)
    return;

  O << " scale_sel:" << formatDec(Imm);
}
#include "AMDGPUGenAsmWriter.inc"