#define DEBUG_TYPE "arm64eccalllowering"

STATISTIC(Arm64ECCallsLowered, "Number of Arm64EC calls lowered");
// How a value is translated between its AArch64 and x64 representations.
enum ThunkArgTranslation : uint8_t {
  Direct,
  Bitcast,
  PointerIndirection,
};

struct ThunkArgInfo {
  Type *Arm64Ty;
  Type *X64Ty;
  ThunkArgTranslation Translation;
};

class AArch64Arm64ECCallLowering : public ModulePass {
public:
  static char ID;
  AArch64Arm64ECCallLowering() : ModulePass(ID) {}

  Function *buildExitThunk(FunctionType *FnTy, AttributeList Attrs);
  Function *buildEntryThunk(Function *F);
  void lowerCall(CallBase *CB);
  Function *buildGuestExitThunk(Function *F);
  Function *buildPatchableThunk(GlobalAlias *UnmangledAlias,
                                GlobalAlias *MangledAlias);
  bool processFunction(Function &F, SetVector<GlobalValue *> &DirectCalledFns,
                       DenseMap<GlobalAlias *, GlobalAlias *> &FnsMap);
  bool runOnModule(Module &M) override;

private:
  int cfguard_module_flag = 0;
  FunctionType *GuardFnType = nullptr;
  FunctionType *DispatchFnType = nullptr;
  // ... (cached guard-check globals elided)
  Constant *DispatchFnGlobal = nullptr;

  void getThunkType(FunctionType *FT, AttributeList AttrList,
                    Arm64ECThunkType TT, raw_ostream &Out,
                    FunctionType *&Arm64Ty, FunctionType *&X64Ty,
                    SmallVectorImpl<ThunkArgTranslation> &ArgTranslations);
  void getThunkRetType(FunctionType *FT, AttributeList AttrList,
                       raw_ostream &Out, Type *&Arm64RetTy, Type *&X64RetTy,
                       SmallVectorImpl<Type *> &Arm64ArgTypes,
                       SmallVectorImpl<Type *> &X64ArgTypes,
                       SmallVectorImpl<ThunkArgTranslation> &ArgTranslations,
                       bool &HasSretPtr);
  void getThunkArgTypes(FunctionType *FT, AttributeList AttrList,
                        Arm64ECThunkType TT, raw_ostream &Out,
                        SmallVectorImpl<Type *> &Arm64ArgTypes,
                        SmallVectorImpl<Type *> &X64ArgTypes,
                        SmallVectorImpl<ThunkArgTranslation> &ArgTranslations,
                        bool HasSretPtr);
  ThunkArgInfo canonicalizeThunkType(Type *T, Align Alignment, bool Ret,
                                     uint64_t ArgSizeBytes, raw_ostream &Out);
};
void AArch64Arm64ECCallLowering::getThunkType(
    FunctionType *FT, AttributeList AttrList, Arm64ECThunkType TT,
    raw_ostream &Out, FunctionType *&Arm64Ty, FunctionType *&X64Ty,
    SmallVectorImpl<ThunkArgTranslation> &ArgTranslations) {
  // Thunk names start with a prefix identifying the thunk kind.
  Out << (TT == Arm64ECThunkType::Entry ? "$ientry_thunk$cdecl$"
                                        : "$iexit_thunk$cdecl$");

  Type *Arm64RetTy;
  Type *X64RetTy;
  SmallVector<Type *> Arm64ArgTypes;
  SmallVector<Type *> X64ArgTypes;

  // Exit thunks additionally receive the x64 callee as an extra pointer
  // argument.
  if (TT == Arm64ECThunkType::Exit)
    Arm64ArgTypes.push_back(PtrTy);

  bool HasSretPtr = false;
  getThunkRetType(FT, AttrList, Out, Arm64RetTy, X64RetTy, Arm64ArgTypes,
                  X64ArgTypes, ArgTranslations, HasSretPtr);

  getThunkArgTypes(FT, AttrList, TT, Out, Arm64ArgTypes, X64ArgTypes,
                   ArgTranslations, HasSretPtr);

  Arm64Ty = FunctionType::get(Arm64RetTy, Arm64ArgTypes, false);
  X64Ty = FunctionType::get(X64RetTy, X64ArgTypes, false);
}
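// Worked example of the name being built here (illustrative; the per-type
// letters come from the helpers below, so treat the exact encodings as an
// assumption): a void() function gets "$ientry_thunk$cdecl$v$v" or
// "$iexit_thunk$cdecl$v$v", and an i64(i64, ptr) function's exit thunk is
// named along the lines of "$iexit_thunk$cdecl$i8$i8i8": the fixed prefix,
// then the canonicalized return type, then one token per argument.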
void AArch64Arm64ECCallLowering::getThunkArgTypes(
    FunctionType *FT, AttributeList AttrList, Arm64ECThunkType TT,
    raw_ostream &Out, SmallVectorImpl<Type *> &Arm64ArgTypes,
    SmallVectorImpl<Type *> &X64ArgTypes,
    SmallVectorImpl<ThunkArgTranslation> &ArgTranslations, bool HasSretPtr) {
  if (FT->isVarArg()) {
    // Variadic callees use a fixed thunk signature: x0-x3 are passed
    // directly, x4 holds the address of any stack-passed arguments, and x5
    // (exit thunks only) holds their size.
    // ...
    for (int i = HasSretPtr ? 1 : 0; i < 4; i++) {
      Arm64ArgTypes.push_back(I64Ty);
      X64ArgTypes.push_back(I64Ty);
      ArgTranslations.push_back(ThunkArgTranslation::Direct);
    }

    // x4: pointer to the stack-passed arguments.
    Arm64ArgTypes.push_back(PtrTy);
    X64ArgTypes.push_back(PtrTy);
    ArgTranslations.push_back(ThunkArgTranslation::Direct);

    // x5: size of the stack-passed arguments (not needed for entry thunks).
    if (TT != Arm64ECThunkType::Entry) {
      Arm64ArgTypes.push_back(I64Ty);
      X64ArgTypes.push_back(I64Ty);
      ArgTranslations.push_back(ThunkArgTranslation::Direct);
    }
    return;
  }

  unsigned I = 0;
  if (HasSretPtr)
    I++;

  if (I == FT->getNumParams()) {
    Out << "v";
    return;
  }

  for (unsigned E = FT->getNumParams(); I != E; ++I) {
#if 0
    uint64_t ArgSizeBytes = AttrList.getParamArm64ECArgSizeBytes(I);
    Align ParamAlign = AttrList.getParamAlignment(I).valueOrOne();
#else
    uint64_t ArgSizeBytes = 0;
    Align ParamAlign = Align();
#endif
    auto [Arm64Ty, X64Ty, ArgTranslation] =
        canonicalizeThunkType(FT->getParamType(I), ParamAlign,
                              /*Ret*/ false, ArgSizeBytes, Out);
    Arm64ArgTypes.push_back(Arm64Ty);
    X64ArgTypes.push_back(X64Ty);
    ArgTranslations.push_back(ArgTranslation);
  }
}
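// Sketch of the resulting argument lists (assuming the fixed variadic
// convention described above): a variadic callee's thunk carries x0-x3
// directly, plus a pointer to the caller's stack-passed arguments (x4) and,
// for exit thunks, their size (x5); a prototyped callee instead gets one
// canonicalized entry per declared parameter.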
void AArch64Arm64ECCallLowering::getThunkRetType(
    FunctionType *FT, AttributeList AttrList, raw_ostream &Out,
    Type *&Arm64RetTy, Type *&X64RetTy, SmallVectorImpl<Type *> &Arm64ArgTypes,
    SmallVectorImpl<Type *> &X64ArgTypes,
    SmallVectorImpl<ThunkArgTranslation> &ArgTranslations, bool &HasSretPtr) {
  Type *T = FT->getReturnType();
#if 0
  uint64_t ArgSizeBytes = AttrList.getRetArm64ECArgSizeBytes();
#else
  int64_t ArgSizeBytes = 0;
#endif
  if (T->isVoidTy()) {
    if (FT->getNumParams()) {
      // A void return may still carry the real result in an sret pointer in
      // the first parameter (or the second, after a "this" pointer).
      Attribute SRetAttr0 = AttrList.getParamAttr(0, Attribute::StructRet);
      Attribute InRegAttr0 = AttrList.getParamAttr(0, Attribute::InReg);
      Attribute SRetAttr1, InRegAttr1;
      if (FT->getNumParams() > 1) {
        SRetAttr1 = AttrList.getParamAttr(1, Attribute::StructRet);
        InRegAttr1 = AttrList.getParamAttr(1, Attribute::InReg);
      }
      // ... (sret pointers that are also "inreg" are handled separately)
      if (SRetAttr0.isValid()) {
        // Mangle the pointee type as the return type and keep the sret
        // pointer itself as a direct argument on both sides.
        Type *SRetType = SRetAttr0.getValueAsType();
        Align SRetAlign = AttrList.getParamAlignment(0).valueOrOne();
        canonicalizeThunkType(SRetType, SRetAlign, /*Ret*/ true, ArgSizeBytes,
                              Out);
        // ... (the thunk itself returns void; Arm64RetTy/X64RetTy are set
        // accordingly)
        Arm64ArgTypes.push_back(FT->getParamType(0));
        X64ArgTypes.push_back(FT->getParamType(0));
        ArgTranslations.push_back(ThunkArgTranslation::Direct);
        HasSretPtr = true;
        return;
      }
    }
    // ... (plain void return)
    return;
  }

  ThunkArgInfo info =
      canonicalizeThunkType(T, Align(), /*Ret*/ true, ArgSizeBytes, Out);
  Arm64RetTy = info.Arm64Ty;
  X64RetTy = info.X64Ty;
  // ... (large aggregate returns become an sret pointer on the x64 side)
}
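// Example (illustrative): for "void f(ptr sret(%struct.S) %out, i32 %x)" the
// sret pointer stays a direct pointer argument on both sides, HasSretPtr is
// set, and the mangled name encodes %struct.S as the return type. A value
// returned directly is instead canonicalized by the call at the end of the
// function above.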
ThunkArgInfo AArch64Arm64ECCallLowering::canonicalizeThunkType(
    Type *T, Align Alignment, bool Ret, uint64_t ArgSizeBytes,
    raw_ostream &Out) {

  auto direct = [](Type *T) {
    return ThunkArgInfo{T, T, ThunkArgTranslation::Direct};
  };

  auto bitcast = [this](Type *Arm64Ty, uint64_t SizeInBytes) {
    return ThunkArgInfo{Arm64Ty,
                        Type::getIntNTy(M->getContext(), SizeInBytes * 8),
                        ThunkArgTranslation::Bitcast};
  };

  auto pointerIndirection = [this](Type *Arm64Ty) {
    return ThunkArgInfo{Arm64Ty, PtrTy,
                        ThunkArgTranslation::PointerIndirection};
  };

  if (T->isFloatTy()) {
    Out << "f";
    return direct(T);
  }

  if (T->isDoubleTy()) {
    Out << "d";
    return direct(T);
  }

  if (T->isFloatingPointTy()) {
    report_fatal_error("Only 16, 32, and 64 bit floating points are supported "
                       "for ARM64EC thunks");
  }

  const DataLayout &DL = M->getDataLayout();

  // A single-element struct is canonicalized to its element type.
  if (auto *StructTy = dyn_cast<StructType>(T))
    if (StructTy->getNumElements() == 1)
      T = StructTy->getElementType(0);

  if (T->isArrayTy()) {
    Type *ElementTy = T->getArrayElementType();
    uint64_t ElementCnt = T->getArrayNumElements();
    uint64_t ElementSizePerBytes = DL.getTypeSizeInBits(ElementTy) / 8;
    uint64_t TotalSizeBytes = ElementCnt * ElementSizePerBytes;
    // ... (mangle the element kind)
    Out << TotalSizeBytes;
    if (Alignment.value() >= 16 && !Ret)
      Out << "a" << Alignment.value();
    if (TotalSizeBytes <= 8) {
      // Small homogeneous aggregates fit in a single x64 register.
      return bitcast(T, TotalSizeBytes);
    } else {
      return pointerIndirection(T);
    }
  } else if (T->isFloatingPointTy()) {
    report_fatal_error("Only 16, 32, and 64 bit floating points are supported "
                       "for ARM64EC thunks");
  }

  // Integers and pointers are widened to a 64-bit GPR on both sides.
  if ((T->isIntegerTy() || T->isPointerTy()) &&
      DL.getTypeSizeInBits(T) <= 64) {
    Out << "i8";
    return direct(I64Ty);
  }

  unsigned TypeSize = ArgSizeBytes;
  if (TypeSize == 0)
    TypeSize = DL.getTypeSizeInBits(T) / 8;
  // ... (mangle as a memory-passed value of TypeSize bytes)
  if (Alignment.value() >= 16 && !Ret)
    Out << "a" << Alignment.value();

  if (TypeSize == 1 || TypeSize == 2 || TypeSize == 4 || TypeSize == 8) {
    // Power-of-two-sized aggregates travel as an integer of the same size on
    // the x64 side.
    return bitcast(T, TypeSize);
  }
  return pointerIndirection(T);
}
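// Worked examples of the classification above (assuming default alignment
// and no explicit Arm64EC size attribute):
//   float, double        -> Direct (same type on both sides)
//   i32, i1, ptr         -> Direct, widened to i64
//   4- or 8-byte struct  -> Bitcast (x64 side sees an i32/i64)
//   24-byte struct       -> PointerIndirection (x64 side takes a pointer)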
Function *AArch64Arm64ECCallLowering::buildExitThunk(FunctionType *FT,
                                                     AttributeList Attrs) {
  SmallString<256> ExitThunkName;
  llvm::raw_svector_ostream ExitThunkStream(ExitThunkName);
  FunctionType *Arm64Ty, *X64Ty;
  SmallVector<ThunkArgTranslation> ArgTranslations;
  getThunkType(FT, Attrs, Arm64ECThunkType::Exit, ExitThunkStream, Arm64Ty,
               X64Ty, ArgTranslations);
  // ... (look up or create the thunk Function *F with the Arm64 signature
  // and the mangled ExitThunkName)
  F->setCallingConv(CallingConv::ARM64EC_Thunk_Native);
  F->setSection(".wowthk$aa");
  // ...
  F->addFnAttr("frame-pointer", "all");

  // Copy the sret attribute (if any) from the first parameter so the thunk's
  // corresponding parameter is marked correctly.
  if (FT->getNumParams()) {
    auto SRet = Attrs.getParamAttr(0, Attribute::StructRet);
    auto InReg = Attrs.getParamAttr(0, Attribute::InReg);
    if (SRet.isValid() && !InReg.isValid())
      F->addParamAttr(1, SRet);
    // ...
  }

  // ... (create the entry block; IRB is an IRBuilder<> inserting into it)
  const DataLayout &DL = M->getDataLayout();
  SmallVector<Value *> Args;

  // Pass the called function in x9.
  auto X64TyOffset = 1;
  Args.push_back(F->arg_begin());

  Type *RetTy = Arm64Ty->getReturnType();
  if (RetTy != X64Ty->getReturnType()) {
    // If the return type was rewritten to be passed indirectly, pass a
    // pointer to the return buffer as an extra x64 argument.
    if (DL.getTypeStoreSize(RetTy) > 8) {
      Args.push_back(IRB.CreateAlloca(RetTy));
      X64TyOffset++;
    }
  }

  // Translate the remaining arguments from the AArch64 representation to the
  // x64 representation chosen by canonicalizeThunkType.
  for (auto [Arg, X64ArgType, ArgTranslation] : llvm::zip_equal(
           make_range(F->arg_begin() + 1, F->arg_end()),
           make_range(X64Ty->param_begin() + X64TyOffset, X64Ty->param_end()),
           ArgTranslations)) {
    if (ArgTranslation != ThunkArgTranslation::Direct) {
      Value *Mem = IRB.CreateAlloca(Arg.getType());
      IRB.CreateStore(&Arg, Mem);
      if (ArgTranslation == ThunkArgTranslation::Bitcast) {
        Type *IntTy = IRB.getIntNTy(DL.getTypeStoreSizeInBits(Arg.getType()));
        Args.push_back(IRB.CreateLoad(IntTy, Mem));
      } else {
        assert(ArgTranslation == ThunkArgTranslation::PointerIndirection);
        Args.push_back(Mem);
      }
    } else {
      Args.push_back(&Arg);
    }
  }

  // ... (determine the x64-side callee; elided)
  CallInst *Call = IRB.CreateCall(X64Ty, Callee, Args);
  // ...

  Value *RetVal = Call;
  if (RetTy != X64Ty->getReturnType()) {
    // Translate the x64 return value back into the AArch64 representation.
    if (DL.getTypeStoreSize(RetTy) > 8) {
      RetVal = IRB.CreateLoad(RetTy, Args[1]);
    } else {
      Value *CastAlloca = IRB.CreateAlloca(RetTy);
      IRB.CreateStore(Call, CastAlloca);
      RetVal = IRB.CreateLoad(RetTy, CastAlloca);
    }
  }

  // ... (void results use CreateRetVoid)
  IRB.CreateRet(RetVal);

  return F;
}
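// Net effect (illustrative summary): the exit thunk is a small ARM64EC
// function named by getThunkType ("$iexit_thunk$cdecl$..."), taking the x64
// target as its first parameter; it repacks the incoming AArch64 arguments
// into the x64 signature, performs the call, translates the result back, and
// returns it.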
Function *AArch64Arm64ECCallLowering::buildEntryThunk(Function *F) {
  SmallString<256> EntryThunkName;
  llvm::raw_svector_ostream EntryThunkStream(EntryThunkName);
  FunctionType *Arm64Ty, *X64Ty;
  SmallVector<ThunkArgTranslation> ArgTranslations;
  getThunkType(F->getFunctionType(), F->getAttributes(),
               Arm64ECThunkType::Entry, EntryThunkStream, Arm64Ty, X64Ty,
               ArgTranslations);
  // ... (look up or create the thunk Function *Thunk with the x64-side
  // signature and the mangled EntryThunkName)
  Thunk->setCallingConv(CallingConv::ARM64EC_Thunk_X64);
  Thunk->setSection(".wowthk$aa");
  // ...
  Thunk->addFnAttr("frame-pointer", "all");

  // ... (create the entry block; IRB is an IRBuilder<> inserting into it)

  Type *RetTy = Arm64Ty->getReturnType();
  Type *X64RetType = X64Ty->getReturnType();

  // A value returned directly on the AArch64 side may have been rewritten to
  // an sret-style out-parameter on the x64 side.
  bool TransformDirectToSRet = X64RetType->isVoidTy() && !RetTy->isVoidTy();
  unsigned ThunkArgOffset = TransformDirectToSRet ? 2 : 1;
  unsigned PassthroughArgSize =
      (F->isVarArg() ? 5 : Thunk->arg_size()) - ThunkArgOffset;
  assert(ArgTranslations.size() == (F->isVarArg() ? 5 : PassthroughArgSize));

  // Translate the incoming x64 arguments back to their AArch64
  // representation.
  SmallVector<Value *> Args;
  for (unsigned i = 0; i != PassthroughArgSize; ++i) {
    Value *Arg = Thunk->getArg(i + ThunkArgOffset);
    Type *ArgTy = Arm64Ty->getParamType(i);
    ThunkArgTranslation ArgTranslation = ArgTranslations[i];
    if (ArgTranslation != ThunkArgTranslation::Direct) {
      if (ArgTranslation == ThunkArgTranslation::Bitcast) {
        Value *CastAlloca = IRB.CreateAlloca(ArgTy);
        IRB.CreateStore(Arg, CastAlloca);
        Arg = IRB.CreateLoad(ArgTy, CastAlloca);
      } else {
        assert(ArgTranslation == ThunkArgTranslation::PointerIndirection);
        Arg = IRB.CreateLoad(ArgTy, Arg);
      }
    }
    // ...
    Args.push_back(Arg);
  }

  if (F->isVarArg()) {
    // For variadic callees, x4 must point past the x64 caller's 32-byte
    // register home area at the stack-passed arguments; their size is
    // unknown here, so zero is passed for x5.
    Thunk->addParamAttr(5, Attribute::InReg);
    Value *Arg = Thunk->getArg(5);
    Arg = IRB.CreatePtrAdd(Arg, IRB.getInt64(0x20));
    Args.push_back(Arg);

    Args.push_back(IRB.getInt64(0));
  }

  // ... (Callee is the AArch64 function being entered)
  CallInst *Call = IRB.CreateCall(Arm64Ty, Callee, Args);

  auto SRetAttr = F->getAttributes().getParamAttr(0, Attribute::StructRet);
  auto InRegAttr = F->getAttributes().getParamAttr(0, Attribute::InReg);
  if (SRetAttr.isValid() && !InRegAttr.isValid()) {
    Thunk->addParamAttr(1, SRetAttr);
    Call->addParamAttr(0, SRetAttr);
  }

  Value *RetVal = Call;
  if (TransformDirectToSRet) {
    IRB.CreateStore(RetVal, Thunk->getArg(1));
  } else if (X64RetType != RetTy) {
    // Bitcast the AArch64 return value to the x64 representation through
    // memory.
    Value *CastAlloca = IRB.CreateAlloca(X64RetType);
    IRB.CreateStore(Call, CastAlloca);
    RetVal = IRB.CreateLoad(X64RetType, CastAlloca);
  }

  // ... (void results use CreateRetVoid)
  IRB.CreateRet(RetVal);

  return Thunk;
}
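// Example of the per-argument translation above (directly mirrors the code):
// a 4-byte struct arrives from the x64 caller as an i32 and is converted back
// through a temporary alloca (Bitcast); a 24-byte struct arrives as a pointer
// and is loaded by value (PointerIndirection); everything else is passed
// through unchanged (Direct).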
Function *AArch64Arm64ECCallLowering::buildGuestExitThunk(Function *F) {
  llvm::raw_null_ostream NullThunkName;
  FunctionType *Arm64Ty, *X64Ty;
  SmallVector<ThunkArgTranslation> ArgTranslations;
  getThunkType(F->getFunctionType(), F->getAttributes(),
               Arm64ECThunkType::GuestExit, NullThunkName, Arm64Ty, X64Ty,
               ArgTranslations);
  auto MangledName = getArm64ECMangledFunctionName(F->getName().str());
  assert(MangledName && "Can't guest exit to function that's already native");
  std::string ThunkName = *MangledName;
  if (ThunkName[0] == '?' && ThunkName.find("@") != std::string::npos) {
    ThunkName.insert(ThunkName.find("@"), "$exit_thunk");
  } else {
    ThunkName.append("$exit_thunk");
  }

  // ... (create Function *GuestExit with the Arm64 signature and ThunkName;
  // record "arm64ec_unmangled_name" and "arm64ec_ecmangled_name" metadata on
  // it, then create its entry block with IRBuilder<> B)

  // Load the global symbol as a pointer to the guard-check function.
  Value *GuardFn;
  if (cfguard_module_flag == 2 && !F->hasFnAttribute("guard_nocf"))
    GuardFn = GuardFnCFGlobal;
  else
    GuardFn = GuardFnGlobal;
  LoadInst *GuardCheckLoad = B.CreateLoad(PtrTy, GuardFn);

  // Ask the check function whether to call F directly or to go through the
  // exit thunk.
  Function *Thunk = buildExitThunk(F->getFunctionType(), F->getAttributes());
  CallInst *GuardCheck =
      B.CreateCall(GuardFnType, GuardCheckLoad, {F, Thunk});
  // ...

  SmallVector<Value *> Args(llvm::make_pointer_range(GuestExit->args()));
  CallInst *Call = B.CreateCall(Arm64Ty, GuardCheck, Args);
  // ... (tail-call and return the result)

  auto SRetAttr = F->getAttributes().getParamAttr(0, Attribute::StructRet);
  auto InRegAttr = F->getAttributes().getParamAttr(0, Attribute::InReg);
  if (SRetAttr.isValid() && !InRegAttr.isValid()) {
    GuestExit->addParamAttr(0, SRetAttr);
    Call->addParamAttr(0, SRetAttr);
  }

  return GuestExit;
}
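// Naming example (follows from the logic above): a plain C symbol whose
// ARM64EC-mangled name is "#foo" gets the thunk "#foo$exit_thunk", while a
// C++-mangled name (starting with '?') has "$exit_thunk" inserted before its
// first '@' so the thunk still demangles next to the original function.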
Function *
AArch64Arm64ECCallLowering::buildPatchableThunk(GlobalAlias *UnmangledAlias,
                                                GlobalAlias *MangledAlias) {
  llvm::raw_null_ostream NullThunkName;
  FunctionType *Arm64Ty, *X64Ty;
  Function *F = cast<Function>(MangledAlias->getAliasee());
  SmallVector<ThunkArgTranslation> ArgTranslations;
  getThunkType(F->getFunctionType(), F->getAttributes(),
               Arm64ECThunkType::GuestExit, NullThunkName, Arm64Ty, X64Ty,
               ArgTranslations);
  std::string ThunkName(MangledAlias->getName());
  if (ThunkName[0] == '?' && ThunkName.find("@") != std::string::npos) {
    ThunkName.insert(ThunkName.find("@"), "$hybpatch_thunk");
  } else {
    ThunkName.append("$hybpatch_thunk");
  }

  // ... (create Function *GuestExit with the Arm64 signature and ThunkName,
  // then create its entry block with IRBuilder<> B)

  // Load the global symbol as a pointer to the dispatch function.
  LoadInst *DispatchLoad = B.CreateLoad(PtrTy, DispatchFnGlobal);

  // Ask the dispatcher which implementation to call, handing it the exit
  // thunk in case the target turns out to be x64 code.
  Function *ExitThunk =
      buildExitThunk(F->getFunctionType(), F->getAttributes());
  CallInst *Dispatch =
      B.CreateCall(DispatchFnType, DispatchLoad,
                   {UnmangledAlias, ExitThunk, UnmangledAlias->getAliasee()});
  // ...

  SmallVector<Value *> Args(llvm::make_pointer_range(GuestExit->args()));
  CallInst *Call = B.CreateCall(Arm64Ty, Dispatch, Args);
  // ... (tail-call and return the result)

  auto SRetAttr = F->getAttributes().getParamAttr(0, Attribute::StructRet);
  auto InRegAttr = F->getAttributes().getParamAttr(0, Attribute::InReg);
  if (SRetAttr.isValid() && !InRegAttr.isValid()) {
    GuestExit->addParamAttr(0, SRetAttr);
    Call->addParamAttr(0, SRetAttr);
  }

  return GuestExit;
}
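// Sketch of the result (illustrative): the patchable thunk loads the
// dispatcher pointer from DispatchFnGlobal, hands it the unmangled alias,
// the exit thunk, and the current aliasee, and then calls whichever function
// pointer the dispatcher returns with the original arguments; this is what
// allows a hybrid_patchable function to be redirected at run time.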
void AArch64Arm64ECCallLowering::lowerCall(CallBase *CB) {
  IRBuilder<> B(CB);
  Value *CalledOperand = CB->getCalledOperand();
  // ... (copy any "funclet" operand bundle from CB into Bundles)

  // Load the global symbol as a pointer to the check function.
  Value *GuardFn;
  if (cfguard_module_flag == 2 && !CB->hasFnAttr("guard_nocf"))
    GuardFn = GuardFnCFGlobal;
  else
    GuardFn = GuardFnGlobal;
  LoadInst *GuardCheckLoad = B.CreateLoad(PtrTy, GuardFn);

  // The check receives the original target and its exit thunk; it is always
  // emitted as a plain call, even if CB is an invoke or callbr.
  Function *Thunk = buildExitThunk(CB->getFunctionType(), CB->getAttributes());
  CallInst *GuardCheck =
      B.CreateCall(GuardFnType, GuardCheckLoad, {CalledOperand, Thunk},
                   Bundles);
  GuardCheck->setCallingConv(CallingConv::CFGuard_Check);

  // The original call site now calls whatever the check returned.
  CB->setCalledOperand(GuardCheck);
}
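// Effect on the IR (sketch; @guard_fn stands for whichever of GuardFnGlobal
// or GuardFnCFGlobal was selected, and @exit_thunk for the thunk built by
// buildExitThunk):
//   %r = call i32 %fptr(i32 %x)
// becomes, roughly,
//   %chk    = load ptr, ptr @guard_fn
//   %target = call ptr %chk(ptr %fptr, ptr @exit_thunk)
//   %r      = call i32 %target(i32 %x)
// so the original call site survives, now calling whatever the check chose.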
bool AArch64Arm64ECCallLowering::runOnModule(Module &Mod) {
  if (!GenerateThunks)
    return false;

  M = &Mod;

  // Read the cfguard module flag, if present.
  if (auto *MD =
          mdconst::extract_or_null<ConstantInt>(M->getModuleFlag("cfguard")))
    cfguard_module_flag = MD->getZExtValue();

  PtrTy = PointerType::getUnqual(M->getContext());
  // ... (cache the other commonly used types)

  GuardFnType = FunctionType::get(PtrTy, {PtrTy, PtrTy}, false);
  DispatchFnType = FunctionType::get(PtrTy, {PtrTy, PtrTy, PtrTy}, false);
  // ... (look up or insert the guard-check and dispatch globals used below)

  // Rename function aliases to their ARM64EC-mangled names, remembering the
  // original name on the aliasee.
  for (GlobalAlias &A : Mod.aliases()) {
    auto *F = dyn_cast<Function>(A.getAliasee());
    // ... (non-function aliases are skipped)
    if (std::optional<std::string> MangledName =
            getArm64ECMangledFunctionName(A.getName().str())) {
      F->addMetadata("arm64ec_unmangled_name",
                     *MDNode::get(M->getContext(),
                                  MDString::get(M->getContext(), A.getName())));
      A.setName(MangledName.value());
    }
  }

  DenseMap<GlobalAlias *, GlobalAlias *> FnsMap;
  SetVector<GlobalAlias *> PatchableFns;

  for (Function &F : Mod) {
    if (F.hasPersonalityFn()) {
      GlobalValue *PersFn =
          cast<GlobalValue>(F.getPersonalityFn()->stripPointerCasts());
      if (std::optional<std::string> MangledName =
              getArm64ECMangledFunctionName(PersFn->getName().str()))
        PersFn->setName(MangledName.value());
    }

    // Set up hybrid_patchable functions: the body is renamed out of the way
    // and a pair of aliases (unmangled and mangled) is created for it.
    if (!F.hasFnAttribute(Attribute::HybridPatchable) ||
        F.isDeclarationForLinker() || F.hasLocalLinkage() ||
        F.getName().ends_with(HybridPatchableTargetSuffix))
      continue;

    if (std::optional<std::string> MangledName =
            getArm64ECMangledFunctionName(F.getName().str())) {
      std::string OrigName(F.getName());
      // ... (rename F, create the alias pair A / AM, record it in FnsMap and
      // PatchableFns, and redirect existing uses: mangled references via
      // F.replaceUsesWithIf(AM, ...), everything else via
      // F.replaceAllUsesWith(A))
      F.setMetadata("arm64ec_exp_name",
                    MDNode::get(M->getContext(),
                                MDString::get(M->getContext(),
                                              "EXP+" + MangledName.value())));
      // ...
      if (F.hasDLLExportStorageClass()) {
        // ... (transfer the dllexport storage class to the aliases)
      }
    }
  }

  SetVector<GlobalValue *> DirectCalledFns;
  for (Function &F : Mod)
    if (!F.isDeclarationForLinker() &&
        F.getCallingConv() != CallingConv::ARM64EC_Thunk_Native &&
        F.getCallingConv() != CallingConv::ARM64EC_Thunk_X64)
      processFunction(F, DirectCalledFns, FnsMap);

  struct ThunkInfo {
    Constant *Src;
    Constant *Dst;
    Arm64ECThunkType Kind;
  };
  SmallVector<ThunkInfo> ThunkMapping;

  // Every externally reachable ARM64EC function gets an entry thunk.
  for (Function &F : Mod) {
    if (!F.isDeclarationForLinker() &&
        (!F.hasLocalLinkage() || F.hasAddressTaken()) &&
        F.getCallingConv() != CallingConv::ARM64EC_Thunk_Native &&
        F.getCallingConv() != CallingConv::ARM64EC_Thunk_X64) {
      if (!F.hasComdat())
        F.setComdat(Mod.getOrInsertComdat(F.getName()));
      ThunkMapping.push_back(
          {&F, buildEntryThunk(&F), Arm64ECThunkType::Entry});
    }
  }

  // Directly called external functions get exit thunks and, unless they are
  // dllimport, guest-exit thunks.
  for (GlobalValue *O : DirectCalledFns) {
    auto GA = dyn_cast<GlobalAlias>(O);
    auto F = dyn_cast<Function>(GA ? GA->getAliasee() : O);
    ThunkMapping.push_back(
        {O, buildExitThunk(F->getFunctionType(), F->getAttributes()),
         Arm64ECThunkType::Exit});
    if (!GA && !F->hasDLLImportStorageClass())
      ThunkMapping.push_back(
          {buildGuestExitThunk(F), F, Arm64ECThunkType::GuestExit});
  }

  for (GlobalAlias *A : PatchableFns) {
    Function *Thunk = buildPatchableThunk(A, FnsMap[A]);
    ThunkMapping.push_back({A, Thunk, Arm64ECThunkType::GuestExit});
  }

  // Record all thunks in the "llvm.arm64ec.symbolmap" global.
  if (!ThunkMapping.empty()) {
    SmallVector<Constant *> ThunkMappingArrayElems;
    for (ThunkInfo &Thunk : ThunkMapping) {
      // ... (each element is an anonymous constant struct of {Src, Dst, Kind}
      // built with ConstantStruct::getAnon)
    }
    Constant *ThunkMappingArray = ConstantArray::get(
        llvm::ArrayType::get(ThunkMappingArrayElems[0]->getType(),
                             ThunkMappingArrayElems.size()),
        ThunkMappingArrayElems);
    new GlobalVariable(Mod, ThunkMappingArray->getType(), /*isConstant*/ false,
                       GlobalValue::ExternalLinkage, ThunkMappingArray,
                       "llvm.arm64ec.symbolmap");
  }

  return true;
}
bool AArch64Arm64ECCallLowering::processFunction(
    Function &F, SetVector<GlobalValue *> &DirectCalledFns,
    DenseMap<GlobalAlias *, GlobalAlias *> &FnsMap) {
  SmallVector<CallBase *, 8> IndirectCalls;

  // An ARM64EC definition is emitted under a mangled symbol, so rename the
  // function and keep the original name in metadata for later passes that
  // need to emit the unmangled symbol.
  if (!F.hasLocalLinkage() || F.hasAddressTaken()) {
    if (std::optional<std::string> MangledName =
            getArm64ECMangledFunctionName(F.getName().str())) {
      F.addMetadata("arm64ec_unmangled_name",
                    *MDNode::get(M->getContext(),
                                 MDString::get(M->getContext(), F.getName())));
      if (F.hasComdat() && F.getComdat()->getName() == F.getName()) {
        Comdat *MangledComdat = M->getOrInsertComdat(MangledName.value());
        SmallVector<GlobalObject *> ComdatUsers =
            to_vector(F.getComdat()->getUsers());
        for (GlobalObject *User : ComdatUsers)
          User->setComdat(MangledComdat);
      }
      F.setName(MangledName.value());
    }
  }

  // Collect the calls that need lowering; the rewriting happens afterwards so
  // the iteration is not invalidated.
  for (BasicBlock &BB : F) {
    for (Instruction &I : BB) {
      auto *CB = dyn_cast<CallBase>(&I);
      if (!CB || CB->getCallingConv() == CallingConv::ARM64EC_Thunk_X64 ||
          CB->isInlineAsm())
        continue;

      // Direct calls to external declarations may resolve to x64 code at
      // link/load time; record them so exit thunks get built.
      if (Function *F = CB->getCalledFunction()) {
        if (!LowerDirectToIndirect || F->hasLocalLinkage() ||
            F->isIntrinsic() || !F->isDeclarationForLinker())
          continue;
        DirectCalledFns.insert(F);
        continue;
      }

      // Direct calls to hybrid_patchable aliases go through the mangled
      // alias; record the unmangled alias for exit-thunk generation.
      if (auto *A = dyn_cast<GlobalAlias>(CB->getCalledOperand())) {
        auto I = FnsMap.find(A);
        if (I != FnsMap.end()) {
          CB->setCalledOperand(I->second);
          DirectCalledFns.insert(I->first);
          continue;
        }
      }

      IndirectCalls.push_back(CB);
      ++Arm64ECCallsLowered;
    }
  }

  if (IndirectCalls.empty())
    return false;

  for (CallBase *CB : IndirectCalls)
    lowerCall(CB);

  return true;
}
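// Example of the renaming performed above (illustrative): a definition of
// "func" is renamed to its ARM64EC-mangled form (e.g. "#func"), and the
// original spelling is preserved as "arm64ec_unmangled_name" metadata so
// later passes can still emit the unmangled symbol alongside it.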
char AArch64Arm64ECCallLowering::ID = 0;

INITIALIZE_PASS(AArch64Arm64ECCallLowering, "Arm64ECCallLowering",
                "AArch64Arm64ECCallLowering", false, false)

ModulePass *llvm::createAArch64Arm64ECCallLoweringPass() {
  return new AArch64Arm64ECCallLowering;
}

static cl::opt<bool> LowerDirectToIndirect("arm64ec-lower-direct-to-indirect",
                                           cl::Hidden, cl::init(true));
static cl::opt<bool> GenerateThunks("arm64ec-generate-thunks", cl::Hidden,
                                    cl::init(true));