//===-- AArch64Arm64ECCallLowering.cpp - Lower Arm64EC calls ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file contains the IR transform to lower external or indirect calls for
/// the ARM64EC calling convention. Such calls must go through the runtime, so
/// we can translate the calling convention for calls into the emulator.
///
/// This subsumes Control Flow Guard handling.
///
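/// For example (an illustrative sketch): a definition "i64 @f(i64)" is renamed
/// to its ARM64EC-mangled form "#f", an x64-callable entry thunk named
/// "$ientry_thunk$cdecl$i8$i8" is emitted for it, exit thunks are emitted for
/// calls that may land in x64 code, and each source/thunk pair is recorded in
/// the "llvm.arm64ec.symbolmap" table built at the end of runOnModule.
///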
//===----------------------------------------------------------------------===//

#include "AArch64.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Mangler.h"
#include "llvm/IR/Module.h"
#include "llvm/Object/COFF.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"

using namespace llvm;
using namespace llvm::COFF;

#define DEBUG_TYPE "arm64eccalllowering"

STATISTIC(Arm64ECCallsLowered, "Number of Arm64EC calls lowered");

static cl::opt<bool> LowerDirectToIndirect("arm64ec-lower-direct-to-indirect",
                                           cl::Hidden, cl::init(true));
static cl::opt<bool> GenerateThunks("arm64ec-generate-thunks", cl::Hidden,
                                    cl::init(true));

namespace {

enum ThunkArgTranslation : uint8_t {
  Direct,
  Bitcast,
  PointerIndirection,
};

struct ThunkArgInfo {
  Type *Arm64Ty;
  Type *X64Ty;
  ThunkArgTranslation Translation;
};

class AArch64Arm64ECCallLowering : public ModulePass {
public:
  static char ID;
  AArch64Arm64ECCallLowering() : ModulePass(ID) {}

  Function *buildExitThunk(FunctionType *FnTy, AttributeList Attrs);
  Function *buildEntryThunk(Function *F);
  void lowerCall(CallBase *CB);
  Function *buildGuestExitThunk(Function *F);
  Function *buildPatchableThunk(GlobalAlias *UnmangledAlias,
                                GlobalAlias *MangledAlias);
  bool processFunction(Function &F, SetVector<GlobalValue *> &DirectCalledFns,
                       DenseMap<GlobalAlias *, GlobalAlias *> &FnsMap);
  bool runOnModule(Module &M) override;

private:
  int cfguard_module_flag = 0;
  FunctionType *GuardFnType = nullptr;
  FunctionType *DispatchFnType = nullptr;
  Constant *GuardFnCFGlobal = nullptr;
  Constant *GuardFnGlobal = nullptr;
  Constant *DispatchFnGlobal = nullptr;
  Module *M = nullptr;

  Type *PtrTy;
  Type *I64Ty;
  Type *VoidTy;

  void getThunkType(FunctionType *FT, AttributeList AttrList,
                    Arm64ECThunkType TT, raw_ostream &Out,
                    FunctionType *&Arm64Ty, FunctionType *&X64Ty,
                    SmallVector<ThunkArgTranslation> &ArgTranslations);
  void getThunkRetType(FunctionType *FT, AttributeList AttrList,
                       raw_ostream &Out, Type *&Arm64RetTy, Type *&X64RetTy,
                       SmallVectorImpl<Type *> &Arm64ArgTypes,
                       SmallVectorImpl<Type *> &X64ArgTypes,
                       SmallVector<ThunkArgTranslation> &ArgTranslations,
                       bool &HasSretPtr);
  void getThunkArgTypes(FunctionType *FT, AttributeList AttrList,
                        Arm64ECThunkType TT, raw_ostream &Out,
                        SmallVectorImpl<Type *> &Arm64ArgTypes,
                        SmallVectorImpl<Type *> &X64ArgTypes,
                        SmallVectorImpl<ThunkArgTranslation> &ArgTranslations,
                        bool HasSretPtr);
  ThunkArgInfo canonicalizeThunkType(Type *T, Align Alignment, bool Ret,
                                     uint64_t ArgSizeBytes, raw_ostream &Out);
};

} // end anonymous namespace

void AArch64Arm64ECCallLowering::getThunkType(
    FunctionType *FT, AttributeList AttrList, Arm64ECThunkType TT,
    raw_ostream &Out, FunctionType *&Arm64Ty, FunctionType *&X64Ty,
    SmallVector<ThunkArgTranslation> &ArgTranslations) {
  Out << (TT == Arm64ECThunkType::Entry ? "$ientry_thunk$cdecl$"
                                        : "$iexit_thunk$cdecl$");

  Type *Arm64RetTy;
  Type *X64RetTy;

  SmallVector<Type *> Arm64ArgTypes;
  SmallVector<Type *> X64ArgTypes;

  // The first argument to a thunk is the called function, stored in x9.
  // For exit thunks, we pass the called function down to the emulator;
  // for entry/guest exit thunks, we just call the Arm64 function directly.
  if (TT == Arm64ECThunkType::Exit)
    Arm64ArgTypes.push_back(PtrTy);
  X64ArgTypes.push_back(PtrTy);

  bool HasSretPtr = false;
  getThunkRetType(FT, AttrList, Out, Arm64RetTy, X64RetTy, Arm64ArgTypes,
                  X64ArgTypes, ArgTranslations, HasSretPtr);

  getThunkArgTypes(FT, AttrList, TT, Out, Arm64ArgTypes, X64ArgTypes,
                   ArgTranslations, HasSretPtr);

  Arm64Ty = FunctionType::get(Arm64RetTy, Arm64ArgTypes, false);

  X64Ty = FunctionType::get(X64RetTy, X64ArgTypes, false);
}
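
// As an illustration of the mangling built above: for a prototype "i64 (i64)",
// the exit thunk is named "$iexit_thunk$cdecl$i8$i8" and the matching entry
// thunk "$ientry_thunk$cdecl$i8$i8"; integers and pointers of 64 bits or less
// are all mangled as "i8" (see canonicalizeThunkType below).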

void AArch64Arm64ECCallLowering::getThunkArgTypes(
    FunctionType *FT, AttributeList AttrList, Arm64ECThunkType TT,
    raw_ostream &Out, SmallVectorImpl<Type *> &Arm64ArgTypes,
    SmallVectorImpl<Type *> &X64ArgTypes,
    SmallVectorImpl<ThunkArgTranslation> &ArgTranslations, bool HasSretPtr) {

  Out << "$";
  if (FT->isVarArg()) {
    // We treat a variadic function's thunk as a normal function
    // with the following type on the ARM side:
    //   rettype exitthunk(
    //       ptr x9, ptr x0, i64 x1, i64 x2, i64 x3, ptr x4, i64 x5)
    //
    // which can cover all types of variadic function.
    // x9 is used as in a normal exit thunk: it holds the called function.
    // x0-x3 are the arguments passed in registers.
    // x4 is the address of the arguments on the stack.
    // x5 is the size of the arguments on the stack.
    //
    // On the x64 side, it's the same except that x5 isn't set.
    //
    // If both the ARM and X64 sides are sret, there are only three
    // arguments in registers.
    //
    // If the X64 side is sret, but the ARM side isn't, we pass an extra value
    // to/from the X64 side, and let SelectionDAG transform it into a memory
    // location.
    Out << "varargs";

    // x0-x3
    for (int i = HasSretPtr ? 1 : 0; i < 4; i++) {
      Arm64ArgTypes.push_back(I64Ty);
      X64ArgTypes.push_back(I64Ty);
      ArgTranslations.push_back(ThunkArgTranslation::Direct);
    }

    // x4
    Arm64ArgTypes.push_back(PtrTy);
    X64ArgTypes.push_back(PtrTy);
    ArgTranslations.push_back(ThunkArgTranslation::Direct);
    // x5
    Arm64ArgTypes.push_back(I64Ty);
    if (TT != Arm64ECThunkType::Entry) {
      // FIXME: x5 isn't actually used by the x64 side; revisit once we
      // have proper isel for varargs
      X64ArgTypes.push_back(I64Ty);
      ArgTranslations.push_back(ThunkArgTranslation::Direct);
    }
    return;
  }

  unsigned I = 0;
  if (HasSretPtr)
    I++;

  if (I == FT->getNumParams()) {
    Out << "v";
    return;
  }

  for (unsigned E = FT->getNumParams(); I != E; ++I) {
#if 0
    // FIXME: Need more information about argument size; see
    // https://reviews.llvm.org/D132926
    uint64_t ArgSizeBytes = AttrList.getParamArm64ECArgSizeBytes(I);
    Align ParamAlign = AttrList.getParamAlignment(I).valueOrOne();
#else
    uint64_t ArgSizeBytes = 0;
    Align ParamAlign = Align();
#endif
    auto [Arm64Ty, X64Ty, ArgTranslation] =
        canonicalizeThunkType(FT->getParamType(I), ParamAlign,
                              /*Ret*/ false, ArgSizeBytes, Out);
    Arm64ArgTypes.push_back(Arm64Ty);
    X64ArgTypes.push_back(X64Ty);
    ArgTranslations.push_back(ArgTranslation);
  }
}

void AArch64Arm64ECCallLowering::getThunkRetType(
    FunctionType *FT, AttributeList AttrList, raw_ostream &Out,
    Type *&Arm64RetTy, Type *&X64RetTy, SmallVectorImpl<Type *> &Arm64ArgTypes,
    SmallVectorImpl<Type *> &X64ArgTypes,
    SmallVector<ThunkArgTranslation> &ArgTranslations, bool &HasSretPtr) {
  Type *T = FT->getReturnType();
#if 0
  // FIXME: Need more information about argument size; see
  // https://reviews.llvm.org/D132926
  uint64_t ArgSizeBytes = AttrList.getRetArm64ECArgSizeBytes();
#else
  int64_t ArgSizeBytes = 0;
#endif
  if (T->isVoidTy()) {
    if (FT->getNumParams()) {
      Attribute SRetAttr0 = AttrList.getParamAttr(0, Attribute::StructRet);
      Attribute InRegAttr0 = AttrList.getParamAttr(0, Attribute::InReg);
      Attribute SRetAttr1, InRegAttr1;
      if (FT->getNumParams() > 1) {
        // Also check the second parameter (for class methods, the first
        // parameter is "this", and the second parameter is the sret pointer.)
        // It doesn't matter which one is sret.
        SRetAttr1 = AttrList.getParamAttr(1, Attribute::StructRet);
        InRegAttr1 = AttrList.getParamAttr(1, Attribute::InReg);
      }
      if ((SRetAttr0.isValid() && InRegAttr0.isValid()) ||
          (SRetAttr1.isValid() && InRegAttr1.isValid())) {
        // sret+inreg indicates a call that returns a C++ class value. This is
        // actually equivalent to just passing and returning a void* pointer
        // as the first or second argument. Translate it that way, instead of
        // trying to model "inreg" in the thunk's calling convention; this
        // simplifies the rest of the code, and matches MSVC mangling.
        Out << "i8";
        Arm64RetTy = I64Ty;
        X64RetTy = I64Ty;
        return;
      }
      if (SRetAttr0.isValid()) {
        // FIXME: Sanity-check the sret type; if it's an integer or pointer,
        // we'll get screwy mangling/codegen.
        // FIXME: For large struct types, mangle as an integer argument and
        // integer return, so we can reuse more thunks, instead of "m" syntax.
        // (MSVC mangles this case as an integer return with no argument, but
        // that's a miscompile.)
        Type *SRetType = SRetAttr0.getValueAsType();
        Align SRetAlign = AttrList.getParamAlignment(0).valueOrOne();
        canonicalizeThunkType(SRetType, SRetAlign, /*Ret*/ true, ArgSizeBytes,
                              Out);
        Arm64RetTy = VoidTy;
        X64RetTy = VoidTy;
        Arm64ArgTypes.push_back(FT->getParamType(0));
        X64ArgTypes.push_back(FT->getParamType(0));
        ArgTranslations.push_back(ThunkArgTranslation::Direct);
        HasSretPtr = true;
        return;
      }
    }

    Out << "v";
    Arm64RetTy = VoidTy;
    X64RetTy = VoidTy;
    return;
  }

  auto info =
      canonicalizeThunkType(T, Align(), /*Ret*/ true, ArgSizeBytes, Out);
  Arm64RetTy = info.Arm64Ty;
  X64RetTy = info.X64Ty;
  if (X64RetTy->isPointerTy()) {
    // If the X64 type is canonicalized to a pointer, that means it's
    // passed/returned indirectly. For a return value, that means it's an
    // sret pointer.
    X64ArgTypes.push_back(X64RetTy);
    X64RetTy = VoidTy;
  }
}

ThunkArgInfo AArch64Arm64ECCallLowering::canonicalizeThunkType(
    Type *T, Align Alignment, bool Ret, uint64_t ArgSizeBytes,
    raw_ostream &Out) {

  auto direct = [](Type *T) {
    return ThunkArgInfo{T, T, ThunkArgTranslation::Direct};
  };

  auto bitcast = [this](Type *Arm64Ty, uint64_t SizeInBytes) {
    return ThunkArgInfo{Arm64Ty,
                        llvm::Type::getIntNTy(M->getContext(), SizeInBytes * 8),
                        ThunkArgTranslation::Bitcast};
  };

  auto pointerIndirection = [this](Type *Arm64Ty) {
    return ThunkArgInfo{Arm64Ty, PtrTy,
                        ThunkArgTranslation::PointerIndirection};
  };

  if (T->isHalfTy()) {
    // Prefix with `llvm` since MSVC doesn't specify `_Float16`
    Out << "__llvm_h__";
    return direct(T);
  }

  if (T->isFloatTy()) {
    Out << "f";
    return direct(T);
  }

  if (T->isDoubleTy()) {
    Out << "d";
    return direct(T);
  }

  if (T->isFloatingPointTy()) {
    report_fatal_error("Only 16, 32, and 64 bit floating points are supported "
                       "for ARM64EC thunks");
  }

  auto &DL = M->getDataLayout();

  if (auto *StructTy = dyn_cast<StructType>(T))
    if (StructTy->getNumElements() == 1)
      T = StructTy->getElementType(0);

  if (T->isArrayTy()) {
    Type *ElementTy = T->getArrayElementType();
    uint64_t ElementCnt = T->getArrayNumElements();
    uint64_t ElementSizePerBytes = DL.getTypeSizeInBits(ElementTy) / 8;
    uint64_t TotalSizeBytes = ElementCnt * ElementSizePerBytes;
    if (ElementTy->isHalfTy() || ElementTy->isFloatTy() ||
        ElementTy->isDoubleTy()) {
      if (ElementTy->isHalfTy())
        // Prefix with `llvm` since MSVC doesn't specify `_Float16`
        Out << "__llvm_H__";
      else if (ElementTy->isFloatTy())
        Out << "F";
      else if (ElementTy->isDoubleTy())
        Out << "D";
      Out << TotalSizeBytes;
      if (Alignment.value() >= 16 && !Ret)
        Out << "a" << Alignment.value();
      if (TotalSizeBytes <= 8) {
        // Arm64 returns small structs of float/double in float registers;
        // X64 uses RAX.
        return bitcast(T, TotalSizeBytes);
      } else {
        // Struct is passed directly on Arm64, but indirectly on X64.
        return pointerIndirection(T);
      }
    } else if (T->isFloatingPointTy()) {
      report_fatal_error(
          "Only 16, 32, and 64 bit floating points are supported "
          "for ARM64EC thunks");
    }
  }

  if ((T->isIntegerTy() || T->isPointerTy()) && DL.getTypeSizeInBits(T) <= 64) {
    Out << "i8";
    return direct(I64Ty);
  }

  unsigned TypeSize = ArgSizeBytes;
  if (TypeSize == 0)
    TypeSize = DL.getTypeSizeInBits(T) / 8;
  Out << "m";
  if (TypeSize != 4)
    Out << TypeSize;
  if (Alignment.value() >= 16 && !Ret)
    Out << "a" << Alignment.value();
  // FIXME: Try to canonicalize Arm64Ty more thoroughly?
  if (TypeSize == 1 || TypeSize == 2 || TypeSize == 4 || TypeSize == 8) {
    // Pass directly in an integer register
    return bitcast(T, TypeSize);
  } else {
    // Passed directly on Arm64, but indirectly on X64.
    return pointerIndirection(T);
  }
}
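
// For example, a 16-byte struct passed by value is mangled as "m16" above: it
// stays a direct argument on the Arm64 side but becomes a pointer on the x64
// side, while an 8-byte struct is mangled as "m8" and is bitcast to an i64 for
// the x64 side.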

// This function builds the "exit thunk", a function which translates
// arguments and return values when calling x64 code from AArch64 code.
Function *AArch64Arm64ECCallLowering::buildExitThunk(FunctionType *FT,
                                                     AttributeList Attrs) {
  SmallString<256> ExitThunkName;
  llvm::raw_svector_ostream ExitThunkStream(ExitThunkName);
  FunctionType *Arm64Ty, *X64Ty;
  SmallVector<ThunkArgTranslation> ArgTranslations;
  getThunkType(FT, Attrs, Arm64ECThunkType::Exit, ExitThunkStream, Arm64Ty,
               X64Ty, ArgTranslations);
  if (Function *F = M->getFunction(ExitThunkName))
    return F;

  Function *F = Function::Create(Arm64Ty, GlobalValue::LinkOnceODRLinkage, 0,
                                 ExitThunkName, M);
  F->setCallingConv(CallingConv::ARM64EC_Thunk_Native);
  F->setSection(".wowthk$aa");
  F->setComdat(M->getOrInsertComdat(ExitThunkName));
  // Copy MSVC, and always set up a frame pointer. (Maybe this isn't necessary.)
  F->addFnAttr("frame-pointer", "all");
  // Only copy sret from the first argument. For C++ instance methods, clang can
  // stick an sret marking on a later argument, but it doesn't actually affect
  // the ABI, so we can omit it. This avoids triggering a verifier assertion.
  if (FT->getNumParams()) {
    auto SRet = Attrs.getParamAttr(0, Attribute::StructRet);
    auto InReg = Attrs.getParamAttr(0, Attribute::InReg);
    if (SRet.isValid() && !InReg.isValid())
      F->addParamAttr(1, SRet);
  }
  // FIXME: Copy anything other than sret? Shouldn't be necessary for normal
  // C ABI, but might show up in other cases.
  BasicBlock *BB = BasicBlock::Create(M->getContext(), "", F);
  IRBuilder<> IRB(BB);
  Value *CalleePtr =
      M->getOrInsertGlobal("__os_arm64x_dispatch_call_no_redirect", PtrTy);
  Value *Callee = IRB.CreateLoad(PtrTy, CalleePtr);
  auto &DL = M->getDataLayout();
  SmallVector<Value *> Args;

  // Pass the called function in x9.
  auto X64TyOffset = 1;
  Args.push_back(F->arg_begin());

  Type *RetTy = Arm64Ty->getReturnType();
  if (RetTy != X64Ty->getReturnType()) {
    // If the return type is an array or struct, translate it. Values of size
    // 8 or less go into RAX; bigger values go into memory, and we pass a
    // pointer.
    if (DL.getTypeStoreSize(RetTy) > 8) {
      Args.push_back(IRB.CreateAlloca(RetTy));
      X64TyOffset++;
    }
  }

  for (auto [Arg, X64ArgType, ArgTranslation] : llvm::zip_equal(
           make_range(F->arg_begin() + 1, F->arg_end()),
           make_range(X64Ty->param_begin() + X64TyOffset, X64Ty->param_end()),
           ArgTranslations)) {
    // Translate arguments from AArch64 calling convention to x86 calling
    // convention.
    //
    // For simple types, we don't need to do any translation: they're
    // represented the same way. (Implicit sign extension is not part of
    // either convention.)
    //
    // The big thing we have to worry about is struct types... but
    // fortunately AArch64 clang is pretty friendly here: the cases that need
    // translation are always passed as a struct or array. (If we run into
    // some cases where this doesn't work, we can teach clang to mark it up
    // with an attribute.)
    //
    // The first argument is the called function, stored in x9.
    if (ArgTranslation != ThunkArgTranslation::Direct) {
      Value *Mem = IRB.CreateAlloca(Arg.getType());
      IRB.CreateStore(&Arg, Mem);
      if (ArgTranslation == ThunkArgTranslation::Bitcast) {
        Type *IntTy = IRB.getIntNTy(DL.getTypeStoreSizeInBits(Arg.getType()));
        Args.push_back(IRB.CreateLoad(IntTy, Mem));
      } else {
        assert(ArgTranslation == ThunkArgTranslation::PointerIndirection);
        Args.push_back(Mem);
      }
    } else {
      Args.push_back(&Arg);
    }
    assert(Args.back()->getType() == X64ArgType);
  }
  // FIXME: Transfer necessary attributes? sret? anything else?

  CallInst *Call = IRB.CreateCall(X64Ty, Callee, Args);
  Call->setCallingConv(CallingConv::ARM64EC_Thunk_X64);

  Value *RetVal = Call;
  if (RetTy != X64Ty->getReturnType()) {
    // If we rewrote the return type earlier, convert the return value to
    // the proper type.
    if (DL.getTypeStoreSize(RetTy) > 8) {
      RetVal = IRB.CreateLoad(RetTy, Args[1]);
    } else {
      Value *CastAlloca = IRB.CreateAlloca(RetTy);
      IRB.CreateStore(Call, CastAlloca);
      RetVal = IRB.CreateLoad(RetTy, CastAlloca);
    }
  }

  if (RetTy->isVoidTy())
    IRB.CreateRetVoid();
  else
    IRB.CreateRet(RetVal);
  return F;
}

// This function builds the "entry thunk", a function which translates
// arguments and return values when calling AArch64 code from x64 code.
Function *AArch64Arm64ECCallLowering::buildEntryThunk(Function *F) {
  SmallString<256> EntryThunkName;
  llvm::raw_svector_ostream EntryThunkStream(EntryThunkName);
  FunctionType *Arm64Ty, *X64Ty;
  SmallVector<ThunkArgTranslation> ArgTranslations;
  getThunkType(F->getFunctionType(), F->getAttributes(),
               Arm64ECThunkType::Entry, EntryThunkStream, Arm64Ty, X64Ty,
               ArgTranslations);
  if (Function *F = M->getFunction(EntryThunkName))
    return F;

  Function *Thunk = Function::Create(X64Ty, GlobalValue::LinkOnceODRLinkage, 0,
                                     EntryThunkName, M);
  Thunk->setCallingConv(CallingConv::ARM64EC_Thunk_X64);
  Thunk->setSection(".wowthk$aa");
  Thunk->setComdat(M->getOrInsertComdat(EntryThunkName));
  // Copy MSVC, and always set up a frame pointer. (Maybe this isn't necessary.)
  Thunk->addFnAttr("frame-pointer", "all");

  BasicBlock *BB = BasicBlock::Create(M->getContext(), "", Thunk);
  IRBuilder<> IRB(BB);

  Type *RetTy = Arm64Ty->getReturnType();
  Type *X64RetType = X64Ty->getReturnType();

  bool TransformDirectToSRet = X64RetType->isVoidTy() && !RetTy->isVoidTy();
  unsigned ThunkArgOffset = TransformDirectToSRet ? 2 : 1;
  unsigned PassthroughArgSize =
      (F->isVarArg() ? 5 : Thunk->arg_size()) - ThunkArgOffset;
  assert(ArgTranslations.size() == (F->isVarArg() ? 5 : PassthroughArgSize));

  // Translate arguments to call.
  SmallVector<Value *> Args;
  for (unsigned i = 0; i != PassthroughArgSize; ++i) {
    Value *Arg = Thunk->getArg(i + ThunkArgOffset);
    Type *ArgTy = Arm64Ty->getParamType(i);
    ThunkArgTranslation ArgTranslation = ArgTranslations[i];
    if (ArgTranslation != ThunkArgTranslation::Direct) {
      // Translate array/struct arguments to the expected type.
      if (ArgTranslation == ThunkArgTranslation::Bitcast) {
        Value *CastAlloca = IRB.CreateAlloca(ArgTy);
        IRB.CreateStore(Arg, CastAlloca);
        Arg = IRB.CreateLoad(ArgTy, CastAlloca);
      } else {
        assert(ArgTranslation == ThunkArgTranslation::PointerIndirection);
        Arg = IRB.CreateLoad(ArgTy, Arg);
      }
    }
    assert(Arg->getType() == ArgTy);
    Args.push_back(Arg);
  }

  if (F->isVarArg()) {
    // The 5th argument to variadic entry thunks is used to model the x64 sp,
    // which is passed to the thunk in x4; this can be passed to the callee as
    // the variadic argument start address after skipping over the 32 byte
    // shadow store.

    // The EC thunk CC will assign any argument marked as InReg to x4.
    Thunk->addParamAttr(5, Attribute::InReg);
    Value *Arg = Thunk->getArg(5);
    Arg = IRB.CreatePtrAdd(Arg, IRB.getInt64(0x20));
    Args.push_back(Arg);

    // Pass in a zero variadic argument size (in x5).
    Args.push_back(IRB.getInt64(0));
  }

  // Call the function passed to the thunk.
  Value *Callee = Thunk->getArg(0);
  CallInst *Call = IRB.CreateCall(Arm64Ty, Callee, Args);

  auto SRetAttr = F->getAttributes().getParamAttr(0, Attribute::StructRet);
  auto InRegAttr = F->getAttributes().getParamAttr(0, Attribute::InReg);
  if (SRetAttr.isValid() && !InRegAttr.isValid()) {
    Thunk->addParamAttr(1, SRetAttr);
    Call->addParamAttr(0, SRetAttr);
  }

  Value *RetVal = Call;
  if (TransformDirectToSRet) {
    IRB.CreateStore(RetVal, Thunk->getArg(1));
  } else if (X64RetType != RetTy) {
    Value *CastAlloca = IRB.CreateAlloca(X64RetType);
    IRB.CreateStore(Call, CastAlloca);
    RetVal = IRB.CreateLoad(X64RetType, CastAlloca);
  }

  // Return to the caller. Note that the isel has code to translate this
  // "ret" to a tail call to __os_arm64x_dispatch_ret. (Alternatively, we
  // could emit a tail call here, but that would require a dedicated calling
  // convention, which seems more complicated overall.)
  if (X64RetType->isVoidTy())
    IRB.CreateRetVoid();
  else
    IRB.CreateRet(RetVal);

  return Thunk;
}

std::optional<std::string> getArm64ECMangledFunctionName(GlobalValue &GV) {
  if (!GV.hasName()) {
    GV.setName("__unnamed");
  }

  return llvm::getArm64ECMangledFunctionName(GV.getName().str());
}

// Builds the "guest exit thunk", a helper to call a function which may or may
// not be an exit thunk. (We optimistically assume non-dllimport function
// declarations refer to functions defined in AArch64 code; if the linker
// can't prove that, we use this routine instead.)
Function *AArch64Arm64ECCallLowering::buildGuestExitThunk(Function *F) {
  llvm::raw_null_ostream NullThunkName;
  FunctionType *Arm64Ty, *X64Ty;
  SmallVector<ThunkArgTranslation> ArgTranslations;
  getThunkType(F->getFunctionType(), F->getAttributes(),
               Arm64ECThunkType::GuestExit, NullThunkName, Arm64Ty, X64Ty,
               ArgTranslations);
  auto MangledName = getArm64ECMangledFunctionName(*F);
  assert(MangledName && "Can't guest exit to function that's already native");
  std::string ThunkName = *MangledName;
  if (ThunkName[0] == '?' && ThunkName.find("@") != std::string::npos) {
    ThunkName.insert(ThunkName.find("@"), "$exit_thunk");
  } else {
    ThunkName.append("$exit_thunk");
  }
  Function *GuestExit =
      Function::Create(Arm64Ty, GlobalValue::WeakODRLinkage, 0, ThunkName, M);
  GuestExit->setComdat(M->getOrInsertComdat(ThunkName));
  GuestExit->setSection(".wowthk$aa");
  GuestExit->addMetadata(
      "arm64ec_unmangled_name",
      *MDNode::get(M->getContext(),
                   MDString::get(M->getContext(), F->getName())));
  GuestExit->setMetadata(
      "arm64ec_ecmangled_name",
      MDNode::get(M->getContext(),
                  MDString::get(M->getContext(), *MangledName)));
  F->setMetadata("arm64ec_hasguestexit", MDNode::get(M->getContext(), {}));
  BasicBlock *BB = BasicBlock::Create(M->getContext(), "", GuestExit);
  IRBuilder<> B(BB);

  // Load the global symbol as a pointer to the check function.
  Value *GuardFn;
  if (cfguard_module_flag == 2 && !F->hasFnAttribute("guard_nocf"))
    GuardFn = GuardFnCFGlobal;
  else
    GuardFn = GuardFnGlobal;
  LoadInst *GuardCheckLoad = B.CreateLoad(PtrTy, GuardFn);

  // Create new call instruction. The CFGuard check should always be a call,
  // even if the original CallBase is an Invoke or CallBr instruction.
  Function *Thunk = buildExitThunk(F->getFunctionType(), F->getAttributes());
  CallInst *GuardCheck = B.CreateCall(
      GuardFnType, GuardCheckLoad, {F, Thunk});

  // Ensure that the first argument is passed in the correct register.
  GuardCheck->setCallingConv(CallingConv::CFGuard_Check);

  SmallVector<Value *> Args(llvm::make_pointer_range(GuestExit->args()));
  CallInst *Call = B.CreateCall(Arm64Ty, GuardCheck, Args);
  Call->setTailCallKind(llvm::CallInst::TCK_MustTail);

  if (Call->getType()->isVoidTy())
    B.CreateRetVoid();
  else
    B.CreateRet(Call);

  auto SRetAttr = F->getAttributes().getParamAttr(0, Attribute::StructRet);
  auto InRegAttr = F->getAttributes().getParamAttr(0, Attribute::InReg);
  if (SRetAttr.isValid() && !InRegAttr.isValid()) {
    GuestExit->addParamAttr(0, SRetAttr);
    Call->addParamAttr(0, SRetAttr);
  }

  return GuestExit;
}

Function *
AArch64Arm64ECCallLowering::buildPatchableThunk(GlobalAlias *UnmangledAlias,
                                                GlobalAlias *MangledAlias) {
  llvm::raw_null_ostream NullThunkName;
  FunctionType *Arm64Ty, *X64Ty;
  Function *F = cast<Function>(MangledAlias->getAliasee());
  SmallVector<ThunkArgTranslation> ArgTranslations;
  getThunkType(F->getFunctionType(), F->getAttributes(),
               Arm64ECThunkType::GuestExit, NullThunkName, Arm64Ty, X64Ty,
               ArgTranslations);
  std::string ThunkName(MangledAlias->getName());
  if (ThunkName[0] == '?' && ThunkName.find("@") != std::string::npos) {
    ThunkName.insert(ThunkName.find("@"), "$hybpatch_thunk");
  } else {
    ThunkName.append("$hybpatch_thunk");
  }

  Function *GuestExit =
      Function::Create(Arm64Ty, GlobalValue::WeakODRLinkage, 0, ThunkName, M);
  GuestExit->setComdat(M->getOrInsertComdat(ThunkName));
  GuestExit->setSection(".wowthk$aa");
  BasicBlock *BB = BasicBlock::Create(M->getContext(), "", GuestExit);
  IRBuilder<> B(BB);

  // Load the global symbol as a pointer to the check function.
  LoadInst *DispatchLoad = B.CreateLoad(PtrTy, DispatchFnGlobal);

  // Create new dispatch call instruction.
  Function *ExitThunk =
      buildExitThunk(F->getFunctionType(), F->getAttributes());
  CallInst *Dispatch =
      B.CreateCall(DispatchFnType, DispatchLoad,
                   {UnmangledAlias, ExitThunk, UnmangledAlias->getAliasee()});

  // Ensure that the first arguments are passed in the correct registers.
  Dispatch->setCallingConv(CallingConv::CFGuard_Check);

  SmallVector<Value *> Args(llvm::make_pointer_range(GuestExit->args()));
  CallInst *Call = B.CreateCall(Arm64Ty, Dispatch, Args);
  Call->setTailCallKind(llvm::CallInst::TCK_MustTail);

  if (Call->getType()->isVoidTy())
    B.CreateRetVoid();
  else
    B.CreateRet(Call);

  auto SRetAttr = F->getAttributes().getParamAttr(0, Attribute::StructRet);
  auto InRegAttr = F->getAttributes().getParamAttr(0, Attribute::InReg);
  if (SRetAttr.isValid() && !InRegAttr.isValid()) {
    GuestExit->addParamAttr(0, SRetAttr);
    Call->addParamAttr(0, SRetAttr);
  }

  MangledAlias->setAliasee(GuestExit);
  return GuestExit;
}

// Lower an indirect call with inline code.
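// For example, an indirect call "call i64 %fn(i64 %x)" keeps its argument
// list, but its called operand is replaced with the pointer returned by
// "__os_arm64x_check_icall(%fn, @exit_thunk)" (a CFGuard_Check call), so the
// runtime can redirect the call through the exit thunk when %fn is x64 code.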
void AArch64Arm64ECCallLowering::lowerCall(CallBase *CB) {
  IRBuilder<> B(CB);
  Value *CalledOperand = CB->getCalledOperand();

  // If the indirect call is called within catchpad or cleanuppad,
  // we need to copy the "funclet" bundle of the call.
  SmallVector<llvm::OperandBundleDef, 1> Bundles;
  if (auto Bundle = CB->getOperandBundle(LLVMContext::OB_funclet))
    Bundles.push_back(OperandBundleDef(*Bundle));

  // Load the global symbol as a pointer to the check function.
  Value *GuardFn;
  if (cfguard_module_flag == 2 && !CB->hasFnAttr("guard_nocf"))
    GuardFn = GuardFnCFGlobal;
  else
    GuardFn = GuardFnGlobal;
  LoadInst *GuardCheckLoad = B.CreateLoad(PtrTy, GuardFn);

  // Create new call instruction. The CFGuard check should always be a call,
  // even if the original CallBase is an Invoke or CallBr instruction.
  Function *Thunk = buildExitThunk(CB->getFunctionType(), CB->getAttributes());
  CallInst *GuardCheck =
      B.CreateCall(GuardFnType, GuardCheckLoad, {CalledOperand, Thunk},
                   Bundles);

  // Ensure that the first argument is passed in the correct register.
  GuardCheck->setCallingConv(CallingConv::CFGuard_Check);

  CB->setCalledOperand(GuardCheck);
}

bool AArch64Arm64ECCallLowering::runOnModule(Module &Mod) {
  if (!GenerateThunks)
    return false;

  M = &Mod;

  // Check if this module has the cfguard flag and read its value.
  if (auto *MD =
          mdconst::extract_or_null<ConstantInt>(M->getModuleFlag("cfguard")))
    cfguard_module_flag = MD->getZExtValue();

  PtrTy = PointerType::getUnqual(M->getContext());
  I64Ty = Type::getInt64Ty(M->getContext());
  VoidTy = Type::getVoidTy(M->getContext());

  GuardFnType = FunctionType::get(PtrTy, {PtrTy, PtrTy}, false);
  DispatchFnType = FunctionType::get(PtrTy, {PtrTy, PtrTy, PtrTy}, false);
  GuardFnCFGlobal = M->getOrInsertGlobal("__os_arm64x_check_icall_cfg", PtrTy);
  GuardFnGlobal = M->getOrInsertGlobal("__os_arm64x_check_icall", PtrTy);
  DispatchFnGlobal = M->getOrInsertGlobal("__os_arm64x_dispatch_call", PtrTy);

  // Mangle names of function aliases and add the alias name to
  // arm64ec_unmangled_name metadata to ensure a weak anti-dependency symbol is
  // emitted for the alias as well. Do this early, before handling
  // hybrid_patchable functions, to avoid mangling their aliases.
  for (GlobalAlias &A : Mod.aliases()) {
    auto F = dyn_cast_or_null<Function>(A.getAliaseeObject());
    if (!F)
      continue;
    if (std::optional<std::string> MangledName =
            getArm64ECMangledFunctionName(A.getName().str())) {
      F->addMetadata("arm64ec_unmangled_name",
                     *MDNode::get(M->getContext(),
                                  MDString::get(M->getContext(), A.getName())));
      A.setName(MangledName.value());
    }
  }

  DenseMap<GlobalAlias *, GlobalAlias *> FnsMap;
  SetVector<GlobalAlias *> PatchableFns;

  for (Function &F : Mod) {
    if (F.hasPersonalityFn()) {
      GlobalValue *PersFn =
          cast<GlobalValue>(F.getPersonalityFn()->stripPointerCasts());
      if (PersFn->getValueType() && PersFn->getValueType()->isFunctionTy()) {
        if (std::optional<std::string> MangledName =
                getArm64ECMangledFunctionName(PersFn->getName().str())) {
          PersFn->setName(MangledName.value());
        }
      }
    }

    if (!F.hasFnAttribute(Attribute::HybridPatchable) ||
        F.isDeclarationForLinker() || F.hasLocalLinkage() ||
        F.getName().ends_with(HybridPatchableTargetSuffix))
      continue;

    // Rename hybrid patchable functions and change callers to use a global
    // alias instead.
    if (std::optional<std::string> MangledName =
            getArm64ECMangledFunctionName(F.getName().str())) {
      std::string OrigName(F.getName());
      F.setName(MangledName.value() + HybridPatchableTargetSuffix);

      // The unmangled symbol is a weak alias to an undefined symbol with the
      // "EXP+" prefix. This undefined symbol is resolved by the linker by
      // creating an x86 thunk that jumps back to the actual EC target. Since we
      // can't represent that in IR, we create an alias to the target instead.
      // The "EXP+" symbol is set as metadata, which is then used by
      // emitGlobalAlias to emit the right alias.
      auto *A =
          GlobalAlias::create(GlobalValue::LinkOnceODRLinkage, OrigName, &F);
      auto *AM = GlobalAlias::create(GlobalValue::LinkOnceODRLinkage,
                                     MangledName.value(), &F);
      F.replaceUsesWithIf(AM,
                          [](Use &U) { return isa<GlobalAlias>(U.getUser()); });
      F.replaceAllUsesWith(A);
      F.setMetadata("arm64ec_exp_name",
                    MDNode::get(M->getContext(),
                                MDString::get(M->getContext(),
                                              "EXP+" + MangledName.value())));
      A->setAliasee(&F);
      AM->setAliasee(&F);

      if (F.hasDLLExportStorageClass()) {
        A->setDLLStorageClass(GlobalValue::DLLExportStorageClass);
        F.setDLLStorageClass(GlobalValue::DefaultStorageClass);
      }

      FnsMap[A] = AM;
      PatchableFns.insert(A);
    }
  }

  SetVector<GlobalValue *> DirectCalledFns;
  for (Function &F : Mod)
    if (!F.isDeclarationForLinker() &&
        F.getCallingConv() != CallingConv::ARM64EC_Thunk_Native &&
        F.getCallingConv() != CallingConv::ARM64EC_Thunk_X64)
      processFunction(F, DirectCalledFns, FnsMap);

  struct ThunkInfo {
    Constant *Src;
    Constant *Dst;
    Arm64ECThunkType Kind;
  };
  SmallVector<ThunkInfo> ThunkMapping;
  for (Function &F : Mod) {
    if (!F.isDeclarationForLinker() &&
        (!F.hasLocalLinkage() || F.hasAddressTaken()) &&
        F.getCallingConv() != CallingConv::ARM64EC_Thunk_Native &&
        F.getCallingConv() != CallingConv::ARM64EC_Thunk_X64) {
      if (!F.hasComdat())
        F.setComdat(Mod.getOrInsertComdat(F.getName()));
      ThunkMapping.push_back(
          {&F, buildEntryThunk(&F), Arm64ECThunkType::Entry});
    }
  }
  for (GlobalValue *O : DirectCalledFns) {
    auto GA = dyn_cast<GlobalAlias>(O);
    auto F = dyn_cast<Function>(GA ? GA->getAliasee() : O);
    ThunkMapping.push_back(
        {O, buildExitThunk(F->getFunctionType(), F->getAttributes()),
         Arm64ECThunkType::Exit});
    if (!GA && !F->hasDLLImportStorageClass())
      ThunkMapping.push_back(
          {buildGuestExitThunk(F), F, Arm64ECThunkType::GuestExit});
  }
  for (GlobalAlias *A : PatchableFns) {
    Function *Thunk = buildPatchableThunk(A, FnsMap[A]);
    ThunkMapping.push_back({Thunk, A, Arm64ECThunkType::GuestExit});
  }

  if (!ThunkMapping.empty()) {
    SmallVector<Constant *> ThunkMappingArrayElems;
    for (ThunkInfo &Thunk : ThunkMapping) {
      ThunkMappingArrayElems.push_back(ConstantStruct::getAnon(
          {Thunk.Src, Thunk.Dst,
           ConstantInt::get(M->getContext(), APInt(32, uint8_t(Thunk.Kind)))}));
    }
    Constant *ThunkMappingArray = ConstantArray::get(
        llvm::ArrayType::get(ThunkMappingArrayElems[0]->getType(),
                             ThunkMappingArrayElems.size()),
        ThunkMappingArrayElems);
    new GlobalVariable(Mod, ThunkMappingArray->getType(), /*isConstant*/ false,
                       GlobalValue::ExternalLinkage, ThunkMappingArray,
                       "llvm.arm64ec.symbolmap");
  }

  return true;
}

bool AArch64Arm64ECCallLowering::processFunction(
    Function &F, SetVector<GlobalValue *> &DirectCalledFns,
    DenseMap<GlobalAlias *, GlobalAlias *> &FnsMap) {
  SmallVector<CallBase *, 8> IndirectCalls;

  // For ARM64EC targets, a function definition's name is mangled differently
  // from the normal symbol. We currently have no representation of this sort
  // of symbol in IR, so we change the name to the mangled name, then store
  // the unmangled name as metadata. Later passes that need the unmangled
  // name (emitting the definition) can grab it from the metadata.
  //
  // FIXME: Handle functions with weak linkage?
  if (!F.hasLocalLinkage() || F.hasAddressTaken()) {
    if (std::optional<std::string> MangledName =
            getArm64ECMangledFunctionName(F)) {
      F.addMetadata("arm64ec_unmangled_name",
                    *MDNode::get(M->getContext(),
                                 MDString::get(M->getContext(), F.getName())));
      if (F.hasComdat() && F.getComdat()->getName() == F.getName()) {
        Comdat *MangledComdat = M->getOrInsertComdat(MangledName.value());
        SmallVector<GlobalObject *> ComdatUsers =
            to_vector(F.getComdat()->getUsers());
        for (GlobalObject *User : ComdatUsers)
          User->setComdat(MangledComdat);
      }
      F.setName(MangledName.value());
    }
  }

  // Iterate over the instructions to find all indirect call/invoke/callbr
  // instructions. Make a separate list of pointers to indirect
  // call/invoke/callbr instructions because the original instructions will be
  // deleted as the checks are added.
  for (BasicBlock &BB : F) {
    for (Instruction &I : BB) {
      auto *CB = dyn_cast<CallBase>(&I);
      if (!CB || CB->getCallingConv() == CallingConv::ARM64EC_Thunk_X64 ||
          CB->isInlineAsm())
        continue;

      // We need to instrument any call that isn't directly calling an
      // ARM64 function.
      //
      // FIXME: getCalledFunction() fails if there's a bitcast (e.g.
      // unprototyped functions in C)
      if (Function *F = CB->getCalledFunction()) {
        if (!LowerDirectToIndirect || F->hasLocalLinkage() ||
            F->isIntrinsic() || !F->isDeclarationForLinker())
          continue;

        DirectCalledFns.insert(F);
        continue;
      }

      // Use the mangled global alias for direct calls to patchable functions.
      if (GlobalAlias *A = dyn_cast<GlobalAlias>(CB->getCalledOperand())) {
        auto I = FnsMap.find(A);
        if (I != FnsMap.end()) {
          CB->setCalledOperand(I->second);
          DirectCalledFns.insert(I->first);
          continue;
        }
      }

      IndirectCalls.push_back(CB);
      ++Arm64ECCallsLowered;
    }
  }

  if (IndirectCalls.empty())
    return false;

  for (CallBase *CB : IndirectCalls)
    lowerCall(CB);

  return true;
}

char AArch64Arm64ECCallLowering::ID = 0;
INITIALIZE_PASS(AArch64Arm64ECCallLowering, "Arm64ECCallLowering",
                "AArch64Arm64ECCallLowering", false, false)

ModulePass *llvm::createAArch64Arm64ECCallLoweringPass() {
  return new AArch64Arm64ECCallLowering;
}