LLVM 22.0.0git
PreISelIntrinsicLowering.cpp
//===- PreISelIntrinsicLowering.cpp - Pre-ISel intrinsic lowering pass ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass implements IR lowering for the llvm.memcpy, llvm.memmove,
// llvm.memset, llvm.load.relative and llvm.objc.* intrinsics.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/PreISelIntrinsicLowering.h"
#include "llvm/Analysis/ObjCARCInstKind.h"
#include "llvm/Analysis/ObjCARCUtil.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/RuntimeLibcalls.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/Scalar/LowerConstantIntrinsics.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"
#include "llvm/Transforms/Utils/LowerMemIntrinsics.h"
#include "llvm/Transforms/Utils/LowerVectorIntrinsics.h"

using namespace llvm;

/// Threshold to leave statically sized memory intrinsic calls. Calls of known
/// size larger than this will be expanded by the pass. Calls of unknown or
/// lower size will be left for expansion in codegen.
static cl::opt<int64_t> MemIntrinsicExpandSizeThresholdOpt(
    "mem-intrinsic-expand-size",
    cl::desc("Set minimum mem intrinsic size to expand in IR"), cl::init(-1),
    cl::Hidden);

namespace {

struct PreISelIntrinsicLowering {
  const TargetMachine *TM;
  const function_ref<TargetTransformInfo &(Function &)> LookupTTI;
  const function_ref<TargetLibraryInfo &(Function &)> LookupTLI;

  /// If this is true, assume it's preferable to leave memory intrinsic calls
  /// for replacement with a library call later. Otherwise this depends on
  /// TargetLoweringInfo availability of the corresponding function.
  const bool UseMemIntrinsicLibFunc;

  explicit PreISelIntrinsicLowering(
      const TargetMachine *TM_,
      function_ref<TargetTransformInfo &(Function &)> LookupTTI_,
      function_ref<TargetLibraryInfo &(Function &)> LookupTLI_,
      bool UseMemIntrinsicLibFunc_ = true)
      : TM(TM_), LookupTTI(LookupTTI_), LookupTLI(LookupTLI_),
        UseMemIntrinsicLibFunc(UseMemIntrinsicLibFunc_) {}

  static bool shouldExpandMemIntrinsicWithSize(Value *Size,
                                               const TargetTransformInfo &TTI);
  bool
  expandMemIntrinsicUses(Function &F,
                         DenseMap<Constant *, GlobalVariable *> &CMap) const;
  bool lowerIntrinsics(Module &M) const;
};

} // namespace

template <class T> static bool forEachCall(Function &Intrin, T Callback) {
  // Lowering all intrinsics in a function will delete multiple uses, so we
  // can't use an early-inc-range. In case some remain, we don't want to look
  // at them again. Unfortunately, Value::UseList is private, so we can't use a
  // simple Use**. If LastUse is null, the next use to consider is
  // Intrin.use_begin(), otherwise it's LastUse->getNext().
  Use *LastUse = nullptr;
  bool Changed = false;
  while (!Intrin.use_empty() && (!LastUse || LastUse->getNext())) {
    Use *U = LastUse ? LastUse->getNext() : &*Intrin.use_begin();
    bool Removed = false;
    // An intrinsic cannot have its address taken, so it cannot be an argument
    // operand. It might be used as operand in debug metadata, though.
    if (auto CI = dyn_cast<CallInst>(U->getUser()))
      Changed |= Removed = Callback(CI);
    if (!Removed)
      LastUse = U;
  }
  return Changed;
}

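// Lower llvm.load.relative(ptr, offset) to ptr + *(i32 *)(ptr + offset): the
// 32-bit relative offset is loaded from ptr + offset and added back to the
// base pointer.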
static bool lowerLoadRelative(Function &F) {
  if (F.use_empty())
    return false;

  bool Changed = false;
  Type *Int32Ty = Type::getInt32Ty(F.getContext());

  for (Use &U : llvm::make_early_inc_range(F.uses())) {
    auto CI = dyn_cast<CallInst>(U.getUser());
    if (!CI || CI->getCalledOperand() != &F)
      continue;

    IRBuilder<> B(CI);
    Value *OffsetPtr =
        B.CreatePtrAdd(CI->getArgOperand(0), CI->getArgOperand(1));
    Value *OffsetI32 = B.CreateAlignedLoad(Int32Ty, OffsetPtr, Align(4));

    Value *ResultPtr = B.CreatePtrAdd(CI->getArgOperand(0), OffsetI32);

    CI->replaceAllUsesWith(ResultPtr);
    CI->eraseFromParent();
    Changed = true;
  }

  return Changed;
}

// ObjCARC has knowledge about whether an obj-c runtime function needs to be
// always tail-called or never tail-called.
static CallInst::TailCallKind getOverridingTailCallKind(const Function &F) {
  objcarc::ARCInstKind Kind = objcarc::GetFunctionClass(&F);
  if (objcarc::IsAlwaysTail(Kind))
    return CallInst::TCK_Tail;
  else if (objcarc::IsNeverTail(Kind))
    return CallInst::TCK_NoTail;
  return CallInst::TCK_None;
}

static bool lowerObjCCall(Function &F, RTLIB::LibcallImpl NewFn,
                          bool setNonLazyBind = false) {
  assert(IntrinsicInst::mayLowerToFunctionCall(F.getIntrinsicID()) &&
         "Pre-ISel intrinsics do lower into regular function calls");
  if (F.use_empty())
    return false;

  // FIXME: When RuntimeLibcalls is an analysis, check if the function is really
  // supported, and go through RTLIB::Libcall.
  StringRef NewFnName = RTLIB::RuntimeLibcallsInfo::getLibcallImplName(NewFn);

  // If we haven't already looked up this function, check to see if the
  // program already contains a function with this name.
  Module *M = F.getParent();
  FunctionCallee FCache =
      M->getOrInsertFunction(NewFnName, F.getFunctionType());

  if (Function *Fn = dyn_cast<Function>(FCache.getCallee())) {
    Fn->setLinkage(F.getLinkage());
    if (setNonLazyBind && !Fn->isWeakForLinker()) {
      // If we have Native ARC, set nonlazybind attribute for these APIs for
      // performance.
      Fn->addFnAttr(Attribute::NonLazyBind);
    }
  }

  CallInst::TailCallKind OverridingTCK = getOverridingTailCallKind(F);

  for (Use &U : llvm::make_early_inc_range(F.uses())) {
    auto *CB = cast<CallBase>(U.getUser());

    if (CB->getCalledFunction() != &F) {
      assert(objcarc::getAttachedARCFunction(CB) == &F &&
             "use expected to be the argument of operand bundle "
             "\"clang.arc.attachedcall\"");
      U.set(FCache.getCallee());
      continue;
    }

    auto *CI = cast<CallInst>(CB);
    assert(CI->getCalledFunction() && "Cannot lower an indirect call!");

    IRBuilder<> Builder(CI->getParent(), CI->getIterator());
    SmallVector<Value *, 8> Args(CI->args());
    SmallVector<llvm::OperandBundleDef, 1> BundleList;
    CI->getOperandBundlesAsDefs(BundleList);
    CallInst *NewCI = Builder.CreateCall(FCache, Args, BundleList);
    NewCI->setName(CI->getName());

    // Try to set the most appropriate TailCallKind based on both the current
    // attributes and the ones that we could get from ObjCARC's special
    // knowledge of the runtime functions.
    //
    // std::max respects both requirements of notail and tail here:
    // * notail on either the call or from ObjCARC becomes notail
    // * tail on either side is stronger than none, but not notail
    CallInst::TailCallKind TCK = CI->getTailCallKind();
    NewCI->setTailCallKind(std::max(TCK, OverridingTCK));

    // Transfer the 'returned' attribute from the intrinsic to the call site.
    // By applying this only to intrinsic call sites, we avoid applying it to
    // non-ARC explicit calls to things like objc_retain which have not been
    // auto-upgraded to use the intrinsics.
    unsigned Index;
    if (F.getAttributes().hasAttrSomewhere(Attribute::Returned, &Index) &&
        Index)
      NewCI->addParamAttr(Index - AttributeList::FirstArgIndex,
                          Attribute::Returned);

    if (!CI->use_empty())
      CI->replaceAllUsesWith(NewCI);
    CI->eraseFromParent();
  }

  return true;
}

// TODO: Should refine based on estimated number of accesses (e.g. does it
// require splitting based on alignment)
bool PreISelIntrinsicLowering::shouldExpandMemIntrinsicWithSize(
    Value *Size, const TargetTransformInfo &TTI) {
  ConstantInt *CI = dyn_cast<ConstantInt>(Size);
  if (!CI)
    return true;
  uint64_t Threshold = MemIntrinsicExpandSizeThresholdOpt.getNumOccurrences()
                           ? MemIntrinsicExpandSizeThresholdOpt
                           : TTI.getMaxMemIntrinsicInlineSizeThreshold();
  uint64_t SizeVal = CI->getZExtValue();

  // Treat a threshold of 0 as a special case to force expansion of all
  // intrinsics, including size 0.
  return SizeVal > Threshold || Threshold == 0;
}

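// A libcall can only be emitted if the target actually provides the routine;
// without a TargetMachine we optimistically assume it does.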
static bool canEmitLibcall(const TargetMachine *TM, Function *F,
                           RTLIB::Libcall LC) {
  // TODO: Should this consider the address space of the memcpy?
  if (!TM)
    return true;
  const TargetLowering *TLI = TM->getSubtargetImpl(*F)->getTargetLowering();
  return TLI->getLibcallName(LC) != nullptr;
}

static bool canEmitMemcpy(const TargetMachine *TM, Function *F) {
  // TODO: Should this consider the address space of the memcpy?
  if (!TM)
    return true;
  const TargetLowering *TLI = TM->getSubtargetImpl(*F)->getTargetLowering();
  return TLI->getMemcpyName() != nullptr;
}

// Return a value appropriate for use with the memset_pattern16 libcall, if
// possible and if we know how. (Adapted from equivalent helper in
// LoopIdiomRecognize).
static Constant *getMemSetPattern16Value(MemSetPatternInst *Inst,
                                         const TargetLibraryInfo &TLI) {
  // TODO: This could check for UndefValue because it can be merged into any
  // other valid pattern.

  // Don't emit libcalls if a non-default address space is being used.
  if (Inst->getRawDest()->getType()->getPointerAddressSpace() != 0)
    return nullptr;

  Value *V = Inst->getValue();
  Type *VTy = V->getType();
  const DataLayout &DL = Inst->getDataLayout();
  Module *M = Inst->getModule();

  if (!isLibFuncEmittable(M, &TLI, LibFunc_memset_pattern16))
    return nullptr;

  // If the value isn't a constant, we can't promote it to being in a constant
  // array. We could theoretically do a store to an alloca or something, but
  // that doesn't seem worthwhile.
  Constant *C = dyn_cast<Constant>(V);
  if (!C || isa<ConstantExpr>(C))
    return nullptr;

  // Only handle simple values that are a power of two bytes in size.
  uint64_t Size = DL.getTypeSizeInBits(VTy);
  if (!DL.typeSizeEqualsStoreSize(VTy) || !isPowerOf2_64(Size))
    return nullptr;

  // Don't care enough about darwin/ppc to implement this.
  if (DL.isBigEndian())
    return nullptr;

  // Convert to size in bytes.
  Size /= 8;

  // TODO: If CI is larger than 16 bytes, we can try slicing it in half to see
  // if the top and bottom are the same (e.g. for vectors and large integers).
  if (Size > 16)
    return nullptr;

  // If the constant is exactly 16 bytes, just use it.
  if (Size == 16)
    return C;

  // Otherwise, we'll use an array of the constants.
  uint64_t ArraySize = 16 / Size;
  ArrayType *AT = ArrayType::get(V->getType(), ArraySize);
  return ConstantArray::get(AT, std::vector<Constant *>(ArraySize, C));
}

// TODO: Handle atomic memcpy and memcpy.inline
// TODO: Pass ScalarEvolution
bool PreISelIntrinsicLowering::expandMemIntrinsicUses(
    Function &F, DenseMap<Constant *, GlobalVariable *> &CMap) const {
  Intrinsic::ID ID = F.getIntrinsicID();
  bool Changed = false;

  for (User *U : llvm::make_early_inc_range(F.users())) {
    Instruction *Inst = cast<Instruction>(U);

    switch (ID) {
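    // For statically sized memcpy/memmove/memset above the target's threshold,
    // prefer the target's library call when one is available; otherwise expand
    // the intrinsic into an explicit load/store loop here.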
    case Intrinsic::memcpy: {
      auto *Memcpy = cast<MemCpyInst>(Inst);
      Function *ParentFunc = Memcpy->getFunction();
      const TargetTransformInfo &TTI = LookupTTI(*ParentFunc);
      if (shouldExpandMemIntrinsicWithSize(Memcpy->getLength(), TTI)) {
        if (UseMemIntrinsicLibFunc && canEmitMemcpy(TM, ParentFunc))
          break;

        // TODO: For optsize, emit the loop into a separate function
        expandMemCpyAsLoop(Memcpy, TTI);
        Changed = true;
        Memcpy->eraseFromParent();
      }

      break;
    }
    case Intrinsic::memcpy_inline: {
      // Only expand llvm.memcpy.inline with non-constant length in this
      // codepath, leaving the current SelectionDAG expansion for constant
      // length memcpy intrinsics undisturbed.
      auto *Memcpy = cast<MemCpyInst>(Inst);
      if (isa<ConstantInt>(Memcpy->getLength()))
        break;

      Function *ParentFunc = Memcpy->getFunction();
      const TargetTransformInfo &TTI = LookupTTI(*ParentFunc);
      expandMemCpyAsLoop(Memcpy, TTI);
      Changed = true;
      Memcpy->eraseFromParent();
      break;
    }
    case Intrinsic::memmove: {
      auto *Memmove = cast<MemMoveInst>(Inst);
      Function *ParentFunc = Memmove->getFunction();
      const TargetTransformInfo &TTI = LookupTTI(*ParentFunc);
      if (shouldExpandMemIntrinsicWithSize(Memmove->getLength(), TTI)) {
        if (UseMemIntrinsicLibFunc &&
            canEmitLibcall(TM, ParentFunc, RTLIB::MEMMOVE))
          break;

        if (expandMemMoveAsLoop(Memmove, TTI)) {
          Changed = true;
          Memmove->eraseFromParent();
        }
      }

      break;
    }
    case Intrinsic::memset: {
      auto *Memset = cast<MemSetInst>(Inst);
      Function *ParentFunc = Memset->getFunction();
      const TargetTransformInfo &TTI = LookupTTI(*ParentFunc);
      if (shouldExpandMemIntrinsicWithSize(Memset->getLength(), TTI)) {
        if (UseMemIntrinsicLibFunc &&
            canEmitLibcall(TM, ParentFunc, RTLIB::MEMSET))
          break;

        expandMemSetAsLoop(Memset);
        Changed = true;
        Memset->eraseFromParent();
      }

      break;
    }
    case Intrinsic::memset_inline: {
      // Only expand llvm.memset.inline with non-constant length in this
      // codepath, leaving the current SelectionDAG expansion for constant
      // length memset intrinsics undisturbed.
      auto *Memset = cast<MemSetInst>(Inst);
      if (isa<ConstantInt>(Memset->getLength()))
        break;

      expandMemSetAsLoop(Memset);
      Changed = true;
      Memset->eraseFromParent();
      break;
    }
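    // llvm.experimental.memset.pattern stores a multi-byte pattern. When the
    // target provides memset_pattern16 (e.g. Darwin), lower it to that
    // libcall; otherwise expand the intrinsic into a loop.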
    case Intrinsic::experimental_memset_pattern: {
      auto *Memset = cast<MemSetPatternInst>(Inst);
      const TargetLibraryInfo &TLI = LookupTLI(*Memset->getFunction());
      Constant *PatternValue = getMemSetPattern16Value(Memset, TLI);
      if (!PatternValue) {
        // If it isn't possible to emit a memset_pattern16 libcall, expand to
        // a loop instead.
        expandMemSetPatternAsLoop(Memset);
        Changed = true;
        Memset->eraseFromParent();
        break;
      }
      // FIXME: There is currently no profitability calculation for emitting
      // the libcall vs expanding the memset.pattern directly.
      IRBuilder<> Builder(Inst);
      Module *M = Memset->getModule();
      const DataLayout &DL = Memset->getDataLayout();

      Type *DestPtrTy = Memset->getRawDest()->getType();
      Type *SizeTTy = TLI.getSizeTType(*M);
      StringRef FuncName = "memset_pattern16";
      FunctionCallee MSP = getOrInsertLibFunc(M, TLI, LibFunc_memset_pattern16,
                                              Builder.getVoidTy(), DestPtrTy,
                                              Builder.getPtrTy(), SizeTTy);
      inferNonMandatoryLibFuncAttrs(M, FuncName, TLI);

      // Otherwise we should form a memset_pattern16. PatternValue is known
      // to be a constant array of 16 bytes. Put the value into a mergeable
      // global.
      assert(Memset->getRawDest()->getType()->getPointerAddressSpace() == 0 &&
             "Should have skipped if non-zero AS");
      GlobalVariable *GV;
      auto It = CMap.find(PatternValue);
      if (It != CMap.end()) {
        GV = It->second;
      } else {
        GV = new GlobalVariable(
            *M, PatternValue->getType(), /*isConstant=*/true,
            GlobalValue::PrivateLinkage, PatternValue, ".memset_pattern");
        GV->setUnnamedAddr(
            GlobalValue::UnnamedAddr::Global); // Ok to merge these.
        // TODO: Consider relaxing alignment requirement.
        GV->setAlignment(Align(16));
        CMap[PatternValue] = GV;
      }
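      // memset_pattern16 takes the total number of bytes to store: the size of
      // one pattern element multiplied by the requested count, as a size_t.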
      Value *PatternPtr = GV;
      Value *NumBytes = Builder.CreateMul(
          TLI.getAsSizeT(DL.getTypeAllocSize(Memset->getValue()->getType()),
                         *M),
          Builder.CreateZExtOrTrunc(Memset->getLength(), SizeTTy));
      CallInst *MemsetPattern16Call =
          Builder.CreateCall(MSP, {Memset->getRawDest(), PatternPtr, NumBytes});
      MemsetPattern16Call->setAAMetadata(Memset->getAAMetadata());
      // Preserve any call site attributes on the destination pointer
      // argument (e.g. alignment).
      AttrBuilder ArgAttrs(Memset->getContext(),
                           Memset->getAttributes().getParamAttrs(0));
      MemsetPattern16Call->setAttributes(
          MemsetPattern16Call->getAttributes().addParamAttributes(
              Memset->getContext(), 0, ArgAttrs));
      Changed = true;
      Memset->eraseFromParent();
      break;
    }
    default:
      llvm_unreachable("unhandled intrinsic");
    }
  }

  return Changed;
}

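// Walk every function declared in the module and lower any intrinsic this pass
// knows how to handle. Returns true if any call site was rewritten.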
bool PreISelIntrinsicLowering::lowerIntrinsics(Module &M) const {
  // Map unique constants to globals.
  DenseMap<Constant *, GlobalVariable *> CMap;
  bool Changed = false;
  for (Function &F : M) {
    switch (F.getIntrinsicID()) {
    default:
      break;
    case Intrinsic::memcpy:
    case Intrinsic::memcpy_inline:
    case Intrinsic::memmove:
    case Intrinsic::memset:
    case Intrinsic::memset_inline:
    case Intrinsic::experimental_memset_pattern:
      Changed |= expandMemIntrinsicUses(F, CMap);
      break;
    case Intrinsic::load_relative:
      Changed |= lowerLoadRelative(F);
      break;
    case Intrinsic::is_constant:
    case Intrinsic::objectsize:
      Changed |= forEachCall(F, [&](CallInst *CI) {
        Function *Parent = CI->getParent()->getParent();
        TargetLibraryInfo &TLI = LookupTLI(*Parent);
        // Intrinsics in unreachable code are not lowered.
        bool Changed = lowerConstantIntrinsics(*Parent, TLI, /*DT=*/nullptr);
        return Changed;
      });
      break;
#define BEGIN_REGISTER_VP_INTRINSIC(VPID, MASKPOS, VLENPOS)                    \
  case Intrinsic::VPID:
#include "llvm/IR/VPIntrinsics.def"
      forEachCall(F, [&](CallInst *CI) {
        Function *Parent = CI->getParent()->getParent();
        const TargetTransformInfo &TTI = LookupTTI(*Parent);
        auto *VPI = cast<VPIntrinsic>(CI);
        VPExpansionDetails ED = expandVectorPredicationIntrinsic(*VPI, TTI);
        // Expansion of VP intrinsics may change the IR but not actually
        // replace the intrinsic, so update Changed for the pass
        // and compute Removed for forEachCall.
        Changed |= ED != VPExpansionDetails::IntrinsicUnchanged;
        bool Removed = ED == VPExpansionDetails::IntrinsicReplaced;
        return Removed;
      });
      break;
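    // Each llvm.objc.* intrinsic is rewritten as a direct call to the
    // corresponding Objective-C runtime function; see lowerObjCCall above.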
    case Intrinsic::objc_autorelease:
      Changed |= lowerObjCCall(F, RTLIB::impl_objc_autorelease);
      break;
    case Intrinsic::objc_autoreleasePoolPop:
      Changed |= lowerObjCCall(F, RTLIB::impl_objc_autoreleasePoolPop);
      break;
    case Intrinsic::objc_autoreleasePoolPush:
      Changed |= lowerObjCCall(F, RTLIB::impl_objc_autoreleasePoolPush);
      break;
    case Intrinsic::objc_autoreleaseReturnValue:
      Changed |= lowerObjCCall(F, RTLIB::impl_objc_autoreleaseReturnValue);
      break;
    case Intrinsic::objc_copyWeak:
      Changed |= lowerObjCCall(F, RTLIB::impl_objc_copyWeak);
      break;
    case Intrinsic::objc_destroyWeak:
      Changed |= lowerObjCCall(F, RTLIB::impl_objc_destroyWeak);
      break;
    case Intrinsic::objc_initWeak:
      Changed |= lowerObjCCall(F, RTLIB::impl_objc_initWeak);
      break;
    case Intrinsic::objc_loadWeak:
      Changed |= lowerObjCCall(F, RTLIB::impl_objc_loadWeak);
      break;
    case Intrinsic::objc_loadWeakRetained:
      Changed |= lowerObjCCall(F, RTLIB::impl_objc_loadWeakRetained);
      break;
    case Intrinsic::objc_moveWeak:
      Changed |= lowerObjCCall(F, RTLIB::impl_objc_moveWeak);
      break;
    case Intrinsic::objc_release:
      Changed |= lowerObjCCall(F, RTLIB::impl_objc_release, true);
      break;
    case Intrinsic::objc_retain:
      Changed |= lowerObjCCall(F, RTLIB::impl_objc_retain, true);
      break;
    case Intrinsic::objc_retainAutorelease:
      Changed |= lowerObjCCall(F, RTLIB::impl_objc_retainAutorelease);
      break;
    case Intrinsic::objc_retainAutoreleaseReturnValue:
      Changed |=
          lowerObjCCall(F, RTLIB::impl_objc_retainAutoreleaseReturnValue);
      break;
    case Intrinsic::objc_retainAutoreleasedReturnValue:
      Changed |=
          lowerObjCCall(F, RTLIB::impl_objc_retainAutoreleasedReturnValue);
      break;
    case Intrinsic::objc_claimAutoreleasedReturnValue:
      Changed |=
          lowerObjCCall(F, RTLIB::impl_objc_claimAutoreleasedReturnValue);
      break;
    case Intrinsic::objc_retainBlock:
      Changed |= lowerObjCCall(F, RTLIB::impl_objc_retainBlock);
      break;
    case Intrinsic::objc_storeStrong:
      Changed |= lowerObjCCall(F, RTLIB::impl_objc_storeStrong);
      break;
    case Intrinsic::objc_storeWeak:
      Changed |= lowerObjCCall(F, RTLIB::impl_objc_storeWeak);
      break;
    case Intrinsic::objc_unsafeClaimAutoreleasedReturnValue:
      Changed |=
          lowerObjCCall(F, RTLIB::impl_objc_unsafeClaimAutoreleasedReturnValue);
      break;
    case Intrinsic::objc_retainedObject:
      Changed |= lowerObjCCall(F, RTLIB::impl_objc_retainedObject);
      break;
    case Intrinsic::objc_unretainedObject:
      Changed |= lowerObjCCall(F, RTLIB::impl_objc_unretainedObject);
      break;
    case Intrinsic::objc_unretainedPointer:
      Changed |= lowerObjCCall(F, RTLIB::impl_objc_unretainedPointer);
      break;
    case Intrinsic::objc_retain_autorelease:
      Changed |= lowerObjCCall(F, RTLIB::impl_objc_retain_autorelease);
      break;
    case Intrinsic::objc_sync_enter:
      Changed |= lowerObjCCall(F, RTLIB::impl_objc_sync_enter);
      break;
    case Intrinsic::objc_sync_exit:
      Changed |= lowerObjCCall(F, RTLIB::impl_objc_sync_exit);
      break;
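    // exp/exp2/log on scalable vectors cannot be unrolled during instruction
    // selection; if the target would have to expand the operation, lower the
    // intrinsic to a loop over the vector elements instead.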
    case Intrinsic::exp:
    case Intrinsic::exp2:
    case Intrinsic::log:
      Changed |= forEachCall(F, [&](CallInst *CI) {
        Type *Ty = CI->getArgOperand(0)->getType();
        if (!isa<ScalableVectorType>(Ty))
          return false;
        const TargetLowering *TL = TM->getSubtargetImpl(F)->getTargetLowering();
        unsigned Op = TL->IntrinsicIDToISD(F.getIntrinsicID());
        assert(Op != ISD::DELETED_NODE && "unsupported intrinsic");
        if (!TL->isOperationExpand(Op, EVT::getEVT(Ty)))
          return false;
        return lowerUnaryVectorIntrinsicAsLoop(M, CI);
      });
      break;
    }
  }
  return Changed;
}

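// Legacy pass manager wrapper: TTI, TLI and the TargetMachine are obtained
// from the corresponding analysis wrapper passes and forwarded to the shared
// PreISelIntrinsicLowering implementation.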
namespace {

class PreISelIntrinsicLoweringLegacyPass : public ModulePass {
public:
  static char ID;

  PreISelIntrinsicLoweringLegacyPass() : ModulePass(ID) {}

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<TargetPassConfig>();
  }

  bool runOnModule(Module &M) override {
    auto LookupTTI = [this](Function &F) -> TargetTransformInfo & {
      return this->getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
    };
    auto LookupTLI = [this](Function &F) -> TargetLibraryInfo & {
      return this->getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
    };

    const auto *TM = &getAnalysis<TargetPassConfig>().getTM<TargetMachine>();
    PreISelIntrinsicLowering Lowering(TM, LookupTTI, LookupTLI);
    return Lowering.lowerIntrinsics(M);
  }
};

} // end anonymous namespace

char PreISelIntrinsicLoweringLegacyPass::ID;

INITIALIZE_PASS_BEGIN(PreISelIntrinsicLoweringLegacyPass,
                      "pre-isel-intrinsic-lowering",
                      "Pre-ISel Intrinsic Lowering", false, false)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_END(PreISelIntrinsicLoweringLegacyPass,
                    "pre-isel-intrinsic-lowering",
                    "Pre-ISel Intrinsic Lowering", false, false)

ModulePass *llvm::createPreISelIntrinsicLoweringPass() {
  return new PreISelIntrinsicLoweringLegacyPass();
}

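// New pass manager entry point: per-function TTI and TLI lookups go through
// the FunctionAnalysisManager module proxy.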
PreservedAnalyses PreISelIntrinsicLoweringPass::run(Module &M,
                                                    ModuleAnalysisManager &AM) {
  auto &FAM = AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();

  auto LookupTTI = [&FAM](Function &F) -> TargetTransformInfo & {
    return FAM.getResult<TargetIRAnalysis>(F);
  };
  auto LookupTLI = [&FAM](Function &F) -> TargetLibraryInfo & {
    return FAM.getResult<TargetLibraryAnalysis>(F);
  };

  PreISelIntrinsicLowering Lowering(TM, LookupTTI, LookupTLI);
  if (!Lowering.lowerIntrinsics(M))
    return PreservedAnalyses::all();
  else
    return PreservedAnalyses::none();
}