95#define DEBUG_TYPE "asan"
101 std::numeric_limits<uint64_t>::max();
142 "__asan_unregister_image_globals";
155 "__asan_stack_malloc_always_";
169 "__asan_option_detect_stack_use_after_return";
172 "__asan_shadow_memory_dynamic_address";
198 "asan-kernel",
cl::desc(
"Enable KernelAddressSanitizer instrumentation"),
203 cl::desc(
"Enable recovery mode (continue-after-error)."),
207 "asan-guard-against-version-mismatch",
213 cl::desc(
"instrument read instructions"),
217 "asan-instrument-writes",
cl::desc(
"instrument write instructions"),
226 "asan-instrument-atomics",
236 "asan-always-slow-path",
241 "asan-force-dynamic-shadow",
242 cl::desc(
"Load shadow address into a local variable for each function"),
247 cl::desc(
"Access dynamic shadow through an ifunc global on "
248 "platforms that support this"),
252 "asan-with-ifunc-suppress-remat",
253 cl::desc(
"Suppress rematerialization of dynamic shadow address by passing "
254 "it through inline asm in prologue."),
262 "asan-max-ins-per-bb",
cl::init(10000),
263 cl::desc(
"maximal number of instructions to instrument in any given BB"),
270 "asan-max-inline-poisoning-size",
272 "Inline shadow poisoning for blocks up to the given size in bytes."),
276 "asan-use-after-return",
277 cl::desc(
"Sets the mode of detection for stack-use-after-return."),
280 "Never detect stack use after return."),
283 "Detect stack use after return if "
284 "binary flag 'ASAN_OPTIONS=detect_stack_use_after_return' is set."),
286 "Always detect stack use after return.")),
290 cl::desc(
"Create redzones for byval "
291 "arguments (extra copy "
296 cl::desc(
"Check stack-use-after-scope"),
305 cl::desc(
"Handle C++ initializer order"),
309 "asan-detect-invalid-pointer-pair",
314 "asan-detect-invalid-pointer-cmp",
319 "asan-detect-invalid-pointer-sub",
324 "asan-realign-stack",
325 cl::desc(
"Realign stack to the value of this flag (power of two)"),
329 "asan-instrumentation-with-call-threshold",
330 cl::desc(
"If the function being instrumented contains more than "
331 "this number of memory accesses, use callbacks instead of "
332 "inline checks (-1 means never use callbacks)."),
336 "asan-memory-access-callback-prefix",
341 "asan-kernel-mem-intrinsic-prefix",
347 cl::desc(
"instrument dynamic allocas"),
351 "asan-skip-promotable-allocas",
356 "asan-constructor-kind",
357 cl::desc(
"Sets the ASan constructor kind"),
360 "Use global constructors")),
367 cl::desc(
"scale of asan shadow mapping"),
372 cl::desc(
"offset of asan shadow mapping [EXPERIMENTAL]"),
386 "asan-opt-same-temp",
cl::desc(
"Instrument the same temp just once"),
390 cl::desc(
"Don't instrument scalar globals"),
394 "asan-opt-stack",
cl::desc(
"Don't instrument scalar stack variables"),
398 "asan-stack-dynamic-alloca",
403 "asan-force-experiment",
409 cl::desc(
"Use private aliases for global variables"),
414 cl::desc(
"Use odr indicators to improve ODR reporting"),
419 cl::desc(
"Use linker features to support dead "
420 "code stripping of globals"),
427 cl::desc(
"Place ASan constructors in comdat sections"),
431 "asan-destructor-kind",
432 cl::desc(
"Sets the ASan destructor kind. The default is to use the value "
433 "provided to the pass constructor"),
436 "Use global destructors")),
456STATISTIC(NumInstrumentedReads,
"Number of instrumented reads");
457STATISTIC(NumInstrumentedWrites,
"Number of instrumented writes");
459 "Number of optimized accesses to global vars");
461 "Number of optimized accesses to stack vars");
470struct ShadowMapping {
481 bool IsAndroid = TargetTriple.
isAndroid();
484 bool IsMacOS = TargetTriple.
isMacOSX();
487 bool IsPS = TargetTriple.
isPS();
493 bool IsMIPSN32ABI = TargetTriple.
isABIN32();
494 bool IsMIPS32 = TargetTriple.
isMIPS32();
495 bool IsMIPS64 = TargetTriple.
isMIPS64();
496 bool IsArmOrThumb = TargetTriple.
isARM() || TargetTriple.
isThumb();
503 bool IsAMDGPU = TargetTriple.
isAMDGPU();
505 bool IsWasm = TargetTriple.
isWasm();
507 ShadowMapping Mapping;
514 if (LongSize == 32) {
517 else if (IsMIPSN32ABI)
542 else if (IsFreeBSD && IsAArch64)
544 else if (IsFreeBSD && !IsMIPS64) {
549 }
else if (IsNetBSD) {
556 else if (IsLinux && IsX86_64) {
562 }
else if (IsWindows && IsX86_64) {
568 else if (IsMacOS && IsAArch64)
572 else if (IsLoongArch64)
579 else if (IsHaiku && IsX86_64)
599 Mapping.OrShadowOffset = !IsAArch64 && !IsPPC64 && !IsSystemZ && !IsPS &&
600 !IsRISCV64 && !IsLoongArch64 &&
601 !(Mapping.Offset & (Mapping.Offset - 1)) &&
603 bool IsAndroidWithIfuncSupport =
605 Mapping.InGlobal =
ClWithIfunc && IsAndroidWithIfuncSupport && IsArmOrThumb;
613 int *MappingScale,
bool *OrShadowOffset) {
615 *ShadowBase = Mapping.Offset;
616 *MappingScale = Mapping.Scale;
617 *OrShadowOffset = Mapping.OrShadowOffset;
636 if (!
F.doesNotAccessMemory()) {
637 bool WritesMemory = !
F.onlyReadsMemory();
638 bool ReadsMemory = !
F.onlyWritesMemory();
639 if ((WritesMemory && !ReadsMemory) ||
F.onlyAccessesArgMemory()) {
640 F.removeFnAttr(Attribute::Memory);
646 if (
A.hasAttribute(Attribute::WriteOnly)) {
647 A.removeAttr(Attribute::WriteOnly);
655 F.addFnAttr(Attribute::NoBuiltin);
678 return std::max(32U, 1U << MappingScale);
697class RuntimeCallInserter {
699 bool TrackInsertedCalls =
false;
703 RuntimeCallInserter(Function &Fn) : OwnerFn(&Fn) {
705 auto Personality = classifyEHPersonality(Fn.getPersonalityFn());
706 if (isScopedEHPersonality(Personality))
707 TrackInsertedCalls = true;
711 ~RuntimeCallInserter() {
712 if (InsertedCalls.empty())
714 assert(TrackInsertedCalls &&
"Calls were wrongly tracked");
716 DenseMap<BasicBlock *, ColorVector> BlockColors =
colorEHFunclets(*OwnerFn);
717 for (CallInst *CI : InsertedCalls) {
719 assert(BB &&
"Instruction doesn't belong to a BasicBlock");
721 "Instruction doesn't belong to the expected Function!");
729 if (Colors.
size() != 1) {
730 OwnerFn->getContext().emitError(
731 "Instruction's BasicBlock is not monochromatic");
738 if (EHPadIt != Color->end() && EHPadIt->isEHPad()) {
742 OB, CI->getIterator());
743 NewCall->copyMetadata(*CI);
744 CI->replaceAllUsesWith(NewCall);
745 CI->eraseFromParent();
750 CallInst *createRuntimeCall(
IRBuilder<> &IRB, FunctionCallee Callee,
752 const Twine &
Name =
"") {
755 CallInst *Inst = IRB.
CreateCall(Callee, Args, Name,
nullptr);
756 if (TrackInsertedCalls)
757 InsertedCalls.push_back(Inst);
763struct AddressSanitizer {
764 AddressSanitizer(
Module &M,
const StackSafetyGlobalInfo *SSGI,
765 int InstrumentationWithCallsThreshold,
766 uint32_t MaxInlinePoisoningSize,
bool CompileKernel =
false,
767 bool Recover =
false,
bool UseAfterScope =
false,
769 AsanDetectStackUseAfterReturnMode::Runtime)
778 InstrumentationWithCallsThreshold(
781 : InstrumentationWithCallsThreshold),
784 : MaxInlinePoisoningSize) {
785 C = &(M.getContext());
786 DL = &M.getDataLayout();
787 LongSize = M.getDataLayout().getPointerSizeInBits();
788 IntptrTy = Type::getIntNTy(*C, LongSize);
789 PtrTy = PointerType::getUnqual(*C);
790 Int32Ty = Type::getInt32Ty(*C);
791 TargetTriple = M.getTargetTriple();
795 assert(this->UseAfterReturn != AsanDetectStackUseAfterReturnMode::Invalid);
803 bool isInterestingAlloca(
const AllocaInst &AI);
805 bool ignoreAccess(Instruction *Inst,
Value *
Ptr);
807 Instruction *
I, SmallVectorImpl<InterestingMemoryOperand> &Interesting,
808 const TargetTransformInfo *
TTI);
810 void instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis,
811 InterestingMemoryOperand &O,
bool UseCalls,
812 const DataLayout &DL, RuntimeCallInserter &RTCI);
813 void instrumentPointerComparisonOrSubtraction(Instruction *
I,
814 RuntimeCallInserter &RTCI);
816 Value *Addr, MaybeAlign Alignment,
817 uint32_t TypeStoreSize,
bool IsWrite,
818 Value *SizeArgument,
bool UseCalls, uint32_t Exp,
819 RuntimeCallInserter &RTCI);
820 Instruction *instrumentAMDGPUAddress(Instruction *OrigIns,
821 Instruction *InsertBefore,
Value *Addr,
822 uint32_t TypeStoreSize,
bool IsWrite,
823 Value *SizeArgument);
826 void instrumentUnusualSizeOrAlignment(Instruction *
I,
827 Instruction *InsertBefore,
Value *Addr,
828 TypeSize TypeStoreSize,
bool IsWrite,
829 Value *SizeArgument,
bool UseCalls,
831 RuntimeCallInserter &RTCI);
832 void instrumentMaskedLoadOrStore(AddressSanitizer *
Pass,
const DataLayout &DL,
835 MaybeAlign Alignment,
unsigned Granularity,
836 Type *OpType,
bool IsWrite,
837 Value *SizeArgument,
bool UseCalls,
838 uint32_t Exp, RuntimeCallInserter &RTCI);
840 Value *ShadowValue, uint32_t TypeStoreSize);
842 bool IsWrite,
size_t AccessSizeIndex,
843 Value *SizeArgument, uint32_t Exp,
844 RuntimeCallInserter &RTCI);
845 void instrumentMemIntrinsic(MemIntrinsic *
MI, RuntimeCallInserter &RTCI);
847 bool suppressInstrumentationSiteForDebug(
int &Instrumented);
848 bool instrumentFunction(Function &
F,
const TargetLibraryInfo *TLI,
849 const TargetTransformInfo *
TTI);
850 bool maybeInsertAsanInitAtFunctionEntry(Function &
F);
851 bool maybeInsertDynamicShadowAtFunctionEntry(Function &
F);
852 void markEscapedLocalAllocas(Function &
F);
855 friend struct FunctionStackPoisoner;
857 void initializeCallbacks(
const TargetLibraryInfo *TLI);
859 bool LooksLikeCodeInBug11395(Instruction *
I);
860 bool GlobalIsLinkerInitialized(GlobalVariable *
G);
861 bool isSafeAccess(ObjectSizeOffsetVisitor &ObjSizeVis,
Value *Addr,
862 TypeSize TypeStoreSize)
const;
865 struct FunctionStateRAII {
866 AddressSanitizer *Pass;
868 FunctionStateRAII(AddressSanitizer *Pass) : Pass(Pass) {
869 assert(Pass->ProcessedAllocas.empty() &&
870 "last pass forgot to clear cache");
871 assert(!Pass->LocalDynamicShadow);
874 ~FunctionStateRAII() {
875 Pass->LocalDynamicShadow =
nullptr;
876 Pass->ProcessedAllocas.clear();
882 const DataLayout *DL;
892 ShadowMapping Mapping;
893 FunctionCallee AsanHandleNoReturnFunc;
894 FunctionCallee AsanPtrCmpFunction, AsanPtrSubFunction;
902 FunctionCallee AsanErrorCallbackSized[2][2];
903 FunctionCallee AsanMemoryAccessCallbackSized[2][2];
905 FunctionCallee AsanMemmove, AsanMemcpy, AsanMemset;
906 Value *LocalDynamicShadow =
nullptr;
907 const StackSafetyGlobalInfo *SSGI;
908 DenseMap<const AllocaInst *, bool> ProcessedAllocas;
910 FunctionCallee AMDGPUAddressShared;
911 FunctionCallee AMDGPUAddressPrivate;
912 int InstrumentationWithCallsThreshold;
913 uint32_t MaxInlinePoisoningSize;
916class ModuleAddressSanitizer {
918 ModuleAddressSanitizer(
Module &M,
bool InsertVersionCheck,
919 bool CompileKernel =
false,
bool Recover =
false,
920 bool UseGlobalsGC =
true,
bool UseOdrIndicator =
true,
928 : InsertVersionCheck),
930 UseGlobalsGC(UseGlobalsGC &&
ClUseGlobalsGC && !this->CompileKernel),
945 UseCtorComdat(UseGlobalsGC &&
ClWithComdat && !this->CompileKernel),
946 DestructorKind(DestructorKind),
950 C = &(M.getContext());
951 int LongSize = M.getDataLayout().getPointerSizeInBits();
952 IntptrTy = Type::getIntNTy(*C, LongSize);
953 PtrTy = PointerType::getUnqual(*C);
954 TargetTriple = M.getTargetTriple();
959 assert(this->DestructorKind != AsanDtorKind::Invalid);
962 bool instrumentModule();
965 void initializeCallbacks();
967 void instrumentGlobals(
IRBuilder<> &IRB,
bool *CtorComdat);
974 const std::string &UniqueModuleId);
979 InstrumentGlobalsWithMetadataArray(
IRBuilder<> &IRB,
983 GlobalVariable *CreateMetadataGlobal(Constant *Initializer,
984 StringRef OriginalName);
985 void SetComdatForGlobalMetadata(GlobalVariable *
G, GlobalVariable *
Metadata,
986 StringRef InternalSuffix);
989 const GlobalVariable *getExcludedAliasedGlobal(
const GlobalAlias &GA)
const;
990 bool shouldInstrumentGlobal(GlobalVariable *
G)
const;
991 bool ShouldUseMachOGlobalsSection()
const;
992 StringRef getGlobalMetadataSection()
const;
993 void poisonOneInitializer(Function &GlobalInit);
994 void createInitializerPoisonCalls();
995 uint64_t getMinRedzoneSizeForGlobal()
const {
999 int GetAsanVersion()
const;
1000 GlobalVariable *getOrCreateModuleName();
1004 bool InsertVersionCheck;
1007 bool UsePrivateAlias;
1008 bool UseOdrIndicator;
1015 Triple TargetTriple;
1016 ShadowMapping Mapping;
1017 FunctionCallee AsanPoisonGlobals;
1018 FunctionCallee AsanUnpoisonGlobals;
1019 FunctionCallee AsanRegisterGlobals;
1020 FunctionCallee AsanUnregisterGlobals;
1021 FunctionCallee AsanRegisterImageGlobals;
1022 FunctionCallee AsanUnregisterImageGlobals;
1023 FunctionCallee AsanRegisterElfGlobals;
1024 FunctionCallee AsanUnregisterElfGlobals;
1026 Function *AsanCtorFunction =
nullptr;
1027 Function *AsanDtorFunction =
nullptr;
1028 GlobalVariable *ModuleName =
nullptr;
1040struct FunctionStackPoisoner :
public InstVisitor<FunctionStackPoisoner> {
1042 AddressSanitizer &ASan;
1043 RuntimeCallInserter &RTCI;
1048 ShadowMapping Mapping;
1052 SmallVector<Instruction *, 8> RetVec;
1056 FunctionCallee AsanSetShadowFunc[0x100] = {};
1057 FunctionCallee AsanPoisonStackMemoryFunc, AsanUnpoisonStackMemoryFunc;
1058 FunctionCallee AsanAllocaPoisonFunc, AsanAllocasUnpoisonFunc;
1061 struct AllocaPoisonCall {
1062 IntrinsicInst *InsBefore;
1072 AllocaInst *DynamicAllocaLayout =
nullptr;
1073 IntrinsicInst *LocalEscapeCall =
nullptr;
1075 bool HasInlineAsm =
false;
1076 bool HasReturnsTwiceCall =
false;
1079 FunctionStackPoisoner(Function &F, AddressSanitizer &ASan,
1080 RuntimeCallInserter &RTCI)
1081 : F(F), ASan(ASan), RTCI(RTCI),
1083 IntptrTy(ASan.IntptrTy),
1085 Mapping(ASan.Mapping),
1093 copyArgsPassedByValToAllocas();
1098 if (AllocaVec.empty() && DynamicAllocaVec.empty())
return false;
1100 initializeCallbacks(*F.getParent());
1102 processDynamicAllocas();
1103 processStaticAllocas();
1114 void copyArgsPassedByValToAllocas();
1119 void processStaticAllocas();
1120 void processDynamicAllocas();
1122 void createDynamicAllocasInitStorage();
1127 void visitReturnInst(ReturnInst &RI) {
1128 if (CallInst *CI = RI.
getParent()->getTerminatingMustTailCall())
1129 RetVec.push_back(CI);
1131 RetVec.push_back(&RI);
1135 void visitResumeInst(ResumeInst &RI) { RetVec.push_back(&RI); }
1138 void visitCleanupReturnInst(CleanupReturnInst &CRI) { RetVec.push_back(&CRI); }
1140 void unpoisonDynamicAllocasBeforeInst(Instruction *InstBefore,
1141 Value *SavedStack) {
1150 Intrinsic::get_dynamic_area_offset, {IntptrTy}, {});
1156 RTCI.createRuntimeCall(
1157 IRB, AsanAllocasUnpoisonFunc,
1158 {IRB.
CreateLoad(IntptrTy, DynamicAllocaLayout), DynamicAreaPtr});
1162 void unpoisonDynamicAllocas() {
1163 for (Instruction *Ret : RetVec)
1164 unpoisonDynamicAllocasBeforeInst(Ret, DynamicAllocaLayout);
1166 for (Instruction *StackRestoreInst : StackRestoreVec)
1167 unpoisonDynamicAllocasBeforeInst(StackRestoreInst,
1168 StackRestoreInst->getOperand(0));
1181 void handleDynamicAllocaCall(AllocaInst *AI);
1184 void visitAllocaInst(AllocaInst &AI) {
1189 (STy && STy->containsHomogeneousScalableVectorTypes())) {
1193 if (AllocaVec.empty())
1196 StaticAllocasToMoveUp.push_back(&AI);
1202 DynamicAllocaVec.push_back(&AI);
1204 AllocaVec.push_back(&AI);
1209 void visitIntrinsicInst(IntrinsicInst &
II) {
1211 if (
ID == Intrinsic::stackrestore) StackRestoreVec.push_back(&
II);
1212 if (
ID == Intrinsic::localescape) LocalEscapeCall = &
II;
1213 if (!ASan.UseAfterScope)
1215 if (!
II.isLifetimeStartOrEnd())
1220 if (!AI || !ASan.isInterestingAlloca(*AI))
1230 bool DoPoison = (
ID == Intrinsic::lifetime_end);
1231 AllocaPoisonCall APC = {&
II, AI, *
Size, DoPoison};
1233 StaticAllocaPoisonCallVec.push_back(APC);
1235 DynamicAllocaPoisonCallVec.push_back(APC);
1238 void visitCallBase(CallBase &CB) {
1240 HasInlineAsm |= CI->isInlineAsm() && &CB != ASan.LocalDynamicShadow;
1241 HasReturnsTwiceCall |= CI->canReturnTwice();
1246 void initializeCallbacks(
Module &M);
1251 void copyToShadow(ArrayRef<uint8_t> ShadowMask, ArrayRef<uint8_t> ShadowBytes,
1253 void copyToShadow(ArrayRef<uint8_t> ShadowMask, ArrayRef<uint8_t> ShadowBytes,
1256 void copyToShadowInline(ArrayRef<uint8_t> ShadowMask,
1257 ArrayRef<uint8_t> ShadowBytes,
size_t Begin,
1262 Value *createAllocaForLayout(
IRBuilder<> &IRB,
const ASanStackFrameLayout &L,
1265 Instruction *ThenTerm,
Value *ValueIfFalse);
1273 OS, MapClassName2PassName);
1275 if (Options.CompileKernel)
1277 if (Options.UseAfterScope)
1278 OS <<
"use-after-scope";
1286 : Options(Options), UseGlobalGC(UseGlobalGC),
1287 UseOdrIndicator(UseOdrIndicator), DestructorKind(DestructorKind),
1288 ConstructorKind(ConstructorKind) {}
1297 ModuleAddressSanitizer ModuleSanitizer(
1298 M, Options.InsertVersionCheck, Options.CompileKernel, Options.Recover,
1299 UseGlobalGC, UseOdrIndicator, DestructorKind, ConstructorKind);
1311 if (
F.getName().starts_with(
"__asan_"))
1313 if (
F.isPresplitCoroutine())
1315 AddressSanitizer FunctionSanitizer(
1316 M, SSGI, Options.InstrumentationWithCallsThreshold,
1317 Options.MaxInlinePoisoningSize, Options.CompileKernel, Options.Recover,
1318 Options.UseAfterScope, Options.UseAfterReturn);
1321 Modified |= FunctionSanitizer.instrumentFunction(
F, &TLI, &
TTI);
1323 Modified |= ModuleSanitizer.instrumentModule();
1344 if (
G->getName().starts_with(
"llvm.") ||
1346 G->getName().starts_with(
"__llvm_gcov_ctr") ||
1348 G->getName().starts_with(
"__llvm_rtti_proxy"))
1363 if (AddrSpace == 3 || AddrSpace == 5)
1370 Shadow = IRB.
CreateLShr(Shadow, Mapping.Scale);
1371 if (Mapping.Offset == 0)
return Shadow;
1374 if (LocalDynamicShadow)
1375 ShadowBase = LocalDynamicShadow;
1377 ShadowBase = ConstantInt::get(IntptrTy, Mapping.Offset);
1378 if (Mapping.OrShadowOffset)
1379 return IRB.
CreateOr(Shadow, ShadowBase);
1381 return IRB.
CreateAdd(Shadow, ShadowBase);
1386 RuntimeCallInserter &RTCI) {
1389 RTCI.createRuntimeCall(
1395 RTCI.createRuntimeCall(
1401 MI->eraseFromParent();
1405bool AddressSanitizer::isInterestingAlloca(
const AllocaInst &AI) {
1406 auto [It,
Inserted] = ProcessedAllocas.try_emplace(&AI);
1409 return It->getSecond();
1411 bool IsInteresting =
1424 !(SSGI && SSGI->
isSafe(AI)));
1426 It->second = IsInteresting;
1427 return IsInteresting;
1441 if (
Ptr->isSwiftError())
1458void AddressSanitizer::getInterestingMemoryOperands(
1462 if (LocalDynamicShadow ==
I)
1468 Interesting.
emplace_back(
I, LI->getPointerOperandIndex(),
false,
1469 LI->getType(), LI->getAlign());
1474 SI->getValueOperand()->getType(),
SI->getAlign());
1478 Interesting.
emplace_back(
I, RMW->getPointerOperandIndex(),
true,
1479 RMW->getValOperand()->getType(), std::nullopt);
1483 Interesting.
emplace_back(
I, XCHG->getPointerOperandIndex(),
true,
1484 XCHG->getCompareOperand()->getType(),
1487 switch (CI->getIntrinsicID()) {
1488 case Intrinsic::masked_load:
1489 case Intrinsic::masked_store:
1490 case Intrinsic::masked_gather:
1491 case Intrinsic::masked_scatter: {
1492 bool IsWrite = CI->getType()->isVoidTy();
1494 unsigned OpOffset = IsWrite ? 1 : 0;
1498 auto BasePtr = CI->getOperand(OpOffset);
1499 if (ignoreAccess(
I, BasePtr))
1501 Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();
1505 Alignment =
Op->getMaybeAlignValue();
1506 Value *
Mask = CI->getOperand(2 + OpOffset);
1507 Interesting.
emplace_back(
I, OpOffset, IsWrite, Ty, Alignment, Mask);
1510 case Intrinsic::masked_expandload:
1511 case Intrinsic::masked_compressstore: {
1512 bool IsWrite = CI->getIntrinsicID() == Intrinsic::masked_compressstore;
1513 unsigned OpOffset = IsWrite ? 1 : 0;
1516 auto BasePtr = CI->getOperand(OpOffset);
1517 if (ignoreAccess(
I, BasePtr))
1520 Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();
1523 Value *
Mask = CI->getOperand(1 + OpOffset);
1526 Value *ExtMask =
IB.CreateZExt(Mask, ExtTy);
1527 Value *EVL =
IB.CreateAddReduce(ExtMask);
1528 Value *TrueMask = ConstantInt::get(
Mask->getType(), 1);
1529 Interesting.
emplace_back(
I, OpOffset, IsWrite, Ty, Alignment, TrueMask,
1533 case Intrinsic::vp_load:
1534 case Intrinsic::vp_store:
1535 case Intrinsic::experimental_vp_strided_load:
1536 case Intrinsic::experimental_vp_strided_store: {
1538 unsigned IID = CI->getIntrinsicID();
1539 bool IsWrite = CI->getType()->isVoidTy();
1542 unsigned PtrOpNo = *VPI->getMemoryPointerParamPos(IID);
1543 Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();
1544 MaybeAlign Alignment = VPI->getOperand(PtrOpNo)->getPointerAlignment(*
DL);
1545 Value *Stride =
nullptr;
1546 if (IID == Intrinsic::experimental_vp_strided_store ||
1547 IID == Intrinsic::experimental_vp_strided_load) {
1548 Stride = VPI->getOperand(PtrOpNo + 1);
1555 Alignment =
Align(1);
1557 Interesting.
emplace_back(
I, PtrOpNo, IsWrite, Ty, Alignment,
1558 VPI->getMaskParam(), VPI->getVectorLengthParam(),
1562 case Intrinsic::vp_gather:
1563 case Intrinsic::vp_scatter: {
1565 unsigned IID = CI->getIntrinsicID();
1566 bool IsWrite = IID == Intrinsic::vp_scatter;
1569 unsigned PtrOpNo = *VPI->getMemoryPointerParamPos(IID);
1570 Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();
1571 MaybeAlign Alignment = VPI->getPointerAlignment();
1572 Interesting.
emplace_back(
I, PtrOpNo, IsWrite, Ty, Alignment,
1573 VPI->getMaskParam(),
1574 VPI->getVectorLengthParam());
1580 if (
TTI->getTgtMemIntrinsic(
II, IntrInfo))
1584 for (
unsigned ArgNo = 0; ArgNo < CI->arg_size(); ArgNo++) {
1586 ignoreAccess(
I, CI->getArgOperand(ArgNo)))
1588 Type *Ty = CI->getParamByValType(ArgNo);
1604 if (!Cmp->isRelational())
1618 if (BO->getOpcode() != Instruction::Sub)
1631 if (!
G->hasInitializer())
1634 if (
G->hasSanitizerMetadata() &&
G->getSanitizerMetadata().IsDynInit)
1640void AddressSanitizer::instrumentPointerComparisonOrSubtraction(
1644 Value *
Param[2] = {
I->getOperand(0),
I->getOperand(1)};
1645 for (
Value *&i : Param) {
1646 if (i->getType()->isPointerTy())
1649 RTCI.createRuntimeCall(IRB,
F, Param);
1655 TypeSize TypeStoreSize,
bool IsWrite,
1656 Value *SizeArgument,
bool UseCalls,
1657 uint32_t Exp, RuntimeCallInserter &RTCI) {
1662 switch (FixedSize) {
1668 if (!Alignment || *Alignment >= Granularity ||
1669 *Alignment >= FixedSize / 8)
1670 return Pass->instrumentAddress(
I, InsertBefore, Addr, Alignment,
1671 FixedSize, IsWrite,
nullptr, UseCalls,
1675 Pass->instrumentUnusualSizeOrAlignment(
I, InsertBefore, Addr, TypeStoreSize,
1676 IsWrite,
nullptr, UseCalls, Exp, RTCI);
1679void AddressSanitizer::instrumentMaskedLoadOrStore(
1682 MaybeAlign Alignment,
unsigned Granularity,
Type *OpType,
bool IsWrite,
1683 Value *SizeArgument,
bool UseCalls, uint32_t Exp,
1684 RuntimeCallInserter &RTCI) {
1686 TypeSize ElemTypeSize =
DL.getTypeStoreSizeInBits(VTy->getScalarType());
1687 auto Zero = ConstantInt::get(IntptrTy, 0);
1695 Value *IsEVLZero =
IB.CreateICmpNE(EVL, ConstantInt::get(EVLType, 0));
1697 IB.SetInsertPoint(LoopInsertBefore);
1699 EVL =
IB.CreateZExtOrTrunc(EVL, IntptrTy);
1702 Value *
EC =
IB.CreateElementCount(IntptrTy, VTy->getElementCount());
1703 EVL =
IB.CreateBinaryIntrinsic(Intrinsic::umin, EVL, EC);
1705 EVL =
IB.CreateElementCount(IntptrTy, VTy->getElementCount());
1710 Stride =
IB.CreateZExtOrTrunc(Stride, IntptrTy);
1714 Value *MaskElem = IRB.CreateExtractElement(Mask, Index);
1715 if (auto *MaskElemC = dyn_cast<ConstantInt>(MaskElem)) {
1716 if (MaskElemC->isZero())
1722 Instruction *ThenTerm = SplitBlockAndInsertIfThen(
1723 MaskElem, &*IRB.GetInsertPoint(), false);
1724 IRB.SetInsertPoint(ThenTerm);
1727 Value *InstrumentedAddress;
1730 cast<VectorType>(Addr->getType())->getElementType()->isPointerTy() &&
1731 "Expected vector of pointer.");
1732 InstrumentedAddress = IRB.CreateExtractElement(Addr, Index);
1733 }
else if (Stride) {
1740 Alignment, Granularity, ElemTypeSize, IsWrite,
1741 SizeArgument, UseCalls, Exp, RTCI);
1748 RuntimeCallInserter &RTCI) {
1749 Value *Addr =
O.getPtr();
1769 isSafeAccess(ObjSizeVis, Addr,
O.TypeStoreSize)) {
1770 NumOptimizedAccessesToGlobalVar++;
1778 isSafeAccess(ObjSizeVis, Addr,
O.TypeStoreSize)) {
1779 NumOptimizedAccessesToStackVar++;
1785 NumInstrumentedWrites++;
1787 NumInstrumentedReads++;
1789 if (
O.MaybeByteOffset) {
1794 if (TargetTriple.isRISCV()) {
1799 static_cast<unsigned>(LongSize)) {
1808 unsigned Granularity = 1 << Mapping.Scale;
1810 instrumentMaskedLoadOrStore(
this,
DL, IntptrTy,
O.MaybeMask,
O.MaybeEVL,
1811 O.MaybeStride,
O.getInsn(), Addr,
O.Alignment,
1812 Granularity,
O.OpType,
O.IsWrite,
nullptr,
1813 UseCalls, Exp, RTCI);
1816 Granularity,
O.TypeStoreSize,
O.IsWrite,
nullptr,
1817 UseCalls, Exp, RTCI);
1822 Value *Addr,
bool IsWrite,
1823 size_t AccessSizeIndex,
1824 Value *SizeArgument,
1826 RuntimeCallInserter &RTCI) {
1832 Call = RTCI.createRuntimeCall(IRB, AsanErrorCallbackSized[IsWrite][0],
1833 {Addr, SizeArgument});
1835 Call = RTCI.createRuntimeCall(IRB, AsanErrorCallbackSized[IsWrite][1],
1836 {Addr, SizeArgument, ExpVal});
1839 Call = RTCI.createRuntimeCall(
1840 IRB, AsanErrorCallback[IsWrite][0][AccessSizeIndex], Addr);
1842 Call = RTCI.createRuntimeCall(
1843 IRB, AsanErrorCallback[IsWrite][1][AccessSizeIndex], {Addr, ExpVal});
1852 uint32_t TypeStoreSize) {
1853 size_t Granularity =
static_cast<size_t>(1) << Mapping.Scale;
1855 Value *LastAccessedByte =
1856 IRB.
CreateAnd(AddrLong, ConstantInt::get(IntptrTy, Granularity - 1));
1858 if (TypeStoreSize / 8 > 1)
1860 LastAccessedByte, ConstantInt::get(IntptrTy, TypeStoreSize / 8 - 1));
1863 IRB.
CreateIntCast(LastAccessedByte, ShadowValue->getType(),
false);
1868Instruction *AddressSanitizer::instrumentAMDGPUAddress(
1870 uint32_t TypeStoreSize,
bool IsWrite,
Value *SizeArgument) {
1877 return InsertBefore;
1882 Value *IsSharedOrPrivate = IRB.
CreateOr(IsShared, IsPrivate);
1884 Value *AddrSpaceZeroLanding =
1887 return InsertBefore;
1903 Trm->getParent()->setName(
"asan.report");
1914void AddressSanitizer::instrumentAddress(
Instruction *OrigIns,
1917 uint32_t TypeStoreSize,
bool IsWrite,
1918 Value *SizeArgument,
bool UseCalls,
1920 RuntimeCallInserter &RTCI) {
1921 if (TargetTriple.isAMDGPU()) {
1922 InsertBefore = instrumentAMDGPUAddress(OrigIns, InsertBefore, Addr,
1923 TypeStoreSize, IsWrite, SizeArgument);
1932 const ASanAccessInfo AccessInfo(IsWrite, CompileKernel, AccessSizeIndex);
1935 ConstantInt::get(
Int32Ty, AccessInfo.Packed)});
1942 RTCI.createRuntimeCall(
1943 IRB, AsanMemoryAccessCallback[IsWrite][0][AccessSizeIndex], AddrLong);
1945 RTCI.createRuntimeCall(
1946 IRB, AsanMemoryAccessCallback[IsWrite][1][AccessSizeIndex],
1947 {AddrLong, ConstantInt::get(IRB.
getInt32Ty(), Exp)});
1954 Value *ShadowPtr = memToShadow(AddrLong, IRB);
1955 const uint64_t ShadowAlign =
1956 std::max<uint64_t>(Alignment.
valueOrOne().
value() >> Mapping.Scale, 1);
1961 size_t Granularity = 1ULL << Mapping.Scale;
1964 bool GenSlowPath = (
ClAlwaysSlowPath || (TypeStoreSize < 8 * Granularity));
1966 if (TargetTriple.isAMDGCN()) {
1968 auto *Cmp2 = createSlowPathCmp(IRB, AddrLong, ShadowValue, TypeStoreSize);
1971 CrashTerm = genAMDGPUReportBlock(IRB, Cmp, Recover);
1972 }
else if (GenSlowPath) {
1980 Value *Cmp2 = createSlowPathCmp(IRB, AddrLong, ShadowValue, TypeStoreSize);
1995 CrashTerm, AddrLong, IsWrite, AccessSizeIndex, SizeArgument, Exp, RTCI);
2004void AddressSanitizer::instrumentUnusualSizeOrAlignment(
2006 TypeSize TypeStoreSize,
bool IsWrite,
Value *SizeArgument,
bool UseCalls,
2007 uint32_t Exp, RuntimeCallInserter &RTCI) {
2015 RTCI.createRuntimeCall(IRB, AsanMemoryAccessCallbackSized[IsWrite][0],
2018 RTCI.createRuntimeCall(
2019 IRB, AsanMemoryAccessCallbackSized[IsWrite][1],
2033void ModuleAddressSanitizer::poisonOneInitializer(
Function &GlobalInit) {
2039 Value *ModuleNameAddr =
2041 IRB.
CreateCall(AsanPoisonGlobals, ModuleNameAddr);
2044 for (
auto &BB : GlobalInit)
2049void ModuleAddressSanitizer::createInitializerPoisonCalls() {
2069 poisonOneInitializer(*
F);
2075ModuleAddressSanitizer::getExcludedAliasedGlobal(
const GlobalAlias &GA)
const {
2080 assert(CompileKernel &&
"Only expecting to be called when compiling kernel");
2092bool ModuleAddressSanitizer::shouldInstrumentGlobal(
GlobalVariable *
G)
const {
2093 Type *Ty =
G->getValueType();
2096 if (
G->hasSanitizerMetadata() &&
G->getSanitizerMetadata().NoAddress)
2098 if (!Ty->
isSized())
return false;
2099 if (!
G->hasInitializer())
return false;
2101 if (
G->getAddressSpace() &&
2108 if (
G->isThreadLocal())
return false;
2110 if (
G->getAlign() && *
G->getAlign() > getMinRedzoneSizeForGlobal())
return false;
2116 if (!TargetTriple.isOSBinFormatCOFF()) {
2117 if (!
G->hasExactDefinition() ||
G->hasComdat())
2121 if (
G->isInterposable())
2125 if (
G->hasAvailableExternallyLinkage())
2132 switch (
C->getSelectionKind()) {
2143 if (
G->hasSection()) {
2153 if (Section ==
"llvm.metadata")
return false;
2160 if (
Section.starts_with(
".preinit_array") ||
2161 Section.starts_with(
".init_array") ||
2162 Section.starts_with(
".fini_array")) {
2168 if (TargetTriple.isOSBinFormatELF()) {
2182 if (TargetTriple.isOSBinFormatCOFF() &&
Section.contains(
'$')) {
2183 LLVM_DEBUG(
dbgs() <<
"Ignoring global in sorted section (contains '$'): "
2188 if (TargetTriple.isOSBinFormatMachO()) {
2190 unsigned TAA = 0, StubSize = 0;
2193 Section, ParsedSegment, ParsedSection, TAA, TAAParsed, StubSize));
2198 if (ParsedSegment ==
"__OBJC" ||
2199 (ParsedSegment ==
"__DATA" && ParsedSection.
starts_with(
"__objc_"))) {
2211 if (ParsedSegment ==
"__DATA" && ParsedSection ==
"__cfstring") {
2224 if (CompileKernel) {
2227 if (
G->getName().starts_with(
"__"))
2237bool ModuleAddressSanitizer::ShouldUseMachOGlobalsSection()
const {
2238 if (!TargetTriple.isOSBinFormatMachO())
2241 if (TargetTriple.isMacOSX() && !TargetTriple.isMacOSXVersionLT(10, 11))
2243 if (TargetTriple.isiOS() && !TargetTriple.isOSVersionLT(9))
2245 if (TargetTriple.isWatchOS() && !TargetTriple.isOSVersionLT(2))
2247 if (TargetTriple.isDriverKit())
2249 if (TargetTriple.isXROS())
2255StringRef ModuleAddressSanitizer::getGlobalMetadataSection()
const {
2256 switch (TargetTriple.getObjectFormat()) {
2266 "ModuleAddressSanitizer not implemented for object file format");
2273void ModuleAddressSanitizer::initializeCallbacks() {
2279 AsanUnpoisonGlobals =
2283 AsanRegisterGlobals =
M.getOrInsertFunction(
2285 AsanUnregisterGlobals =
M.getOrInsertFunction(
2290 AsanRegisterImageGlobals =
M.getOrInsertFunction(
2292 AsanUnregisterImageGlobals =
M.getOrInsertFunction(
2295 AsanRegisterElfGlobals =
2297 IntptrTy, IntptrTy, IntptrTy);
2298 AsanUnregisterElfGlobals =
2300 IntptrTy, IntptrTy, IntptrTy);
2305void ModuleAddressSanitizer::SetComdatForGlobalMetadata(
2310 if (!
G->hasName()) {
2314 G->setName(
genName(
"anon_global"));
2317 if (!InternalSuffix.
empty() &&
G->hasLocalLinkage()) {
2318 std::string
Name = std::string(
G->getName());
2319 Name += InternalSuffix;
2320 C =
M.getOrInsertComdat(Name);
2322 C =
M.getOrInsertComdat(
G->getName());
2328 if (TargetTriple.isOSBinFormatCOFF()) {
2330 if (
G->hasPrivateLinkage())
2343ModuleAddressSanitizer::CreateMetadataGlobal(
Constant *Initializer,
2345 auto Linkage = TargetTriple.isOSBinFormatMachO()
2351 Metadata->setSection(getGlobalMetadataSection());
2358Instruction *ModuleAddressSanitizer::CreateAsanModuleDtor() {
2362 AsanDtorFunction->addFnAttr(Attribute::NoUnwind);
2370void ModuleAddressSanitizer::InstrumentGlobalsCOFF(
2374 auto &
DL =
M.getDataLayout();
2377 for (
size_t i = 0; i < ExtendedGlobals.
size(); i++) {
2378 Constant *Initializer = MetadataInitializers[i];
2382 Metadata->setMetadata(LLVMContext::MD_associated, MD);
2388 unsigned SizeOfGlobalStruct =
DL.getTypeAllocSize(Initializer->
getType());
2390 "global metadata will not be padded appropriately");
2393 SetComdatForGlobalMetadata(
G,
Metadata,
"");
2398 if (!MetadataGlobals.empty())
2402void ModuleAddressSanitizer::instrumentGlobalsELF(
2405 const std::string &UniqueModuleId) {
2412 bool UseComdatForGlobalsGC = UseOdrIndicator && !UniqueModuleId.empty();
2415 for (
size_t i = 0; i < ExtendedGlobals.
size(); i++) {
2418 CreateMetadataGlobal(MetadataInitializers[i],
G->getName());
2420 Metadata->setMetadata(LLVMContext::MD_associated, MD);
2423 if (UseComdatForGlobalsGC)
2424 SetComdatForGlobalMetadata(
G,
Metadata, UniqueModuleId);
2429 if (!MetadataGlobals.empty())
2446 "__start_" + getGlobalMetadataSection());
2450 "__stop_" + getGlobalMetadataSection());
2464 IrbDtor.CreateCall(AsanUnregisterElfGlobals,
2471void ModuleAddressSanitizer::InstrumentGlobalsMachO(
2482 for (
size_t i = 0; i < ExtendedGlobals.
size(); i++) {
2483 Constant *Initializer = MetadataInitializers[i];
2489 auto LivenessBinder =
2494 Twine(
"__asan_binder_") +
G->getName());
2495 Liveness->
setSection(
"__DATA,__asan_liveness,regular,live_support");
2496 LivenessGlobals[i] = Liveness;
2503 if (!LivenessGlobals.empty())
2525 IrbDtor.CreateCall(AsanUnregisterImageGlobals,
2530void ModuleAddressSanitizer::InstrumentGlobalsWithMetadataArray(
2534 unsigned N = ExtendedGlobals.
size();
2544 if (Mapping.Scale > 3)
2545 AllGlobals->setAlignment(
Align(1ULL << Mapping.Scale));
2550 ConstantInt::get(IntptrTy,
N)});
2556 IrbDtor.CreateCall(AsanUnregisterGlobals,
2558 ConstantInt::get(IntptrTy,
N)});
2567void ModuleAddressSanitizer::instrumentGlobals(
IRBuilder<> &IRB,
2572 if (CompileKernel) {
2573 for (
auto &GA :
M.aliases()) {
2575 AliasedGlobalExclusions.
insert(GV);
2580 for (
auto &
G :
M.globals()) {
2581 if (!AliasedGlobalExclusions.
count(&
G) && shouldInstrumentGlobal(&
G))
2585 size_t n = GlobalsToChange.
size();
2586 auto &
DL =
M.getDataLayout();
2600 IntptrTy, IntptrTy, IntptrTy);
2604 for (
size_t i = 0; i < n; i++) {
2608 if (
G->hasSanitizerMetadata())
2609 MD =
G->getSanitizerMetadata();
2614 std::string NameForGlobal =
G->getName().str();
2619 Type *Ty =
G->getValueType();
2620 const uint64_t SizeInBytes =
DL.getTypeAllocSize(Ty);
2633 M, NewTy,
G->isConstant(),
Linkage, NewInitializer,
"",
G,
2634 G->getThreadLocalMode(),
G->getAddressSpace());
2644 if (TargetTriple.isOSBinFormatMachO() && !
G->hasSection() &&
2647 if (Seq && Seq->isCString())
2648 NewGlobal->
setSection(
"__TEXT,__asan_cstring,regular");
2662 G->eraseFromParent();
2663 NewGlobals[i] = NewGlobal;
2668 bool CanUsePrivateAliases =
2669 TargetTriple.isOSBinFormatELF() || TargetTriple.isOSBinFormatMachO() ||
2670 TargetTriple.isOSBinFormatWasm();
2671 if (CanUsePrivateAliases && UsePrivateAlias) {
2674 InstrumentedGlobal =
2682 }
else if (UseOdrIndicator) {
2685 auto *ODRIndicatorSym =
2694 ODRIndicatorSym->setAlignment(
Align(1));
2695 ODRIndicator = ODRIndicatorSym;
2701 ConstantInt::get(IntptrTy, SizeInBytes),
2702 ConstantInt::get(IntptrTy, SizeInBytes + RightRedzoneSize),
2705 ConstantInt::get(IntptrTy, MD.
IsDynInit),
2711 Initializers[i] = Initializer;
2717 for (
size_t i = 0; i < n; i++) {
2719 if (
G->getName().empty())
continue;
2724 if (UseGlobalsGC && TargetTriple.isOSBinFormatELF()) {
2731 }
else if (n == 0) {
2734 *CtorComdat = TargetTriple.isOSBinFormatELF();
2736 *CtorComdat =
false;
2737 if (UseGlobalsGC && TargetTriple.isOSBinFormatCOFF()) {
2738 InstrumentGlobalsCOFF(IRB, NewGlobals, Initializers);
2739 }
else if (UseGlobalsGC && ShouldUseMachOGlobalsSection()) {
2740 InstrumentGlobalsMachO(IRB, NewGlobals, Initializers);
2742 InstrumentGlobalsWithMetadataArray(IRB, NewGlobals, Initializers);
2748 createInitializerPoisonCalls();
2754ModuleAddressSanitizer::getRedzoneSizeForGlobal(uint64_t SizeInBytes)
const {
2755 constexpr uint64_t kMaxRZ = 1 << 18;
2756 const uint64_t MinRZ = getMinRedzoneSizeForGlobal();
2759 if (SizeInBytes <= MinRZ / 2) {
2763 RZ = MinRZ - SizeInBytes;
2766 RZ = std::clamp((SizeInBytes / MinRZ / 4) * MinRZ, MinRZ, kMaxRZ);
2769 if (SizeInBytes % MinRZ)
2770 RZ += MinRZ - (SizeInBytes % MinRZ);
2773 assert((RZ + SizeInBytes) % MinRZ == 0);
2778int ModuleAddressSanitizer::GetAsanVersion()
const {
2779 int LongSize =
M.getDataLayout().getPointerSizeInBits();
2780 bool isAndroid =
M.getTargetTriple().isAndroid();
2784 Version += (LongSize == 32 && isAndroid);
2799bool ModuleAddressSanitizer::instrumentModule() {
2800 initializeCallbacks();
2808 if (CompileKernel) {
2813 std::string AsanVersion = std::to_string(GetAsanVersion());
2814 std::string VersionCheckName =
2816 std::tie(AsanCtorFunction, std::ignore) =
2819 {}, VersionCheckName);
2823 bool CtorComdat =
true;
2826 if (AsanCtorFunction) {
2827 IRBuilder<> IRB(AsanCtorFunction->getEntryBlock().getTerminator());
2828 instrumentGlobals(IRB, &CtorComdat);
2831 instrumentGlobals(IRB, &CtorComdat);
2840 if (UseCtorComdat && TargetTriple.isOSBinFormatELF() && CtorComdat) {
2841 if (AsanCtorFunction) {
2845 if (AsanDtorFunction) {
2850 if (AsanCtorFunction)
2852 if (AsanDtorFunction)
2863 for (
int Exp = 0;
Exp < 2;
Exp++) {
2864 for (
size_t AccessIsWrite = 0; AccessIsWrite <= 1; AccessIsWrite++) {
2865 const std::string TypeStr = AccessIsWrite ?
"store" :
"load";
2866 const std::string ExpStr =
Exp ?
"exp_" :
"";
2867 const std::string EndingStr = Recover ?
"_noabort" :
"";
2877 if (
auto AK = TLI->getExtAttrForI32Param(
false)) {
2878 AL2 = AL2.addParamAttribute(*
C, 2, AK);
2879 AL1 = AL1.addParamAttribute(*
C, 1, AK);
2882 AsanErrorCallbackSized[AccessIsWrite][
Exp] =
M.getOrInsertFunction(
2886 AsanMemoryAccessCallbackSized[AccessIsWrite][
Exp] =
M.getOrInsertFunction(
2891 AccessSizeIndex++) {
2892 const std::string Suffix = TypeStr +
itostr(1ULL << AccessSizeIndex);
2893 AsanErrorCallback[AccessIsWrite][
Exp][AccessSizeIndex] =
2894 M.getOrInsertFunction(
2898 AsanMemoryAccessCallback[AccessIsWrite][
Exp][AccessSizeIndex] =
2899 M.getOrInsertFunction(
2906 const std::string MemIntrinCallbackPrefix =
2910 AsanMemmove =
M.getOrInsertFunction(MemIntrinCallbackPrefix +
"memmove",
2911 PtrTy, PtrTy, PtrTy, IntptrTy);
2912 AsanMemcpy =
M.getOrInsertFunction(MemIntrinCallbackPrefix +
"memcpy", PtrTy,
2913 PtrTy, PtrTy, IntptrTy);
2914 AsanMemset =
M.getOrInsertFunction(MemIntrinCallbackPrefix +
"memset",
2918 AsanHandleNoReturnFunc =
2921 AsanPtrCmpFunction =
2923 AsanPtrSubFunction =
2925 if (Mapping.InGlobal)
2926 AsanShadowGlobal =
M.getOrInsertGlobal(
"__asan_shadow",
2929 AMDGPUAddressShared =
2931 AMDGPUAddressPrivate =
2935bool AddressSanitizer::maybeInsertAsanInitAtFunctionEntry(
Function &
F) {
2943 if (
F.getName().contains(
" load]")) {
2953bool AddressSanitizer::maybeInsertDynamicShadowAtFunctionEntry(
Function &
F) {
2959 if (Mapping.InGlobal) {
2967 LocalDynamicShadow =
2968 IRB.
CreateCall(Asm, {AsanShadowGlobal},
".asan.shadow");
2970 LocalDynamicShadow =
2974 Value *GlobalDynamicAddress =
F.getParent()->getOrInsertGlobal(
2976 LocalDynamicShadow = IRB.
CreateLoad(IntptrTy, GlobalDynamicAddress);
2981void AddressSanitizer::markEscapedLocalAllocas(
Function &
F) {
2986 assert(ProcessedAllocas.empty() &&
"must process localescape before allocas");
2990 if (!
F.getParent()->getFunction(
"llvm.localescape"))
return;
2996 if (
II &&
II->getIntrinsicID() == Intrinsic::localescape) {
2998 for (
Value *Arg :
II->args()) {
3001 "non-static alloca arg to localescape");
3002 ProcessedAllocas[AI] =
false;
3009bool AddressSanitizer::suppressInstrumentationSiteForDebug(
int &Instrumented) {
3010 bool ShouldInstrument =
3014 return !ShouldInstrument;
3017bool AddressSanitizer::instrumentFunction(
Function &
F,
3020 bool FunctionModified =
false;
3023 if (
F.hasFnAttribute(Attribute::Naked))
3024 return FunctionModified;
3029 if (maybeInsertAsanInitAtFunctionEntry(
F))
3030 FunctionModified =
true;
3033 if (!
F.hasFnAttribute(Attribute::SanitizeAddress))
return FunctionModified;
3035 if (
F.hasFnAttribute(Attribute::DisableSanitizerInstrumentation))
3036 return FunctionModified;
3040 initializeCallbacks(TLI);
3042 FunctionStateRAII CleanupObj(
this);
3044 RuntimeCallInserter RTCI(
F);
3046 FunctionModified |= maybeInsertDynamicShadowAtFunctionEntry(
F);
3050 markEscapedLocalAllocas(
F);
3062 for (
auto &BB :
F) {
3064 TempsToInstrument.
clear();
3065 int NumInsnsPerBB = 0;
3066 for (
auto &Inst : BB) {
3067 if (LooksLikeCodeInBug11395(&Inst))
return false;
3074 if (!InterestingOperands.
empty()) {
3075 for (
auto &Operand : InterestingOperands) {
3081 if (Operand.MaybeMask) {
3085 if (!TempsToInstrument.
insert(
Ptr).second)
3089 OperandsToInstrument.
push_back(Operand);
3096 PointerComparisonsOrSubtracts.
push_back(&Inst);
3104 TempsToInstrument.
clear();
3115 bool UseCalls = (InstrumentationWithCallsThreshold >= 0 &&
3116 OperandsToInstrument.
size() + IntrinToInstrument.
size() >
3117 (
unsigned)InstrumentationWithCallsThreshold);
3122 int NumInstrumented = 0;
3123 for (
auto &Operand : OperandsToInstrument) {
3124 if (!suppressInstrumentationSiteForDebug(NumInstrumented))
3125 instrumentMop(ObjSizeVis, Operand, UseCalls,
3126 F.getDataLayout(), RTCI);
3127 FunctionModified =
true;
3129 for (
auto *Inst : IntrinToInstrument) {
3130 if (!suppressInstrumentationSiteForDebug(NumInstrumented))
3131 instrumentMemIntrinsic(Inst, RTCI);
3132 FunctionModified =
true;
3135 FunctionStackPoisoner FSP(
F, *
this, RTCI);
3136 bool ChangedStack = FSP.runOnFunction();
3140 for (
auto *CI : NoReturnCalls) {
3142 RTCI.createRuntimeCall(IRB, AsanHandleNoReturnFunc, {});
3145 for (
auto *Inst : PointerComparisonsOrSubtracts) {
3146 instrumentPointerComparisonOrSubtraction(Inst, RTCI);
3147 FunctionModified =
true;
3150 if (ChangedStack || !NoReturnCalls.empty())
3151 FunctionModified =
true;
3153 LLVM_DEBUG(
dbgs() <<
"ASAN done instrumenting: " << FunctionModified <<
" "
3156 return FunctionModified;
3162bool AddressSanitizer::LooksLikeCodeInBug11395(
Instruction *
I) {
3163 if (LongSize != 32)
return false;
3172void FunctionStackPoisoner::initializeCallbacks(
Module &M) {
3176 const char *MallocNameTemplate =
3181 std::string Suffix =
itostr(Index);
3182 AsanStackMallocFunc[
Index] =
M.getOrInsertFunction(
3183 MallocNameTemplate + Suffix, IntptrTy, IntptrTy);
3184 AsanStackFreeFunc[
Index] =
3189 if (ASan.UseAfterScope) {
3190 AsanPoisonStackMemoryFunc =
M.getOrInsertFunction(
3192 AsanUnpoisonStackMemoryFunc =
M.getOrInsertFunction(
3196 for (
size_t Val : {0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0xf1, 0xf2,
3197 0xf3, 0xf5, 0xf8}) {
3198 std::ostringstream
Name;
3200 Name << std::setw(2) << std::setfill(
'0') << std::hex << Val;
3201 AsanSetShadowFunc[Val] =
3202 M.getOrInsertFunction(
Name.str(), IRB.
getVoidTy(), IntptrTy, IntptrTy);
3205 AsanAllocaPoisonFunc =
M.getOrInsertFunction(
3207 AsanAllocasUnpoisonFunc =
M.getOrInsertFunction(
3213 size_t Begin,
size_t End,
3215 Value *ShadowBase) {
3219 const size_t LargestStoreSizeInBytes =
3220 std::min<size_t>(
sizeof(uint64_t), ASan.LongSize / 8);
3222 const bool IsLittleEndian =
F.getDataLayout().isLittleEndian();
3228 for (
size_t i = Begin; i < End;) {
3229 if (!ShadowMask[i]) {
3235 size_t StoreSizeInBytes = LargestStoreSizeInBytes;
3237 while (StoreSizeInBytes > End - i)
3238 StoreSizeInBytes /= 2;
3241 for (
size_t j = StoreSizeInBytes - 1;
j && !ShadowMask[i +
j]; --
j) {
3242 while (j <= StoreSizeInBytes / 2)
3243 StoreSizeInBytes /= 2;
3247 for (
size_t j = 0;
j < StoreSizeInBytes;
j++) {
3249 Val |= (uint64_t)ShadowBytes[i + j] << (8 * j);
3251 Val = (Val << 8) | ShadowBytes[i + j];
3260 i += StoreSizeInBytes;
3267 copyToShadow(ShadowMask, ShadowBytes, 0, ShadowMask.
size(), IRB, ShadowBase);
3272 size_t Begin,
size_t End,
3275 size_t Done = Begin;
3276 for (
size_t i = Begin, j = Begin + 1; i < End; i =
j++) {
3277 if (!ShadowMask[i]) {
3281 uint8_t Val = ShadowBytes[i];
3282 if (!AsanSetShadowFunc[Val])
3286 for (;
j < End && ShadowMask[
j] && Val == ShadowBytes[
j]; ++
j) {
3289 if (j - i >= ASan.MaxInlinePoisoningSize) {
3290 copyToShadowInline(ShadowMask, ShadowBytes,
Done, i, IRB, ShadowBase);
3291 RTCI.createRuntimeCall(
3292 IRB, AsanSetShadowFunc[Val],
3293 {IRB.
CreateAdd(ShadowBase, ConstantInt::get(IntptrTy, i)),
3294 ConstantInt::get(IntptrTy, j - i)});
3299 copyToShadowInline(ShadowMask, ShadowBytes,
Done, End, IRB, ShadowBase);
3307 for (
int i = 0;; i++, MaxSize *= 2)
3308 if (LocalStackSize <= MaxSize)
return i;
3312void FunctionStackPoisoner::copyArgsPassedByValToAllocas() {
3314 if (CopyInsertPoint == ASan.LocalDynamicShadow) {
3322 if (Arg.hasByValAttr()) {
3323 Type *Ty = Arg.getParamByValType();
3324 const Align Alignment =
3325 DL.getValueOrABITypeAlignment(Arg.getParamAlign(), Ty);
3329 (Arg.hasName() ? Arg.getName() :
"Arg" +
Twine(Arg.getArgNo())) +
3332 Arg.replaceAllUsesWith(AI);
3334 uint64_t AllocSize =
DL.getTypeAllocSize(Ty);
3335 IRB.
CreateMemCpy(AI, Alignment, &Arg, Alignment, AllocSize);
3343 Value *ValueIfFalse) {
3346 PHI->addIncoming(ValueIfFalse, CondBlock);
3348 PHI->addIncoming(ValueIfTrue, ThenBlock);
3352Value *FunctionStackPoisoner::createAllocaForLayout(
3361 nullptr,
"MyAlloca");
3365 uint64_t FrameAlignment = std::max(
L.FrameAlignment, uint64_t(
ClRealignStack));
3370void FunctionStackPoisoner::createDynamicAllocasInitStorage() {
3373 DynamicAllocaLayout = IRB.
CreateAlloca(IntptrTy,
nullptr);
3378void FunctionStackPoisoner::processDynamicAllocas() {
3385 for (
const auto &APC : DynamicAllocaPoisonCallVec) {
3388 assert(ASan.isInterestingAlloca(*APC.AI));
3389 assert(!APC.AI->isStaticAlloca());
3392 poisonAlloca(APC.AI, APC.Size, IRB, APC.DoPoison);
3399 createDynamicAllocasInitStorage();
3400 for (
auto &AI : DynamicAllocaVec)
3401 handleDynamicAllocaCall(AI);
3402 unpoisonDynamicAllocas();
3414 for (
Instruction *It = Start; It; It = It->getNextNode()) {
3431 if (!Alloca || ASan.isInterestingAlloca(*Alloca))
3434 Value *Val = Store->getValueOperand();
3436 bool IsArgInitViaCast =
3441 Val == It->getPrevNode();
3442 bool IsArgInit = IsDirectArgInit || IsArgInitViaCast;
3446 if (IsArgInitViaCast)
3461 if (AI->
hasMetadata(LLVMContext::MD_annotation)) {
3464 for (
auto &Annotation : AllocaAnnotations->
operands()) {
3468 for (
unsigned Index = 0; Index < AnnotationTuple->getNumOperands();
3471 auto MetadataString =
3473 if (MetadataString->getString() ==
"alloca_name_altered")
3482void FunctionStackPoisoner::processStaticAllocas() {
3483 if (AllocaVec.
empty()) {
3488 int StackMallocIdx = -1;
3490 if (
auto SP =
F.getSubprogram())
3491 EntryDebugLocation =
3500 auto InsBeforeB = InsBefore->
getParent();
3501 assert(InsBeforeB == &
F.getEntryBlock());
3502 for (
auto *AI : StaticAllocasToMoveUp)
3513 ArgInitInst->moveBefore(InsBefore->
getIterator());
3516 if (LocalEscapeCall)
3524 ASan.getAllocaSizeInBytes(*AI),
3535 uint64_t Granularity = 1ULL << Mapping.Scale;
3536 uint64_t MinHeaderSize = std::max((uint64_t)ASan.LongSize / 2, Granularity);
3542 for (
auto &
Desc : SVD)
3546 for (
const auto &APC : StaticAllocaPoisonCallVec) {
3549 assert(ASan.isInterestingAlloca(*APC.AI));
3550 assert(APC.AI->isStaticAlloca());
3555 if (
const DILocation *LifetimeLoc = APC.InsBefore->getDebugLoc().get()) {
3556 if (LifetimeLoc->getFile() == FnLoc->getFile())
3557 if (
unsigned Line = LifetimeLoc->getLine())
3558 Desc.Line = std::min(
Desc.Line ?
Desc.Line : Line, Line);
3564 LLVM_DEBUG(
dbgs() << DescriptionString <<
" --- " <<
L.FrameSize <<
"\n");
3565 uint64_t LocalStackSize =
L.FrameSize;
3566 bool DoStackMalloc =
3576 DoDynamicAlloca &= !HasInlineAsm && !HasReturnsTwiceCall;
3577 DoStackMalloc &= !HasInlineAsm && !HasReturnsTwiceCall;
3579 Value *StaticAlloca =
3580 DoDynamicAlloca ? nullptr : createAllocaForLayout(IRB, L,
false);
3583 Value *LocalStackBase;
3584 Value *LocalStackBaseAlloca;
3587 if (DoStackMalloc) {
3588 LocalStackBaseAlloca =
3589 IRB.
CreateAlloca(IntptrTy,
nullptr,
"asan_local_stack_base");
3596 Constant *OptionDetectUseAfterReturn =
F.getParent()->getOrInsertGlobal(
3606 Value *FakeStackValue =
3607 RTCI.createRuntimeCall(IRBIf, AsanStackMallocFunc[StackMallocIdx],
3608 ConstantInt::get(IntptrTy, LocalStackSize));
3610 FakeStack = createPHI(IRB, UseAfterReturnIsEnabled, FakeStackValue, Term,
3611 ConstantInt::get(IntptrTy, 0));
3619 RTCI.createRuntimeCall(IRB, AsanStackMallocFunc[StackMallocIdx],
3620 ConstantInt::get(IntptrTy, LocalStackSize));
3622 Value *NoFakeStack =
3627 Value *AllocaValue =
3628 DoDynamicAlloca ? createAllocaForLayout(IRBIf, L,
true) : StaticAlloca;
3631 LocalStackBase = createPHI(IRB, NoFakeStack, AllocaValue, Term, FakeStack);
3632 IRB.
CreateStore(LocalStackBase, LocalStackBaseAlloca);
3637 FakeStack = ConstantInt::get(IntptrTy, 0);
3639 DoDynamicAlloca ? createAllocaForLayout(IRB, L,
true) : StaticAlloca;
3640 LocalStackBaseAlloca = LocalStackBase;
3646 Value *LocalStackBaseAllocaPtr =
3649 : LocalStackBaseAlloca;
3651 "Variable descriptions relative to ASan stack base will be dropped");
3655 for (
const auto &
Desc : SVD) {
3660 IRB.
CreateAdd(LocalStackBase, ConstantInt::get(IntptrTy,
Desc.Offset)),
3674 ConstantInt::get(IntptrTy, ASan.LongSize / 8)),
3684 ConstantInt::get(IntptrTy, 2 * ASan.LongSize / 8)),
3691 Value *ShadowBase = ASan.memToShadow(LocalStackBase, IRB);
3694 copyToShadow(ShadowAfterScope, ShadowAfterScope, IRB, ShadowBase);
3696 if (!StaticAllocaPoisonCallVec.empty()) {
3700 for (
const auto &APC : StaticAllocaPoisonCallVec) {
3703 size_t Begin =
Desc.Offset /
L.Granularity;
3704 size_t End = Begin + (APC.Size +
L.Granularity - 1) /
L.Granularity;
3707 copyToShadow(ShadowAfterScope,
3708 APC.DoPoison ? ShadowAfterScope : ShadowInScope, Begin, End,
3714 for (
Value *NewAllocaPtr : NewAllocaPtrs) {
3717 if (
I->isLifetimeStartOrEnd())
3718 I->eraseFromParent();
3731 if (DoStackMalloc) {
3732 assert(StackMallocIdx >= 0);
3749 if (ASan.MaxInlinePoisoningSize != 0 && StackMallocIdx <= 4) {
3751 ShadowAfterReturn.
resize(ClassSize /
L.Granularity,
3753 copyToShadow(ShadowAfterReturn, ShadowAfterReturn, IRBPoison,
3755 Value *SavedFlagPtrPtr = IRBPoison.CreateAdd(
3757 ConstantInt::get(IntptrTy, ClassSize - ASan.LongSize / 8));
3758 Value *SavedFlagPtr = IRBPoison.CreateLoad(
3759 IntptrTy, IRBPoison.CreateIntToPtr(SavedFlagPtrPtr, IntptrPtrTy));
3760 IRBPoison.CreateStore(
3762 IRBPoison.CreateIntToPtr(SavedFlagPtr, IRBPoison.getPtrTy()));
3765 RTCI.createRuntimeCall(
3766 IRBPoison, AsanStackFreeFunc[StackMallocIdx],
3767 {FakeStack, ConstantInt::get(IntptrTy, LocalStackSize)});
3771 copyToShadow(ShadowAfterScope, ShadowClean, IRBElse, ShadowBase);
3773 copyToShadow(ShadowAfterScope, ShadowClean, IRBRet, ShadowBase);
3778 for (
auto *AI : AllocaVec)
3782void FunctionStackPoisoner::poisonAlloca(
Value *V, uint64_t
Size,
3786 Value *SizeArg = ConstantInt::get(IntptrTy,
Size);
3787 RTCI.createRuntimeCall(
3788 IRB, DoPoison ? AsanPoisonStackMemoryFunc : AsanUnpoisonStackMemoryFunc,
3789 {AddrArg, SizeArg});
3800void FunctionStackPoisoner::handleDynamicAllocaCall(
AllocaInst *AI) {
3808 Value *AllocaRzMask = ConstantInt::get(IntptrTy, AllocaRedzoneMask);
3814 const unsigned ElementSize =
3818 ConstantInt::get(IntptrTy, ElementSize));
3846 ConstantInt::get(IntptrTy, Alignment.
value()));
3849 RTCI.createRuntimeCall(IRB, AsanAllocaPoisonFunc, {NewAddress, OldSize});
3860 if (
I->isLifetimeStartOrEnd())
3861 I->eraseFromParent();
3893 Size - uint64_t(
Offset) >= TypeStoreSize / 8;
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static cl::opt< bool > ClUseStackSafety("stack-tagging-use-stack-safety", cl::Hidden, cl::init(true), cl::desc("Use Stack Safety analysis results"))
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static void findStoresToUninstrumentedArgAllocas(AddressSanitizer &ASan, Instruction &InsBefore, SmallVectorImpl< Instruction * > &InitInsts)
Collect instructions in the entry block after InsBefore which initialize permanent storage for a func...
static void doInstrumentAddress(AddressSanitizer *Pass, Instruction *I, Instruction *InsertBefore, Value *Addr, MaybeAlign Alignment, unsigned Granularity, TypeSize TypeStoreSize, bool IsWrite, Value *SizeArgument, bool UseCalls, uint32_t Exp, RuntimeCallInserter &RTCI)
static const uint64_t kDefaultShadowScale
const char kAMDGPUUnreachableName[]
constexpr size_t kAccessSizeIndexMask
static cl::opt< int > ClDebugMin("asan-debug-min", cl::desc("Debug min inst"), cl::Hidden, cl::init(-1))
static cl::opt< bool > ClUsePrivateAlias("asan-use-private-alias", cl::desc("Use private aliases for global variables"), cl::Hidden, cl::init(true))
static const uint64_t kPS_ShadowOffset64
static const uint64_t kFreeBSD_ShadowOffset32
constexpr size_t kIsWriteShift
static const uint64_t kSmallX86_64ShadowOffsetAlignMask
static bool isInterestingPointerSubtraction(Instruction *I)
const char kAMDGPUAddressSharedName[]
const char kAsanStackFreeNameTemplate[]
constexpr size_t kCompileKernelMask
static cl::opt< bool > ClForceDynamicShadow("asan-force-dynamic-shadow", cl::desc("Load shadow address into a local variable for each function"), cl::Hidden, cl::init(false))
const char kAsanOptionDetectUseAfterReturn[]
static cl::opt< std::string > ClMemoryAccessCallbackPrefix("asan-memory-access-callback-prefix", cl::desc("Prefix for memory access callbacks"), cl::Hidden, cl::init("__asan_"))
static const uint64_t kRISCV64_ShadowOffset64
static cl::opt< bool > ClInsertVersionCheck("asan-guard-against-version-mismatch", cl::desc("Guard against compiler/runtime version mismatch."), cl::Hidden, cl::init(true))
const char kAsanSetShadowPrefix[]
static cl::opt< AsanDtorKind > ClOverrideDestructorKind("asan-destructor-kind", cl::desc("Sets the ASan destructor kind. The default is to use the value " "provided to the pass constructor"), cl::values(clEnumValN(AsanDtorKind::None, "none", "No destructors"), clEnumValN(AsanDtorKind::Global, "global", "Use global destructors")), cl::init(AsanDtorKind::Invalid), cl::Hidden)
static Twine genName(StringRef suffix)
static cl::opt< bool > ClInstrumentWrites("asan-instrument-writes", cl::desc("instrument write instructions"), cl::Hidden, cl::init(true))
static uint64_t GetCtorAndDtorPriority(Triple &TargetTriple)
const char kAsanStackMallocNameTemplate[]
static cl::opt< bool > ClInstrumentByval("asan-instrument-byval", cl::desc("instrument byval call arguments"), cl::Hidden, cl::init(true))
const char kAsanInitName[]
static cl::opt< bool > ClGlobals("asan-globals", cl::desc("Handle global objects"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClRedzoneByvalArgs("asan-redzone-byval-args", cl::desc("Create redzones for byval " "arguments (extra copy " "required)"), cl::Hidden, cl::init(true))
static const uint64_t kWindowsShadowOffset64
const char kAsanGenPrefix[]
constexpr size_t kIsWriteMask
static uint64_t getRedzoneSizeForScale(int MappingScale)
static const uint64_t kDefaultShadowOffset64
static cl::opt< bool > ClOptimizeCallbacks("asan-optimize-callbacks", cl::desc("Optimize callbacks"), cl::Hidden, cl::init(false))
const char kAsanUnregisterGlobalsName[]
static const uint64_t kAsanCtorAndDtorPriority
const char kAsanUnpoisonGlobalsName[]
static cl::opt< bool > ClWithIfuncSuppressRemat("asan-with-ifunc-suppress-remat", cl::desc("Suppress rematerialization of dynamic shadow address by passing " "it through inline asm in prologue."), cl::Hidden, cl::init(true))
static cl::opt< int > ClDebugStack("asan-debug-stack", cl::desc("debug stack"), cl::Hidden, cl::init(0))
const char kAsanUnregisterElfGlobalsName[]
static bool isUnsupportedAMDGPUAddrspace(Value *Addr)
const char kAsanRegisterImageGlobalsName[]
static const uint64_t kWebAssemblyShadowOffset
static cl::opt< bool > ClOpt("asan-opt", cl::desc("Optimize instrumentation"), cl::Hidden, cl::init(true))
static const uint64_t kAllocaRzSize
const char kODRGenPrefix[]
static const uint64_t kSystemZ_ShadowOffset64
static const uint64_t kDefaultShadowOffset32
const char kAsanShadowMemoryDynamicAddress[]
static cl::opt< bool > ClUseOdrIndicator("asan-use-odr-indicator", cl::desc("Use odr indicators to improve ODR reporting"), cl::Hidden, cl::init(true))
static bool GlobalWasGeneratedByCompiler(GlobalVariable *G)
Check if G has been created by a trusted compiler pass.
const char kAsanStackMallocAlwaysNameTemplate[]
static cl::opt< bool > ClInvalidPointerCmp("asan-detect-invalid-pointer-cmp", cl::desc("Instrument <, <=, >, >= with pointer operands"), cl::Hidden, cl::init(false))
static const uint64_t kAsanEmscriptenCtorAndDtorPriority
static cl::opt< int > ClInstrumentationWithCallsThreshold("asan-instrumentation-with-call-threshold", cl::desc("If the function being instrumented contains more than " "this number of memory accesses, use callbacks instead of " "inline checks (-1 means never use callbacks)."), cl::Hidden, cl::init(7000))
static cl::opt< int > ClDebugMax("asan-debug-max", cl::desc("Debug max inst"), cl::Hidden, cl::init(-1))
static cl::opt< bool > ClInvalidPointerSub("asan-detect-invalid-pointer-sub", cl::desc("Instrument - operations with pointer operands"), cl::Hidden, cl::init(false))
static const uint64_t kFreeBSD_ShadowOffset64
static cl::opt< uint32_t > ClForceExperiment("asan-force-experiment", cl::desc("Force optimization experiment (for testing)"), cl::Hidden, cl::init(0))
const char kSanCovGenPrefix[]
static const uint64_t kFreeBSDKasan_ShadowOffset64
const char kAsanModuleDtorName[]
static const uint64_t kDynamicShadowSentinel
static bool isInterestingPointerComparison(Instruction *I)
static cl::opt< bool > ClStack("asan-stack", cl::desc("Handle stack memory"), cl::Hidden, cl::init(true))
static const uint64_t kMIPS64_ShadowOffset64
static const uint64_t kLinuxKasan_ShadowOffset64
static int StackMallocSizeClass(uint64_t LocalStackSize)
static cl::opt< uint32_t > ClMaxInlinePoisoningSize("asan-max-inline-poisoning-size", cl::desc("Inline shadow poisoning for blocks up to the given size in bytes."), cl::Hidden, cl::init(64))
static cl::opt< bool > ClInstrumentAtomics("asan-instrument-atomics", cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClUseAfterScope("asan-use-after-scope", cl::desc("Check stack-use-after-scope"), cl::Hidden, cl::init(false))
constexpr size_t kAccessSizeIndexShift
static cl::opt< int > ClMappingScale("asan-mapping-scale", cl::desc("scale of asan shadow mapping"), cl::Hidden, cl::init(0))
const char kAsanPoisonStackMemoryName[]
static cl::opt< bool > ClEnableKasan("asan-kernel", cl::desc("Enable KernelAddressSanitizer instrumentation"), cl::Hidden, cl::init(false))
static cl::opt< std::string > ClDebugFunc("asan-debug-func", cl::Hidden, cl::desc("Debug func"))
static cl::opt< bool > ClUseGlobalsGC("asan-globals-live-support", cl::desc("Use linker features to support dead " "code stripping of globals"), cl::Hidden, cl::init(true))
static const size_t kNumberOfAccessSizes
const char kAsanUnpoisonStackMemoryName[]
static const uint64_t kLoongArch64_ShadowOffset64
const char kAsanRegisterGlobalsName[]
static cl::opt< bool > ClInstrumentDynamicAllocas("asan-instrument-dynamic-allocas", cl::desc("instrument dynamic allocas"), cl::Hidden, cl::init(true))
const char kAsanModuleCtorName[]
const char kAsanGlobalsRegisteredFlagName[]
static const size_t kMaxStackMallocSize
static cl::opt< bool > ClRecover("asan-recover", cl::desc("Enable recovery mode (continue-after-error)."), cl::Hidden, cl::init(false))
static cl::opt< bool > ClOptSameTemp("asan-opt-same-temp", cl::desc("Instrument the same temp just once"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClDynamicAllocaStack("asan-stack-dynamic-alloca", cl::desc("Use dynamic alloca to represent stack variables"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClOptStack("asan-opt-stack", cl::desc("Don't instrument scalar stack variables"), cl::Hidden, cl::init(false))
static const uint64_t kMIPS_ShadowOffsetN32
const char kAsanUnregisterImageGlobalsName[]
static cl::opt< AsanDetectStackUseAfterReturnMode > ClUseAfterReturn("asan-use-after-return", cl::desc("Sets the mode of detection for stack-use-after-return."), cl::values(clEnumValN(AsanDetectStackUseAfterReturnMode::Never, "never", "Never detect stack use after return."), clEnumValN(AsanDetectStackUseAfterReturnMode::Runtime, "runtime", "Detect stack use after return if " "binary flag 'ASAN_OPTIONS=detect_stack_use_after_return' is set."), clEnumValN(AsanDetectStackUseAfterReturnMode::Always, "always", "Always detect stack use after return.")), cl::Hidden, cl::init(AsanDetectStackUseAfterReturnMode::Runtime))
static cl::opt< bool > ClOptGlobals("asan-opt-globals", cl::desc("Don't instrument scalar globals"), cl::Hidden, cl::init(true))
static const uintptr_t kCurrentStackFrameMagic
static ShadowMapping getShadowMapping(const Triple &TargetTriple, int LongSize, bool IsKasan)
static const uint64_t kPPC64_ShadowOffset64
static cl::opt< AsanCtorKind > ClConstructorKind("asan-constructor-kind", cl::desc("Sets the ASan constructor kind"), cl::values(clEnumValN(AsanCtorKind::None, "none", "No constructors"), clEnumValN(AsanCtorKind::Global, "global", "Use global constructors")), cl::init(AsanCtorKind::Global), cl::Hidden)
static const int kMaxAsanStackMallocSizeClass
static const uint64_t kMIPS32_ShadowOffset32
static cl::opt< bool > ClAlwaysSlowPath("asan-always-slow-path", cl::desc("use instrumentation with slow path for all accesses"), cl::Hidden, cl::init(false))
static const uint64_t kNetBSD_ShadowOffset32
static const uint64_t kFreeBSDAArch64_ShadowOffset64
static const uint64_t kSmallX86_64ShadowOffsetBase
static cl::opt< bool > ClInitializers("asan-initialization-order", cl::desc("Handle C++ initializer order"), cl::Hidden, cl::init(true))
static const uint64_t kNetBSD_ShadowOffset64
static cl::opt< unsigned > ClRealignStack("asan-realign-stack", cl::desc("Realign stack to the value of this flag (power of two)"), cl::Hidden, cl::init(32))
static const uint64_t kWindowsShadowOffset32
static cl::opt< bool > ClInstrumentReads("asan-instrument-reads", cl::desc("instrument read instructions"), cl::Hidden, cl::init(true))
static size_t TypeStoreSizeToSizeIndex(uint32_t TypeSize)
const char kAsanAllocaPoison[]
constexpr size_t kCompileKernelShift
static cl::opt< bool > ClWithIfunc("asan-with-ifunc", cl::desc("Access dynamic shadow through an ifunc global on " "platforms that support this"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClKasanMemIntrinCallbackPrefix("asan-kernel-mem-intrinsic-prefix", cl::desc("Use prefix for memory intrinsics in KASAN mode"), cl::Hidden, cl::init(false))
const char kAsanVersionCheckNamePrefix[]
const char kAMDGPUAddressPrivateName[]
static const uint64_t kNetBSDKasan_ShadowOffset64
const char kAMDGPUBallotName[]
const char kAsanRegisterElfGlobalsName[]
static cl::opt< uint64_t > ClMappingOffset("asan-mapping-offset", cl::desc("offset of asan shadow mapping [EXPERIMENTAL]"), cl::Hidden, cl::init(0))
const char kAsanReportErrorTemplate[]
static cl::opt< bool > ClWithComdat("asan-with-comdat", cl::desc("Place ASan constructors in comdat sections"), cl::Hidden, cl::init(true))
static StringRef getAllocaName(AllocaInst *AI)
static cl::opt< bool > ClSkipPromotableAllocas("asan-skip-promotable-allocas", cl::desc("Do not instrument promotable allocas"), cl::Hidden, cl::init(true))
static cl::opt< int > ClMaxInsnsToInstrumentPerBB("asan-max-ins-per-bb", cl::init(10000), cl::desc("maximal number of instructions to instrument in any given BB"), cl::Hidden)
static const uintptr_t kRetiredStackFrameMagic
static cl::opt< bool > ClUseStackSafety("asan-use-stack-safety", cl::Hidden, cl::init(true), cl::Hidden, cl::desc("Use Stack Safety analysis results"), cl::Optional)
const char kAsanPoisonGlobalsName[]
const char kAsanHandleNoReturnName[]
static const size_t kMinStackMallocSize
static cl::opt< int > ClDebug("asan-debug", cl::desc("debug"), cl::Hidden, cl::init(0))
const char kAsanAllocasUnpoison[]
static const uint64_t kAArch64_ShadowOffset64
static cl::opt< bool > ClInvalidPointerPairs("asan-detect-invalid-pointer-pair", cl::desc("Instrument <, <=, >, >=, - with pointer operands"), cl::Hidden, cl::init(false))
Function Alias Analysis false
This file contains the simple types necessary to represent the attributes associated with functions a...
static bool isPointerOperand(Value *I, User *U)
static const Function * getParent(const Value *V)
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
This file contains the declarations for the subclasses of Constant, which represent the different fla...
This file defines the DenseMap class.
This file builds on the ADT/GraphTraits.h file to build generic depth first graph iterator.
static bool runOnFunction(Function &F, bool PostInlining)
This is the interface for a simple mod/ref and alias analysis over globals.
Module.h This file contains the declarations for the Module class.
This defines the Use class.
std::pair< Instruction::BinaryOps, Value * > OffsetOp
Find all possible pairs (BinOp, RHS) that BinOp V, RHS can be simplified.
static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT, AssumptionCache *AC)
Machine Check Debug Module
uint64_t IntrinsicInst * II
FunctionAnalysisManager FAM
ModuleAnalysisManager MAM
const SmallVectorImpl< MachineOperand > & Cond
void visit(MachineFunction &MF, MachineBasicBlock &Start, std::function< void(MachineBasicBlock *)> op)
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
static SymbolRef::Type getType(const Symbol *Sym)
uint64_t getZExtValue() const
Get zero extended value.
int64_t getSExtValue() const
Get sign extended value.
LLVM_ABI AddressSanitizerPass(const AddressSanitizerOptions &Options, bool UseGlobalGC=true, bool UseOdrIndicator=true, AsanDtorKind DestructorKind=AsanDtorKind::Global, AsanCtorKind ConstructorKind=AsanCtorKind::Global)
LLVM_ABI PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM)
LLVM_ABI void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
an instruction to allocate memory on the stack
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
LLVM_ABI bool isStaticAlloca() const
Return true if this alloca is in the entry block of the function and is a constant size.
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
PointerType * getType() const
Overload to return most specific pointer type.
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
bool isUsedWithInAlloca() const
Return true if this alloca is used as an inalloca argument to a call.
LLVM_ABI std::optional< TypeSize > getAllocationSize(const DataLayout &DL) const
Get allocation size in bytes.
void setAlignment(Align Align)
const Value * getArraySize() const
Get the number of elements allocated.
This class represents an incoming formal argument to a Function.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
Class to represent array types.
static LLVM_ABI ArrayType * get(Type *ElementType, uint64_t NumElements)
This static method is the primary way to construct an ArrayType.
An instruction that atomically checks whether a specified value is in a memory location,...
an instruction that atomically reads a memory location, combines it with another value,...
LLVM Basic Block Representation.
iterator begin()
Instruction iterator methods.
LLVM_ABI const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
const Function * getParent() const
Return the enclosing method, or null if none.
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
InstListType::iterator iterator
Instruction iterators...
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
LLVM_ABI const Module * getModule() const
Return the module owning the function this basic block belongs to, or nullptr if the function does no...
Conditional or Unconditional Branch instruction.
static BranchInst * Create(BasicBlock *IfTrue, InsertPosition InsertBefore=nullptr)
bool isInlineAsm() const
Check if this call is an inline asm statement.
static LLVM_ABI CallBase * addOperandBundle(CallBase *CB, uint32_t ID, OperandBundleDef OB, InsertPosition InsertPt=nullptr)
Create a clone of CB with operand bundle OB added.
bool doesNotReturn() const
Determine if the call cannot return.
unsigned arg_size() const
This class represents a function call, abstracting a target machine's calling convention.
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
@ Largest
The linker will choose the largest COMDAT.
@ SameSize
The data referenced by the COMDAT must be the same size.
@ Any
The linker may choose any COMDAT.
@ NoDeduplicate
No deduplication is performed.
@ ExactMatch
The data referenced by the COMDAT must be the same.
ConstantArray - Constant Array Declarations.
static LLVM_ABI Constant * get(ArrayType *T, ArrayRef< Constant * > V)
static LLVM_ABI Constant * getIntToPtr(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static LLVM_ABI Constant * getPointerCast(Constant *C, Type *Ty)
Create a BitCast, AddrSpaceCast, or a PtrToInt cast constant expression.
static Constant * getGetElementPtr(Type *Ty, Constant *C, ArrayRef< Constant * > IdxList, GEPNoWrapFlags NW=GEPNoWrapFlags::none(), std::optional< ConstantRange > InRange=std::nullopt, Type *OnlyIfReducedTy=nullptr)
Getelementptr form.
static LLVM_ABI bool isValueValidForType(Type *Ty, uint64_t V)
This static method returns true if the type Ty is big enough to represent the value V.
static LLVM_ABI ConstantPointerNull * get(PointerType *T)
Static factory methods - Return objects of the specified value.
static LLVM_ABI Constant * get(StructType *T, ArrayRef< Constant * > V)
This is an important base class in LLVM.
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
LLVM_ABI Constant * getAggregateElement(unsigned Elt) const
For aggregates (struct/array/vector) return the constant that corresponds to the specified element if...
A parsed version of the target data layout string in and methods for querying it.
LLVM_ABI DILocation * get() const
Get the underlying DILocation.
A handy container for a FunctionType+Callee-pointer pair, which can be passed around as a single enti...
static LLVM_ABI FunctionType * get(Type *Result, ArrayRef< Type * > Params, bool isVarArg)
This static method is the primary way of constructing a FunctionType.
const BasicBlock & front() const
static Function * createWithDefaultAttr(FunctionType *Ty, LinkageTypes Linkage, unsigned AddrSpace, const Twine &N="", Module *M=nullptr)
Creates a function with some attributes recorded in llvm.module.flags and the LLVMContext applied.
bool hasPersonalityFn() const
Check whether this function has a personality function.
const Constant * getAliasee() const
static LLVM_ABI GlobalAlias * create(Type *Ty, unsigned AddressSpace, LinkageTypes Linkage, const Twine &Name, Constant *Aliasee, Module *Parent)
If a parent module is specified, the alias is automatically inserted into the end of the specified mo...
LLVM_ABI void copyMetadata(const GlobalObject *Src, unsigned Offset)
Copy metadata from Src, adjusting offsets by Offset.
LLVM_ABI void setComdat(Comdat *C)
LLVM_ABI void setSection(StringRef S)
Change the section for this global.
VisibilityTypes getVisibility() const
void setUnnamedAddr(UnnamedAddr Val)
bool hasLocalLinkage() const
static StringRef dropLLVMManglingEscape(StringRef Name)
If the given string begins with the GlobalValue name mangling escape character '\1',...
ThreadLocalMode getThreadLocalMode() const
@ HiddenVisibility
The GV is hidden.
void setVisibility(VisibilityTypes V)
LinkageTypes
An enumeration for the kinds of linkage for global values.
@ PrivateLinkage
Like Internal, but omit from symbol table.
@ CommonLinkage
Tentative definitions.
@ InternalLinkage
Rename collisions when linking (static functions).
@ AvailableExternallyLinkage
Available for inspection, not emission.
@ ExternalWeakLinkage
ExternalWeak linkage description.
DLLStorageClassTypes getDLLStorageClass() const
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
LLVM_ABI void copyAttributesFrom(const GlobalVariable *Src)
copyAttributesFrom - copy all additional attributes (those not needed to create a GlobalVariable) fro...
void setAlignment(Align Align)
Sets the alignment attribute of the GlobalVariable.
Analysis pass providing a never-invalidated alias analysis result.
This instruction compares its operands according to the predicate given to the constructor.
Common base class shared among various IRBuilders.
AllocaInst * CreateAlloca(Type *Ty, unsigned AddrSpace, Value *ArraySize=nullptr, const Twine &Name="")
IntegerType * getInt1Ty()
Fetch the type representing a single bit.
LoadInst * CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align, const char *Name)
CallInst * CreateMemCpy(Value *Dst, MaybeAlign DstAlign, Value *Src, MaybeAlign SrcAlign, uint64_t Size, bool isVolatile=false, const AAMDNodes &AAInfo=AAMDNodes())
Create and insert a memcpy between the specified pointers.
Value * CreatePointerCast(Value *V, Type *DestTy, const Twine &Name="")
Value * CreateICmpSGE(Value *LHS, Value *RHS, const Twine &Name="")
LLVM_ABI Value * CreateSelect(Value *C, Value *True, Value *False, const Twine &Name="", Instruction *MDFrom=nullptr)
BasicBlock::iterator GetInsertPoint() const
Value * CreateIntToPtr(Value *V, Type *DestTy, const Twine &Name="")
Value * CreateLShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
IntegerType * getInt32Ty()
Fetch the type representing a 32-bit integer.
Value * CreatePtrAdd(Value *Ptr, Value *Offset, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
BasicBlock * GetInsertBlock() const
IntegerType * getInt64Ty()
Fetch the type representing a 64-bit integer.
Value * CreateICmpNE(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateGEP(Type *Ty, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
LLVM_ABI CallInst * CreateIntrinsic(Intrinsic::ID ID, ArrayRef< Type * > Types, ArrayRef< Value * > Args, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with Args, mangled using Types.
ConstantInt * getInt32(uint32_t C)
Get a constant 32-bit value.
PHINode * CreatePHI(Type *Ty, unsigned NumReservedValues, const Twine &Name="")
Value * CreateNot(Value *V, const Twine &Name="")
Value * CreateICmpEQ(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateSub(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
ConstantInt * getIntN(unsigned N, uint64_t C)
Get a constant N-bit value, zero extended or truncated from a 64-bit value.
LoadInst * CreateLoad(Type *Ty, Value *Ptr, const char *Name)
Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of converting the string to 'bool...
Value * CreateAnd(Value *LHS, Value *RHS, const Twine &Name="")
StoreInst * CreateStore(Value *Val, Value *Ptr, bool isVolatile=false)
Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Value * CreatePtrToInt(Value *V, Type *DestTy, const Twine &Name="")
Value * CreateIsNotNull(Value *Arg, const Twine &Name="")
Return a boolean value testing if Arg != 0.
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args={}, const Twine &Name="", MDNode *FPMathTag=nullptr)
LLVM_ABI Value * CreateTypeSize(Type *Ty, TypeSize Size)
Create an expression which evaluates to the number of units in Size at runtime.
Value * CreateIntCast(Value *V, Type *DestTy, bool isSigned, const Twine &Name="")
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
Type * getVoidTy()
Fetch the type representing void.
StoreInst * CreateAlignedStore(Value *Val, Value *Ptr, MaybeAlign Align, bool isVolatile=false)
Value * CreateOr(Value *LHS, Value *RHS, const Twine &Name="", bool IsDisjoint=false)
IntegerType * getInt8Ty()
Fetch the type representing an 8-bit integer.
Value * CreateAddrSpaceCast(Value *V, Type *DestTy, const Twine &Name="")
Value * CreateMul(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
static LLVM_ABI InlineAsm * get(FunctionType *Ty, StringRef AsmString, StringRef Constraints, bool hasSideEffects, bool isAlignStack=false, AsmDialect asmDialect=AD_ATT, bool canThrow=false)
InlineAsm::get - Return the specified uniqued inline asm string.
Base class for instruction visitors.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
bool hasMetadata() const
Return true if this instruction has any metadata attached to it.
LLVM_ABI void moveBefore(InstListType::iterator InsertPos)
Unlink this instruction from its current basic block and insert it into the basic block that MovePos ...
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
LLVM_ABI BasicBlock * getSuccessor(unsigned Idx) const LLVM_READONLY
Return the specified successor. This instruction must be a terminator.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
LLVM_ABI const DataLayout & getDataLayout() const
Get the data layout of the module this instruction belongs to.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
A wrapper class for inspecting calls to intrinsic functions.
An instruction for reading from memory.
static Error ParseSectionSpecifier(StringRef Spec, StringRef &Segment, StringRef &Section, unsigned &TAA, bool &TAAParsed, unsigned &StubSize)
Parse the section specifier indicated by "Spec".
LLVM_ABI MDNode * createUnlikelyBranchWeights()
Return metadata containing two branch weights, with significant bias towards false destination.
ArrayRef< MDOperand > operands() const
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
This is the common base class for memset/memcpy/memmove.
A Module instance is used to store all the information related to an LLVM module.
Evaluate the size and offset of an object pointed to by a Value* statically.
LLVM_ABI SizeOffsetAPInt compute(Value *V)
Pass interface - Implemented by all 'passes'.
static PointerType * getUnqual(Type *ElementType)
This constructs a pointer to an object of the specified type in the default address space (address sp...
static LLVM_ABI PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
A set of analyses that are preserved following a run of a transformation pass.
static PreservedAnalyses none()
Convenience factory function for the empty preserved set.
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
PreservedAnalyses & abandon()
Mark an analysis as abandoned.
Return a value (possibly void), from a function.
static ReturnInst * Create(LLVMContext &C, Value *retVal=nullptr, InsertPosition InsertBefore=nullptr)
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void reserve(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
This pass performs the global (interprocedural) stack safety analysis (new pass manager).
bool stackAccessIsSafe(const Instruction &I) const
bool isSafe(const AllocaInst &AI) const
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e.
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
constexpr bool empty() const
empty - Check if the string is empty.
Class to represent struct types.
static LLVM_ABI StructType * get(LLVMContext &Context, ArrayRef< Type * > Elements, bool isPacked=false)
This static method is the primary way to create a literal StructType.
Analysis pass providing the TargetTransformInfo.
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
AttributeList getAttrList(LLVMContext *C, ArrayRef< unsigned > ArgNos, bool Signed, bool Ret=false, AttributeList AL=AttributeList()) const
Triple - Helper class for working with autoconf configuration names.
bool isAndroidVersionLT(unsigned Major) const
bool isThumb() const
Tests whether the target is Thumb (little and big endian).
bool isDriverKit() const
Is this an Apple DriverKit triple.
bool isAndroid() const
Tests whether the target is Android.
bool isMIPS64() const
Tests whether the target is MIPS 64-bit (little and big endian).
ArchType getArch() const
Get the parsed architecture type of this triple.
bool isLoongArch64() const
Tests whether the target is 64-bit LoongArch.
bool isMIPS32() const
Tests whether the target is MIPS 32-bit (little and big endian).
bool isOSWindows() const
Tests whether the OS is Windows.
bool isARM() const
Tests whether the target is ARM (little and big endian).
bool isOSLinux() const
Tests whether the OS is Linux.
bool isMacOSX() const
Is this a Mac OS X triple.
bool isOSEmscripten() const
Tests whether the OS is Emscripten.
bool isWatchOS() const
Is this an Apple watchOS triple.
bool isiOS() const
Is this an iOS triple.
bool isPS() const
Tests whether the target is the PS4 or PS5 platform.
bool isWasm() const
Tests whether the target is wasm (32- and 64-bit).
bool isOSHaiku() const
Tests whether the OS is Haiku.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
The instances of the Type class are immutable: once they are created, they are never changed.
LLVM_ABI unsigned getIntegerBitWidth() const
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
static LLVM_ABI Type * getVoidTy(LLVMContext &C)
static LLVM_ABI IntegerType * getInt8Ty(LLVMContext &C)
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
This function has undefined behavior.
A Use represents the edge between a Value definition and its users.
Value * getOperand(unsigned i) const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
LLVM_ABI void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
iterator_range< user_iterator > users()
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
LLVM_ABI void takeName(Value *V)
Transfer the name from V to this value.
Base class of all SIMD vector types.
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
constexpr ScalarTy getFixedValue() const
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
self_iterator getIterator()
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
This class implements an extremely fast bulk output stream that can only output to a stream.
This file contains the declaration of the Comdat class, which represents a single COMDAT in LLVM.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
void getInterestingMemoryOperands(Module &M, Instruction *I, SmallVectorImpl< InterestingMemoryOperand > &Interesting)
Get all the memory operands from the instruction that needs to be instrumented.
void instrumentAddress(Module &M, IRBuilder<> &IRB, Instruction *OrigIns, Instruction *InsertBefore, Value *Addr, Align Alignment, TypeSize TypeStoreSize, bool IsWrite, Value *SizeArgument, bool UseCalls, bool Recover, int AsanScale, int AsanOffset)
Instrument the memory operand Addr.
uint64_t getRedzoneSizeForGlobal(int AsanScale, uint64_t SizeInBytes)
Given SizeInBytes of the Value to be instrumented, returns the redzone size corresponding to it.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
@ C
The default llvm calling convention, compatible with C.
@ BasicBlock
Various leaf nodes.
@ S_CSTRING_LITERALS
S_CSTRING_LITERALS - Section with literal C strings.
@ OB
OB - OneByte - Set if this instruction has a one byte opcode.
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...
initializer< Ty > init(const Ty &Val)
uint64_t getAllocaSizeInBytes(const AllocaInst &AI)
Context & getContext() const
friend class Instruction
Iterator for Instructions in a `BasicBlock`.
This is an optimization pass for GlobalISel generic memory operations.
LLVM_ABI void ReplaceInstWithInst(BasicBlock *BB, BasicBlock::iterator &BI, Instruction *I)
Replace the instruction specified by BI with the instruction specified by I.
FunctionAddr VTableAddr Value
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI SmallVector< uint8_t, 64 > GetShadowBytesAfterScope(const SmallVectorImpl< ASanStackVariableDescription > &Vars, const ASanStackFrameLayout &Layout)
LLVM_ABI GlobalVariable * createPrivateGlobalForString(Module &M, StringRef Str, bool AllowMerging, Twine NamePrefix="")
LLVM_ABI AllocaInst * findAllocaForValue(Value *V, bool OffsetZero=false)
Returns unique alloca where the value comes from, or nullptr.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
FunctionAddr VTableAddr uintptr_t uintptr_t Int32Ty
LLVM_ABI Function * createSanitizerCtor(Module &M, StringRef CtorName)
Creates sanitizer constructor function.
AsanDetectStackUseAfterReturnMode
Mode of ASan detect stack use after return.
@ Always
Always detect stack use after return.
@ Never
Never detect stack use after return.
@ Runtime
Detect stack use after return if not disabled runtime with (ASAN_OPTIONS=detect_stack_use_after_retur...
LLVM_ABI DenseMap< BasicBlock *, ColorVector > colorEHFunclets(Function &F)
If an EH funclet personality is in use (see isFuncletEHPersonality), this will recompute which blocks...
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
InnerAnalysisManagerProxy< FunctionAnalysisManager, Module > FunctionAnalysisManagerModuleProxy
Provide the FunctionAnalysisManager to Module proxy.
LLVM_ABI bool isAllocaPromotable(const AllocaInst *AI)
Return true if this alloca is legal for promotion.
LLVM_ABI SmallString< 64 > ComputeASanStackFrameDescription(const SmallVectorImpl< ASanStackVariableDescription > &Vars)
LLVM_ABI SmallVector< uint8_t, 64 > GetShadowBytes(const SmallVectorImpl< ASanStackVariableDescription > &Vars, const ASanStackFrameLayout &Layout)
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
auto dyn_cast_or_null(const Y &Val)
LLVM_ABI FunctionCallee declareSanitizerInitFunction(Module &M, StringRef InitName, ArrayRef< Type * > InitArgTypes, bool Weak=false)
FunctionAddr VTableAddr uintptr_t uintptr_t Version
LLVM_ABI std::string getUniqueModuleId(Module *M)
Produce a unique identifier for this module by taking the MD5 sum of the names of the module's strong...
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
LLVM_ABI std::pair< Function *, FunctionCallee > createSanitizerCtorAndInitFunctions(Module &M, StringRef CtorName, StringRef InitName, ArrayRef< Type * > InitArgTypes, ArrayRef< Value * > InitArgs, StringRef VersionCheckName=StringRef(), bool Weak=false)
Creates sanitizer constructor function, and calls sanitizer's init function from it.
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
LLVM_ABI void SplitBlockAndInsertIfThenElse(Value *Cond, BasicBlock::iterator SplitBefore, Instruction **ThenTerm, Instruction **ElseTerm, MDNode *BranchWeights=nullptr, DomTreeUpdater *DTU=nullptr, LoopInfo *LI=nullptr)
SplitBlockAndInsertIfThenElse is similar to SplitBlockAndInsertIfThen, but also creates the ElseBlock...
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
bool isAlnum(char C)
Checks whether character C is either a decimal digit or an uppercase or lowercase letter as classifie...
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
AsanDtorKind
Types of ASan module destructors supported.
@ Invalid
Not a valid destructor Kind.
@ Global
Append to llvm.global_dtors.
@ None
Do not emit any destructors for ASan.
LLVM_ABI ASanStackFrameLayout ComputeASanStackFrameLayout(SmallVectorImpl< ASanStackVariableDescription > &Vars, uint64_t Granularity, uint64_t MinHeaderSize)
void cantFail(Error Err, const char *Msg=nullptr)
Report a fatal error if Err is a failure value.
IRBuilder(LLVMContext &, FolderTy, InserterTy, MDNode *, ArrayRef< OperandBundleDef >) -> IRBuilder< FolderTy, InserterTy >
OperandBundleDefT< Value * > OperandBundleDef
LLVM_ABI void appendToCompilerUsed(Module &M, ArrayRef< GlobalValue * > Values)
Adds global values to the llvm.compiler.used list.
static const int kAsanStackUseAfterReturnMagic
LLVM_ABI void setGlobalVariableLargeSection(const Triple &TargetTriple, GlobalVariable &GV)
void removeASanIncompatibleFnAttributes(Function &F, bool ReadsArgMem)
Remove memory attributes that are incompatible with the instrumentation added by AddressSanitizer and...
DWARFExpression::Operation Op
@ Dynamic
Denotes mode unknown at compile time.
ArrayRef(const T &OneElt) -> ArrayRef< T >
LLVM_ABI void appendToGlobalCtors(Module &M, Function *F, int Priority, Constant *Data=nullptr)
Append F to the list of global ctors of module M with the given Priority.
TinyPtrVector< BasicBlock * > ColorVector
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Align assumeAligned(uint64_t Value)
Treats the value 0 as a 1, so Align is always at least 1.
iterator_range< df_iterator< T > > depth_first(const T &G)
LLVM_ABI Instruction * SplitBlockAndInsertIfThen(Value *Cond, BasicBlock::iterator SplitBefore, bool Unreachable, MDNode *BranchWeights=nullptr, DomTreeUpdater *DTU=nullptr, LoopInfo *LI=nullptr, BasicBlock *ThenBlock=nullptr)
Split the containing block at the specified instruction - everything before SplitBefore stays in the ...
LLVM_ABI const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=MaxLookupSearchDepth)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
AsanCtorKind
Types of ASan module constructors supported.
LLVM_ABI void maybeMarkSanitizerLibraryCallNoBuiltin(CallInst *CI, const TargetLibraryInfo *TLI)
Given a CallInst, check if it calls a string function known to CodeGen, and mark it with NoBuiltin if...
LLVM_ABI void appendToUsed(Module &M, ArrayRef< GlobalValue * > Values)
Adds global values to the llvm.used list.
LLVM_ABI void appendToGlobalDtors(Module &M, Function *F, int Priority, Constant *Data=nullptr)
Same as appendToGlobalCtors(), but for global dtors.
LLVM_ABI bool checkIfAlreadyInstrumented(Module &M, StringRef Flag)
Check if module has flag attached, if not add the flag.
void getAddressSanitizerParams(const Triple &TargetTriple, int LongSize, bool IsKasan, uint64_t *ShadowBase, int *MappingScale, bool *OrShadowOffset)
DEMANGLE_ABI std::string demangle(std::string_view MangledName)
Attempt to demangle a string using different demangling schemes.
std::string itostr(int64_t X)
LLVM_ABI void SplitBlockAndInsertForEachLane(ElementCount EC, Type *IndexTy, BasicBlock::iterator InsertBefore, std::function< void(IRBuilderBase &, Value *)> Func)
Utility function for performing a given action on each lane of a vector with EC elements.
AnalysisManager< Module > ModuleAnalysisManager
Convenience typedef for the Module analysis manager.
LLVM_ABI bool replaceDbgDeclare(Value *Address, Value *NewAddress, DIBuilder &Builder, uint8_t DIExprFlags, int Offset)
Replaces dbg.declare record when the address it describes is replaced with a new value.
const uint8_t AccessSizeIndex
LLVM_ABI ASanAccessInfo(int32_t Packed)
This struct is a compact representation of a valid (non-zero power of two) alignment.
constexpr uint64_t value() const
This is a hole in the type system and should not be abused.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Align valueOrOne() const
For convenience, returns a valid alignment or 1 if undefined.
Information about a load/store intrinsic defined by the target.
SmallVector< InterestingMemoryOperand, 1 > InterestingOperands
A CRTP mix-in to automatically provide informational APIs needed for passes.
SizeOffsetAPInt - Used by ObjectSizeOffsetVisitor, which works with APInts.