#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsX86.h"

#define DEBUG_TYPE "msan"

              "Controls which checks to insert");
              "Controls which instruction to instrument");
    "msan-track-origins",
    cl::desc("keep going after reporting a UMR"),
    "msan-poison-stack-with-call",
    "msan-poison-stack-pattern",
    cl::desc("poison uninitialized stack variables with the given pattern"),
    cl::desc("Print name of local stack variable"),
    cl::desc("Poison fully undef temporary values. "
             "Partially undefined constant vectors "
             "are unaffected by this flag (see "
             "-msan-poison-undef-vectors)."),
    "msan-poison-undef-vectors",
    cl::desc("Precisely poison partially undefined constant vectors. "
             "If false (legacy behavior), the entire vector is "
             "considered fully initialized, which may lead to false "
             "negatives. Fully undefined constant vectors are "
             "unaffected by this flag (see -msan-poison-undef)."),
    "msan-precise-disjoint-or",
    cl::desc("Precisely poison disjoint OR. If false (legacy behavior), "
             "disjointedness is ignored (i.e., 1|1 is initialized)."),
    cl::desc("propagate shadow through ICmpEQ and ICmpNE"),
    cl::desc("exact handling of relational integer ICmp"),
    "msan-handle-lifetime-intrinsics",
    "when possible, poison scoped variables at the beginning of the scope "
    "(slower, but more precise)"),
    "msan-handle-asm-conservative",
    "msan-check-access-address",
    cl::desc("report accesses through a pointer which has poisoned shadow"),
    cl::desc("check arguments and return values at function call boundaries"),
337 "msan-dump-strict-instructions",
338 cl::desc(
"print out instructions with default strict semantics i.e.,"
339 "check that all the inputs are fully initialized, and mark "
340 "the output as fully initialized. These semantics are applied "
341 "to instructions that could not be handled explicitly nor "
350 "msan-dump-heuristic-instructions",
351 cl::desc(
"Prints 'unknown' instructions that were handled heuristically. "
352 "Use -msan-dump-strict-instructions to print instructions that "
353 "could not be handled explicitly nor heuristically."),
357 "msan-instrumentation-with-call-threshold",
359 "If the function being instrumented requires more than "
360 "this number of checks and origin stores, use callbacks instead of "
361 "inline checks (-1 means never use callbacks)."),
366 cl::desc(
"Enable KernelMemorySanitizer instrumentation"),
376 cl::desc(
"Insert checks for constant shadow values"),
383 cl::desc(
"Place MSan constructors in comdat sections"),
389 cl::desc(
"Define custom MSan AndMask"),
393 cl::desc(
"Define custom MSan XorMask"),
397 cl::desc(
"Define custom MSan ShadowBase"),
401 cl::desc(
"Define custom MSan OriginBase"),
406 cl::desc(
"Define threshold for number of checks per "
407 "debug location to force origin update."),
struct MemoryMapParams {

struct PlatformMemoryMapParams {
  const MemoryMapParams *bits32;
  const MemoryMapParams *bits64;
class MemorySanitizer {
  MemorySanitizer(MemorySanitizer &&) = delete;
  MemorySanitizer &operator=(MemorySanitizer &&) = delete;
  MemorySanitizer(const MemorySanitizer &) = delete;
  MemorySanitizer &operator=(const MemorySanitizer &) = delete;

  bool sanitizeFunction(Function &F, TargetLibraryInfo &TLI);

  friend struct MemorySanitizerVisitor;
  friend struct VarArgHelperBase;
  friend struct VarArgAMD64Helper;
  friend struct VarArgAArch64Helper;
  friend struct VarArgPowerPC64Helper;
  friend struct VarArgPowerPC32Helper;
  friend struct VarArgSystemZHelper;
  friend struct VarArgI386Helper;
  friend struct VarArgGenericHelper;

  void initializeModule(Module &M);
  void initializeCallbacks(Module &M, const TargetLibraryInfo &TLI);
  void createKernelApi(Module &M, const TargetLibraryInfo &TLI);
  void createUserspaceApi(Module &M, const TargetLibraryInfo &TLI);

  template <typename... ArgsTy>
  FunctionCallee getOrInsertMsanMetadataFunction(Module &M, StringRef Name,

  Value *ParamOriginTLS;
  Value *RetvalOriginTLS;
  Value *VAArgOriginTLS;
  Value *VAArgOverflowSizeTLS;

  bool CallbacksInitialized = false;

  FunctionCallee WarningFn;
  FunctionCallee MaybeWarningVarSizeFn;
  FunctionCallee MsanSetAllocaOriginWithDescriptionFn;
  FunctionCallee MsanSetAllocaOriginNoDescriptionFn;
  FunctionCallee MsanPoisonStackFn;
  FunctionCallee MsanChainOriginFn;
  FunctionCallee MsanSetOriginFn;
  FunctionCallee MemmoveFn, MemcpyFn, MemsetFn;
  StructType *MsanContextStateTy;
  FunctionCallee MsanGetContextStateFn;
  FunctionCallee MsanPoisonAllocaFn, MsanUnpoisonAllocaFn;
  FunctionCallee MsanMetadataPtrForLoadN, MsanMetadataPtrForStoreN;
  FunctionCallee MsanMetadataPtrForLoad_1_8[4];
  FunctionCallee MsanMetadataPtrForStore_1_8[4];
  FunctionCallee MsanInstrumentAsmStoreFn;
  Value *MsanMetadataAlloca;

  FunctionCallee getKmsanShadowOriginAccessFn(bool isStore, int size);

  const MemoryMapParams *MapParams;
  MemoryMapParams CustomMapParams;
  MDNode *ColdCallWeights;
  MDNode *OriginStoreWeights;
void insertModuleCtor(Module &M) {

  if (!Options.Kernel) {

  MemorySanitizer Msan(*F.getParent(), Options);

                                   OS, MapClassName2PassName);
  if (Options.EagerChecks)
    OS << "eager-checks;";
  OS << "track-origins=" << Options.TrackOrigins;

template <typename... ArgsTy>
MemorySanitizer::getOrInsertMsanMetadataFunction(Module &M, StringRef Name,
                                 std::forward<ArgsTy>(Args)...);
  return M.getOrInsertFunction(Name, MsanMetadata,
                               std::forward<ArgsTy>(Args)...);
  RetvalOriginTLS = nullptr;
  ParamOriginTLS = nullptr;
  VAArgOriginTLS = nullptr;
  VAArgOverflowSizeTLS = nullptr;

  WarningFn = M.getOrInsertFunction("__msan_warning",
                                    IRB.getVoidTy(), IRB.getInt32Ty());

  MsanGetContextStateFn =
      M.getOrInsertFunction("__msan_get_context_state", PtrTy);

  for (int ind = 0, size = 1; ind < 4; ind++, size <<= 1) {
    std::string name_load =
        "__msan_metadata_ptr_for_load_" + std::to_string(size);
    std::string name_store =
        "__msan_metadata_ptr_for_store_" + std::to_string(size);
    MsanMetadataPtrForLoad_1_8[ind] =
        getOrInsertMsanMetadataFunction(M, name_load, PtrTy);
    MsanMetadataPtrForStore_1_8[ind] =
        getOrInsertMsanMetadataFunction(M, name_store, PtrTy);

  MsanMetadataPtrForLoadN = getOrInsertMsanMetadataFunction(
      M, "__msan_metadata_ptr_for_load_n", PtrTy, IntptrTy);
  MsanMetadataPtrForStoreN = getOrInsertMsanMetadataFunction(
      M, "__msan_metadata_ptr_for_store_n", PtrTy, IntptrTy);

  MsanPoisonAllocaFn = M.getOrInsertFunction(
      "__msan_poison_alloca", IRB.getVoidTy(), PtrTy, IntptrTy, PtrTy);
  MsanUnpoisonAllocaFn = M.getOrInsertFunction(
      "__msan_unpoison_alloca", IRB.getVoidTy(), PtrTy, IntptrTy);

  return M.getOrInsertGlobal(Name, Ty, [&] {
                              nullptr, Name, nullptr,
void MemorySanitizer::createUserspaceApi(Module &M,
  StringRef WarningFnName = Recover ? "__msan_warning_with_origin"
                                    : "__msan_warning_with_origin_noreturn";
    WarningFn = M.getOrInsertFunction(WarningFnName,
                                      IRB.getVoidTy(), IRB.getInt32Ty());
        Recover ? "__msan_warning" : "__msan_warning_noreturn";
    WarningFn = M.getOrInsertFunction(WarningFnName, IRB.getVoidTy());

                        IRB.getIntPtrTy(M.getDataLayout()));

    unsigned AccessSize = 1 << AccessSizeIndex;
    std::string FunctionName = "__msan_maybe_warning_" + itostr(AccessSize);
    MaybeWarningFn[AccessSizeIndex] = M.getOrInsertFunction(
        IRB.getVoidTy(), IRB.getIntNTy(AccessSize * 8), IRB.getInt32Ty());
    MaybeWarningVarSizeFn = M.getOrInsertFunction(
        "__msan_maybe_warning_N", TLI.getAttrList(C, {}, false),
        IRB.getVoidTy(), PtrTy, IRB.getInt64Ty(), IRB.getInt32Ty());
    FunctionName = "__msan_maybe_store_origin_" + itostr(AccessSize);
    MaybeStoreOriginFn[AccessSizeIndex] = M.getOrInsertFunction(
        IRB.getVoidTy(), IRB.getIntNTy(AccessSize * 8), PtrTy,

  MsanSetAllocaOriginWithDescriptionFn =
      M.getOrInsertFunction("__msan_set_alloca_origin_with_descr",
                            IRB.getVoidTy(), PtrTy, IntptrTy, PtrTy, PtrTy);
  MsanSetAllocaOriginNoDescriptionFn =
      M.getOrInsertFunction("__msan_set_alloca_origin_no_descr",
                            IRB.getVoidTy(), PtrTy, IntptrTy, PtrTy);
  MsanPoisonStackFn = M.getOrInsertFunction("__msan_poison_stack",
                                            IRB.getVoidTy(), PtrTy, IntptrTy);
void MemorySanitizer::initializeCallbacks(Module &M,
  if (CallbacksInitialized)

  MsanChainOriginFn = M.getOrInsertFunction(
      "__msan_chain_origin",
  MsanSetOriginFn = M.getOrInsertFunction(
      IRB.getVoidTy(), PtrTy, IntptrTy, IRB.getInt32Ty());
      M.getOrInsertFunction("__msan_memmove", PtrTy, PtrTy, PtrTy, IntptrTy);
      M.getOrInsertFunction("__msan_memcpy", PtrTy, PtrTy, PtrTy, IntptrTy);
  MemsetFn = M.getOrInsertFunction("__msan_memset",
                                   PtrTy, PtrTy, IRB.getInt32Ty(), IntptrTy);
  MsanInstrumentAsmStoreFn = M.getOrInsertFunction(
      "__msan_instrument_asm_store", IRB.getVoidTy(), PtrTy, IntptrTy);

    createKernelApi(M, TLI);
    createUserspaceApi(M, TLI);
  CallbacksInitialized = true;

      isStore ? MsanMetadataPtrForStore_1_8 : MsanMetadataPtrForLoad_1_8;
void MemorySanitizer::initializeModule(Module &M) {
  auto &DL = M.getDataLayout();
  TargetTriple = M.getTargetTriple();

  bool ShadowPassed = ClShadowBase.getNumOccurrences() > 0;
  bool OriginPassed = ClOriginBase.getNumOccurrences() > 0;
  if (ShadowPassed || OriginPassed) {
    MapParams = &CustomMapParams;
    switch (TargetTriple.getOS()) {
      switch (TargetTriple.getArch()) {
      switch (TargetTriple.getArch()) {
      switch (TargetTriple.getArch()) {

  C = &(M.getContext());
  IntptrTy = IRB.getIntPtrTy(DL);
  OriginTy = IRB.getInt32Ty();
  PtrTy = IRB.getPtrTy();

  if (!CompileKernel) {
    M.getOrInsertGlobal("__msan_track_origins", IRB.getInt32Ty(), [&] {
      return new GlobalVariable(
          M, IRB.getInt32Ty(), true, GlobalValue::WeakODRLinkage,
          IRB.getInt32(TrackOrigins), "__msan_track_origins");
    M.getOrInsertGlobal("__msan_keep_going", IRB.getInt32Ty(), [&] {
      return new GlobalVariable(M, IRB.getInt32Ty(), true,
                                GlobalValue::WeakODRLinkage,
                                IRB.getInt32(Recover), "__msan_keep_going");
struct VarArgHelper {
  virtual ~VarArgHelper() = default;

  virtual void visitCallBase(CallBase &CB, IRBuilder<> &IRB) = 0;
  virtual void visitVAStartInst(VAStartInst &I) = 0;
  virtual void visitVACopyInst(VACopyInst &I) = 0;
  virtual void finalizeInstrumentation() = 0;

struct MemorySanitizerVisitor;
                                        MemorySanitizerVisitor &Visitor);
  if (TypeSizeFixed <= 8)
class NextNodeIRBuilder : public IRBuilder<> {

struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
  MemorySanitizer &MS;
  ValueMap<Value *, Value *> ShadowMap, OriginMap;
  std::unique_ptr<VarArgHelper> VAHelper;
  const TargetLibraryInfo *TLI;
  bool PropagateShadow;
  bool PoisonUndefVectors;

  struct ShadowOriginAndInsertPoint {
    ShadowOriginAndInsertPoint(Value *S, Value *O, Instruction *I)
        : Shadow(S), Origin(O), OrigIns(I) {}
  DenseMap<const DILocation *, int> LazyWarningDebugLocationCount;
  SmallSetVector<AllocaInst *, 16> AllocaSet;
  int64_t SplittableBlocksCount = 0;

  MemorySanitizerVisitor(Function &F, MemorySanitizer &MS,
                         const TargetLibraryInfo &TLI)
    bool SanitizeFunction =
    InsertChecks = SanitizeFunction;
    PropagateShadow = SanitizeFunction;

    MS.initializeCallbacks(*F.getParent(), TLI);
        IRBuilder<>(&F.getEntryBlock(), F.getEntryBlock().getFirstNonPHIIt())
            .CreateIntrinsic(Intrinsic::donothing, {});
    if (MS.CompileKernel) {
      insertKmsanPrologue(IRB);

               << "MemorySanitizer is not inserting checks into '"
               << F.getName() << "'\n");

  bool instrumentWithCalls(Value *V) {
    ++SplittableBlocksCount;

  bool isInPrologue(Instruction &I) {
    return I.getParent() == FnPrologueEnd->getParent() &&

    if (MS.TrackOrigins <= 1)
    return IRB.CreateCall(MS.MsanChainOriginFn, V);

    const DataLayout &DL = F.getDataLayout();
    unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
                   TypeSize TS, Align Alignment) {
    const DataLayout &DL = F.getDataLayout();
    const Align IntptrAlignment = DL.getABITypeAlign(MS.IntptrTy);
    unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
    auto [InsertPt, Index] =

    Align CurrentAlignment = Alignment;
    if (Alignment >= IntptrAlignment && IntptrSize > kOriginSize) {
      Value *IntptrOrigin = originToIntptr(IRB, Origin);
      for (unsigned i = 0; i < Size / IntptrSize; ++i) {
      CurrentAlignment = IntptrAlignment;

                   Value *OriginPtr, Align Alignment) {
    const DataLayout &DL = F.getDataLayout();
    TypeSize StoreSize = DL.getTypeStoreSize(Shadow->getType());
    Value *ConvertedShadow = convertShadowToScalar(Shadow, IRB);
      paintOrigin(IRB, updateOrigin(Origin, IRB), OriginPtr, StoreSize,

    TypeSize TypeSizeInBits = DL.getTypeSizeInBits(ConvertedShadow->getType());
    if (instrumentWithCalls(ConvertedShadow) &&
      FunctionCallee Fn = MS.MaybeStoreOriginFn[SizeIndex];
      Value *ConvertedShadow2 =
      CallBase *CB = IRB.CreateCall(Fn, {ConvertedShadow2, Addr, Origin});
      Value *Cmp = convertToBool(ConvertedShadow, IRB, "_mscmp");
      paintOrigin(IRBNew, updateOrigin(Origin, IRBNew), OriginPtr, StoreSize,
  void materializeStores() {
    for (StoreInst *SI : StoreList) {
      Value *Val = SI->getValueOperand();
      Value *Addr = SI->getPointerOperand();
      Value *Shadow = SI->isAtomic() ? getCleanShadow(Val) : getShadow(Val);
      Value *ShadowPtr, *OriginPtr;
      const Align Alignment = SI->getAlign();
      std::tie(ShadowPtr, OriginPtr) =
          getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment, true);

      [[maybe_unused]] StoreInst *NewSI =

      if (MS.TrackOrigins && !SI->isAtomic())
        storeOrigin(IRB, Addr, Shadow, getOrigin(Val), OriginPtr,
    if (MS.TrackOrigins < 2)

    if (LazyWarningDebugLocationCount.empty())
      for (const auto &I : InstrumentationList)
        ++LazyWarningDebugLocationCount[I.OrigIns->getDebugLoc()];
        auto NewDebugLoc = OI->getDebugLoc();
        IRBOrigin.SetCurrentDebugLocation(NewDebugLoc);
        Origin = updateOrigin(Origin, IRBOrigin);

    if (MS.CompileKernel || MS.TrackOrigins)

    const DataLayout &DL = F.getDataLayout();
    TypeSize TypeSizeInBits = DL.getTypeSizeInBits(ConvertedShadow->getType());
    if (instrumentWithCalls(ConvertedShadow) && !MS.CompileKernel) {
        ConvertedShadow = convertShadowToScalar(ConvertedShadow, IRB);
        Value *ConvertedShadow2 =
        FunctionCallee Fn = MS.MaybeWarningFn[SizeIndex];
             MS.TrackOrigins && Origin ? Origin : (Value *)IRB.getInt32(0)});
        FunctionCallee Fn = MS.MaybeWarningVarSizeFn;
        unsigned ShadowSize = DL.getTypeAllocSize(ConvertedShadow2->getType());
            {ShadowAlloca, ConstantInt::get(IRB.getInt64Ty(), ShadowSize),
             MS.TrackOrigins && Origin ? Origin : (Value *)IRB.getInt32(0)});

      Value *Cmp = convertToBool(ConvertedShadow, IRB, "_mscmp");
                                    !MS.Recover, MS.ColdCallWeights);
      insertWarningFn(IRB, Origin);
  void materializeInstructionChecks(
    const DataLayout &DL = F.getDataLayout();
    bool Combine = !MS.TrackOrigins;
    Value *Shadow = nullptr;
    for (const auto &ShadowData : InstructionChecks) {
      assert(ShadowData.OrigIns == Instruction);
      Value *ConvertedShadow = ShadowData.Shadow;
          insertWarningFn(IRB, ShadowData.Origin);
        materializeOneCheck(IRB, ConvertedShadow, ShadowData.Origin);
        Shadow = ConvertedShadow;
      Shadow = convertToBool(Shadow, IRB, "_mscmp");
      ConvertedShadow = convertToBool(ConvertedShadow, IRB, "_mscmp");
      Shadow = IRB.CreateOr(Shadow, ConvertedShadow, "_msor");

      materializeOneCheck(IRB, Shadow, nullptr);
  void materializeChecks() {
    SmallPtrSet<Instruction *, 16> Done;
    for (auto I = InstrumentationList.begin();
         I != InstrumentationList.end();) {
      auto OrigIns = I->OrigIns;
      auto J = std::find_if(I + 1, InstrumentationList.end(),
                            [OrigIns](const ShadowOriginAndInsertPoint &R) {
                              return OrigIns != R.OrigIns;
    MS.ParamTLS = IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
                                {Zero, IRB.getInt32(0)}, "param_shadow");
    MS.RetvalTLS = IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
                                 {Zero, IRB.getInt32(1)}, "retval_shadow");
    MS.VAArgTLS = IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
                                {Zero, IRB.getInt32(2)}, "va_arg_shadow");
    MS.VAArgOriginTLS = IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
                                      {Zero, IRB.getInt32(3)}, "va_arg_origin");
    MS.VAArgOverflowSizeTLS =
        IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
                      {Zero, IRB.getInt32(4)}, "va_arg_overflow_size");
    MS.ParamOriginTLS = IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
                                      {Zero, IRB.getInt32(5)}, "param_origin");
    MS.RetvalOriginTLS =
        IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
                      {Zero, IRB.getInt32(6)}, "retval_origin");
    MS.MsanMetadataAlloca = IRB.CreateAlloca(MS.MsanMetadata, 0u);
    for (Instruction *I : Instructions)

    for (PHINode *PN : ShadowPHINodes) {
      PHINode *PNO = MS.TrackOrigins ? cast<PHINode>(getOrigin(PN)) : nullptr;
      size_t NumValues = PN->getNumIncomingValues();
      for (size_t v = 0; v < NumValues; v++) {
        PNS->addIncoming(getShadow(PN, v), PN->getIncomingBlock(v));
          PNO->addIncoming(getOrigin(PN, v), PN->getIncomingBlock(v));

    VAHelper->finalizeInstrumentation();

    for (auto Item : LifetimeStartList) {
      instrumentAlloca(*Item.second, Item.first);
      AllocaSet.remove(Item.second);

    for (AllocaInst *AI : AllocaSet)
      instrumentAlloca(*AI);
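    // Editor's note (inferred from the call order below): the value checks
    // queued in InstrumentationList are materialized first; the
    // instrumentation of the stores collected in StoreList is deliberately
    // deferred until after all checks have been emitted.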
    materializeChecks();
    materializeStores();
  Type *getShadowTy(Value *V) { return getShadowTy(V->getType()); }

    const DataLayout &DL = F.getDataLayout();
      uint32_t EltSize = DL.getTypeSizeInBits(VT->getElementType());
                             VT->getElementCount());
      return ArrayType::get(getShadowTy(AT->getElementType()),
                            AT->getNumElements());
      for (unsigned i = 0, n = ST->getNumElements(); i < n; i++)
        Elements.push_back(getShadowTy(ST->getElementType(i)));
      LLVM_DEBUG(dbgs() << "getShadowTy: " << *ST << " ===> " << *Res << "\n");
    uint32_t TypeSize = DL.getTypeSizeInBits(OrigTy);

    for (unsigned Idx = 0; Idx < Struct->getNumElements(); Idx++) {
      Value *ShadowBool = convertToBool(ShadowItem, IRB);
      if (Aggregator != FalseVal)
        Aggregator = IRB.CreateOr(Aggregator, ShadowBool);
        Aggregator = ShadowBool;

  Value *collapseArrayShadow(ArrayType *Array, Value *Shadow,
    if (!Array->getNumElements())
    Value *Aggregator = convertShadowToScalar(FirstItem, IRB);
    for (unsigned Idx = 1; Idx < Array->getNumElements(); Idx++) {
      Value *ShadowInner = convertShadowToScalar(ShadowItem, IRB);
      Aggregator = IRB.CreateOr(Aggregator, ShadowInner);

      return collapseStructShadow(Struct, V, IRB);
      return collapseArrayShadow(Array, V, IRB);
        V->getType()->getPrimitiveSizeInBits().getFixedValue();

    Type *VTy = V->getType();
    return convertToBool(convertShadowToScalar(V, IRB), IRB, name);
  Type *ptrToIntPtrType(Type *PtrTy) const {
      return VectorType::get(ptrToIntPtrType(VectTy->getElementType()),
                             VectTy->getElementCount());

  Type *getPtrToShadowPtrType(Type *IntPtrTy, Type *ShadowTy) const {
      return VectorType::get(
          getPtrToShadowPtrType(VectTy->getElementType(), ShadowTy),
          VectTy->getElementCount());
    assert(IntPtrTy == MS.IntptrTy);

                             VectTy->getElementCount(),
                             constToIntPtr(VectTy->getElementType(), C));
    assert(IntPtrTy == MS.IntptrTy);
    return ConstantInt::get(MS.IntptrTy, C);

    Type *IntptrTy = ptrToIntPtrType(Addr->getType());
    if (uint64_t AndMask = MS.MapParams->AndMask)
      OffsetLong = IRB.CreateAnd(OffsetLong, constToIntPtr(IntptrTy, ~AndMask));
    if (uint64_t XorMask = MS.MapParams->XorMask)
      OffsetLong = IRB.CreateXor(OffsetLong, constToIntPtr(IntptrTy, XorMask));

  std::pair<Value *, Value *>
                             MaybeAlign Alignment) {
    assert(VectTy->getElementType()->isPointerTy());
    Type *IntptrTy = ptrToIntPtrType(Addr->getType());
    Value *ShadowOffset = getShadowPtrOffset(Addr, IRB);
    Value *ShadowLong = ShadowOffset;
    if (uint64_t ShadowBase = MS.MapParams->ShadowBase) {
          IRB.CreateAdd(ShadowLong, constToIntPtr(IntptrTy, ShadowBase));
        ShadowLong, getPtrToShadowPtrType(IntptrTy, ShadowTy));

    Value *OriginPtr = nullptr;
    if (MS.TrackOrigins) {
      Value *OriginLong = ShadowOffset;
      uint64_t OriginBase = MS.MapParams->OriginBase;
      if (OriginBase != 0)
            IRB.CreateAdd(OriginLong, constToIntPtr(IntptrTy, OriginBase));
        OriginLong = IRB.CreateAnd(OriginLong, constToIntPtr(IntptrTy, ~Mask));
          OriginLong, getPtrToShadowPtrType(IntptrTy, MS.OriginTy));
    return std::make_pair(ShadowPtr, OriginPtr);
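  // Editor's sketch, assembled from the code above: for userspace, the
  // application-to-metadata mapping is, per MemoryMapParams,
  //
  //   Offset = (Addr & ~AndMask) ^ XorMask
  //   Shadow = Offset + ShadowBase
  //   Origin = (Offset + OriginBase) & ~Mask   // aligned down to origin cells
  //
  // where the constants come from the per-platform tables selected in
  // initializeModule(), or from -msan-shadow-base / -msan-origin-base /
  // -msan-and-mask / -msan-xor-mask when passed on the command line.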
  template <typename... ArgsTy>
                     {MS.MsanMetadataAlloca, std::forward<ArgsTy>(Args)...});
      return IRB.CreateLoad(MS.MsanMetadata, MS.MsanMetadataAlloca);
    return IRB.CreateCall(Callee, {std::forward<ArgsTy>(Args)...});

  std::pair<Value *, Value *> getShadowOriginPtrKernelNoVec(Value *Addr,
    Value *ShadowOriginPtrs;
    const DataLayout &DL = F.getDataLayout();
    TypeSize Size = DL.getTypeStoreSize(ShadowTy);
      FunctionCallee Getter = MS.getKmsanShadowOriginAccessFn(isStore, Size);
      ShadowOriginPtrs = createMetadataCall(IRB, Getter, AddrCast);
      Value *SizeVal = ConstantInt::get(MS.IntptrTy, Size);
      ShadowOriginPtrs = createMetadataCall(
          isStore ? MS.MsanMetadataPtrForStoreN : MS.MsanMetadataPtrForLoadN,

    return std::make_pair(ShadowPtr, OriginPtr);

  std::pair<Value *, Value *> getShadowOriginPtrKernel(Value *Addr,
      return getShadowOriginPtrKernelNoVec(Addr, IRB, ShadowTy, isStore);

    Value *ShadowPtrs = ConstantInt::getNullValue(
    Value *OriginPtrs = nullptr;
    if (MS.TrackOrigins)
      OriginPtrs = ConstantInt::getNullValue(
    for (unsigned i = 0; i < NumElements; ++i) {
      auto [ShadowPtr, OriginPtr] =
          getShadowOriginPtrKernelNoVec(OneAddr, IRB, ShadowTy, isStore);
          ShadowPtrs, ShadowPtr, ConstantInt::get(IRB.getInt32Ty(), i));
      if (MS.TrackOrigins)
            OriginPtrs, OriginPtr, ConstantInt::get(IRB.getInt32Ty(), i));
    return {ShadowPtrs, OriginPtrs};

  std::pair<Value *, Value *> getShadowOriginPtr(Value *Addr, IRBuilder<> &IRB,
                                                 MaybeAlign Alignment,
    if (MS.CompileKernel)
      return getShadowOriginPtrKernel(Addr, IRB, ShadowTy, isStore);
    return getShadowOriginPtrUserspace(Addr, IRB, ShadowTy, Alignment);
    if (!MS.TrackOrigins)

  Value *getOriginPtrForRetval() {
    return MS.RetvalOriginTLS;

    assert(!ShadowMap.count(V) && "Values may only have one shadow");
    ShadowMap[V] = PropagateShadow ? SV : getCleanShadow(V);

    if (!MS.TrackOrigins)
    assert(!OriginMap.count(V) && "Values may only have one origin");
    LLVM_DEBUG(dbgs() << "ORIGIN: " << *V << " ==> " << *Origin << "\n");
    OriginMap[V] = Origin;

    Type *ShadowTy = getShadowTy(OrigTy);

  Constant *getCleanShadow(Value *V) { return getCleanShadow(V->getType()); }

                     getPoisonedShadow(AT->getElementType()));
      for (unsigned i = 0, n = ST->getNumElements(); i < n; i++)
        Vals.push_back(getPoisonedShadow(ST->getElementType(i)));

    Type *ShadowTy = getShadowTy(V);
    return getPoisonedShadow(ShadowTy);
      if (!PropagateShadow || I->getMetadata(LLVMContext::MD_nosanitize))
        return getCleanShadow(V);
      Value *Shadow = ShadowMap[V];
        LLVM_DEBUG(dbgs() << "No shadow: " << *V << "\n" << *(I->getParent()));
      assert(Shadow && "No shadow for a value");

      Value *AllOnes = (PropagateShadow && PoisonUndef) ? getPoisonedShadow(V)
                                                        : getCleanShadow(V);

      Value *&ShadowPtr = ShadowMap[V];
        unsigned ArgOffset = 0;
        const DataLayout &DL = F->getDataLayout();
        for (auto &FArg : F->args()) {
          if (!FArg.getType()->isSized() || FArg.getType()->isScalableTy()) {
                           ? "vscale not fully supported\n"
                           : "Arg is not sized\n"));
              ShadowPtr = getCleanShadow(V);
              setOrigin(A, getCleanOrigin());

          unsigned Size = FArg.hasByValAttr()
                              ? DL.getTypeAllocSize(FArg.getParamByValType())
                              : DL.getTypeAllocSize(FArg.getType());

          if (FArg.hasByValAttr()) {
            const Align ArgAlign = DL.getValueOrABITypeAlignment(
                FArg.getParamAlign(), FArg.getParamByValType());
            Value *CpShadowPtr, *CpOriginPtr;
            std::tie(CpShadowPtr, CpOriginPtr) =
                getShadowOriginPtr(V, EntryIRB, EntryIRB.getInt8Ty(), ArgAlign,
            if (!PropagateShadow || Overflow) {
              EntryIRB.CreateMemSet(
              Value *Base = getShadowPtrForArgument(EntryIRB, ArgOffset);
              [[maybe_unused]] Value *Cpy = EntryIRB.CreateMemCpy(
                  CpShadowPtr, CopyAlign, Base, CopyAlign, Size);
              if (MS.TrackOrigins) {
                Value *OriginPtr = getOriginPtrForArgument(EntryIRB, ArgOffset);
                EntryIRB.CreateMemCpy(

          if (!PropagateShadow || Overflow || FArg.hasByValAttr() ||
              (MS.EagerChecks && FArg.hasAttribute(Attribute::NoUndef))) {
            ShadowPtr = getCleanShadow(V);
            setOrigin(A, getCleanOrigin());
            Value *Base = getShadowPtrForArgument(EntryIRB, ArgOffset);
            ShadowPtr = EntryIRB.CreateAlignedLoad(getShadowTy(&FArg), Base,
            if (MS.TrackOrigins) {
              Value *OriginPtr = getOriginPtrForArgument(EntryIRB, ArgOffset);
              setOrigin(A, EntryIRB.CreateLoad(MS.OriginTy, OriginPtr));
                     << " ARG: " << FArg << " ==> " << *ShadowPtr << "\n");

      assert(ShadowPtr && "Could not find shadow for an argument");

        cast<Constant>(V)->containsUndefOrPoisonElement() && PropagateShadow &&
        PoisonUndefVectors) {
      for (unsigned i = 0; i != NumElems; ++i) {
                               : getCleanShadow(Elem);
      LLVM_DEBUG(dbgs() << "Partial undef constant vector: " << *V << " ==> "
                        << *ShadowConstant << "\n");
      return ShadowConstant;

    return getCleanShadow(V);
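  // Illustrative example (editor's sketch): with -msan-poison-undef-vectors
  // enabled, a partially undefined constant such as
  //   <4 x i32> <i32 0, i32 undef, i32 7, i32 undef>
  // receives the per-element shadow <0, -1, 0, -1>, i.e. only the undef
  // lanes are poisoned. The legacy behavior treated the whole vector as
  // fully initialized, which could hide bugs (false negatives).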
  Value *getShadow(Instruction *I, int i) {
    return getShadow(I->getOperand(i));

    if (!MS.TrackOrigins)
      return getCleanOrigin();
           "Unexpected value type in getOrigin()");
      if (I->getMetadata(LLVMContext::MD_nosanitize))
        return getCleanOrigin();
    Value *Origin = OriginMap[V];
    assert(Origin && "Missing origin");

  Value *getOrigin(Instruction *I, int i) {
    return getOrigin(I->getOperand(i));
  void insertCheckShadow(Value *Shadow, Value *Origin, Instruction *OrigIns) {
      LLVM_DEBUG(dbgs() << "Skipping check of " << *Shadow << " before "
                        << *OrigIns << "\n");
           "Can only insert checks for integer, vector, and aggregate shadow "
    InstrumentationList.push_back(
        ShadowOriginAndInsertPoint(Shadow, Origin, OrigIns));

  void insertCheckShadowOf(Value *Val, Instruction *OrigIns) {
    Value *Shadow, *Origin;
      Shadow = getShadow(Val);
      Origin = getOrigin(Val);
    insertCheckShadow(Shadow, Origin, OrigIns);
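  // Editor's note: insertCheckShadow() emits no IR by itself; it only queues
  // a (shadow, origin, insert-point) triple. The actual compare-and-warn
  // code is produced later by materializeChecks() and
  // materializeInstructionChecks(), which may also OR several boolean
  // shadows per instruction into a single check when origins are disabled.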
    case AtomicOrdering::NotAtomic:
      return AtomicOrdering::NotAtomic;
    case AtomicOrdering::Unordered:
    case AtomicOrdering::Monotonic:
    case AtomicOrdering::Release:
      return AtomicOrdering::Release;
    case AtomicOrdering::Acquire:
    case AtomicOrdering::AcquireRelease:
      return AtomicOrdering::AcquireRelease;
    case AtomicOrdering::SequentiallyConsistent:
      return AtomicOrdering::SequentiallyConsistent;

    constexpr int NumOrderings = (int)AtomicOrderingCABI::seq_cst + 1;
    uint32_t OrderingTable[NumOrderings] = {};

    OrderingTable[(int)AtomicOrderingCABI::relaxed] =
        OrderingTable[(int)AtomicOrderingCABI::release] =
            (int)AtomicOrderingCABI::release;
    OrderingTable[(int)AtomicOrderingCABI::consume] =
        OrderingTable[(int)AtomicOrderingCABI::acquire] =
            OrderingTable[(int)AtomicOrderingCABI::acq_rel] =
                (int)AtomicOrderingCABI::acq_rel;
    OrderingTable[(int)AtomicOrderingCABI::seq_cst] =
        (int)AtomicOrderingCABI::seq_cst;

    case AtomicOrdering::NotAtomic:
      return AtomicOrdering::NotAtomic;
    case AtomicOrdering::Unordered:
    case AtomicOrdering::Monotonic:
    case AtomicOrdering::Acquire:
      return AtomicOrdering::Acquire;
    case AtomicOrdering::Release:
    case AtomicOrdering::AcquireRelease:
      return AtomicOrdering::AcquireRelease;
    case AtomicOrdering::SequentiallyConsistent:
      return AtomicOrdering::SequentiallyConsistent;

    constexpr int NumOrderings = (int)AtomicOrderingCABI::seq_cst + 1;
    uint32_t OrderingTable[NumOrderings] = {};

    OrderingTable[(int)AtomicOrderingCABI::relaxed] =
        OrderingTable[(int)AtomicOrderingCABI::acquire] =
            OrderingTable[(int)AtomicOrderingCABI::consume] =
                (int)AtomicOrderingCABI::acquire;
    OrderingTable[(int)AtomicOrderingCABI::release] =
        OrderingTable[(int)AtomicOrderingCABI::acq_rel] =
            (int)AtomicOrderingCABI::acq_rel;
    OrderingTable[(int)AtomicOrderingCABI::seq_cst] =
        (int)AtomicOrderingCABI::seq_cst;
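  // Editor's note on the two tables above (rationale as I understand it):
  // MSan accesses shadow memory right next to the application access, so
  // atomic store orderings are strengthened to at least release (the shadow
  // write preceding the app store must be visible with it) and atomic load
  // orderings to at least acquire (the shadow read must observe the shadow
  // written before the app store). E.g. a relaxed store is instrumented as
  // release, and a relaxed load as acquire.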
  using InstVisitor<MemorySanitizerVisitor>::visit;
  void visit(Instruction &I) {
    if (I.getMetadata(LLVMContext::MD_nosanitize))
    if (isInPrologue(I))

      setShadow(&I, getCleanShadow(&I));
      setOrigin(&I, getCleanOrigin());
  void visitLoadInst(LoadInst &I) {
    assert(I.getType()->isSized() && "Load type must have size");
    assert(!I.getMetadata(LLVMContext::MD_nosanitize));
    NextNodeIRBuilder IRB(&I);
    Type *ShadowTy = getShadowTy(&I);
    Value *Addr = I.getPointerOperand();
    Value *ShadowPtr = nullptr, *OriginPtr = nullptr;
    const Align Alignment = I.getAlign();
    if (PropagateShadow) {
      std::tie(ShadowPtr, OriginPtr) =
          getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment, false);
      setShadow(&I, getCleanShadow(&I));

      insertCheckShadowOf(I.getPointerOperand(), &I);

    if (MS.TrackOrigins) {
      if (PropagateShadow) {
        setOrigin(&I, getCleanOrigin());

  void visitStoreInst(StoreInst &I) {
    StoreList.push_back(&I);
      insertCheckShadowOf(I.getPointerOperand(), &I);

  void handleCASOrRMW(Instruction &I) {
    Value *Addr = I.getOperand(0);
    Value *Val = I.getOperand(1);
    Value *ShadowPtr = getShadowOriginPtr(Addr, IRB, getShadowTy(Val), Align(1),
      insertCheckShadowOf(Addr, &I);
      insertCheckShadowOf(Val, &I);
    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
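  // Editor's note: handleCASOrRMW() checks the address and the incoming
  // value up front, then conservatively marks the instruction's result as
  // fully initialized (clean shadow and origin) instead of modeling the
  // memory contents returned by the atomic operation.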
  void visitAtomicRMWInst(AtomicRMWInst &I) {

  void visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) {

  void visitExtractElementInst(ExtractElementInst &I) {
    insertCheckShadowOf(I.getOperand(1), &I);
    setOrigin(&I, getOrigin(&I, 0));

  void visitInsertElementInst(InsertElementInst &I) {
    insertCheckShadowOf(I.getOperand(2), &I);
    auto *Shadow0 = getShadow(&I, 0);
    auto *Shadow1 = getShadow(&I, 1);
    setOriginForNaryOp(I);

  void visitShuffleVectorInst(ShuffleVectorInst &I) {
    auto *Shadow0 = getShadow(&I, 0);
    auto *Shadow1 = getShadow(&I, 1);
    setOriginForNaryOp(I);

  void visitSExtInst(SExtInst &I) {
    setShadow(&I, IRB.CreateSExt(getShadow(&I, 0), I.getType(), "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));

  void visitZExtInst(ZExtInst &I) {
    setShadow(&I, IRB.CreateZExt(getShadow(&I, 0), I.getType(), "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));

  void visitTruncInst(TruncInst &I) {
    setShadow(&I, IRB.CreateTrunc(getShadow(&I, 0), I.getType(), "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));

  void visitBitCastInst(BitCastInst &I) {
      if (CI->isMustTailCall())
    setOrigin(&I, getOrigin(&I, 0));

  void visitPtrToIntInst(PtrToIntInst &I) {
                             "_msprop_ptrtoint"));
    setOrigin(&I, getOrigin(&I, 0));

  void visitIntToPtrInst(IntToPtrInst &I) {
                             "_msprop_inttoptr"));
    setOrigin(&I, getOrigin(&I, 0));

  void visitFPToSIInst(CastInst &I) { handleShadowOr(I); }
  void visitFPToUIInst(CastInst &I) { handleShadowOr(I); }
  void visitSIToFPInst(CastInst &I) { handleShadowOr(I); }
  void visitUIToFPInst(CastInst &I) { handleShadowOr(I); }
  void visitFPExtInst(CastInst &I) { handleShadowOr(I); }
  void visitFPTruncInst(CastInst &I) { handleShadowOr(I); }
  void visitAnd(BinaryOperator &I) {
    Value *S2 = getShadow(&I, 1);
    Value *V1 = I.getOperand(0);
    Value *V2 = I.getOperand(1);
    setShadow(&I, IRB.CreateOr({S1S2, V1S2, S1V2}));
    setOriginForNaryOp(I);

  void visitOr(BinaryOperator &I) {
    Value *S2 = getShadow(&I, 1);
    Value *V1 = I.getOperand(0);
    Value *V2 = I.getOperand(1);
      S = IRB.CreateOr(S, DisjointOrShadow, "_ms_disjoint");
    setOriginForNaryOp(I);
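  // Editor's sketch of the propagation rule used by visitAnd above: a bit of
  // "A & B" is defined whenever either side holds a *defined zero* in that
  // position, so the result shadow is
  //   S = (S1 & S2) | (V1 & S2) | (S1 & V2)
  // e.g. (defined 0) & (poisoned) yields a defined 0, while
  // (defined 1) & (poisoned) stays poisoned. visitOr is the dual case,
  // where a defined one bit makes the corresponding result bit defined.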
  template <bool CombineShadow> class Combiner {
    Value *Shadow = nullptr;
    Value *Origin = nullptr;
    MemorySanitizerVisitor *MSV;

    Combiner(MemorySanitizerVisitor *MSV, IRBuilder<> &IRB)
        : IRB(IRB), MSV(MSV) {}

      if (CombineShadow) {
          OpShadow = MSV->CreateShadowCast(IRB, OpShadow, Shadow->getType());
          Shadow = IRB.CreateOr(Shadow, OpShadow, "_msprop");

      if (MSV->MS.TrackOrigins) {
          if (!ConstOrigin || !ConstOrigin->isNullValue()) {
            Value *Cond = MSV->convertToBool(OpShadow, IRB);

      Value *OpShadow = MSV->getShadow(V);
      Value *OpOrigin = MSV->MS.TrackOrigins ? MSV->getOrigin(V) : nullptr;
      return Add(OpShadow, OpOrigin);

    void Done(Instruction *I) {
      if (CombineShadow) {
        Shadow = MSV->CreateShadowCast(IRB, Shadow, MSV->getShadowTy(I));
        MSV->setShadow(I, Shadow);
      if (MSV->MS.TrackOrigins) {
        MSV->setOrigin(I, Origin);

    void DoneAndStoreOrigin(TypeSize TS, Value *OriginPtr) {
      if (MSV->MS.TrackOrigins) {

  using ShadowAndOriginCombiner = Combiner<true>;
  using OriginCombiner = Combiner<false>;
  void setOriginForNaryOp(Instruction &I) {
    if (!MS.TrackOrigins)
    OriginCombiner OC(this, IRB);
    for (Use &Op : I.operands())

  size_t VectorOrPrimitiveTypeSizeInBits(Type *Ty) {
           "Vector of pointers is not a valid shadow type");

    Type *srcTy = V->getType();
    size_t srcSizeInBits = VectorOrPrimitiveTypeSizeInBits(srcTy);
    size_t dstSizeInBits = VectorOrPrimitiveTypeSizeInBits(dstTy);
    if (srcSizeInBits > 1 && dstSizeInBits == 1)

    Type *ShadowTy = getShadowTy(V);
    if (V->getType() == ShadowTy)
    if (V->getType()->isPtrOrPtrVectorTy())

  void handleShadowOr(Instruction &I) {
    ShadowAndOriginCombiner SC(this, IRB);
    for (Use &Op : I.operands())

  Value *horizontalReduce(IntrinsicInst &I, unsigned ReductionFactor,
    unsigned TotalNumElems =
      TotalNumElems = TotalNumElems * 2;
    assert(TotalNumElems % ReductionFactor == 0);

    for (unsigned i = 0; i < ReductionFactor; i++) {
      SmallVector<int, 16> Mask;
      for (unsigned X = 0; X < TotalNumElems; X += ReductionFactor)
        Mask.push_back(X + i);
  void handlePairwiseShadowOrIntrinsic(IntrinsicInst &I) {
    assert(I.arg_size() == 1 || I.arg_size() == 2);

    assert(I.getType()->isVectorTy());
    assert(I.getArgOperand(0)->getType()->isVectorTy());

    [[maybe_unused]] FixedVectorType *ParamType =
    [[maybe_unused]] FixedVectorType *ReturnType =

    Value *FirstArgShadow = getShadow(&I, 0);
    Value *SecondArgShadow = nullptr;
    if (I.arg_size() == 2)
      SecondArgShadow = getShadow(&I, 1);

    Value *OrShadow = horizontalReduce(I, 2, FirstArgShadow,
    OrShadow = CreateShadowCast(IRB, OrShadow, getShadowTy(&I));

    setShadow(&I, OrShadow);
    setOriginForNaryOp(I);

  void handlePairwiseShadowOrIntrinsic(IntrinsicInst &I,
                                       int ReinterpretElemWidth) {
    assert(I.arg_size() == 1 || I.arg_size() == 2);

    assert(I.getType()->isVectorTy());
    assert(I.getArgOperand(0)->getType()->isVectorTy());

    FixedVectorType *ParamType =
    [[maybe_unused]] FixedVectorType *ReturnType =

    FixedVectorType *ReinterpretShadowTy = nullptr;

    Value *FirstArgShadow = getShadow(&I, 0);
    FirstArgShadow = IRB.CreateBitCast(FirstArgShadow, ReinterpretShadowTy);

    Value *SecondArgShadow = nullptr;
    if (I.arg_size() == 2) {
      SecondArgShadow = getShadow(&I, 1);
      SecondArgShadow = IRB.CreateBitCast(SecondArgShadow, ReinterpretShadowTy);

    Value *OrShadow = horizontalReduce(I, 2, FirstArgShadow,
    OrShadow = CreateShadowCast(IRB, OrShadow, getShadowTy(&I));

    setShadow(&I, OrShadow);
    setOriginForNaryOp(I);
  void visitFNeg(UnaryOperator &I) { handleShadowOr(I); }

  void handleMulByConstant(BinaryOperator &I, Constant *ConstArg,
      Type *EltTy = VTy->getElementType();
      for (unsigned Idx = 0; Idx < NumElements; ++Idx) {
        if (ConstantInt *Elt =
          const APInt &V = Elt->getValue();
          APInt V2 = APInt(V.getBitWidth(), 1) << V.countr_zero();
          Elements.push_back(ConstantInt::get(EltTy, V2));
          Elements.push_back(ConstantInt::get(EltTy, 1));
        const APInt &V = Elt->getValue();
        APInt V2 = APInt(V.getBitWidth(), 1) << V.countr_zero();
        ShadowMul = ConstantInt::get(Ty, V2);
        ShadowMul = ConstantInt::get(Ty, 1);

              IRB.CreateMul(getShadow(OtherArg), ShadowMul, "msprop_mul_cst"));
    setOrigin(&I, getOrigin(OtherArg));
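  // Editor's sketch: a constant multiplier with N trailing zero bits forces
  // the low N bits of the product to zero regardless of the other operand,
  // so multiplying the shadow by (1 << N) shifts the uncertainty left and
  // leaves the low bits defined. E.g. for "x * 8", the shadow of x is
  // multiplied by 8 and the three low result bits become clean.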
  void visitMul(BinaryOperator &I) {
    if (constOp0 && !constOp1)
      handleMulByConstant(I, constOp0, I.getOperand(1));
    else if (constOp1 && !constOp0)
      handleMulByConstant(I, constOp1, I.getOperand(0));

  void visitFAdd(BinaryOperator &I) { handleShadowOr(I); }
  void visitFSub(BinaryOperator &I) { handleShadowOr(I); }
  void visitFMul(BinaryOperator &I) { handleShadowOr(I); }
  void visitAdd(BinaryOperator &I) { handleShadowOr(I); }
  void visitSub(BinaryOperator &I) { handleShadowOr(I); }
  void visitXor(BinaryOperator &I) { handleShadowOr(I); }

  void handleIntegerDiv(Instruction &I) {
    insertCheckShadowOf(I.getOperand(1), &I);
    setShadow(&I, getShadow(&I, 0));
    setOrigin(&I, getOrigin(&I, 0));
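  // Editor's note: the divisor (operand 1) gets a hard check because
  // dividing by an uninitialized value can trap or produce arbitrary
  // results; the quotient then simply inherits the dividend's shadow and
  // origin, an approximation that keeps the instrumentation cheap.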
  void visitUDiv(BinaryOperator &I) { handleIntegerDiv(I); }
  void visitSDiv(BinaryOperator &I) { handleIntegerDiv(I); }
  void visitURem(BinaryOperator &I) { handleIntegerDiv(I); }
  void visitSRem(BinaryOperator &I) { handleIntegerDiv(I); }

  void visitFDiv(BinaryOperator &I) { handleShadowOr(I); }
  void visitFRem(BinaryOperator &I) { handleShadowOr(I); }
  void handleEqualityComparison(ICmpInst &I) {
    Value *Sa = getShadow(A);
    Value *Sb = getShadow(B);
    setOriginForNaryOp(I);

  void handleRelationalComparisonExact(ICmpInst &I) {
    Value *Sa = getShadow(A);
    Value *Sb = getShadow(B);

    bool IsSigned = I.isSigned();
    auto GetMinMaxUnsigned = [&](Value *V, Value *S) {
        V = IRB.CreateXor(V, ConstantInt::get(V->getType(), MinVal));
      return std::make_pair(Min, Max);

    auto [Amin, Amax] = GetMinMaxUnsigned(A, Sa);
    auto [Bmin, Bmax] = GetMinMaxUnsigned(B, Sb);
    setOriginForNaryOp(I);
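  // Editor's sketch of the technique above: treat each operand's poisoned
  // bits as unknowns. After the XOR with MinVal maps signed values onto the
  // unsigned order, every possible concrete value of A lies in an interval
  // [Amin, Amax] (poisoned bits all cleared vs. all set), and likewise for
  // B. The comparison result is known only if it comes out identical at the
  // two extremes; otherwise the result bit is poisoned.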
  void handleSignedRelationalComparison(ICmpInst &I) {
      op = I.getOperand(0);
      pre = I.getPredicate();
      op = I.getOperand(1);
      pre = I.getSwappedPredicate();
      setShadow(&I, Shadow);
      setOrigin(&I, getOrigin(op));

  void visitICmpInst(ICmpInst &I) {
    if (I.isEquality()) {
      handleEqualityComparison(I);
      handleRelationalComparisonExact(I);
      handleSignedRelationalComparison(I);
      handleRelationalComparisonExact(I);
  void visitFCmpInst(FCmpInst &I) { handleShadowOr(I); }

  void handleShift(BinaryOperator &I) {
    Value *S2 = getShadow(&I, 1);
    Value *V2 = I.getOperand(1);
    setShadow(&I, IRB.CreateOr(Shift, S2Conv));
    setOriginForNaryOp(I);
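  // Editor's note: handleShift shifts the first operand's shadow by the
  // concrete shift amount and then ORs in S2Conv, a broadcast "is the shift
  // amount itself poisoned?" bit, so any uncertainty in the amount poisons
  // the entire result.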
  void visitShl(BinaryOperator &I) { handleShift(I); }
  void visitAShr(BinaryOperator &I) { handleShift(I); }
  void visitLShr(BinaryOperator &I) { handleShift(I); }

  void handleFunnelShift(IntrinsicInst &I) {
    Value *S0 = getShadow(&I, 0);
    Value *S2 = getShadow(&I, 2);
    Value *V2 = I.getOperand(2);
    setShadow(&I, IRB.CreateOr(Shift, S2Conv));
    setOriginForNaryOp(I);
  void visitMemMoveInst(MemMoveInst &I) {
    getShadow(I.getArgOperand(1));
                   {I.getArgOperand(0), I.getArgOperand(1),
                    IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});

  void visitMemCpyInst(MemCpyInst &I) {
    getShadow(I.getArgOperand(1));
                   {I.getArgOperand(0), I.getArgOperand(1),
                    IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});

  void visitMemSetInst(MemSetInst &I) {
        {I.getArgOperand(0),
         IRB.CreateIntCast(I.getArgOperand(1), IRB.getInt32Ty(), false),
         IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});

  void visitVAStartInst(VAStartInst &I) { VAHelper->visitVAStartInst(I); }
  void visitVACopyInst(VACopyInst &I) { VAHelper->visitVACopyInst(I); }
  bool handleVectorStoreIntrinsic(IntrinsicInst &I) {
    Value *Addr = I.getArgOperand(0);
    Value *Shadow = getShadow(&I, 1);
    Value *ShadowPtr, *OriginPtr;
    std::tie(ShadowPtr, OriginPtr) = getShadowOriginPtr(

      insertCheckShadowOf(Addr, &I);
    if (MS.TrackOrigins)

  bool handleVectorLoadIntrinsic(IntrinsicInst &I) {
    Value *Addr = I.getArgOperand(0);
    Type *ShadowTy = getShadowTy(&I);
    Value *ShadowPtr = nullptr, *OriginPtr = nullptr;
    if (PropagateShadow) {
      std::tie(ShadowPtr, OriginPtr) =
          getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment, false);
      setShadow(&I, getCleanShadow(&I));

      insertCheckShadowOf(Addr, &I);

    if (MS.TrackOrigins) {
      if (PropagateShadow)
        setOrigin(&I, IRB.CreateLoad(MS.OriginTy, OriginPtr));
        setOrigin(&I, getCleanOrigin());
  [[maybe_unused]] bool
  maybeHandleSimpleNomemIntrinsic(IntrinsicInst &I,
                                  unsigned int trailingFlags) {
    Type *RetTy = I.getType();
    unsigned NumArgOperands = I.arg_size();
    assert(NumArgOperands >= trailingFlags);
    for (unsigned i = 0; i < NumArgOperands - trailingFlags; ++i) {
      Type *Ty = I.getArgOperand(i)->getType();

    ShadowAndOriginCombiner SC(this, IRB);
    for (unsigned i = 0; i < NumArgOperands; ++i)
      SC.Add(I.getArgOperand(i));

  bool maybeHandleUnknownIntrinsicUnlogged(IntrinsicInst &I) {
    unsigned NumArgOperands = I.arg_size();
    if (NumArgOperands == 0)

    if (NumArgOperands == 2 && I.getArgOperand(0)->getType()->isPointerTy() &&
        I.getArgOperand(1)->getType()->isVectorTy() &&
        I.getType()->isVoidTy() && !I.onlyReadsMemory()) {
      return handleVectorStoreIntrinsic(I);

    if (NumArgOperands == 1 && I.getArgOperand(0)->getType()->isPointerTy() &&
        I.getType()->isVectorTy() && I.onlyReadsMemory()) {
      return handleVectorLoadIntrinsic(I);

    if (I.doesNotAccessMemory())
      if (maybeHandleSimpleNomemIntrinsic(I, 0))

  bool maybeHandleUnknownIntrinsic(IntrinsicInst &I) {
    if (maybeHandleUnknownIntrinsicUnlogged(I)) {
      LLVM_DEBUG(dbgs() << "UNKNOWN INSTRUCTION HANDLED HEURISTICALLY: " << I
  void handleInvariantGroup(IntrinsicInst &I) {
    setShadow(&I, getShadow(&I, 0));
    setOrigin(&I, getOrigin(&I, 0));

  void handleLifetimeStart(IntrinsicInst &I) {
    LifetimeStartList.push_back(std::make_pair(&I, AI));

  void handleBswap(IntrinsicInst &I) {
    Type *OpType = Op->getType();
    setOrigin(&I, getOrigin(Op));

  void handleCountLeadingTrailingZeros(IntrinsicInst &I) {
    Value *Src = I.getArgOperand(0);
    Value *SrcShadow = getShadow(Src);

        I.getType(), I.getIntrinsicID(), {Src, False});
        I.getType(), I.getIntrinsicID(), {SrcShadow, False});
        ConcreteZerosCount, ShadowZerosCount, "_mscz_cmp_zeros");

    Value *NotAllZeroShadow =
    Value *OutputShadow =
        IRB.CreateAnd(CompareConcreteZeros, NotAllZeroShadow, "_mscz_main");

      OutputShadow = IRB.CreateOr(OutputShadow, BoolZeroPoison, "_mscz_bs");

    OutputShadow = IRB.CreateSExt(OutputShadow, getShadowTy(Src), "_mscz_os");

    setShadow(&I, OutputShadow);
    setOriginForNaryOp(I);
  void handleNEONVectorConvertIntrinsic(IntrinsicInst &I) {
    Value *S0 = getShadow(&I, 0);
    setShadow(&I, OutShadow);
    setOriginForNaryOp(I);

  FixedVectorType *maybeShrinkVectorShadowType(Value *Src, IntrinsicInst &I) {

  Value *maybeExtendVectorShadowWithZeros(Value *Shadow, IntrinsicInst &I) {
    Value *FullShadow = getCleanShadow(&I);
    unsigned ShadowNumElems =
    unsigned FullShadowNumElems =
    assert((ShadowNumElems == FullShadowNumElems) ||
           (ShadowNumElems * 2 == FullShadowNumElems));

    if (ShadowNumElems == FullShadowNumElems) {
      FullShadow = Shadow;
      std::iota(ShadowMask.begin(), ShadowMask.end(), 0);

  void handleSSEVectorConvertIntrinsicByProp(IntrinsicInst &I,
                                             bool HasRoundingMode) {
    if (HasRoundingMode) {
    Value *Src = I.getArgOperand(0);
    assert(Src->getType()->isVectorTy());
    VectorType *ShadowType = maybeShrinkVectorShadowType(Src, I);
    Value *S0 = getShadow(&I, 0);
    Value *FullShadow = maybeExtendVectorShadowWithZeros(Shadow, I);
    setShadow(&I, FullShadow);
    setOriginForNaryOp(I);
  void handleSSEVectorConvertIntrinsic(IntrinsicInst &I, int NumUsedElements,
                                       bool HasRoundingMode = false) {
    Value *CopyOp, *ConvertOp;
    assert((!HasRoundingMode ||
           "Invalid rounding mode");

    switch (I.arg_size() - HasRoundingMode) {
      CopyOp = I.getArgOperand(0);
      ConvertOp = I.getArgOperand(1);
      ConvertOp = I.getArgOperand(0);

    Value *ConvertShadow = getShadow(ConvertOp);
    Value *AggShadow = nullptr;
          ConvertShadow, ConstantInt::get(IRB.getInt32Ty(), 0));
      for (int i = 1; i < NumUsedElements; ++i) {
            ConvertShadow, ConstantInt::get(IRB.getInt32Ty(), i));
        AggShadow = IRB.CreateOr(AggShadow, MoreShadow);
      AggShadow = ConvertShadow;

    insertCheckShadow(AggShadow, getOrigin(ConvertOp), &I);

      Value *ResultShadow = getShadow(CopyOp);
      for (int i = 0; i < NumUsedElements; ++i) {
            ResultShadow, ConstantInt::getNullValue(EltTy),
      setShadow(&I, ResultShadow);
      setOrigin(&I, getOrigin(CopyOp));
      setShadow(&I, getCleanShadow(&I));
      setOrigin(&I, getCleanOrigin());

    S = CreateShadowCast(IRB, S, IRB.getInt64Ty(), true);
    return CreateShadowCast(IRB, S2, T, true);
    return CreateShadowCast(IRB, S2, T, true);
  void handleVectorShiftIntrinsic(IntrinsicInst &I, bool Variable) {
    Value *S2 = getShadow(&I, 1);
                       : Lower64ShadowExtend(IRB, S2, getShadowTy(&I));
    Value *V1 = I.getOperand(0);
    Value *V2 = I.getOperand(1);
                       {IRB.CreateBitCast(S1, V1->getType()), V2});
    setShadow(&I, IRB.CreateOr(Shift, S2Conv));
    setOriginForNaryOp(I);

  Type *getMMXVectorTy(unsigned EltSizeInBits,
                       unsigned X86_MMXSizeInBits = 64) {
    assert(EltSizeInBits != 0 && (X86_MMXSizeInBits % EltSizeInBits) == 0 &&
           "Illegal MMX vector element size");
                                X86_MMXSizeInBits / EltSizeInBits);
    case Intrinsic::x86_sse2_packsswb_128:
    case Intrinsic::x86_sse2_packuswb_128:
      return Intrinsic::x86_sse2_packsswb_128;

    case Intrinsic::x86_sse2_packssdw_128:
    case Intrinsic::x86_sse41_packusdw:
      return Intrinsic::x86_sse2_packssdw_128;

    case Intrinsic::x86_avx2_packsswb:
    case Intrinsic::x86_avx2_packuswb:
      return Intrinsic::x86_avx2_packsswb;

    case Intrinsic::x86_avx2_packssdw:
    case Intrinsic::x86_avx2_packusdw:
      return Intrinsic::x86_avx2_packssdw;

    case Intrinsic::x86_mmx_packsswb:
    case Intrinsic::x86_mmx_packuswb:
      return Intrinsic::x86_mmx_packsswb;

    case Intrinsic::x86_mmx_packssdw:
      return Intrinsic::x86_mmx_packssdw;

    case Intrinsic::x86_avx512_packssdw_512:
    case Intrinsic::x86_avx512_packusdw_512:
      return Intrinsic::x86_avx512_packssdw_512;

    case Intrinsic::x86_avx512_packsswb_512:
    case Intrinsic::x86_avx512_packuswb_512:
      return Intrinsic::x86_avx512_packsswb_512;
  void handleVectorPackIntrinsic(IntrinsicInst &I,
                                 unsigned MMXEltSizeInBits = 0) {
    Value *S2 = getShadow(&I, 1);
    assert(S1->getType()->isVectorTy());
        MMXEltSizeInBits ? getMMXVectorTy(MMXEltSizeInBits) : S1->getType();
    if (MMXEltSizeInBits) {
    if (MMXEltSizeInBits) {
                                 {S1_ext, S2_ext}, nullptr,
                                 "_msprop_vector_pack");
    if (MMXEltSizeInBits)
    setOriginForNaryOp(I);

  Constant *createDppMask(unsigned Width, unsigned Mask) {

    const unsigned Width =
    Value *DstMaskV = createDppMask(Width, DstMask);
  void handleDppIntrinsic(IntrinsicInst &I) {
    Value *S0 = getShadow(&I, 0);

    const unsigned Width =
    assert(Width == 2 || Width == 4 || Width == 8);

    const unsigned SrcMask = Mask >> 4;
    const unsigned DstMask = Mask & 0xf;

    Value *SI1 = findDppPoisonedOutput(IRB, S, SrcMask, DstMask);
          SI1, findDppPoisonedOutput(IRB, S, SrcMask << 4, DstMask << 4));
    setOriginForNaryOp(I);

    C = CreateAppToShadowCast(IRB, C);

  void handleBlendvIntrinsic(IntrinsicInst &I) {
    Value *Sc = getShadow(&I, 2);
    Value *Oc = MS.TrackOrigins ? getOrigin(C) : nullptr;
    C = convertBlendvToSelectMask(IRB, C);
    Sc = convertBlendvToSelectMask(IRB, Sc);
    handleSelectLikeInst(I, C, T, F);

  void handleVectorSadIntrinsic(IntrinsicInst &I, bool IsMMX = false) {
    const unsigned SignificantBitsPerResultElement = 16;
    unsigned ZeroBitsPerResultElement =
    auto *Shadow0 = getShadow(&I, 0);
    auto *Shadow1 = getShadow(&I, 1);
    S = IRB.CreateLShr(S, ZeroBitsPerResultElement);
    setOriginForNaryOp(I);
  void handleVectorPmaddIntrinsic(IntrinsicInst &I, unsigned ReductionFactor,
                                  unsigned EltSizeInBits = 0) {
    [[maybe_unused]] FixedVectorType *ReturnType =

    Value *Va = nullptr;
    Value *Vb = nullptr;
    Value *Sa = nullptr;
    Value *Sb = nullptr;

    assert(I.arg_size() == 2 || I.arg_size() == 3);
    if (I.arg_size() == 2) {
      Va = I.getOperand(0);
      Vb = I.getOperand(1);
      Sa = getShadow(&I, 0);
      Sb = getShadow(&I, 1);
    } else if (I.arg_size() == 3) {
      Va = I.getOperand(1);
      Vb = I.getOperand(2);
      Sa = getShadow(&I, 1);
      Sb = getShadow(&I, 2);

    if (I.arg_size() == 3) {
      [[maybe_unused]] auto *AccumulatorType =
      assert(AccumulatorType == ReturnType);

    FixedVectorType *ImplicitReturnType = ReturnType;
    if (EltSizeInBits) {
          getMMXVectorTy(EltSizeInBits * ReductionFactor,
          ReturnType->getNumElements() * ReductionFactor);

    Value *And = IRB.CreateOr({SaAndSbNonZero, VaAndSbNonZero, SaAndVbNonZero});
                                     ImplicitReturnType);

    OutShadow = CreateShadowCast(IRB, OutShadow, getShadowTy(&I));

    if (I.arg_size() == 3)
      OutShadow = IRB.CreateOr(OutShadow, getShadow(&I, 0));

    setShadow(&I, OutShadow);
    setOriginForNaryOp(I);
  void handleVectorComparePackedIntrinsic(IntrinsicInst &I) {
    Type *ResTy = getShadowTy(&I);
    auto *Shadow0 = getShadow(&I, 0);
    auto *Shadow1 = getShadow(&I, 1);
    setOriginForNaryOp(I);

  void handleVectorCompareScalarIntrinsic(IntrinsicInst &I) {
    auto *Shadow0 = getShadow(&I, 0);
    auto *Shadow1 = getShadow(&I, 1);
    Value *S = LowerElementShadowExtend(IRB, S0, getShadowTy(&I));
    setOriginForNaryOp(I);
  void handleVectorReduceIntrinsic(IntrinsicInst &I, bool AllowShadowCast) {
    if (AllowShadowCast)
      S = CreateShadowCast(IRB, S, getShadowTy(&I));
    setOriginForNaryOp(I);

  void handleVectorReduceWithStarterIntrinsic(IntrinsicInst &I) {
    Value *Shadow0 = getShadow(&I, 0);
    setOriginForNaryOp(I);

  void handleVectorReduceOrIntrinsic(IntrinsicInst &I) {
    Value *OperandShadow = getShadow(&I, 0);
    Value *OperandUnsetOrPoison = IRB.CreateOr(OperandUnsetBits, OperandShadow);
    setOrigin(&I, getOrigin(&I, 0));

  void handleVectorReduceAndIntrinsic(IntrinsicInst &I) {
    Value *OperandShadow = getShadow(&I, 0);
    Value *OperandSetOrPoison = IRB.CreateOr(I.getOperand(0), OperandShadow);
    setOrigin(&I, getOrigin(&I, 0));
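  // Editor's sketch of the precision trick above: for reduce_or, result bit
  // N is definitely 1 (hence clean) as soon as some lane contributes a
  // defined one at bit N, so a reduction over (~V | S) is poisoned only
  // where no lane provides a defined one. reduce_and is the mirror image,
  // built from (V | S), where a defined zero in any lane pins the result.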
  void handleStmxcsr(IntrinsicInst &I) {
    Value *Addr = I.getArgOperand(0);
        getShadowOriginPtr(Addr, IRB, Ty, Align(1), true).first;

      insertCheckShadowOf(Addr, &I);

  void handleLdmxcsr(IntrinsicInst &I) {
    Value *Addr = I.getArgOperand(0);
    Value *ShadowPtr, *OriginPtr;
    std::tie(ShadowPtr, OriginPtr) =
        getShadowOriginPtr(Addr, IRB, Ty, Alignment, false);

      insertCheckShadowOf(Addr, &I);

    Value *Origin = MS.TrackOrigins ? IRB.CreateLoad(MS.OriginTy, OriginPtr)
    insertCheckShadow(Shadow, Origin, &I);
  void handleMaskedExpandLoad(IntrinsicInst &I) {
    MaybeAlign Align = I.getParamAlign(0);
    Value *PassThru = I.getArgOperand(2);

      insertCheckShadowOf(Ptr, &I);
      insertCheckShadowOf(Mask, &I);

    if (!PropagateShadow) {
      setShadow(&I, getCleanShadow(&I));
      setOrigin(&I, getCleanOrigin());

    Type *ShadowTy = getShadowTy(&I);
    auto [ShadowPtr, OriginPtr] =
        getShadowOriginPtr(Ptr, IRB, ElementShadowTy, Align, false);
                                getShadow(PassThru), "_msmaskedexpload");

    setShadow(&I, Shadow);
    setOrigin(&I, getCleanOrigin());

  void handleMaskedCompressStore(IntrinsicInst &I) {
    Value *Values = I.getArgOperand(0);
    MaybeAlign Align = I.getParamAlign(1);

      insertCheckShadowOf(Ptr, &I);
      insertCheckShadowOf(Mask, &I);

    Value *Shadow = getShadow(Values);
    Type *ElementShadowTy =
    auto [ShadowPtr, OriginPtrs] =
        getShadowOriginPtr(Ptr, IRB, ElementShadowTy, Align, true);
  void handleMaskedGather(IntrinsicInst &I) {
    Value *Ptrs = I.getArgOperand(0);
    const Align Alignment(
    Value *PassThru = I.getArgOperand(3);

    Type *PtrsShadowTy = getShadowTy(Ptrs);
      insertCheckShadowOf(Mask, &I);
      insertCheckShadow(MaskedPtrShadow, getOrigin(Ptrs), &I);

    if (!PropagateShadow) {
      setShadow(&I, getCleanShadow(&I));
      setOrigin(&I, getCleanOrigin());

    Type *ShadowTy = getShadowTy(&I);
    auto [ShadowPtrs, OriginPtrs] = getShadowOriginPtr(
        Ptrs, IRB, ElementShadowTy, Alignment, false);
                                       getShadow(PassThru), "_msmaskedgather");

    setShadow(&I, Shadow);
    setOrigin(&I, getCleanOrigin());

  void handleMaskedScatter(IntrinsicInst &I) {
    Value *Values = I.getArgOperand(0);
    Value *Ptrs = I.getArgOperand(1);
    const Align Alignment(

    Type *PtrsShadowTy = getShadowTy(Ptrs);
      insertCheckShadowOf(Mask, &I);
      insertCheckShadow(MaskedPtrShadow, getOrigin(Ptrs), &I);

    Value *Shadow = getShadow(Values);
    Type *ElementShadowTy =
    auto [ShadowPtrs, OriginPtrs] = getShadowOriginPtr(
        Ptrs, IRB, ElementShadowTy, Alignment, true);
  void handleMaskedStore(IntrinsicInst &I) {
    Value *V = I.getArgOperand(0);
    const Align Alignment(
    Value *Shadow = getShadow(V);

      insertCheckShadowOf(Ptr, &I);
      insertCheckShadowOf(Mask, &I);

    std::tie(ShadowPtr, OriginPtr) = getShadowOriginPtr(
        Ptr, IRB, Shadow->getType(), Alignment, true);

    if (!MS.TrackOrigins)

    auto &DL = F.getDataLayout();
    paintOrigin(IRB, getOrigin(V), OriginPtr,

  void handleMaskedLoad(IntrinsicInst &I) {
    const Align Alignment(
    Value *PassThru = I.getArgOperand(3);

      insertCheckShadowOf(Ptr, &I);
      insertCheckShadowOf(Mask, &I);

    if (!PropagateShadow) {
      setShadow(&I, getCleanShadow(&I));
      setOrigin(&I, getCleanOrigin());

    Type *ShadowTy = getShadowTy(&I);
    Value *ShadowPtr, *OriginPtr;
    std::tie(ShadowPtr, OriginPtr) =
        getShadowOriginPtr(Ptr, IRB, ShadowTy, Alignment, false);
                                getShadow(PassThru), "_msmaskedld"));

    if (!MS.TrackOrigins)
    Value *NotNull = convertToBool(MaskedPassThruShadow, IRB, "_mscmp");
    setOrigin(&I, Origin);
  void handleAVXMaskedStore(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);

    Value *Dst = I.getArgOperand(0);
    assert(Dst->getType()->isPointerTy() && "Destination is not a pointer!");

    Value *Mask = I.getArgOperand(1);
    assert(isa<VectorType>(Mask->getType()) && "Mask is not a vector!");

    Value *Src = I.getArgOperand(2);
    assert(isa<VectorType>(Src->getType()) && "Source is not a vector!");

    const Align Alignment = Align(1);

    Value *SrcShadow = getShadow(Src);

    if (ClCheckAccessAddress) {
      insertCheckShadowOf(Dst, &I);
      insertCheckShadowOf(Mask, &I);
    }

    Value *DstShadowPtr;
    Value *DstOriginPtr;
    std::tie(DstShadowPtr, DstOriginPtr) = getShadowOriginPtr(
        Dst, IRB, SrcShadow->getType(), Alignment, /*isStore*/ true);

    SmallVector<Value *, 2> ShadowArgs;
    ShadowArgs.append(1, DstShadowPtr);
    ShadowArgs.append(1, Mask);
    // The intrinsic may require a floating-point operand, but shadows are
    // arbitrary bit patterns; bitcast the shadow accordingly.
    ShadowArgs.append(1, IRB.CreateBitCast(SrcShadow, Src->getType()));

    IRB.CreateIntrinsic(IRB.getVoidTy(), I.getIntrinsicID(), ShadowArgs);

    if (!MS.TrackOrigins)
      return;

    // Approximation only: paint the destination origins with the source
    // origin, regardless of the mask.
    auto &DL = F.getDataLayout();
    paintOrigin(IRB, getOrigin(Src), DstOriginPtr,
                DL.getTypeStoreSize(SrcShadow->getType()),
                std::max(Alignment, kMinOriginAlignment));
  }
  void handleAVXMaskedLoad(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);

    Value *Src = I.getArgOperand(0);
    assert(Src->getType()->isPointerTy() && "Source is not a pointer!");

    Value *Mask = I.getArgOperand(1);
    assert(isa<VectorType>(Mask->getType()) && "Mask is not a vector!");

    const Align Alignment = Align(1);

    if (ClCheckAccessAddress)
      insertCheckShadowOf(Mask, &I);

    Type *SrcShadowTy = getShadowTy(Src);
    Value *SrcShadowPtr, *SrcOriginPtr;
    std::tie(SrcShadowPtr, SrcOriginPtr) =
        getShadowOriginPtr(Src, IRB, SrcShadowTy, Alignment, /*isStore*/ false);

    SmallVector<Value *, 2> ShadowArgs;
    ShadowArgs.append(1, SrcShadowPtr);
    ShadowArgs.append(1, Mask);

    // Masked-off elements of an AVX masked load are zeroed, hence their
    // shadow is clean; applying the intrinsic to the shadow therefore yields
    // a correct result shadow directly.
    CallInst *CI =
        IRB.CreateIntrinsic(I.getType(), I.getIntrinsicID(), ShadowArgs);
    setShadow(&I, IRB.CreateBitCast(CI, getShadowTy(&I)));

    if (!MS.TrackOrigins)
      return;

    // Approximation only: use the origin of the source memory.
    Value *PtrSrcOrigin = IRB.CreateLoad(MS.OriginTy, SrcOriginPtr);
    setOrigin(&I, PtrSrcOrigin);
  }
  // Check the shadow of an AVX permutation index vector, ignoring the bits
  // of each index that cannot address a lane.
  void maskedCheckAVXIndexShadow(IRBuilder<> &IRB, Value *Idx,
                                 Instruction *I) {
    assert(isFixedIntVector(Idx));
    auto IdxVectorSize =
        cast<FixedVectorType>(Idx->getType())->getNumElements();
    assert(isPowerOf2_64(IdxVectorSize));

    // Only the low log2(N) bits of each index are meaningful; truncate the
    // index shadow to those bits before checking it.
    auto *IdxShadow = getShadow(Idx);
    Value *Truncated = IRB.CreateTrunc(
        IdxShadow,
        FixedVectorType::get(IntegerType::get(*MS.C, Log2_64(IdxVectorSize)),
                             IdxVectorSize));
    insertCheckShadow(Truncated, getOrigin(Idx), I);
  }
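  // Instrument AVX permutation intrinsics: the same permutation is applied to
  // the shadow vector(s), and the relevant bits of the index operand's shadow
  // are checked via maskedCheckAVXIndexShadow() above.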
  void handleAVXVpermilvar(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Shadow = getShadow(&I, 0);
    maskedCheckAVXIndexShadow(IRB, I.getArgOperand(1), &I);

    // Shadows are integer-ish types but some intrinsics require a
    // different (e.g., floating-point) type.
    Shadow = IRB.CreateBitCast(Shadow, I.getArgOperand(0)->getType());
    CallInst *CI = IRB.CreateIntrinsic(I.getType(), I.getIntrinsicID(),
                                       {Shadow, I.getArgOperand(1)});
    setShadow(&I, IRB.CreateBitCast(CI, getShadowTy(&I)));
    setOriginForNaryOp(I);
  }
  void handleAVXVpermi2var(IntrinsicInst &I) {
    assert(I.arg_size() == 3);
    [[maybe_unused]] auto ArgVectorSize =
        cast<FixedVectorType>(I.getArgOperand(0)->getType())->getNumElements();
    assert(cast<FixedVectorType>(I.getArgOperand(1)->getType())
               ->getNumElements() == ArgVectorSize);
    assert(cast<FixedVectorType>(I.getArgOperand(2)->getType())
               ->getNumElements() == ArgVectorSize);
    assert(I.getArgOperand(0)->getType() == I.getArgOperand(2)->getType());
    assert(I.getType() == I.getArgOperand(0)->getType());
    assert(I.getArgOperand(1)->getType()->isIntOrIntVectorTy());

    IRBuilder<> IRB(&I);
    Value *AShadow = getShadow(&I, 0);
    Value *Idx = I.getArgOperand(1);
    Value *BShadow = getShadow(&I, 2);

    maskedCheckAVXIndexShadow(IRB, Idx, &I);

    // Shadows are integer-ish types but some intrinsics require a
    // different (e.g., floating-point) type.
    AShadow = IRB.CreateBitCast(AShadow, I.getArgOperand(0)->getType());
    BShadow = IRB.CreateBitCast(BShadow, I.getArgOperand(2)->getType());
    CallInst *CI = IRB.CreateIntrinsic(I.getType(), I.getIntrinsicID(),
                                       {AShadow, Idx, BShadow});
    setShadow(&I, IRB.CreateBitCast(CI, getShadowTy(&I)));
    setOriginForNaryOp(I);
  }
  [[maybe_unused]] static bool isFixedIntVectorTy(const Type *T) {
    return isa<FixedVectorType>(T) && T->isIntOrIntVectorTy();
  }

  [[maybe_unused]] static bool isFixedFPVectorTy(const Type *T) {
    return isa<FixedVectorType>(T) && T->isFPOrFPVectorTy();
  }

  [[maybe_unused]] static bool isFixedIntVector(const Value *V) {
    return isFixedIntVectorTy(V->getType());
  }

  [[maybe_unused]] static bool isFixedFPVector(const Value *V) {
    return isFixedFPVectorTy(V->getType());
  }
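  // Instrument AVX512 masked FP-to-int conversions. Illustrative signatures
  // (the mask position is selected by the LastMask flag):
  //   <16 x i32> @llvm.x86.avx512.mask.cvtps2dq.512
  //                  (<16 x float>, <16 x i32>, i16, i32)   ; LastMask == false
  //   <8 x i16>  @llvm.x86.avx512.mask.vcvtps2ph.256
  //                  (<8 x float>, i32, <8 x i16>, i8)      ; LastMask == true
  // The converted shadow of A is selected against the writethrough operand's
  // shadow under the mask.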
  void handleAVX512VectorConvertFPToInt(IntrinsicInst &I, bool LastMask) {
    IRBuilder<> IRB(&I);

    assert(I.arg_size() == 4);
    Value *A = I.getOperand(0);
    Value *WriteThrough;
    Value *Mask;
    if (LastMask) {
      WriteThrough = I.getOperand(2);
      Mask = I.getOperand(3);
    } else {
      WriteThrough = I.getOperand(1);
      Mask = I.getOperand(2);
    }

    assert(isFixedFPVector(A));
    assert(isFixedIntVector(WriteThrough));

    unsigned ANumElements =
        cast<FixedVectorType>(A->getType())->getNumElements();
    [[maybe_unused]] unsigned WriteThruNumElements =
        cast<FixedVectorType>(WriteThrough->getType())->getNumElements();
    assert(ANumElements == WriteThruNumElements ||
           ANumElements * 2 == WriteThruNumElements);

    assert(Mask->getType()->isIntegerTy());
    unsigned MaskNumElements = Mask->getType()->getScalarSizeInBits();
    assert(ANumElements == MaskNumElements ||
           ANumElements * 2 == MaskNumElements);

    assert(WriteThruNumElements == MaskNumElements);

    insertCheckShadowOf(Mask, &I);

    Value *AShadow = getShadow(A);
    AShadow = maybeExtendVectorShadowWithZeros(AShadow, I);

    if (ANumElements * 2 == MaskNumElements) {
      // The output vector has twice as many elements as A; the instruction
      // zeroes the upper half of the output, whose shadow is therefore clean
      // (the zero-extended half of AShadow). Force the corresponding mask
      // bits on so the select below picks AShadow rather than the
      // writethrough's shadow for those lanes.
      Mask = IRB.CreateOr(
          Mask, ConstantInt::get(Mask->getType(),
                                 ~((1ULL << ANumElements) - 1)));
    }

    Mask = IRB.CreateBitCast(
        Mask, FixedVectorType::get(IRB.getInt1Ty(), MaskNumElements),
        "_ms_mask_bitcast");

    AShadow = IRB.CreateBitCast(AShadow, getShadowTy(&I), "_ms_a_shadow");
    Value *WriteThroughShadow = getShadow(WriteThrough);
    Value *Shadow = IRB.CreateSelect(Mask, AShadow, WriteThroughShadow,
                                     "_ms_writethru_select");

    setShadow(&I, Shadow);
    setOriginForNaryOp(I);
  }
  // Instrument BMI / BMI2 intrinsics. All of these are Z = I(X, Y), where the
  // operand and result types match (i32 or i64):
  //   Sz = I(Sx, Y) | (sext (Sy != 0))
  void handleBmiIntrinsic(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Type *ShadowTy = getShadowTy(&I);

    // If any bit of the mask operand is poisoned, the whole result is.
    Value *SMask = getShadow(&I, 1);
    SMask = IRB.CreateSExt(IRB.CreateICmpNE(SMask, getCleanShadow(ShadowTy)),
                           ShadowTy);
    // Apply the same intrinsic to the shadow of the first operand.
    Value *S = IRB.CreateCall(I.getCalledFunction(),
                              {getShadow(&I, 0), I.getOperand(1)});
    S = IRB.CreateOr(SMask, S);
    setShadow(&I, S);
    setOriginForNaryOp(I);
  }
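  // pclmul (carry-less multiply) uses either the odd or the even elements of
  // its operands, selected by bits of the immediate, and ignores the rest.
  // getPclmulMask() builds a shuffle mask that duplicates the used element
  // over the unused one, e.g. for Width = 4: even -> (0, 0, 2, 2),
  // odd -> (1, 1, 3, 3), so that ordinary OR-based shadow combination
  // afterwards is exact for this operation.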
  static SmallVector<int, 8> getPclmulMask(unsigned Width, bool OddElements) {
    SmallVector<int, 8> Mask;
    for (unsigned X = OddElements ? 1 : 0; X < Width; X += 2) {
      Mask.append(2, X);
    }
    return Mask;
  }
  void handlePclmulIntrinsic(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    unsigned Width =
        cast<FixedVectorType>(I.getArgOperand(0)->getType())->getNumElements();
    assert(isa<ConstantInt>(I.getArgOperand(2)) &&
           "pclmul 3rd operand must be a constant");
    unsigned Imm = cast<ConstantInt>(I.getArgOperand(2))->getZExtValue();
    Value *Shuf0 = IRB.CreateShuffleVector(getShadow(&I, 0),
                                           getPclmulMask(Width, Imm & 0x01));
    Value *Shuf1 = IRB.CreateShuffleVector(getShadow(&I, 1),
                                           getPclmulMask(Width, Imm & 0x10));
    ShadowAndOriginCombiner SOC(this, IRB);
    SOC.Add(Shuf0, getOrigin(&I, 0));
    SOC.Add(Shuf1, getOrigin(&I, 1));
    SOC.Done(&I);
  }
  // Instrument _mm_something_sd|ss intrinsics with one relevant operand.
  void handleUnarySdSsIntrinsic(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    unsigned Width =
        cast<FixedVectorType>(I.getArgOperand(0)->getType())->getNumElements();
    Value *First = getShadow(&I, 0);
    Value *Second = getShadow(&I, 1);
    // Upper elements of the first operand, lowest element of the second.
    SmallVector<int, 16> Mask;
    Mask.push_back(Width);
    for (unsigned i = 1; i < Width; i++)
      Mask.push_back(i);
    Value *Shadow = IRB.CreateShuffleVector(First, Second, Mask);

    setShadow(&I, Shadow);
    setOriginForNaryOp(I);
  }
  // Instrument vtest/ptest intrinsics: if any bit of either operand's shadow
  // is set, the (scalar) result is poisoned.
  void handleVtestIntrinsic(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Shadow0 = getShadow(&I, 0);
    Value *Shadow1 = getShadow(&I, 1);
    Value *Or = IRB.CreateOr(Shadow0, Shadow1);
    Value *NZ = IRB.CreateICmpNE(Or, Constant::getNullValue(Or->getType()));
    Value *Scalar = convertShadowToScalar(NZ, IRB);
    Value *Shadow = IRB.CreateZExt(Scalar, getShadowTy(&I));

    setShadow(&I, Shadow);
    setOriginForNaryOp(I);
  }
  // Instrument _mm_something_sd|ss intrinsics with two relevant operands.
  void handleBinarySdSsIntrinsic(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    unsigned Width =
        cast<FixedVectorType>(I.getArgOperand(0)->getType())->getNumElements();
    Value *First = getShadow(&I, 0);
    Value *Second = getShadow(&I, 1);
    Value *OrShadow = IRB.CreateOr(First, Second);
    // Upper elements of the first operand, lowest elements OR'd together.
    SmallVector<int, 16> Mask;
    Mask.push_back(Width);
    for (unsigned i = 1; i < Width; i++)
      Mask.push_back(i);
    Value *Shadow = IRB.CreateShuffleVector(First, OrShadow, Mask);

    setShadow(&I, Shadow);
    setOriginForNaryOp(I);
  }
  // Instrument avx/sse41 round.pd/round.ps: shadow propagates elementwise,
  // and the rounding-control operand (a constant) is ignored.
  void handleRoundPdPsIntrinsic(IntrinsicInst &I) {
    assert(I.getArgOperand(0)->getType() == I.getType());
    assert(I.arg_size() == 2);
    assert(isa<ConstantInt>(I.getArgOperand(1)));

    IRBuilder<> IRB(&I);
    ShadowAndOriginCombiner SC(this, IRB);
    SC.Add(I.getArgOperand(0));
    SC.Done(&I);
  }
  void handleAbsIntrinsic(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Src = I.getArgOperand(0);
    Value *IsIntMinPoison = I.getArgOperand(1);

    assert(I.getType()->isIntOrIntVectorTy());
    assert(Src->getType() == I.getType());
    assert(IsIntMinPoison->getType()->isIntegerTy(1));

    // If the is_int_min_poison flag is set, abs(INT_MIN) is poison.
    Value *SrcShadow = getShadow(Src);

    APInt MinVal =
        APInt::getSignedMinValue(Src->getType()->getScalarSizeInBits());
    Value *MinValVec = ConstantInt::get(Src->getType(), MinVal);
    Value *SrcIsMin = IRB.CreateICmpEQ(Src, MinValVec);

    Value *PoisonedShadow = getPoisonedShadow(Src);
    Value *PoisonedIfIntMinShadow =
        IRB.CreateSelect(SrcIsMin, PoisonedShadow, SrcShadow);
    Value *Shadow =
        IRB.CreateSelect(IsIntMinPoison, PoisonedIfIntMinShadow, SrcShadow);

    setShadow(&I, Shadow);
    setOrigin(&I, getOrigin(&I, 0));
  }
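  // llvm.is.fpclass(x, k): the result is poisoned iff any bit of x's shadow
  // is set, so the result shadow is simply "icmp ne Sx, 0" (an i1 or vector
  // of i1, matching the result type).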
  void handleIsFpClass(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Shadow = getShadow(&I, 0);
    setShadow(&I, IRB.CreateICmpNE(Shadow, getCleanShadow(Shadow)));
    setOrigin(&I, getOrigin(&I, 0));
  }
  void handleArithmeticWithOverflow(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Shadow0 = getShadow(&I, 0);
    Value *Shadow1 = getShadow(&I, 1);
    Value *ShadowElt0 = IRB.CreateOr(Shadow0, Shadow1);
    Value *ShadowElt1 =
        IRB.CreateICmpNE(ShadowElt0, getCleanShadow(ShadowElt0));

    Value *Shadow = PoisonValue::get(getShadowTy(&I));
    Shadow = IRB.CreateInsertValue(Shadow, ShadowElt0, 0);
    Shadow = IRB.CreateInsertValue(Shadow, ShadowElt1, 1);

    setShadow(&I, Shadow);
    setOriginForNaryOp(I);
  }
  // (Of the next helper, only this statement survives in this listing:)
  //   Value *Shadow = getShadow(V);
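  // Instrument AVX512 down-converting (narrowing) masked moves, e.g.
  // (illustrative signature):
  //   <16 x i8> @llvm.x86.avx512.mask.pmov.db.512
  //                 (<16 x i32>, <16 x i8>, i16)
  // The shadow of A is truncated the same way the data is, then selected
  // against the writethrough operand's shadow under the mask.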
  void handleAVX512VectorDownConvert(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);

    assert(I.arg_size() == 3);
    Value *A = I.getOperand(0);
    Value *WriteThrough = I.getOperand(1);
    Value *Mask = I.getOperand(2);

    assert(isFixedIntVector(A));
    assert(isFixedIntVector(WriteThrough));

    unsigned ANumElements =
        cast<FixedVectorType>(A->getType())->getNumElements();
    unsigned OutputNumElements =
        cast<FixedVectorType>(WriteThrough->getType())->getNumElements();
    assert(ANumElements == OutputNumElements ||
           ANumElements * 2 == OutputNumElements);

    assert(Mask->getType()->isIntegerTy());
    assert(Mask->getType()->getScalarSizeInBits() == ANumElements);
    insertCheckShadowOf(Mask, &I);

    // If the output is wider than A, the instruction zeroes the upper half of
    // the output; its shadow is clean, so widening the mask with zeros keeps
    // the select below correct (the extended AShadow is zero there too).
    if (ANumElements != OutputNumElements) {
      Mask = IRB.CreateZExt(Mask, Type::getIntNTy(*MS.C, OutputNumElements),
                            "_ms_widen_mask");
    }

    Value *AShadow = getShadow(A);

    // The return type may be narrower than the input, e.g. <16 x i8> from
    // <16 x i32>; truncate the shadow accordingly, then extend it with zeros
    // (i.e., fully initialized) if the output vector is longer.
    VectorType *ShadowType = maybeShrinkVectorShadowType(A, I);
    AShadow = IRB.CreateTrunc(AShadow, ShadowType, "_ms_trunc_shadow");
    AShadow = maybeExtendVectorShadowWithZeros(AShadow, I);

    Value *WriteThroughShadow = getShadow(WriteThrough);
    Value *MaskVec = IRB.CreateBitCast(
        Mask, FixedVectorType::get(IRB.getInt1Ty(), OutputNumElements));
    Value *Shadow = IRB.CreateSelect(MaskVec, AShadow, WriteThroughShadow);

    setShadow(&I, Shadow);
    setOriginForNaryOp(I);
  }
  // Instrument AVX512 intrinsics of the generic masked-FP shape:
  //   Out[i] = Mask[i] ? Op(A)[i] : WriteThru[i]
  void handleAVX512VectorGenericMaskedFP(IntrinsicInst &I, unsigned AIndex,
                                         unsigned WriteThruIndex,
                                         unsigned MaskIndex) {
    IRBuilder<> IRB(&I);

    unsigned NumArgs = I.arg_size();
    assert(AIndex < NumArgs);
    assert(WriteThruIndex < NumArgs);
    assert(MaskIndex < NumArgs);
    assert(AIndex != WriteThruIndex);
    assert(AIndex != MaskIndex);
    assert(WriteThruIndex != MaskIndex);

    Value *A = I.getOperand(AIndex);
    Value *WriteThru = I.getOperand(WriteThruIndex);
    Value *Mask = I.getOperand(MaskIndex);

    assert(isFixedFPVector(A));
    assert(isFixedFPVector(WriteThru));

    [[maybe_unused]] unsigned ANumElements =
        cast<FixedVectorType>(A->getType())->getNumElements();
    unsigned OutputNumElements =
        cast<FixedVectorType>(WriteThru->getType())->getNumElements();
    assert(ANumElements == OutputNumElements);

    // All other operands (e.g., rounding control), including the mask, are
    // integers and are checked strictly.
    for (unsigned i = 0; i < NumArgs; ++i) {
      if (i != AIndex && i != WriteThruIndex) {
        assert(I.getOperand(i)->getType()->isIntegerTy());
        insertCheckShadowOf(I.getOperand(i), &I);
      }
    }

    // The mask is an i8 even for vectors with fewer than 8 elements.
    if (Mask->getType()->getScalarSizeInBits() == 8 && ANumElements < 8)
      Mask = IRB.CreateTrunc(Mask, Type::getIntNTy(*MS.C, ANumElements));
    assert(Mask->getType()->getScalarSizeInBits() == ANumElements);

    Mask = IRB.CreateBitCast(
        Mask, FixedVectorType::get(IRB.getInt1Ty(), OutputNumElements));

    Value *AShadow = getShadow(A);
    Value *WriteThruShadow = getShadow(WriteThru);
    Value *Shadow = IRB.CreateSelect(Mask, AShadow, WriteThruShadow);

    setShadow(&I, Shadow);
    setOriginForNaryOp(I);
  }
  // Extract the shadow of the lowest vector element.
  Value *extractLowerShadow(IRBuilder<> &IRB, Value *V) {
    return IRB.CreateExtractElement(getShadow(V), uint64_t(0));
  }

  // Instrument AVX512-FP16 scalar (lower-element) masked operations: the
  // operation applies to the lowest half-precision element only; the upper
  // elements are copied from A.
  void visitGenericScalarHalfwordInst(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);

    assert(I.arg_size() == 5);
    Value *A = I.getOperand(0);
    Value *B = I.getOperand(1);
    Value *WriteThrough = I.getOperand(2);
    Value *Mask = I.getOperand(3);

    insertCheckShadowOf(Mask, &I);

    unsigned NumElements =
        cast<FixedVectorType>(A->getType())->getNumElements();
    assert(NumElements == 8);
    assert(A->getType() == B->getType());
    assert(WriteThrough->getType() == A->getType());
    assert(Mask->getType()->getPrimitiveSizeInBits() == NumElements);

    Value *ALowerShadow = extractLowerShadow(IRB, A);
    Value *BLowerShadow = extractLowerShadow(IRB, B);

    Value *ABLowerShadow = IRB.CreateOr(ALowerShadow, BLowerShadow);

    Value *WriteThroughLowerShadow = extractLowerShadow(IRB, WriteThrough);

    // Only the lowest bit of the mask affects the lower element.
    Value *MaskLower = IRB.CreateTrunc(Mask, IRB.getInt1Ty());

    Value *AShadow = getShadow(A);
    Value *DstLowerShadow =
        IRB.CreateSelect(MaskLower, ABLowerShadow, WriteThroughLowerShadow);
    Value *DstShadow = IRB.CreateInsertElement(
        AShadow, DstLowerShadow, ConstantInt::get(IRB.getInt32Ty(), 0));

    setShadow(&I, DstShadow);
    setOriginForNaryOp(I);
  }
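  // Instrument vgf2p8affineqb: out = affine(x, A) ^ b over GF(2^8).
  // Since the transform is linear in both x and A, applying the intrinsic to
  // (Sx, A), (x, SA) and (Sx, SA) with a clean b-shadow and OR'ing the
  // results (plus a broadcast of b's shadow) yields a sound and reasonably
  // tight result shadow.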
  void handleAVXGF2P8Affine(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);

    assert(I.arg_size() == 3);
    Value *X = I.getArgOperand(0);
    Value *A = I.getArgOperand(1);
    Value *B = I.getArgOperand(2);

    assert(isFixedIntVector(X));
    assert(cast<VectorType>(X->getType())
               ->getElementType()
               ->getScalarSizeInBits() == 8);

    assert(A->getType() == X->getType());

    assert(B->getType()->isIntegerTy());
    assert(B->getType()->getScalarSizeInBits() == 8);

    assert(I.getType() == A->getType());

    Value *AShadow = getShadow(A);
    Value *XShadow = getShadow(X);
    Value *BZeroShadow = getCleanShadow(B);

    CallInst *AShadowXShadow = IRB.CreateIntrinsic(
        I.getType(), I.getIntrinsicID(), {XShadow, AShadow, BZeroShadow});
    CallInst *AShadowX = IRB.CreateIntrinsic(I.getType(), I.getIntrinsicID(),
                                             {X, AShadow, BZeroShadow});
    CallInst *XShadowA = IRB.CreateIntrinsic(I.getType(), I.getIntrinsicID(),
                                             {XShadow, A, BZeroShadow});

    unsigned NumElements =
        cast<FixedVectorType>(I.getType())->getNumElements();
    Value *BShadow = getShadow(B);
    Value *BBroadcastShadow = getCleanShadow(AShadow);
    for (unsigned i = 0; i < NumElements; i++)
      BBroadcastShadow = IRB.CreateInsertElement(BBroadcastShadow, BShadow, i);

    setShadow(&I, IRB.CreateOr(
                      {AShadowXShadow, AShadowX, XShadowA, BBroadcastShadow}));
    setOriginForNaryOp(I);
  }
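  // Instrument NEON structured loads: ld{2,3,4}, ld1x{2,3,4}, ld{2,3,4}r and
  // their *lane variants. The result shadow is obtained by applying the same
  // load intrinsic to shadow memory; for the lane variants the lane number is
  // passed through verbatim, with its shadow checked (an uninitialized lane
  // number is undefined behavior).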
  void handleNEONVectorLoad(IntrinsicInst &I, bool WithLane) {
    unsigned int numArgs = I.arg_size();

    // Return type is a struct of vectors of integers or floating-point values.
    assert(I.getType()->isStructTy());

    IRBuilder<> IRB(&I);
    SmallVector<Value *, 6> ShadowArgs;

    if (WithLane) {
      // 2, 3 or 4 input vectors, plus the lane number and the pointer.
      assert(4 <= numArgs && numArgs <= 6);

      // Pass the shadows of the input vectors.
      for (unsigned int i = 0; i < numArgs - 2; i++)
        ShadowArgs.push_back(getShadow(I.getArgOperand(i)));

      // Pass the lane number verbatim, but check its shadow.
      Value *LaneNumber = I.getArgOperand(numArgs - 2);
      ShadowArgs.push_back(LaneNumber);
      insertCheckShadowOf(LaneNumber, &I);
    }

    Value *Src = I.getArgOperand(numArgs - 1);
    assert(Src->getType()->isPointerTy() && "Source is not a pointer!");

    Type *SrcShadowTy = getShadowTy(Src);
    auto [SrcShadowPtr, SrcOriginPtr] =
        getShadowOriginPtr(Src, IRB, SrcShadowTy, Align(1), /*isStore*/ false);
    ShadowArgs.push_back(SrcShadowPtr);

    // Apply the same intrinsic to shadow memory to compute the result shadow.
    CallInst *CI =
        IRB.CreateIntrinsic(getShadowTy(&I), I.getIntrinsicID(), ShadowArgs);
    setShadow(&I, CI);

    if (!MS.TrackOrigins)
      return;

    // Approximation only: use the origin of the source memory.
    Value *PtrSrcOrigin = IRB.CreateLoad(MS.OriginTy, SrcOriginPtr);
    setOrigin(&I, PtrSrcOrigin);
  }
  // Instrument NEON structured stores: st{2,3,4}, st1x{2,3,4} and their
  // *lane variants. The shadows of the stored vectors are stored to shadow
  // memory by applying the same intrinsic.
  void handleNEONVectorStoreIntrinsic(IntrinsicInst &I, bool useLane) {
    IRBuilder<> IRB(&I);

    // Don't use getNumOperands() because it includes the callee.
    int numArgOperands = I.arg_size();

    // The last argument is the output pointer.
    assert(numArgOperands >= 1);
    Value *Addr = I.getArgOperand(numArgOperands - 1);
    assert(Addr->getType()->isPointerTy());
    int skipTrailingOperands = 1;

    if (ClCheckAccessAddress)
      insertCheckShadowOf(Addr, &I);

    // The second-to-last argument is the lane number (for vst{2,3,4}lane).
    if (useLane) {
      skipTrailingOperands++;
      assert(numArgOperands >= static_cast<int>(skipTrailingOperands));
      assert(isa<IntegerType>(
          I.getArgOperand(numArgOperands - skipTrailingOperands)->getType()));
    }

    SmallVector<Value *, 8> ShadowArgs;
    // All the initial operands are the inputs.
    for (int i = 0; i < numArgOperands - skipTrailingOperands; i++) {
      assert(isa<FixedVectorType>(I.getArgOperand(i)->getType()));
      Value *Shadow = getShadow(&I, i);
      ShadowArgs.append(1, Shadow);
    }

    // The size of the written memory is the concatenation of all the input
    // vectors.
    FixedVectorType *OutputVectorTy = FixedVectorType::get(
        cast<FixedVectorType>(I.getArgOperand(0)->getType())->getElementType(),
        cast<FixedVectorType>(I.getArgOperand(0)->getType())
                ->getNumElements() *
            (numArgOperands - skipTrailingOperands));
    Type *OutputShadowTy = getShadowTy(OutputVectorTy);

    if (useLane)
      ShadowArgs.append(
          1, I.getArgOperand(numArgOperands - skipTrailingOperands));

    Value *OutputShadowPtr, *OutputOriginPtr;
    // AArch64 NEON does not require alignment (unless the OS does).
    std::tie(OutputShadowPtr, OutputOriginPtr) = getShadowOriginPtr(
        Addr, IRB, OutputShadowTy, Align(1), /*isStore*/ true);
    ShadowArgs.append(1, OutputShadowPtr);

    IRB.CreateIntrinsic(IRB.getVoidTy(), I.getIntrinsicID(), ShadowArgs);

    if (MS.TrackOrigins) {
      // TODO: if we modelled the vst* instruction more precisely, we could
      // more accurately track the origins (e.g., for the lane variants).
      OriginCombiner OC(this, IRB);
      for (int i = 0; i < numArgOperands - skipTrailingOperands; i++)
        OC.Add(I.getArgOperand(i));

      const DataLayout &DL = F.getDataLayout();
      OC.DoneAndStoreOrigin(DL.getTypeStoreSize(OutputVectorTy),
                            OutputOriginPtr);
    }
  }
  /// Handle an intrinsic by applying it (or a substitute intrinsic,
  /// shadowIntrinsicID) to the shadows of the leading operands; the trailing
  /// trailingVerbatimArgs operands are passed through unchanged, and their
  /// shadows are OR'd into the result shadow afterwards.
  void handleIntrinsicByApplyingToShadow(IntrinsicInst &I,
                                         Intrinsic::ID shadowIntrinsicID,
                                         unsigned int trailingVerbatimArgs) {
    IRBuilder<> IRB(&I);

    assert(trailingVerbatimArgs < I.arg_size());

    SmallVector<Value *, 8> ShadowArgs;
    for (unsigned int i = 0; i < I.arg_size() - trailingVerbatimArgs; i++) {
      Value *Shadow = getShadow(&I, i);

      // Shadows are integer-ish types but some intrinsics require a
      // different (e.g., floating-point) type.
      ShadowArgs.push_back(
          IRB.CreateBitCast(Shadow, I.getArgOperand(i)->getType()));
    }

    for (unsigned int i = I.arg_size() - trailingVerbatimArgs; i < I.arg_size();
         i++) {
      Value *Arg = I.getArgOperand(i);
      ShadowArgs.push_back(Arg);
    }

    CallInst *CI =
        IRB.CreateIntrinsic(I.getType(), shadowIntrinsicID, ShadowArgs);
    Value *CombinedShadow = CI;

    // Combine the computed shadow with the shadows of the trailing args.
    for (unsigned int i = I.arg_size() - trailingVerbatimArgs; i < I.arg_size();
         i++) {
      Value *Shadow =
          CreateShadowCast(IRB, getShadow(&I, i), CombinedShadow->getType());
      CombinedShadow = IRB.CreateOr(Shadow, CombinedShadow, "_msprop");
    }

    setShadow(&I, IRB.CreateBitCast(CombinedShadow, getShadowTy(&I)));

    setOriginForNaryOp(I);
  }
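  // Approximation only: the operand shadows are OR'ed together (via
  // handleShadowOr), ignoring the multiplicative structure of fmulx/pmul/
  // pmull/smull/umull; e.g., multiplying a poisoned value by zero is still
  // reported, which can over-poison.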
  void handleNEONVectorMultiplyIntrinsic(IntrinsicInst &I) {
    handleShadowOr(I);
  }
  // Handle cross-platform (target-independent) intrinsics. Returns true if
  // the intrinsic was handled.
  bool maybeHandleCrossPlatformIntrinsic(IntrinsicInst &I) {
    switch (I.getIntrinsicID()) {
    case Intrinsic::uadd_with_overflow:
    case Intrinsic::sadd_with_overflow:
    case Intrinsic::usub_with_overflow:
    case Intrinsic::ssub_with_overflow:
    case Intrinsic::umul_with_overflow:
    case Intrinsic::smul_with_overflow:
      handleArithmeticWithOverflow(I);
      break;
    case Intrinsic::abs:
      handleAbsIntrinsic(I);
      break;
    case Intrinsic::bitreverse:
      handleIntrinsicByApplyingToShadow(I, I.getIntrinsicID(),
                                        /*trailingVerbatimArgs=*/0);
      break;
    case Intrinsic::is_fpclass:
      handleIsFpClass(I);
      break;
    case Intrinsic::lifetime_start:
      handleLifetimeStart(I);
      break;
    case Intrinsic::launder_invariant_group:
    case Intrinsic::strip_invariant_group:
      handleInvariantGroup(I);
      break;
    case Intrinsic::bswap:
      handleBswap(I);
      break;
    case Intrinsic::ctlz:
    case Intrinsic::cttz:
      handleCountLeadingTrailingZeros(I);
      break;
    case Intrinsic::masked_compressstore:
      handleMaskedCompressStore(I);
      break;
    case Intrinsic::masked_expandload:
      handleMaskedExpandLoad(I);
      break;
    case Intrinsic::masked_gather:
      handleMaskedGather(I);
      break;
    case Intrinsic::masked_scatter:
      handleMaskedScatter(I);
      break;
    case Intrinsic::masked_store:
      handleMaskedStore(I);
      break;
    case Intrinsic::masked_load:
      handleMaskedLoad(I);
      break;
    case Intrinsic::vector_reduce_and:
      handleVectorReduceAndIntrinsic(I);
      break;
    case Intrinsic::vector_reduce_or:
      handleVectorReduceOrIntrinsic(I);
      break;
    case Intrinsic::vector_reduce_add:
    case Intrinsic::vector_reduce_xor:
    case Intrinsic::vector_reduce_mul:
    // Signed/unsigned min/max reductions.
    case Intrinsic::vector_reduce_smax:
    case Intrinsic::vector_reduce_smin:
    case Intrinsic::vector_reduce_umax:
    case Intrinsic::vector_reduce_umin:
    // Floating-point min/max reductions.
    case Intrinsic::vector_reduce_fmax:
    case Intrinsic::vector_reduce_fmin:
      handleVectorReduceIntrinsic(I, false);
      break;
    case Intrinsic::vector_reduce_fadd:
    case Intrinsic::vector_reduce_fmul:
      handleVectorReduceWithStarterIntrinsic(I);
      break;
    case Intrinsic::scmp:
    case Intrinsic::ucmp: {
      handleShadowOr(I);
      break;
    }
    case Intrinsic::fshl:
    case Intrinsic::fshr:
      handleFunnelShift(I);
      break;
    case Intrinsic::is_constant:
      // The result of llvm.is.constant() is always defined.
      setShadow(&I, getCleanShadow(&I));
      setOrigin(&I, getCleanOrigin());
      break;
    default:
      return false;
    }
    return true;
  }
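  // Returns true if the intrinsic was recognized and instrumented. The
  // dispatch below covers x86 SSE/AVX/AVX512 SIMD intrinsics; anything not
  // listed falls through to the heuristic or strict handling in
  // visitIntrinsicInst().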
  bool maybeHandleX86SIMDIntrinsic(IntrinsicInst &I) {
    switch (I.getIntrinsicID()) {
    case Intrinsic::x86_sse_stmxcsr:
      handleStmxcsr(I);
      break;
    case Intrinsic::x86_sse_ldmxcsr:
      handleLdmxcsr(I);
      break;

    case Intrinsic::x86_avx512_vcvtsd2usi64:
    case Intrinsic::x86_avx512_vcvtsd2usi32:
    case Intrinsic::x86_avx512_vcvtss2usi64:
    case Intrinsic::x86_avx512_vcvtss2usi32:
    case Intrinsic::x86_avx512_cvttss2usi64:
    case Intrinsic::x86_avx512_cvttss2usi:
    case Intrinsic::x86_avx512_cvttsd2usi64:
    case Intrinsic::x86_avx512_cvttsd2usi:
    case Intrinsic::x86_avx512_cvtusi2ss:
    case Intrinsic::x86_avx512_cvtusi642sd:
    case Intrinsic::x86_avx512_cvtusi642ss:
      handleSSEVectorConvertIntrinsic(I, 1, true);
      break;
    case Intrinsic::x86_sse2_cvtsd2si64:
    case Intrinsic::x86_sse2_cvtsd2si:
    case Intrinsic::x86_sse2_cvtsd2ss:
    case Intrinsic::x86_sse2_cvttsd2si64:
    case Intrinsic::x86_sse2_cvttsd2si:
    case Intrinsic::x86_sse_cvtss2si64:
    case Intrinsic::x86_sse_cvtss2si:
    case Intrinsic::x86_sse_cvttss2si64:
    case Intrinsic::x86_sse_cvttss2si:
      handleSSEVectorConvertIntrinsic(I, 1);
      break;
    case Intrinsic::x86_sse_cvtps2pi:
    case Intrinsic::x86_sse_cvttps2pi:
      handleSSEVectorConvertIntrinsic(I, 2);
      break;

    case Intrinsic::x86_vcvtps2ph_128:
    case Intrinsic::x86_vcvtps2ph_256: {
      handleSSEVectorConvertIntrinsicByProp(I, true);
      break;
    }

    case Intrinsic::x86_avx512_mask_cvtps2dq_512:
      handleAVX512VectorConvertFPToInt(I, /*LastMask=*/false);
      break;

    case Intrinsic::x86_sse2_cvtpd2ps:
    case Intrinsic::x86_sse2_cvtps2dq:
    case Intrinsic::x86_sse2_cvtpd2dq:
    case Intrinsic::x86_sse2_cvttps2dq:
    case Intrinsic::x86_sse2_cvttpd2dq:
    case Intrinsic::x86_avx_cvt_pd2_ps_256:
    case Intrinsic::x86_avx_cvt_ps2dq_256:
    case Intrinsic::x86_avx_cvt_pd2dq_256:
    case Intrinsic::x86_avx_cvtt_ps2dq_256:
    case Intrinsic::x86_avx_cvtt_pd2dq_256: {
      handleSSEVectorConvertIntrinsicByProp(I, false);
      break;
    }

    case Intrinsic::x86_avx512_mask_vcvtps2ph_512:
    case Intrinsic::x86_avx512_mask_vcvtps2ph_256:
    case Intrinsic::x86_avx512_mask_vcvtps2ph_128:
      handleAVX512VectorConvertFPToInt(I, /*LastMask=*/true);
      break;

    case Intrinsic::x86_avx512_psll_w_512:
    case Intrinsic::x86_avx512_psll_d_512:
    case Intrinsic::x86_avx512_psll_q_512:
    case Intrinsic::x86_avx512_pslli_w_512:
    case Intrinsic::x86_avx512_pslli_d_512:
    case Intrinsic::x86_avx512_pslli_q_512:
    case Intrinsic::x86_avx512_psrl_w_512:
    case Intrinsic::x86_avx512_psrl_d_512:
    case Intrinsic::x86_avx512_psrl_q_512:
    case Intrinsic::x86_avx512_psra_w_512:
    case Intrinsic::x86_avx512_psra_d_512:
    case Intrinsic::x86_avx512_psra_q_512:
    case Intrinsic::x86_avx512_psrli_w_512:
    case Intrinsic::x86_avx512_psrli_d_512:
    case Intrinsic::x86_avx512_psrli_q_512:
    case Intrinsic::x86_avx512_psrai_w_512:
    case Intrinsic::x86_avx512_psrai_d_512:
    case Intrinsic::x86_avx512_psrai_q_512:
    case Intrinsic::x86_avx512_psra_q_256:
    case Intrinsic::x86_avx512_psra_q_128:
    case Intrinsic::x86_avx512_psrai_q_256:
    case Intrinsic::x86_avx512_psrai_q_128:
    case Intrinsic::x86_avx2_psll_w:
    case Intrinsic::x86_avx2_psll_d:
    case Intrinsic::x86_avx2_psll_q:
    case Intrinsic::x86_avx2_pslli_w:
    case Intrinsic::x86_avx2_pslli_d:
    case Intrinsic::x86_avx2_pslli_q:
    case Intrinsic::x86_avx2_psrl_w:
    case Intrinsic::x86_avx2_psrl_d:
    case Intrinsic::x86_avx2_psrl_q:
    case Intrinsic::x86_avx2_psra_w:
    case Intrinsic::x86_avx2_psra_d:
    case Intrinsic::x86_avx2_psrli_w:
    case Intrinsic::x86_avx2_psrli_d:
    case Intrinsic::x86_avx2_psrli_q:
    case Intrinsic::x86_avx2_psrai_w:
    case Intrinsic::x86_avx2_psrai_d:
    case Intrinsic::x86_sse2_psll_w:
    case Intrinsic::x86_sse2_psll_d:
    case Intrinsic::x86_sse2_psll_q:
    case Intrinsic::x86_sse2_pslli_w:
    case Intrinsic::x86_sse2_pslli_d:
    case Intrinsic::x86_sse2_pslli_q:
    case Intrinsic::x86_sse2_psrl_w:
    case Intrinsic::x86_sse2_psrl_d:
    case Intrinsic::x86_sse2_psrl_q:
    case Intrinsic::x86_sse2_psra_w:
    case Intrinsic::x86_sse2_psra_d:
    case Intrinsic::x86_sse2_psrli_w:
    case Intrinsic::x86_sse2_psrli_d:
    case Intrinsic::x86_sse2_psrli_q:
    case Intrinsic::x86_sse2_psrai_w:
    case Intrinsic::x86_sse2_psrai_d:
    case Intrinsic::x86_mmx_psll_w:
    case Intrinsic::x86_mmx_psll_d:
    case Intrinsic::x86_mmx_psll_q:
    case Intrinsic::x86_mmx_pslli_w:
    case Intrinsic::x86_mmx_pslli_d:
    case Intrinsic::x86_mmx_pslli_q:
    case Intrinsic::x86_mmx_psrl_w:
    case Intrinsic::x86_mmx_psrl_d:
    case Intrinsic::x86_mmx_psrl_q:
    case Intrinsic::x86_mmx_psra_w:
    case Intrinsic::x86_mmx_psra_d:
    case Intrinsic::x86_mmx_psrli_w:
    case Intrinsic::x86_mmx_psrli_d:
    case Intrinsic::x86_mmx_psrli_q:
    case Intrinsic::x86_mmx_psrai_w:
    case Intrinsic::x86_mmx_psrai_d:
      handleVectorShiftIntrinsic(I, /*Variable=*/false);
      break;
    case Intrinsic::x86_avx2_psllv_d:
    case Intrinsic::x86_avx2_psllv_d_256:
    case Intrinsic::x86_avx512_psllv_d_512:
    case Intrinsic::x86_avx2_psllv_q:
    case Intrinsic::x86_avx2_psllv_q_256:
    case Intrinsic::x86_avx512_psllv_q_512:
    case Intrinsic::x86_avx2_psrlv_d:
    case Intrinsic::x86_avx2_psrlv_d_256:
    case Intrinsic::x86_avx512_psrlv_d_512:
    case Intrinsic::x86_avx2_psrlv_q:
    case Intrinsic::x86_avx2_psrlv_q_256:
    case Intrinsic::x86_avx512_psrlv_q_512:
    case Intrinsic::x86_avx2_psrav_d:
    case Intrinsic::x86_avx2_psrav_d_256:
    case Intrinsic::x86_avx512_psrav_d_512:
    case Intrinsic::x86_avx512_psrav_q_128:
    case Intrinsic::x86_avx512_psrav_q_256:
    case Intrinsic::x86_avx512_psrav_q_512:
      handleVectorShiftIntrinsic(I, /*Variable=*/true);
      break;

    case Intrinsic::x86_sse2_packsswb_128:
    case Intrinsic::x86_sse2_packssdw_128:
    case Intrinsic::x86_sse2_packuswb_128:
    case Intrinsic::x86_sse41_packusdw:
    case Intrinsic::x86_avx2_packsswb:
    case Intrinsic::x86_avx2_packssdw:
    case Intrinsic::x86_avx2_packuswb:
    case Intrinsic::x86_avx2_packusdw:
    case Intrinsic::x86_avx512_packsswb_512:
    case Intrinsic::x86_avx512_packssdw_512:
    case Intrinsic::x86_avx512_packuswb_512:
    case Intrinsic::x86_avx512_packusdw_512:
      handleVectorPackIntrinsic(I);
      break;

    case Intrinsic::x86_sse41_pblendvb:
    case Intrinsic::x86_sse41_blendvpd:
    case Intrinsic::x86_sse41_blendvps:
    case Intrinsic::x86_avx_blendv_pd_256:
    case Intrinsic::x86_avx_blendv_ps_256:
    case Intrinsic::x86_avx2_pblendvb:
      handleBlendvIntrinsic(I);
      break;

    case Intrinsic::x86_avx_dp_ps_256:
    case Intrinsic::x86_sse41_dppd:
    case Intrinsic::x86_sse41_dpps:
      handleDppIntrinsic(I);
      break;

    case Intrinsic::x86_mmx_packsswb:
    case Intrinsic::x86_mmx_packuswb:
      handleVectorPackIntrinsic(I, 16);
      break;

    case Intrinsic::x86_mmx_packssdw:
      handleVectorPackIntrinsic(I, 32);
      break;

    case Intrinsic::x86_mmx_psad_bw:
      handleVectorSadIntrinsic(I, true);
      break;
    case Intrinsic::x86_sse2_psad_bw:
    case Intrinsic::x86_avx2_psad_bw:
      handleVectorSadIntrinsic(I);
      break;

    case Intrinsic::x86_sse2_pmadd_wd:
    case Intrinsic::x86_avx2_pmadd_wd:
    case Intrinsic::x86_avx512_pmaddw_d_512:
    case Intrinsic::x86_ssse3_pmadd_ub_sw_128:
    case Intrinsic::x86_avx2_pmadd_ub_sw:
    case Intrinsic::x86_avx512_pmaddubs_w_512:
      handleVectorPmaddIntrinsic(I, 2);
      break;

    case Intrinsic::x86_ssse3_pmadd_ub_sw:
      handleVectorPmaddIntrinsic(I, 2, 8);
      break;

    case Intrinsic::x86_mmx_pmadd_wd:
      handleVectorPmaddIntrinsic(I, 2, 16);
      break;

    case Intrinsic::x86_avx512_vpdpbusd_128:
    case Intrinsic::x86_avx512_vpdpbusd_256:
    case Intrinsic::x86_avx512_vpdpbusd_512:
    case Intrinsic::x86_avx512_vpdpbusds_128:
    case Intrinsic::x86_avx512_vpdpbusds_256:
    case Intrinsic::x86_avx512_vpdpbusds_512:
    case Intrinsic::x86_avx2_vpdpbssd_128:
    case Intrinsic::x86_avx2_vpdpbssd_256:
    case Intrinsic::x86_avx10_vpdpbssd_512:
    case Intrinsic::x86_avx2_vpdpbssds_128:
    case Intrinsic::x86_avx2_vpdpbssds_256:
    case Intrinsic::x86_avx10_vpdpbssds_512:
    case Intrinsic::x86_avx2_vpdpbsud_128:
    case Intrinsic::x86_avx2_vpdpbsud_256:
    case Intrinsic::x86_avx10_vpdpbsud_512:
    case Intrinsic::x86_avx2_vpdpbsuds_128:
    case Intrinsic::x86_avx2_vpdpbsuds_256:
    case Intrinsic::x86_avx10_vpdpbsuds_512:
    case Intrinsic::x86_avx2_vpdpbuud_128:
    case Intrinsic::x86_avx2_vpdpbuud_256:
    case Intrinsic::x86_avx10_vpdpbuud_512:
    case Intrinsic::x86_avx2_vpdpbuuds_128:
    case Intrinsic::x86_avx2_vpdpbuuds_256:
    case Intrinsic::x86_avx10_vpdpbuuds_512:
      handleVectorPmaddIntrinsic(I, 4, 8);
      break;

    case Intrinsic::x86_avx512_vpdpwssd_128:
    case Intrinsic::x86_avx512_vpdpwssd_256:
    case Intrinsic::x86_avx512_vpdpwssd_512:
    case Intrinsic::x86_avx512_vpdpwssds_128:
    case Intrinsic::x86_avx512_vpdpwssds_256:
    case Intrinsic::x86_avx512_vpdpwssds_512:
      handleVectorPmaddIntrinsic(I, 2, 16);
      break;

    case Intrinsic::x86_sse_cmp_ss:
    case Intrinsic::x86_sse2_cmp_sd:
    case Intrinsic::x86_sse_comieq_ss:
    case Intrinsic::x86_sse_comilt_ss:
    case Intrinsic::x86_sse_comile_ss:
    case Intrinsic::x86_sse_comigt_ss:
    case Intrinsic::x86_sse_comige_ss:
    case Intrinsic::x86_sse_comineq_ss:
    case Intrinsic::x86_sse_ucomieq_ss:
    case Intrinsic::x86_sse_ucomilt_ss:
    case Intrinsic::x86_sse_ucomile_ss:
    case Intrinsic::x86_sse_ucomigt_ss:
    case Intrinsic::x86_sse_ucomige_ss:
    case Intrinsic::x86_sse_ucomineq_ss:
    case Intrinsic::x86_sse2_comieq_sd:
    case Intrinsic::x86_sse2_comilt_sd:
    case Intrinsic::x86_sse2_comile_sd:
    case Intrinsic::x86_sse2_comigt_sd:
    case Intrinsic::x86_sse2_comige_sd:
    case Intrinsic::x86_sse2_comineq_sd:
    case Intrinsic::x86_sse2_ucomieq_sd:
    case Intrinsic::x86_sse2_ucomilt_sd:
    case Intrinsic::x86_sse2_ucomile_sd:
    case Intrinsic::x86_sse2_ucomigt_sd:
    case Intrinsic::x86_sse2_ucomige_sd:
    case Intrinsic::x86_sse2_ucomineq_sd:
      handleVectorCompareScalarIntrinsic(I);
      break;

    case Intrinsic::x86_avx_cmp_pd_256:
    case Intrinsic::x86_avx_cmp_ps_256:
    case Intrinsic::x86_sse2_cmp_pd:
    case Intrinsic::x86_sse_cmp_ps:
      handleVectorComparePackedIntrinsic(I);
      break;

    case Intrinsic::x86_bmi_bextr_32:
    case Intrinsic::x86_bmi_bextr_64:
    case Intrinsic::x86_bmi_bzhi_32:
    case Intrinsic::x86_bmi_bzhi_64:
    case Intrinsic::x86_bmi_pdep_32:
    case Intrinsic::x86_bmi_pdep_64:
    case Intrinsic::x86_bmi_pext_32:
    case Intrinsic::x86_bmi_pext_64:
      handleBmiIntrinsic(I);
      break;

    case Intrinsic::x86_pclmulqdq:
    case Intrinsic::x86_pclmulqdq_256:
    case Intrinsic::x86_pclmulqdq_512:
      handlePclmulIntrinsic(I);
      break;

    case Intrinsic::x86_avx_round_pd_256:
    case Intrinsic::x86_avx_round_ps_256:
    case Intrinsic::x86_sse41_round_pd:
    case Intrinsic::x86_sse41_round_ps:
      handleRoundPdPsIntrinsic(I);
      break;

    case Intrinsic::x86_sse41_round_sd:
    case Intrinsic::x86_sse41_round_ss:
      handleUnarySdSsIntrinsic(I);
      break;

    case Intrinsic::x86_sse2_max_sd:
    case Intrinsic::x86_sse_max_ss:
    case Intrinsic::x86_sse2_min_sd:
    case Intrinsic::x86_sse_min_ss:
      handleBinarySdSsIntrinsic(I);
      break;

    case Intrinsic::x86_avx_vtestc_pd:
    case Intrinsic::x86_avx_vtestc_pd_256:
    case Intrinsic::x86_avx_vtestc_ps:
    case Intrinsic::x86_avx_vtestc_ps_256:
    case Intrinsic::x86_avx_vtestnzc_pd:
    case Intrinsic::x86_avx_vtestnzc_pd_256:
    case Intrinsic::x86_avx_vtestnzc_ps:
    case Intrinsic::x86_avx_vtestnzc_ps_256:
    case Intrinsic::x86_avx_vtestz_pd:
    case Intrinsic::x86_avx_vtestz_pd_256:
    case Intrinsic::x86_avx_vtestz_ps:
    case Intrinsic::x86_avx_vtestz_ps_256:
    case Intrinsic::x86_avx_ptestc_256:
    case Intrinsic::x86_avx_ptestnzc_256:
    case Intrinsic::x86_avx_ptestz_256:
    case Intrinsic::x86_sse41_ptestc:
    case Intrinsic::x86_sse41_ptestnzc:
    case Intrinsic::x86_sse41_ptestz:
      handleVtestIntrinsic(I);
      break;

    case Intrinsic::x86_ssse3_phadd_w:
    case Intrinsic::x86_ssse3_phadd_w_128:
    case Intrinsic::x86_avx2_phadd_w:
    case Intrinsic::x86_ssse3_phsub_w:
    case Intrinsic::x86_ssse3_phsub_w_128:
    case Intrinsic::x86_avx2_phsub_w: {
      handlePairwiseShadowOrIntrinsic(I, 16);
      break;
    }

    case Intrinsic::x86_ssse3_phadd_d:
    case Intrinsic::x86_ssse3_phadd_d_128:
    case Intrinsic::x86_avx2_phadd_d:
    case Intrinsic::x86_ssse3_phsub_d:
    case Intrinsic::x86_ssse3_phsub_d_128:
    case Intrinsic::x86_avx2_phsub_d: {
      handlePairwiseShadowOrIntrinsic(I, 32);
      break;
    }

    case Intrinsic::x86_ssse3_phadd_sw:
    case Intrinsic::x86_ssse3_phadd_sw_128:
    case Intrinsic::x86_avx2_phadd_sw:
    case Intrinsic::x86_ssse3_phsub_sw:
    case Intrinsic::x86_ssse3_phsub_sw_128:
    case Intrinsic::x86_avx2_phsub_sw: {
      handlePairwiseShadowOrIntrinsic(I, 16);
      break;
    }

    case Intrinsic::x86_sse3_hadd_ps:
    case Intrinsic::x86_sse3_hadd_pd:
    case Intrinsic::x86_avx_hadd_pd_256:
    case Intrinsic::x86_avx_hadd_ps_256:
    case Intrinsic::x86_sse3_hsub_ps:
    case Intrinsic::x86_sse3_hsub_pd:
    case Intrinsic::x86_avx_hsub_pd_256:
    case Intrinsic::x86_avx_hsub_ps_256: {
      handlePairwiseShadowOrIntrinsic(I);
      break;
    }

    case Intrinsic::x86_avx_maskstore_ps:
    case Intrinsic::x86_avx_maskstore_pd:
    case Intrinsic::x86_avx_maskstore_ps_256:
    case Intrinsic::x86_avx_maskstore_pd_256:
    case Intrinsic::x86_avx2_maskstore_d:
    case Intrinsic::x86_avx2_maskstore_q:
    case Intrinsic::x86_avx2_maskstore_d_256:
    case Intrinsic::x86_avx2_maskstore_q_256: {
      handleAVXMaskedStore(I);
      break;
    }

    case Intrinsic::x86_avx_maskload_ps:
    case Intrinsic::x86_avx_maskload_pd:
    case Intrinsic::x86_avx_maskload_ps_256:
    case Intrinsic::x86_avx_maskload_pd_256:
    case Intrinsic::x86_avx2_maskload_d:
    case Intrinsic::x86_avx2_maskload_q:
    case Intrinsic::x86_avx2_maskload_d_256:
    case Intrinsic::x86_avx2_maskload_q_256: {
      handleAVXMaskedLoad(I);
      break;
    }

    case Intrinsic::x86_avx512fp16_add_ph_512:
    case Intrinsic::x86_avx512fp16_sub_ph_512:
    case Intrinsic::x86_avx512fp16_mul_ph_512:
    case Intrinsic::x86_avx512fp16_div_ph_512:
    case Intrinsic::x86_avx512fp16_max_ph_512:
    case Intrinsic::x86_avx512fp16_min_ph_512:
    case Intrinsic::x86_avx512_min_ps_512:
    case Intrinsic::x86_avx512_min_pd_512:
    case Intrinsic::x86_avx512_max_ps_512:
    case Intrinsic::x86_avx512_max_pd_512: {
      // These AVX512 variants have an extra trailing rounding-mode operand.
      [[maybe_unused]] bool Success =
          maybeHandleSimpleNomemIntrinsic(I, 1);
      assert(Success);
      break;
    }

    case Intrinsic::x86_avx_vpermilvar_pd:
    case Intrinsic::x86_avx_vpermilvar_pd_256:
    case Intrinsic::x86_avx512_vpermilvar_pd_512:
    case Intrinsic::x86_avx_vpermilvar_ps:
    case Intrinsic::x86_avx_vpermilvar_ps_256:
    case Intrinsic::x86_avx512_vpermilvar_ps_512: {
      handleAVXVpermilvar(I);
      break;
    }

    case Intrinsic::x86_avx512_vpermi2var_d_128:
    case Intrinsic::x86_avx512_vpermi2var_d_256:
    case Intrinsic::x86_avx512_vpermi2var_d_512:
    case Intrinsic::x86_avx512_vpermi2var_hi_128:
    case Intrinsic::x86_avx512_vpermi2var_hi_256:
    case Intrinsic::x86_avx512_vpermi2var_hi_512:
    case Intrinsic::x86_avx512_vpermi2var_pd_128:
    case Intrinsic::x86_avx512_vpermi2var_pd_256:
    case Intrinsic::x86_avx512_vpermi2var_pd_512:
    case Intrinsic::x86_avx512_vpermi2var_ps_128:
    case Intrinsic::x86_avx512_vpermi2var_ps_256:
    case Intrinsic::x86_avx512_vpermi2var_ps_512:
    case Intrinsic::x86_avx512_vpermi2var_q_128:
    case Intrinsic::x86_avx512_vpermi2var_q_256:
    case Intrinsic::x86_avx512_vpermi2var_q_512:
    case Intrinsic::x86_avx512_vpermi2var_qi_128:
    case Intrinsic::x86_avx512_vpermi2var_qi_256:
    case Intrinsic::x86_avx512_vpermi2var_qi_512:
      handleAVXVpermi2var(I);
      break;

    case Intrinsic::x86_avx2_pshuf_b:
    case Intrinsic::x86_sse_pshuf_w:
    case Intrinsic::x86_ssse3_pshuf_b_128:
    case Intrinsic::x86_ssse3_pshuf_b:
    case Intrinsic::x86_avx512_pshuf_b_512:
      handleIntrinsicByApplyingToShadow(I, I.getIntrinsicID(),
                                        /*trailingVerbatimArgs=*/1);
      break;

    case Intrinsic::x86_avx512_mask_pmov_dw_512:
    case Intrinsic::x86_avx512_mask_pmov_db_512:
    case Intrinsic::x86_avx512_mask_pmov_qb_512:
    case Intrinsic::x86_avx512_mask_pmov_qw_512: {
      // Plain truncating moves: apply the same intrinsic to the shadows.
      handleIntrinsicByApplyingToShadow(I, I.getIntrinsicID(),
                                        /*trailingVerbatimArgs=*/1);
      break;
    }

    // Saturating variants are mapped to the corresponding plain (truncating)
    // pmov: saturation preserves "any poisoned bit poisons the lane".
    case Intrinsic::x86_avx512_mask_pmovs_dw_512:
    case Intrinsic::x86_avx512_mask_pmovus_dw_512: {
      handleIntrinsicByApplyingToShadow(I,
                                        Intrinsic::x86_avx512_mask_pmov_dw_512,
                                        /*trailingVerbatimArgs=*/1);
      break;
    }

    case Intrinsic::x86_avx512_mask_pmovs_db_512:
    case Intrinsic::x86_avx512_mask_pmovus_db_512: {
      handleIntrinsicByApplyingToShadow(I,
                                        Intrinsic::x86_avx512_mask_pmov_db_512,
                                        /*trailingVerbatimArgs=*/1);
      break;
    }

    case Intrinsic::x86_avx512_mask_pmovs_qb_512:
    case Intrinsic::x86_avx512_mask_pmovus_qb_512: {
      handleIntrinsicByApplyingToShadow(I,
                                        Intrinsic::x86_avx512_mask_pmov_qb_512,
                                        /*trailingVerbatimArgs=*/1);
      break;
    }

    case Intrinsic::x86_avx512_mask_pmovs_qw_512:
    case Intrinsic::x86_avx512_mask_pmovus_qw_512: {
      handleIntrinsicByApplyingToShadow(I,
                                        Intrinsic::x86_avx512_mask_pmov_qw_512,
                                        /*trailingVerbatimArgs=*/1);
      break;
    }

    case Intrinsic::x86_avx512_mask_pmovs_qd_512:
    case Intrinsic::x86_avx512_mask_pmovus_qd_512:
    case Intrinsic::x86_avx512_mask_pmovs_wb_512:
    case Intrinsic::x86_avx512_mask_pmovus_wb_512: {
      // There is no plain pmov counterpart of these widths; instrument the
      // down-conversion explicitly.
      handleAVX512VectorDownConvert(I);
      break;
    }

    case Intrinsic::x86_avx512_rsqrt14_ps_512:
    case Intrinsic::x86_avx512_rsqrt14_ps_256:
    case Intrinsic::x86_avx512_rsqrt14_ps_128:
    case Intrinsic::x86_avx512_rsqrt14_pd_512:
    case Intrinsic::x86_avx512_rsqrt14_pd_256:
    case Intrinsic::x86_avx512_rsqrt14_pd_128:
    case Intrinsic::x86_avx10_mask_rsqrt_bf16_512:
    case Intrinsic::x86_avx10_mask_rsqrt_bf16_256:
    case Intrinsic::x86_avx10_mask_rsqrt_bf16_128:
    case Intrinsic::x86_avx512fp16_mask_rsqrt_ph_512:
    case Intrinsic::x86_avx512fp16_mask_rsqrt_ph_256:
    case Intrinsic::x86_avx512fp16_mask_rsqrt_ph_128:
      handleAVX512VectorGenericMaskedFP(I, /*AIndex=*/0, /*WriteThruIndex=*/1,
                                        /*MaskIndex=*/2);
      break;

    case Intrinsic::x86_avx512_rcp14_ps_512:
    case Intrinsic::x86_avx512_rcp14_ps_256:
    case Intrinsic::x86_avx512_rcp14_ps_128:
    case Intrinsic::x86_avx512_rcp14_pd_512:
    case Intrinsic::x86_avx512_rcp14_pd_256:
    case Intrinsic::x86_avx512_rcp14_pd_128:
    case Intrinsic::x86_avx10_mask_rcp_bf16_512:
    case Intrinsic::x86_avx10_mask_rcp_bf16_256:
    case Intrinsic::x86_avx10_mask_rcp_bf16_128:
    case Intrinsic::x86_avx512fp16_mask_rcp_ph_512:
    case Intrinsic::x86_avx512fp16_mask_rcp_ph_256:
    case Intrinsic::x86_avx512fp16_mask_rcp_ph_128:
      handleAVX512VectorGenericMaskedFP(I, /*AIndex=*/0, /*WriteThruIndex=*/1,
                                        /*MaskIndex=*/2);
      break;

    case Intrinsic::x86_avx512fp16_mask_rndscale_ph_512:
    case Intrinsic::x86_avx512fp16_mask_rndscale_ph_256:
    case Intrinsic::x86_avx512fp16_mask_rndscale_ph_128:
    case Intrinsic::x86_avx512_mask_rndscale_ps_512:
    case Intrinsic::x86_avx512_mask_rndscale_ps_256:
    case Intrinsic::x86_avx512_mask_rndscale_ps_128:
    case Intrinsic::x86_avx512_mask_rndscale_pd_512:
    case Intrinsic::x86_avx512_mask_rndscale_pd_256:
    case Intrinsic::x86_avx512_mask_rndscale_pd_128:
    case Intrinsic::x86_avx10_mask_rndscale_bf16_512:
    case Intrinsic::x86_avx10_mask_rndscale_bf16_256:
    case Intrinsic::x86_avx10_mask_rndscale_bf16_128:
      handleAVX512VectorGenericMaskedFP(I, /*AIndex=*/0, /*WriteThruIndex=*/2,
                                        /*MaskIndex=*/3);
      break;

    case Intrinsic::x86_avx512fp16_mask_add_sh_round:
    case Intrinsic::x86_avx512fp16_mask_sub_sh_round:
    case Intrinsic::x86_avx512fp16_mask_mul_sh_round:
    case Intrinsic::x86_avx512fp16_mask_div_sh_round:
    case Intrinsic::x86_avx512fp16_mask_max_sh_round:
    case Intrinsic::x86_avx512fp16_mask_min_sh_round: {
      visitGenericScalarHalfwordInst(I);
      break;
    }

    case Intrinsic::x86_vgf2p8affineqb_128:
    case Intrinsic::x86_vgf2p8affineqb_256:
    case Intrinsic::x86_vgf2p8affineqb_512:
      handleAVXGF2P8Affine(I);
      break;

    default:
      return false;
    }
    return true;
  }
  // Handle Arm NEON SIMD intrinsics. Returns true if the intrinsic was
  // handled.
  bool maybeHandleArmSIMDIntrinsic(IntrinsicInst &I) {
    switch (I.getIntrinsicID()) {
    case Intrinsic::aarch64_neon_rshrn:
    case Intrinsic::aarch64_neon_sqrshl:
    case Intrinsic::aarch64_neon_sqrshrn:
    case Intrinsic::aarch64_neon_sqrshrun:
    case Intrinsic::aarch64_neon_sqshl:
    case Intrinsic::aarch64_neon_sqshlu:
    case Intrinsic::aarch64_neon_sqshrn:
    case Intrinsic::aarch64_neon_sqshrun:
    case Intrinsic::aarch64_neon_srshl:
    case Intrinsic::aarch64_neon_sshl:
    case Intrinsic::aarch64_neon_uqrshl:
    case Intrinsic::aarch64_neon_uqrshrn:
    case Intrinsic::aarch64_neon_uqshl:
    case Intrinsic::aarch64_neon_uqshrn:
    case Intrinsic::aarch64_neon_urshl:
    case Intrinsic::aarch64_neon_ushl:
      handleVectorShiftIntrinsic(I, /*Variable=*/false);
      break;

    // Floating-point pairwise min/max.
    case Intrinsic::aarch64_neon_fmaxp:
    case Intrinsic::aarch64_neon_fminp:
    case Intrinsic::aarch64_neon_fmaxnmp:
    case Intrinsic::aarch64_neon_fminnmp:
    // Integer pairwise min/max.
    case Intrinsic::aarch64_neon_smaxp:
    case Intrinsic::aarch64_neon_sminp:
    case Intrinsic::aarch64_neon_umaxp:
    case Intrinsic::aarch64_neon_uminp:
    // Pairwise adds.
    case Intrinsic::aarch64_neon_addp:
    case Intrinsic::aarch64_neon_faddp:
    case Intrinsic::aarch64_neon_saddlp:
    case Intrinsic::aarch64_neon_uaddlp: {
      handlePairwiseShadowOrIntrinsic(I);
      break;
    }

    // Floating-point to integer conversions (various rounding modes).
    case Intrinsic::aarch64_neon_fcvtas:
    case Intrinsic::aarch64_neon_fcvtau:
    case Intrinsic::aarch64_neon_fcvtms:
    case Intrinsic::aarch64_neon_fcvtmu:
    case Intrinsic::aarch64_neon_fcvtns:
    case Intrinsic::aarch64_neon_fcvtnu:
    case Intrinsic::aarch64_neon_fcvtps:
    case Intrinsic::aarch64_neon_fcvtpu:
    case Intrinsic::aarch64_neon_fcvtzs:
    case Intrinsic::aarch64_neon_fcvtzu:
    case Intrinsic::aarch64_neon_fcvtxn: {
      handleNEONVectorConvertIntrinsic(I);
      break;
    }

    // Add reductions.
    case Intrinsic::aarch64_neon_faddv:
    case Intrinsic::aarch64_neon_saddv:
    case Intrinsic::aarch64_neon_uaddv:
    // Integer min/max reductions.
    case Intrinsic::aarch64_neon_smaxv:
    case Intrinsic::aarch64_neon_sminv:
    case Intrinsic::aarch64_neon_umaxv:
    case Intrinsic::aarch64_neon_uminv:
    // Floating-point min/max reductions.
    case Intrinsic::aarch64_neon_fmaxv:
    case Intrinsic::aarch64_neon_fminv:
    case Intrinsic::aarch64_neon_fmaxnmv:
    case Intrinsic::aarch64_neon_fminnmv:
    // Widening add reductions.
    case Intrinsic::aarch64_neon_saddlv:
    case Intrinsic::aarch64_neon_uaddlv:
      handleVectorReduceIntrinsic(I, true);
      break;

    case Intrinsic::aarch64_neon_ld1x2:
    case Intrinsic::aarch64_neon_ld1x3:
    case Intrinsic::aarch64_neon_ld1x4:
    case Intrinsic::aarch64_neon_ld2:
    case Intrinsic::aarch64_neon_ld3:
    case Intrinsic::aarch64_neon_ld4:
    case Intrinsic::aarch64_neon_ld2r:
    case Intrinsic::aarch64_neon_ld3r:
    case Intrinsic::aarch64_neon_ld4r: {
      handleNEONVectorLoad(I, /*WithLane=*/false);
      break;
    }

    case Intrinsic::aarch64_neon_ld2lane:
    case Intrinsic::aarch64_neon_ld3lane:
    case Intrinsic::aarch64_neon_ld4lane: {
      handleNEONVectorLoad(I, /*WithLane=*/true);
      break;
    }

    // Saturating extract-narrow. Applying the unsigned saturating variant to
    // the shadow is sound: any non-zero shadow lane stays non-zero.
    case Intrinsic::aarch64_neon_sqxtn:
    case Intrinsic::aarch64_neon_sqxtun:
    case Intrinsic::aarch64_neon_uqxtn:
      handleIntrinsicByApplyingToShadow(I, Intrinsic::aarch64_neon_uqxtn,
                                        /*trailingVerbatimArgs=*/0);
      break;

    case Intrinsic::aarch64_neon_st1x2:
    case Intrinsic::aarch64_neon_st1x3:
    case Intrinsic::aarch64_neon_st1x4:
    case Intrinsic::aarch64_neon_st2:
    case Intrinsic::aarch64_neon_st3:
    case Intrinsic::aarch64_neon_st4: {
      handleNEONVectorStoreIntrinsic(I, /*useLane=*/false);
      break;
    }

    case Intrinsic::aarch64_neon_st2lane:
    case Intrinsic::aarch64_neon_st3lane:
    case Intrinsic::aarch64_neon_st4lane: {
      handleNEONVectorStoreIntrinsic(I, /*useLane=*/true);
      break;
    }

    case Intrinsic::aarch64_neon_tbl1:
    case Intrinsic::aarch64_neon_tbl2:
    case Intrinsic::aarch64_neon_tbl3:
    case Intrinsic::aarch64_neon_tbl4:
    case Intrinsic::aarch64_neon_tbx1:
    case Intrinsic::aarch64_neon_tbx2:
    case Intrinsic::aarch64_neon_tbx3:
    case Intrinsic::aarch64_neon_tbx4: {
      // The last trailing argument (index register) is passed verbatim.
      handleIntrinsicByApplyingToShadow(
          I, I.getIntrinsicID(), /*trailingVerbatimArgs=*/1);
      break;
    }

    case Intrinsic::aarch64_neon_fmulx:
    case Intrinsic::aarch64_neon_pmul:
    case Intrinsic::aarch64_neon_pmull:
    case Intrinsic::aarch64_neon_smull:
    case Intrinsic::aarch64_neon_pmull64:
    case Intrinsic::aarch64_neon_umull: {
      handleNEONVectorMultiplyIntrinsic(I);
      break;
    }

    default:
      return false;
    }
    return true;
  }
  void visitIntrinsicInst(IntrinsicInst &I) {
    if (maybeHandleCrossPlatformIntrinsic(I))
      return;

    if (maybeHandleX86SIMDIntrinsic(I))
      return;

    if (maybeHandleArmSIMDIntrinsic(I))
      return;

    if (maybeHandleUnknownIntrinsic(I))
      return;

    visitInstruction(I);
  }
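  // libatomic.a functions (__atomic_load/__atomic_store) can't be
  // intercepted or recompiled with instrumentation, so their shadow effects
  // are modelled inline: the shadow bytes are memcpy'd alongside the data,
  // and the call's memory ordering is strengthened so the shadow accesses
  // can't be reordered across the atomic operation.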
  void visitLibAtomicLoad(CallBase &CB) {
    // Since we use getNextNode here, we can't have CB terminate the BB.
    assert(isa<CallInst>(CB));

    IRBuilder<> IRB(&CB);
    Value *Size = CB.getArgOperand(0);
    Value *SrcPtr = CB.getArgOperand(1);
    Value *DstPtr = CB.getArgOperand(2);
    Value *Ordering = CB.getArgOperand(3);
    // Convert the call to have at least Acquire ordering to make sure
    // the shadow operations aren't reordered before it.
    Value *NewOrdering =
        IRB.CreateExtractElement(makeAddAcquireOrderingTable(IRB), Ordering);
    CB.setArgOperand(3, NewOrdering);

    NextNodeIRBuilder NextIRB(&CB);
    Value *SrcShadowPtr, *SrcOriginPtr;
    std::tie(SrcShadowPtr, SrcOriginPtr) =
        getShadowOriginPtr(SrcPtr, NextIRB, NextIRB.getInt8Ty(), Align(1),
                           /*isStore*/ false);
    Value *DstShadowPtr =
        getShadowOriginPtr(DstPtr, NextIRB, NextIRB.getInt8Ty(), Align(1),
                           /*isStore*/ true)
            .first;

    NextIRB.CreateMemCpy(DstShadowPtr, Align(1), SrcShadowPtr, Align(1), Size);
    if (MS.TrackOrigins) {
      Value *SrcOrigin = NextIRB.CreateAlignedLoad(MS.OriginTy, SrcOriginPtr,
                                                   kMinOriginAlignment);
      Value *NewOrigin = updateOrigin(SrcOrigin, NextIRB);
      NextIRB.CreateCall(MS.MsanSetOriginFn, {DstPtr, Size, NewOrigin});
    }
  }
  void visitLibAtomicStore(CallBase &CB) {
    IRBuilder<> IRB(&CB);
    Value *Size = CB.getArgOperand(0);
    Value *DstPtr = CB.getArgOperand(2);
    Value *Ordering = CB.getArgOperand(3);
    // Convert the call to have at least Release ordering to make sure
    // the shadow operations aren't reordered after it.
    Value *NewOrdering =
        IRB.CreateExtractElement(makeAddReleaseOrderingTable(IRB), Ordering);
    CB.setArgOperand(3, NewOrdering);

    Value *DstShadowPtr =
        getShadowOriginPtr(DstPtr, IRB, IRB.getInt8Ty(), Align(1),
                           /*isStore*/ true)
            .first;

    // Atomic store always paints clean shadow/origin. See file header.
    IRB.CreateMemSet(DstShadowPtr, getCleanShadow(IRB.getInt8Ty()), Size,
                     Align(1));
  }
  void visitCallBase(CallBase &CB) {
    assert(!CB.getMetadata(LLVMContext::MD_nosanitize));
    if (CB.isInlineAsm()) {
      // For inline asm, do the usual thing: check argument shadow and mark
      // all outputs as clean, unless conservative asm handling is requested.
      if (ClHandleAsmConservative)
        visitAsmInstruction(CB);
      else
        visitInstruction(CB);
      return;
    }
    LibFunc LF;
    if (TLI->getLibFunc(CB, LF)) {
      // libatomic.a functions need special handling; see the comment above
      // visitLibAtomicLoad().
      switch (LF) {
      case LibFunc_atomic_load:
        if (!isa<CallInst>(CB)) {
          llvm::errs() << "MSAN -- cannot instrument invoke of libatomic load."
                          " Ignoring!\n";
          break;
        }
        visitLibAtomicLoad(CB);
        return;
      case LibFunc_atomic_store:
        visitLibAtomicStore(CB);
        return;
      default:
        break;
      }
    }

    if (auto *Call = dyn_cast<CallInst>(&CB)) {
      assert(!isa<IntrinsicInst>(Call) && "intrinsics are handled elsewhere");

      // We are going to insert code that relies on the callee becoming a
      // non-readonly function after instrumentation. To prevent that code
      // from being optimized out, drop the relevant attributes in advance.
      AttributeMask B;
      B.addAttribute(Attribute::Memory).addAttribute(Attribute::Speculatable);

      Call->removeFnAttrs(B);
      if (Function *Func = Call->getCalledFunction()) {
        Func->removeFnAttrs(B);
      }

      maybeMarkSanitizerLibraryCallNoBuiltin(Call, TLI);
    }
    IRBuilder<> IRB(&CB);
    bool MayCheckCall = MS.EagerChecks;
    if (Function *Func = CB.getCalledFunction()) {
      // __sanitizer_unaligned_{load,store} functions may be called by users
      // and always expect shadows in the TLS, so don't check them eagerly.
      MayCheckCall &= !Func->getName().starts_with("__sanitizer_unaligned_");
    }

    unsigned ArgOffset = 0;
    LLVM_DEBUG(dbgs() << "  CallSite: " << CB << "\n");
    for (const auto &[i, A] : llvm::enumerate(CB.args())) {
      if (!A->getType()->isSized()) {
        LLVM_DEBUG(dbgs() << "Arg " << i << " is not sized: " << CB << "\n");
        continue;
      }

      if (A->getType()->isScalableTy()) {
        LLVM_DEBUG(dbgs() << "Arg " << i << " is vscale: " << CB << "\n");
        // Handle as noundef, but don't reserve TLS slots.
        insertCheckShadowOf(A, &CB);
        continue;
      }

      unsigned Size = 0;
      const DataLayout &DL = F.getDataLayout();

      bool ByVal = CB.paramHasAttr(i, Attribute::ByVal);
      bool NoUndef = CB.paramHasAttr(i, Attribute::NoUndef);
      bool EagerCheck = MayCheckCall && !ByVal && NoUndef;

      if (EagerCheck) {
        insertCheckShadowOf(A, &CB);
        Size = DL.getTypeAllocSize(A->getType());
      } else {
        [[maybe_unused]] Value *Store = nullptr;
        Value *ArgShadow = getShadow(A);
        Value *ArgShadowBase = getShadowPtrForArgument(IRB, ArgOffset);
        LLVM_DEBUG(dbgs() << "  Arg#" << i << ": " << *A
                          << " Shadow: " << *ArgShadow << "\n");
        if (ByVal) {
          // ByVal requires some special handling as it's too big for a single
          // load.
          assert(A->getType()->isPointerTy() &&
                 "ByVal argument is not a pointer!");
          Size = DL.getTypeAllocSize(CB.getParamByValType(i));
          if (ArgOffset + Size > kParamTLSSize)
            break;
          const MaybeAlign ParamAlignment(CB.getParamAlign(i));
          MaybeAlign Alignment = std::nullopt;
          if (ParamAlignment)
            Alignment = std::min(*ParamAlignment, kShadowTLSAlignment);
          Value *AShadowPtr, *AOriginPtr;
          std::tie(AShadowPtr, AOriginPtr) =
              getShadowOriginPtr(A, IRB, IRB.getInt8Ty(), Alignment,
                                 /*isStore*/ false);
          if (!PropagateShadow) {
            Store = IRB.CreateMemSet(ArgShadowBase,
                                     Constant::getNullValue(IRB.getInt8Ty()),
                                     Size, Alignment);
          } else {
            Store = IRB.CreateMemCpy(ArgShadowBase, Alignment, AShadowPtr,
                                     Alignment, Size);
            if (MS.TrackOrigins) {
              Value *ArgOriginBase = getOriginPtrForArgument(IRB, ArgOffset);
              unsigned OriginSize = alignTo(Size, kMinOriginAlignment);
              IRB.CreateMemCpy(ArgOriginBase, kMinOriginAlignment, AOriginPtr,
                               kMinOriginAlignment, OriginSize);
            }
          }
        } else {
          // Any other parameter means we need bit-grained tracking of
          // uninitialized data.
          Size = DL.getTypeAllocSize(A->getType());
          if (ArgOffset + Size > kParamTLSSize)
            break;
          Store = IRB.CreateAlignedStore(ArgShadow, ArgShadowBase,
                                         kShadowTLSAlignment);
          Constant *Cst = dyn_cast<Constant>(ArgShadow);
          if (MS.TrackOrigins && !(Cst && Cst->isNullValue())) {
            IRB.CreateStore(getOrigin(A),
                            getOriginPtrForArgument(IRB, ArgOffset));
          }
        }
        assert(Store != nullptr);
      }
      assert(Size != 0);
      ArgOffset += alignTo(Size, kShadowTLSAlignment);
    }
    LLVM_DEBUG(dbgs() << "  done with call args\n");

    FunctionType *FT = CB.getFunctionType();
    if (FT->isVarArg()) {
      VAHelper->visitCallBase(CB, IRB);
    }

    // Now, get the shadow for the return value.
    if (!CB.getType()->isSized())
      return;

    // Don't emit the epilogue for musttail call returns.
    if (isa<CallInst>(CB) && cast<CallInst>(CB).isMustTailCall())
      return;

    if (MayCheckCall && CB.hasRetAttr(Attribute::NoUndef)) {
      setShadow(&CB, getCleanShadow(&CB));
      setOrigin(&CB, getCleanOrigin());
      return;
    }

    IRBuilder<> IRBBefore(&CB);
    // Until we have full dynamic coverage, make sure the retval shadow is 0.
    Value *Base = getShadowPtrForRetval(IRBBefore);
    IRBBefore.CreateAlignedStore(getCleanShadow(&CB), Base,
                                 kShadowTLSAlignment);
    BasicBlock::iterator NextInsn;
    if (isa<CallInst>(CB)) {
      NextInsn = ++CB.getIterator();
      assert(NextInsn != CB.getParent()->end());
    } else {
      BasicBlock *NormalDest = cast<InvokeInst>(CB).getNormalDest();
      if (!NormalDest->getSinglePredecessor()) {
        // FIXME: this case is tricky, so we are just conservative here.
        // Perhaps we need to split the edge between this BB and NormalDest.
        setShadow(&CB, getCleanShadow(&CB));
        setOrigin(&CB, getCleanOrigin());
        return;
      }
      // FIXME: NextInsn is likely in a basic block that has not been visited
      // yet. Anything inserted there will be instrumented by MSan later!
      NextInsn = NormalDest->getFirstInsertionPt();
      assert(NextInsn != NormalDest->end() &&
             "Could not find insertion point for retval shadow load");
    }
    IRBuilder<> IRBAfter(&*NextInsn);
    Value *RetvalShadow = IRBAfter.CreateAlignedLoad(
        getShadowTy(&CB), getShadowPtrForRetval(IRBAfter),
        kShadowTLSAlignment, "_msret");
    setShadow(&CB, RetvalShadow);
    if (MS.TrackOrigins)
      setOrigin(&CB, IRBAfter.CreateLoad(MS.OriginTy, getOriginPtrForRetval()));
  }
  static bool isAMustTailRetVal(Value *RetVal) {
    if (auto *I = dyn_cast<BitCastInst>(RetVal)) {
      RetVal = I->getOperand(0);
    }
    if (auto *I = dyn_cast<CallInst>(RetVal)) {
      return I->isMustTailCall();
    }
    return false;
  }
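  // Return values are passed to the caller via __msan_retval_tls, unless
  // eager checks apply (a noundef return with -msan-eager-checks, or main()),
  // in which case the shadow is checked at the return site and a clean shadow
  // is stored instead.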
  void visitReturnInst(ReturnInst &I) {
    IRBuilder<> IRB(&I);
    Value *RetVal = I.getReturnValue();
    if (!RetVal)
      return;
    // Don't emit the epilogue for musttail call returns.
    if (isAMustTailRetVal(RetVal))
      return;
    Value *ShadowPtr = getShadowPtrForRetval(IRB);
    bool HasNoUndef = F.hasRetAttribute(Attribute::NoUndef);
    bool StoreShadow = !(MS.EagerChecks && HasNoUndef);
    // FIXME: consider a SpecialCaseList of functions that must always return
    // fully initialized values; for now, "main" is hardcoded.
    bool EagerCheck = (MS.EagerChecks && HasNoUndef) || (F.getName() == "main");

    Value *Shadow = getShadow(RetVal);
    bool StoreOrigin = true;
    if (EagerCheck) {
      insertCheckShadowOf(RetVal, &I);
      Shadow = getCleanShadow(RetVal);
      StoreOrigin = false;
    }

    // The caller may still expect information passed over TLS if we pass our
    // check.
    if (StoreShadow) {
      IRB.CreateAlignedStore(Shadow, ShadowPtr, kShadowTLSAlignment);
      if (MS.TrackOrigins && StoreOrigin)
        IRB.CreateStore(getOrigin(RetVal), getOriginPtrForRetval());
    }
  }
  void visitPHINode(PHINode &I) {
    IRBuilder<> IRB(&I);
    if (!PropagateShadow) {
      setShadow(&I, getCleanShadow(&I));
      setOrigin(&I, getCleanOrigin());
      return;
    }

    ShadowPHINodes.push_back(&I);
    setShadow(&I, IRB.CreatePHI(getShadowTy(&I), I.getNumIncomingValues(),
                                "_msphi_s"));
    if (MS.TrackOrigins)
      setOrigin(
          &I, IRB.CreatePHI(MS.OriginTy, I.getNumIncomingValues(), "_msphi_o"));
  }
  Value *getLocalVarIdptr(AllocaInst &I) {
    ConstantInt *IntConst =
        ConstantInt::get(Type::getInt32Ty((*F.getParent()).getContext()), 0);
    return new GlobalVariable(*F.getParent(), IntConst->getType(),
                              /*isConstant=*/false, GlobalValue::PrivateLinkage,
                              IntConst);
  }

  Value *getLocalVarDescription(AllocaInst &I) {
    return createPrivateConstGlobalForString(*F.getParent(), I.getName());
  }

  void poisonAllocaUserspace(AllocaInst &I, IRBuilder<> &IRB, Value *Len) {
    if (PoisonStack && ClPoisonStackWithCall) {
      IRB.CreateCall(MS.MsanPoisonStackFn, {&I, Len});
    } else {
      Value *ShadowBase, *OriginBase;
      std::tie(ShadowBase, OriginBase) = getShadowOriginPtr(
          &I, IRB, IRB.getInt8Ty(), Align(1), /*isStore*/ true);

      Value *PoisonValue = IRB.getInt8(PoisonStack ? ClPoisonStackPattern : 0);
      IRB.CreateMemSet(ShadowBase, PoisonValue, Len, I.getAlign());
    }

    if (PoisonStack && MS.TrackOrigins) {
      Value *Idptr = getLocalVarIdptr(I);
      if (ClPrintStackNames) {
        Value *Descr = getLocalVarDescription(I);
        IRB.CreateCall(MS.MsanSetAllocaOriginWithDescriptionFn,
                       {&I, Len, Idptr, Descr});
      } else {
        IRB.CreateCall(MS.MsanSetAllocaOriginNoDescriptionFn, {&I, Len, Idptr});
      }
    }
  }

  void poisonAllocaKmsan(AllocaInst &I, IRBuilder<> &IRB, Value *Len) {
    Value *Descr = getLocalVarDescription(I);
    if (PoisonStack) {
      IRB.CreateCall(MS.MsanPoisonAllocaFn, {&I, Len, Descr});
    } else {
      IRB.CreateCall(MS.MsanUnpoisonAllocaFn, {&I, Len});
    }
  }
  void instrumentAlloca(AllocaInst &I, Instruction *InsPoint = nullptr) {
    if (!InsPoint)
      InsPoint = &I;
    NextNodeIRBuilder IRB(InsPoint);
    const DataLayout &DL = F.getDataLayout();
    TypeSize TS = DL.getTypeAllocSize(I.getAllocatedType());
    Value *Len = IRB.CreateTypeSize(MS.IntptrTy, TS);
    if (I.isArrayAllocation())
      Len = IRB.CreateMul(Len,
                          IRB.CreateZExtOrTrunc(I.getArraySize(), MS.IntptrTy));

    if (MS.CompileKernel)
      poisonAllocaKmsan(I, IRB, Len);
    else
      poisonAllocaUserspace(I, IRB, Len);
  }

  void visitAllocaInst(AllocaInst &I) {
    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
    // We'll get to this alloca later unless it's poisoned at the call site.
    AllocaSet.insert(&I);
  }
  void visitSelectInst(SelectInst &I) {
    // a = select b, c, d
    Value *B = I.getCondition();
    Value *C = I.getTrueValue();
    Value *D = I.getFalseValue();

    handleSelectLikeInst(I, B, C, D);
  }

  void handleSelectLikeInst(Instruction &I, Value *B, Value *C, Value *D) {
    IRBuilder<> IRB(&I);

    Value *Sb = getShadow(B);
    Value *Sc = getShadow(C);
    Value *Sd = getShadow(D);

    Value *Ob = MS.TrackOrigins ? getOrigin(B) : nullptr;
    Value *Oc = MS.TrackOrigins ? getOrigin(C) : nullptr;
    Value *Od = MS.TrackOrigins ? getOrigin(D) : nullptr;

    // Result shadow if the condition shadow is 0.
    Value *Sa0 = IRB.CreateSelect(B, Sc, Sd);
    Value *Sa1;
    if (I.getType()->isAggregateType()) {
      // To avoid "sign extending" i1 to an arbitrary aggregate type, we just
      // do an extra "select": Sa = select Sb, poisoned, (select b, Sc, Sd)
      Sa1 = getPoisonedShadow(getShadowTy(I.getType()));
    } else {
      // Sa = select Sb, [ (c^d) | Sc | Sd ], [ b ? Sc : Sd ]
      // If Sb (the condition is poisoned), look for bits in c and d that are
      // equal and both unpoisoned.
      C = CreateAppToShadowCast(IRB, C);
      D = CreateAppToShadowCast(IRB, D);
      Sa1 = IRB.CreateOr({IRB.CreateXor(C, D), Sc, Sd});
    }
    Value *Sa = IRB.CreateSelect(Sb, Sa1, Sa0, "_msprop_select");
    setShadow(&I, Sa);
    if (MS.TrackOrigins) {
      // Origins are always i32, so any vector conditions must be flattened.
      if (B->getType()->isVectorTy()) {
        B = convertToBool(B, IRB);
        Sb = convertToBool(Sb, IRB);
      }
      // a = select b, c, d
      // Oa = Sb ? Ob : (b ? Oc : Od)
      setOrigin(&I, IRB.CreateSelect(Sb, Ob, IRB.CreateSelect(B, Oc, Od)));
    }
  }
  void visitLandingPadInst(LandingPadInst &I) {
    // Do nothing.
    // See https://github.com/google/sanitizers/issues/504
    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }

  void visitCatchSwitchInst(CatchSwitchInst &I) {
    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }

  void visitFuncletPadInst(FuncletPadInst &I) {
    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }
  void visitGetElementPtrInst(GetElementPtrInst &I) { handleShadowOr(I); }

  void visitExtractValueInst(ExtractValueInst &I) {
    IRBuilder<> IRB(&I);
    Value *Agg = I.getAggregateOperand();
    Value *AggShadow = getShadow(Agg);
    Value *ResShadow = IRB.CreateExtractValue(AggShadow, I.getIndices());
    setShadow(&I, ResShadow);
    setOriginForNaryOp(I);
  }

  void visitInsertValueInst(InsertValueInst &I) {
    IRBuilder<> IRB(&I);
    Value *AggShadow = getShadow(I.getAggregateOperand());
    Value *InsShadow = getShadow(I.getInsertedValueOperand());
    Value *Res = IRB.CreateInsertValue(AggShadow, InsShadow, I.getIndices());
    setShadow(&I, Res);
    setOriginForNaryOp(I);
  }
  void dumpInst(Instruction &I) {
    if (CallInst *CI = dyn_cast<CallInst>(&I)) {
      errs() << "ZZZ call " << CI->getCalledFunction()->getName() << "\n";
    } else {
      errs() << "ZZZ " << I.getOpcodeName() << "\n";
    }
    errs() << "QQQ " << I << "\n";
  }

  void visitResumeInst(ResumeInst &I) {
    // Nothing to do here.
  }

  void visitCleanupReturnInst(CleanupReturnInst &CRI) {
    // Nothing to do here.
  }

  void visitCatchReturnInst(CatchReturnInst &CRI) {
    // Nothing to do here.
  }
  // For each assembly argument, check its value for being initialized. If the
  // argument is a pointer, assume it points to a single element of the
  // corresponding type and unpoison that element after the asm executes.
  void instrumentAsmArgument(Value *Operand, Type *ElemTy, Instruction &I,
                             IRBuilder<> &IRB, const DataLayout &DL,
                             bool isOutput) {
    // Check the operand value itself.
    insertCheckShadowOf(Operand, &I);
    if (!Operand->getType()->isPointerTy() || !isOutput)
      return;
    if (!ElemTy || !ElemTy->isSized())
      return;
    auto Size = DL.getTypeStoreSize(ElemTy);
    Value *SizeVal = IRB.CreateTypeSize(MS.IntptrTy, Size);
    if (MS.CompileKernel) {
      IRB.CreateCall(MS.MsanInstrumentAsmStoreFn, {Operand, SizeVal});
    } else {
      // ElemTy, derived from elementtype(), does not encode the alignment of
      // the pointer; conservatively assume the shadow memory is unaligned.
      auto [ShadowPtr, _] =
          getShadowOriginPtrUserspace(Operand, IRB, IRB.getInt8Ty(), Align(1));
      IRB.CreateAlignedStore(getCleanShadow(ElemTy), ShadowPtr, Align(1));
    }
  }

  /// Get the number of output arguments returned by pointers.
  int getNumOutputArgs(InlineAsm *IA, CallBase *CB) {
    int NumRetOutputs = 0;
    int NumOutputs = 0;
    Type *RetTy = cast<Value>(CB)->getType();
    if (!RetTy->isVoidTy()) {
      // Register outputs are returned via the CallInst return value.
      auto *ST = dyn_cast<StructType>(RetTy);
      if (ST)
        NumRetOutputs = ST->getNumElements();
      else
        NumRetOutputs = 1;
    }
    InlineAsm::ConstraintInfoVector Constraints = IA->ParseConstraints();
    for (const InlineAsm::ConstraintInfo &Info : Constraints) {
      switch (Info.Type) {
      case InlineAsm::isOutput:
        NumOutputs++;
        break;
      default:
        break;
      }
    }
    return NumOutputs - NumRetOutputs;
  }
  // Conservative inline assembly handling: check for poisoned shadow of asm()
  // arguments, then unpoison the result and all the memory locations pointed
  // to by output arguments. An asm() with nR register outputs, nO pointer
  // outputs and nI inputs is lowered to a CallInst with nO + nI + 1 operands
  // (the last operand being the callee).
  void visitAsmInstruction(Instruction &I) {
    const DataLayout &DL = F.getDataLayout();
    CallBase *CB = cast<CallBase>(&I);
    IRBuilder<> IRB(&I);
    InlineAsm *IA = cast<InlineAsm>(CB->getCalledOperand());
    int OutputArgs = getNumOutputArgs(IA, CB);
    // The last operand of a CallInst is the function itself.
    int NumOperands = CB->getNumOperands() - 1;

    // Check input arguments. Do so before unpoisoning output arguments, so
    // that we don't overwrite uninitialized values before checking them.
    for (int i = OutputArgs; i < NumOperands; i++) {
      Value *Operand = CB->getOperand(i);
      instrumentAsmArgument(Operand, /*ElemTy=*/nullptr, I, IRB, DL,
                            /*isOutput*/ false);
    }
    // Unpoison output arguments. This must happen before the actual InlineAsm
    // call, so that the shadow of the memory published by asm() stays valid.
    for (int i = 0; i < OutputArgs; i++) {
      Value *Operand = CB->getOperand(i);
      Type *ElemTy = CB->getParamElementType(i);
      instrumentAsmArgument(Operand, ElemTy, I, IRB, DL, /*isOutput*/ true);
    }

    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }

  void visitFreezeInst(FreezeInst &I) {
    // Freeze always returns a fully defined value.
    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }

  void visitInstruction(Instruction &I) {
    // Everything else: check each operand and mark the result as clean.
    if (ClDumpStrictInstructions)
      dumpInst(I);
    LLVM_DEBUG(dbgs() << "DEFAULT: " << I << "\n");
    for (size_t i = 0, n = I.getNumOperands(); i < n; i++) {
      Value *Operand = I.getOperand(i);
      if (Operand->getType()->isSized())
        insertCheckShadowOf(Operand, &I);
    }
    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }
};
struct VarArgHelperBase : public VarArgHelper {
  Function &F;
  MemorySanitizer &MS;
  MemorySanitizerVisitor &MSV;
  SmallVector<CallInst *, 16> VAStartInstrumentationList;
  const unsigned VAListTagSize;

  VarArgHelperBase(Function &F, MemorySanitizer &MS,
                   MemorySanitizerVisitor &MSV, unsigned VAListTagSize)
      : F(F), MS(MS), MSV(MSV), VAListTagSize(VAListTagSize) {}

  Value *getShadowAddrForVAArgument(IRBuilder<> &IRB, unsigned ArgOffset) {
    Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
    return IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
  }

  /// Compute the shadow address for a given va_arg.
  Value *getShadowPtrForVAArgument(IRBuilder<> &IRB, unsigned ArgOffset) {
    Value *Base = getShadowAddrForVAArgument(IRB, ArgOffset);
    return IRB.CreateIntToPtr(Base, MS.PtrTy, "_msarg_va_s");
  }

  /// Compute the shadow address for a given va_arg, returning null if the
  /// argument would overflow __msan_va_arg_tls.
  Value *getShadowPtrForVAArgument(IRBuilder<> &IRB, unsigned ArgOffset,
                                   unsigned ArgSize) {
    if (ArgOffset + ArgSize > kParamTLSSize)
      return nullptr;
    return getShadowPtrForVAArgument(IRB, ArgOffset);
  }

  /// Compute the origin address for a given va_arg.
  Value *getOriginPtrForVAArgument(IRBuilder<> &IRB, int ArgOffset) {
    Value *Base = IRB.CreatePointerCast(MS.VAArgOriginTLS, MS.IntptrTy);
    Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
    return IRB.CreateIntToPtr(Base, MS.PtrTy, "_msarg_va_o");
  }

  void CleanUnusedTLS(IRBuilder<> &IRB, Value *ShadowBase,
                      unsigned BaseOffset) {
    // The tail of __msan_va_arg_tls is not large enough to fit the full value
    // shadow, but it will be copied to the backup anyway. Make it clean.
    if (BaseOffset >= kParamTLSSize)
      return;
    Value *TailSize =
        ConstantInt::getSigned(IRB.getInt32Ty(), kParamTLSSize - BaseOffset);
    IRB.CreateMemSet(ShadowBase, ConstantInt::getNullValue(IRB.getInt8Ty()),
                     TailSize, Align(8));
  }

  void unpoisonVAListTagForInst(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *VAListTag = I.getArgOperand(0);
    const Align Alignment = Align(8);
    auto [ShadowPtr, OriginPtr] = MSV.getShadowOriginPtr(
        VAListTag, IRB, IRB.getInt8Ty(), Alignment, /*isStore*/ true);
    // Unpoison the whole __va_list_tag.
    IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
                     VAListTagSize, Alignment, false);
  }

  void visitVAStartInst(VAStartInst &I) override {
    if (F.getCallingConv() == CallingConv::Win64)
      return;
    VAStartInstrumentationList.push_back(&I);
    unpoisonVAListTagForInst(I);
  }

  void visitVACopyInst(VACopyInst &I) override {
    if (F.getCallingConv() == CallingConv::Win64)
      return;
    unpoisonVAListTagForInst(I);
  }
};
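// For variadic callees, the caller additionally stages argument shadows in
// __msan_va_arg_tls, in an ABI-specific layout; at va_start the callee copies
// them over the shadow of the register-save and overflow areas. The helpers
// below implement the per-target layouts.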
/// AMD64-specific implementation of VarArgHelper.
struct VarArgAMD64Helper : public VarArgHelperBase {
  // An unfortunate workaround for asymmetric lowering of va_arg stuff.
  // AMD64 ABI Draft 0.99.6, 3.5.7 (register save area).
  static const unsigned AMD64GpEndOffset = 48;
  static const unsigned AMD64FpEndOffsetSSE = 176;
  // If SSE is disabled, fp_offset in va_list is zero.
  static const unsigned AMD64FpEndOffsetNoSSE = AMD64GpEndOffset;

  unsigned AMD64FpEndOffset;
  AllocaInst *VAArgTLSCopy = nullptr;
  AllocaInst *VAArgTLSOriginCopy = nullptr;
  Value *VAArgOverflowSize = nullptr;

  enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };

  VarArgAMD64Helper(Function &F, MemorySanitizer &MS,
                    MemorySanitizerVisitor &MSV)
      : VarArgHelperBase(F, MS, MSV, /*VAListTagSize=*/24) {
    AMD64FpEndOffset = AMD64FpEndOffsetSSE;
    for (const auto &Attr : F.getAttributes().getFnAttrs()) {
      if (Attr.isStringAttribute() &&
          (Attr.getKindAsString() == "target-features")) {
        if (Attr.getValueAsString().contains("-sse"))
          AMD64FpEndOffset = AMD64FpEndOffsetNoSSE;
        break;
      }
    }
  }

  ArgKind classifyArgument(Value *arg) {
    // A very rough approximation of X86_64 argument classification rules.
    Type *T = arg->getType();
    if (T->isX86_FP80Ty())
      return AK_Memory;
    if (T->isFPOrFPVectorTy())
      return AK_FloatingPoint;
    if (T->isIntegerTy() && T->getPrimitiveSizeInBits() <= 64)
      return AK_GeneralPurpose;
    if (T->isPointerTy())
      return AK_GeneralPurpose;
    return AK_Memory;
  }
  // The instrumentation stores the argument shadow in a non-ABI-specific
  // format because it does not know which arguments are named.
  void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
    unsigned GpOffset = 0;
    unsigned FpOffset = AMD64GpEndOffset;
    unsigned OverflowOffset = AMD64FpEndOffset;
    const DataLayout &DL = F.getDataLayout();

    for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
      bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
      bool IsByVal = CB.paramHasAttr(ArgNo, Attribute::ByVal);
      if (IsByVal) {
        // ByVal arguments always go to the overflow area. Fixed arguments
        // passed through the overflow area will be stepped over by va_start,
        // so don't count them towards the offset.
        if (IsFixed)
          continue;
        assert(A->getType()->isPointerTy());
        Type *RealTy = CB.getParamByValType(ArgNo);
        uint64_t ArgSize = DL.getTypeAllocSize(RealTy);
        uint64_t AlignedSize = alignTo(ArgSize, 8);
        unsigned BaseOffset = OverflowOffset;
        Value *ShadowBase = getShadowPtrForVAArgument(IRB, OverflowOffset);
        Value *OriginBase = nullptr;
        if (MS.TrackOrigins)
          OriginBase = getOriginPtrForVAArgument(IRB, OverflowOffset);
        OverflowOffset += AlignedSize;

        if (OverflowOffset > kParamTLSSize) {
          // We have no space to copy the shadow there.
          CleanUnusedTLS(IRB, ShadowBase, BaseOffset);
          continue;
        }

        Value *ShadowPtr, *OriginPtr;
        std::tie(ShadowPtr, OriginPtr) =
            MSV.getShadowOriginPtr(A, IRB, IRB.getInt8Ty(),
                                   kShadowTLSAlignment, /*isStore*/ false);
        IRB.CreateMemCpy(ShadowBase, kShadowTLSAlignment, ShadowPtr,
                         kShadowTLSAlignment, ArgSize);
        if (MS.TrackOrigins)
          IRB.CreateMemCpy(OriginBase, kShadowTLSAlignment, OriginPtr,
                           kShadowTLSAlignment, ArgSize);
      } else {
        ArgKind AK = classifyArgument(A);
        if (AK == AK_GeneralPurpose && GpOffset >= AMD64GpEndOffset)
          AK = AK_Memory;
        if (AK == AK_FloatingPoint && FpOffset >= AMD64FpEndOffset)
          AK = AK_Memory;
        Value *ShadowBase, *OriginBase = nullptr;
        switch (AK) {
        case AK_GeneralPurpose:
          ShadowBase = getShadowPtrForVAArgument(IRB, GpOffset);
          if (MS.TrackOrigins)
            OriginBase = getOriginPtrForVAArgument(IRB, GpOffset);
          GpOffset += 8;
          assert(GpOffset <= kParamTLSSize);
          break;
        case AK_FloatingPoint:
          ShadowBase = getShadowPtrForVAArgument(IRB, FpOffset);
          if (MS.TrackOrigins)
            OriginBase = getOriginPtrForVAArgument(IRB, FpOffset);
          FpOffset += 16;
          assert(FpOffset <= kParamTLSSize);
          break;
        case AK_Memory:
          if (IsFixed)
            continue;
          uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
          uint64_t AlignedSize = alignTo(ArgSize, 8);
          unsigned BaseOffset = OverflowOffset;
          ShadowBase = getShadowPtrForVAArgument(IRB, OverflowOffset);
          if (MS.TrackOrigins) {
            OriginBase = getOriginPtrForVAArgument(IRB, OverflowOffset);
          }
          OverflowOffset += AlignedSize;
          if (OverflowOffset > kParamTLSSize) {
            // We have no space to copy the shadow there.
            CleanUnusedTLS(IRB, ShadowBase, BaseOffset);
            continue;
          }
        }
        // Take fixed arguments into account for GpOffset and FpOffset, but
        // don't actually store shadows for them.
        if (IsFixed)
          continue;
        Value *Shadow = MSV.getShadow(A);
        IRB.CreateAlignedStore(Shadow, ShadowBase, kShadowTLSAlignment);
        if (MS.TrackOrigins) {
          Value *Origin = MSV.getOrigin(A);
          TypeSize StoreSize = DL.getTypeStoreSize(Shadow->getType());
          MSV.paintOrigin(IRB, Origin, OriginBase, StoreSize,
                          std::max(kShadowTLSAlignment, kMinOriginAlignment));
        }
      }
    }
    Constant *OverflowSize =
        ConstantInt::get(IRB.getInt64Ty(), OverflowOffset - AMD64FpEndOffset);
    IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
  }
  void finalizeInstrumentation() override {
    assert(!VAArgOverflowSize && !VAArgTLSCopy &&
           "finalizeInstrumentation called twice");
    if (!VAStartInstrumentationList.empty()) {
      // If there is a va_start in this function, make a backup copy of
      // va_arg_tls somewhere in the function entry block.
      IRBuilder<> IRB(MSV.FnPrologueEnd);
      VAArgOverflowSize =
          IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
      Value *CopySize = IRB.CreateAdd(
          ConstantInt::get(MS.IntptrTy, AMD64FpEndOffset), VAArgOverflowSize);
      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
      VAArgTLSCopy->setAlignment(kShadowTLSAlignment);
      IRB.CreateMemSet(VAArgTLSCopy, Constant::getNullValue(IRB.getInt8Ty()),
                       CopySize, kShadowTLSAlignment, false);

      Value *SrcSize = IRB.CreateBinaryIntrinsic(
          Intrinsic::umin, CopySize,
          ConstantInt::get(MS.IntptrTy, kParamTLSSize));
      IRB.CreateMemCpy(VAArgTLSCopy, kShadowTLSAlignment, MS.VAArgTLS,
                       kShadowTLSAlignment, SrcSize);
      if (MS.TrackOrigins) {
        VAArgTLSOriginCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
        VAArgTLSOriginCopy->setAlignment(kShadowTLSAlignment);
        IRB.CreateMemCpy(VAArgTLSOriginCopy, kShadowTLSAlignment,
                         MS.VAArgOriginTLS, kShadowTLSAlignment, SrcSize);
      }
    }

    // Instrument va_start: copy the va_list shadow from the backup copy of
    // the TLS contents.
    for (CallInst *OrigInst : VAStartInstrumentationList) {
      NextNodeIRBuilder IRB(OrigInst);
      Value *VAListTag = OrigInst->getArgOperand(0);

      Value *RegSaveAreaPtrPtr = IRB.CreateIntToPtr(
          IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
                        ConstantInt::get(MS.IntptrTy, 16)),
          MS.PtrTy);
      Value *RegSaveAreaPtr = IRB.CreateLoad(MS.PtrTy, RegSaveAreaPtrPtr);
      Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
      const Align Alignment = Align(16);
      std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
          MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
                                 Alignment, /*isStore*/ true);
      IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
                       AMD64FpEndOffset);
      if (MS.TrackOrigins)
        IRB.CreateMemCpy(RegSaveAreaOriginPtr, Alignment, VAArgTLSOriginCopy,
                         Alignment, AMD64FpEndOffset);
      Value *OverflowArgAreaPtrPtr = IRB.CreateIntToPtr(
          IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
                        ConstantInt::get(MS.IntptrTy, 8)),
          MS.PtrTy);
      Value *OverflowArgAreaPtr =
          IRB.CreateLoad(MS.PtrTy, OverflowArgAreaPtrPtr);
      Value *OverflowArgAreaShadowPtr, *OverflowArgAreaOriginPtr;
      std::tie(OverflowArgAreaShadowPtr, OverflowArgAreaOriginPtr) =
          MSV.getShadowOriginPtr(OverflowArgAreaPtr, IRB, IRB.getInt8Ty(),
                                 Alignment, /*isStore*/ true);
      Value *SrcPtr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), VAArgTLSCopy,
                                             AMD64FpEndOffset);
      IRB.CreateMemCpy(OverflowArgAreaShadowPtr, Alignment, SrcPtr, Alignment,
                       VAArgOverflowSize);
      if (MS.TrackOrigins) {
        SrcPtr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), VAArgTLSOriginCopy,
                                        AMD64FpEndOffset);
        IRB.CreateMemCpy(OverflowArgAreaOriginPtr, Alignment, SrcPtr, Alignment,
                         VAArgOverflowSize);
      }
    }
  }
};
/// AArch64-specific implementation of VarArgHelper.
struct VarArgAArch64Helper : public VarArgHelperBase {
  static const unsigned kAArch64GrArgSize = 64;
  static const unsigned kAArch64VrArgSize = 128;

  static const unsigned AArch64GrBegOffset = 0;
  static const unsigned AArch64GrEndOffset = kAArch64GrArgSize;
  // Make the VR space aligned to 16 bytes.
  static const unsigned AArch64VrBegOffset = AArch64GrEndOffset;
  static const unsigned AArch64VrEndOffset =
      AArch64VrBegOffset + kAArch64VrArgSize;
  static const unsigned AArch64VAEndOffset = AArch64VrEndOffset;

  AllocaInst *VAArgTLSCopy = nullptr;
  Value *VAArgOverflowSize = nullptr;

  enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };

  VarArgAArch64Helper(Function &F, MemorySanitizer &MS,
                      MemorySanitizerVisitor &MSV)
      : VarArgHelperBase(F, MS, MSV, /*VAListTagSize=*/32) {}

  // A very rough approximation of AArch64 argument classification rules.
  std::pair<ArgKind, uint64_t> classifyArgument(Type *T) {
    if (T->isIntOrPtrTy() && T->getPrimitiveSizeInBits() <= 64)
      return {AK_GeneralPurpose, 1};
    if (T->isFloatingPointTy() && T->getPrimitiveSizeInBits() <= 128)
      return {AK_FloatingPoint, 1};

    if (T->isArrayTy()) {
      auto R = classifyArgument(T->getArrayElementType());
      R.second *= T->getScalarType()->getArrayNumElements();
      return R;
    }

    if (const FixedVectorType *FV = dyn_cast<FixedVectorType>(T)) {
      auto R = classifyArgument(FV->getScalarType());
      R.second *= FV->getNumElements();
      return R;
    }

    LLVM_DEBUG(errs() << "Unknown vararg type: " << *T << "\n");
    return {AK_Memory, 0};
  }
7563   void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
7564     unsigned GrOffset = AArch64GrBegOffset;
7565     unsigned VrOffset = AArch64VrBegOffset;
7566     unsigned OverflowOffset = AArch64VAEndOffset;
7568     const DataLayout &DL = F.getDataLayout();
7571       auto [AK, RegNum] = classifyArgument(A->getType());
7572       if (AK == AK_GeneralPurpose &&
7573           (GrOffset + RegNum * 8) > AArch64GrEndOffset)
7575       if (AK == AK_FloatingPoint &&
7576           (VrOffset + RegNum * 16) > AArch64VrEndOffset)
7580       case AK_GeneralPurpose:
7581         Base = getShadowPtrForVAArgument(IRB, GrOffset);
7582         GrOffset += 8 * RegNum;
7584       case AK_FloatingPoint:
7585         Base = getShadowPtrForVAArgument(IRB, VrOffset);
7586         VrOffset += 16 * RegNum;
7593         uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
7594         uint64_t AlignedSize = alignTo(ArgSize, 8);
7595         unsigned BaseOffset = OverflowOffset;
7596         Base = getShadowPtrForVAArgument(IRB, BaseOffset);
7597         OverflowOffset += AlignedSize;
7600           CleanUnusedTLS(IRB, Base, BaseOffset);
7612         ConstantInt::get(IRB.getInt64Ty(), OverflowOffset - AArch64VAEndOffset);
7613     IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
7620             ConstantInt::get(MS.IntptrTy, offset)),
7622     return IRB.CreateLoad(Type::getInt64Ty(*MS.C), SaveAreaPtrPtr);
7629             ConstantInt::get(MS.IntptrTy, offset)),
7632     return IRB.CreateSExt(SaveArea32, MS.IntptrTy);
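// The fixed offsets passed to getVAField64/getVAField32 above and in
// finalizeInstrumentation below mirror the AAPCS64 va_list layout (a
// reference sketch, not code from this file):
//
//   struct va_list {
//     void *__stack;    // byte 0:  next stacked argument
//     void *__gr_top;   // byte 8:  end of the GP register save area
//     void *__vr_top;   // byte 16: end of the FP/SIMD register save area
//     int   __gr_offs;  // byte 24: negative offset from __gr_top
//     int   __vr_offs;  // byte 28: negative offset from __vr_top
//   };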
7635   void finalizeInstrumentation() override {
7636     assert(!VAArgOverflowSize && !VAArgTLSCopy &&
7637            "finalizeInstrumentation called twice");
7638     if (!VAStartInstrumentationList.empty()) {
7645           ConstantInt::get(MS.IntptrTy, AArch64VAEndOffset), VAArgOverflowSize);
7646       VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
7652           Intrinsic::umin, CopySize,
7658     Value *GrArgSize = ConstantInt::get(MS.IntptrTy, kAArch64GrArgSize);
7659     Value *VrArgSize = ConstantInt::get(MS.IntptrTy, kAArch64VrArgSize);
7663     for (CallInst *OrigInst : VAStartInstrumentationList) {
7664       NextNodeIRBuilder IRB(OrigInst);
7666       Value *VAListTag = OrigInst->getArgOperand(0);
7683       Value *StackSaveAreaPtr =
7684           IRB.CreateIntToPtr(getVAField64(IRB, VAListTag, 0), RegSaveAreaPtrTy);
7687       Value *GrTopSaveAreaPtr = getVAField64(IRB, VAListTag, 8);
7688       Value *GrOffSaveArea = getVAField32(IRB, VAListTag, 24);
7691           IRB.CreateAdd(GrTopSaveAreaPtr, GrOffSaveArea), RegSaveAreaPtrTy);
7694       Value *VrTopSaveAreaPtr = getVAField64(IRB, VAListTag, 16);
7695       Value *VrOffSaveArea = getVAField32(IRB, VAListTag, 28);
7698           IRB.CreateAdd(VrTopSaveAreaPtr, VrOffSaveArea), RegSaveAreaPtrTy);
7704       Value *GrRegSaveAreaShadowPtrOff =
7705           IRB.CreateAdd(GrArgSize, GrOffSaveArea);
7707       Value *GrRegSaveAreaShadowPtr =
7708           MSV.getShadowOriginPtr(GrRegSaveAreaPtr, IRB, IRB.getInt8Ty(),
7714       Value *GrCopySize = IRB.CreateSub(GrArgSize, GrRegSaveAreaShadowPtrOff);
7720       Value *VrRegSaveAreaShadowPtrOff =
7721           IRB.CreateAdd(VrArgSize, VrOffSaveArea);
7723       Value *VrRegSaveAreaShadowPtr =
7724           MSV.getShadowOriginPtr(VrRegSaveAreaPtr, IRB, IRB.getInt8Ty(),
7731           VrRegSaveAreaShadowPtrOff);
7732       Value *VrCopySize = IRB.CreateSub(VrArgSize, VrRegSaveAreaShadowPtrOff);
7738       Value *StackSaveAreaShadowPtr =
7739           MSV.getShadowOriginPtr(StackSaveAreaPtr, IRB, IRB.getInt8Ty(),
7744           VAArgTLSCopy, IRB.getInt32(AArch64VAEndOffset));
7747           Align(16), VAArgOverflowSize);
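// PowerPC64 va_arg helper. On PPC64, va_list is a plain pointer into the
// in-memory parameter save area, so a single shadow copy covers all
// variadic arguments. The save area begins at a fixed offset from the stack
// pointer (48 bytes under ELFv1, 32 under ELFv2, per the respective ABI
// documents), which is what the isPPC64ELFv2ABI() check below selects
// between.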
7753 struct VarArgPowerPC64Helper : public VarArgHelperBase {
7754   AllocaInst *VAArgTLSCopy = nullptr;
7755   Value *VAArgSize = nullptr;
7757   VarArgPowerPC64Helper(Function &F, MemorySanitizer &MS,
7758                         MemorySanitizerVisitor &MSV)
7759       : VarArgHelperBase(F, MS, MSV, 8) {}
7761   void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
7769     Triple TargetTriple(F.getParent()->getTargetTriple());
7773     if (TargetTriple.isPPC64ELFv2ABI())
7777     unsigned VAArgOffset = VAArgBase;
7778     const DataLayout &DL = F.getDataLayout();
7781       bool IsByVal = CB.paramHasAttr(ArgNo, Attribute::ByVal);
7783         assert(A->getType()->isPointerTy());
7785         uint64_t ArgSize = DL.getTypeAllocSize(RealTy);
7788           ArgAlign = Align(8);
7789         VAArgOffset = alignTo(VAArgOffset, ArgAlign);
7792             getShadowPtrForVAArgument(IRB, VAArgOffset - VAArgBase, ArgSize);
7794           Value *AShadowPtr, *AOriginPtr;
7795           std::tie(AShadowPtr, AOriginPtr) =
7796               MSV.getShadowOriginPtr(A, IRB, IRB.getInt8Ty(),
7806         uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
7808         if (A->getType()->isArrayTy()) {
7811           Type *ElementTy = A->getType()->getArrayElementType();
7813           ArgAlign = Align(DL.getTypeAllocSize(ElementTy));
7814         } else if (A->getType()->isVectorTy()) {
7816           ArgAlign = Align(ArgSize);
7819           ArgAlign = Align(8);
7820         VAArgOffset = alignTo(VAArgOffset, ArgAlign);
7821         if (DL.isBigEndian()) {
7825           VAArgOffset += (8 - ArgSize);
7829             getShadowPtrForVAArgument(IRB, VAArgOffset - VAArgBase, ArgSize);
7833         VAArgOffset += ArgSize;
7837     VAArgBase = VAArgOffset;
7841         ConstantInt::get(MS.IntptrTy, VAArgOffset - VAArgBase);
7844     IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
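// Note on the finalizeInstrumentation pattern shared by these helpers: the
// shadow accumulated in the __msan_va_arg_tls buffer is first copied into a
// local alloca, with the copy length clamped via llvm.umin to the fixed TLS
// buffer size (kParamTLSSize), so that an over-long argument list cannot
// read past the buffer; the clamped copy is then memcpy'd over the shadow
// of the actual register save / overflow areas at each va_start site.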
7847   void finalizeInstrumentation() override {
7848     assert(!VAArgSize && !VAArgTLSCopy &&
7849            "finalizeInstrumentation called twice");
7852     Value *CopySize = VAArgSize;
7854     if (!VAStartInstrumentationList.empty()) {
7858       VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
7864           Intrinsic::umin, CopySize,
7872     for (CallInst *OrigInst : VAStartInstrumentationList) {
7873       NextNodeIRBuilder IRB(OrigInst);
7874       Value *VAListTag = OrigInst->getArgOperand(0);
7877       RegSaveAreaPtrPtr = IRB.CreateIntToPtr(RegSaveAreaPtrPtr, MS.PtrTy);
7880       Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
7881       const DataLayout &DL = F.getDataLayout();
7882       unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
7884       std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
7885           MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
7887       IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
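// 32-bit PowerPC (SVR4) va_arg helper. Here va_list is a 12-byte struct
// rather than a raw pointer (a reference sketch, per the SVR4 PPC ABI):
//
//   typedef struct {
//     unsigned char gpr;        // byte 0: index of the next GPR
//     unsigned char fpr;        // byte 1: index of the next FPR
//     unsigned short reserved;  // bytes 2-3
//     void *overflow_arg_area;  // byte 4
//     void *reg_save_area;      // byte 8
//   } va_list[1];
//
// hence VAListTagSize = 12 in the constructor, the +4 / +8 pointer loads in
// finalizeInstrumentation, and the 32-byte clamp on the register-save copy
// (8 argument GPRs x 4 bytes).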
7894 struct VarArgPowerPC32Helper : public VarArgHelperBase {
7895   AllocaInst *VAArgTLSCopy = nullptr;
7896   Value *VAArgSize = nullptr;
7898   VarArgPowerPC32Helper(Function &F, MemorySanitizer &MS,
7899                         MemorySanitizerVisitor &MSV)
7900       : VarArgHelperBase(F, MS, MSV, 12) {}
7902   void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
7906     unsigned VAArgOffset = VAArgBase;
7907     const DataLayout &DL = F.getDataLayout();
7908     unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
7911       bool IsByVal = CB.paramHasAttr(ArgNo, Attribute::ByVal);
7913         assert(A->getType()->isPointerTy());
7915         uint64_t ArgSize = DL.getTypeAllocSize(RealTy);
7917         if (ArgAlign < IntptrSize)
7918           ArgAlign = Align(IntptrSize);
7919         VAArgOffset = alignTo(VAArgOffset, ArgAlign);
7922             getShadowPtrForVAArgument(IRB, VAArgOffset - VAArgBase, ArgSize);
7924           Value *AShadowPtr, *AOriginPtr;
7925           std::tie(AShadowPtr, AOriginPtr) =
7926               MSV.getShadowOriginPtr(A, IRB, IRB.getInt8Ty(),
7936         Type *ArgTy = A->getType();
7942         uint64_t ArgSize = DL.getTypeAllocSize(ArgTy);
7949           ArgAlign = Align(DL.getTypeAllocSize(ElementTy));
7952           ArgAlign = Align(ArgSize);
7954         if (ArgAlign < IntptrSize)
7955           ArgAlign = Align(IntptrSize);
7956         VAArgOffset = alignTo(VAArgOffset, ArgAlign);
7957         if (DL.isBigEndian()) {
7960           if (ArgSize < IntptrSize)
7961             VAArgOffset += (IntptrSize - ArgSize);
7964         Base = getShadowPtrForVAArgument(IRB, VAArgOffset - VAArgBase,
7970         VAArgOffset += ArgSize;
7977         ConstantInt::get(MS.IntptrTy, VAArgOffset - VAArgBase);
7980     IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
7983   void finalizeInstrumentation() override {
7984     assert(!VAArgSize && !VAArgTLSCopy &&
7985            "finalizeInstrumentation called twice");
7987     VAArgSize = IRB.CreateLoad(MS.IntptrTy, MS.VAArgOverflowSizeTLS);
7988     Value *CopySize = VAArgSize;
7990     if (!VAStartInstrumentationList.empty()) {
7994       VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
8000           Intrinsic::umin, CopySize,
8008     for (CallInst *OrigInst : VAStartInstrumentationList) {
8009       NextNodeIRBuilder IRB(OrigInst);
8010       Value *VAListTag = OrigInst->getArgOperand(0);
8012       Value *RegSaveAreaSize = CopySize;
8016           IRB.CreateAdd(RegSaveAreaPtrPtr, ConstantInt::get(MS.IntptrTy, 8));
8020           Intrinsic::umin, CopySize, ConstantInt::get(MS.IntptrTy, 32));
8022       RegSaveAreaPtrPtr = IRB.CreateIntToPtr(RegSaveAreaPtrPtr, MS.PtrTy);
8025       const DataLayout &DL = F.getDataLayout();
8026       unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
8030       Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
8031       std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
8032           MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
8034       IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy,
8035                        Alignment, RegSaveAreaSize);
8037       RegSaveAreaShadowPtr =
8040           ConstantInt::get(MS.IntptrTy, 32));
8045           ConstantInt::get(MS.IntptrTy, 32), Alignment);
8050       Value *OverflowAreaSize = IRB.CreateSub(CopySize, RegSaveAreaSize);
8053       OverflowAreaPtrPtr =
8054           IRB.CreateAdd(OverflowAreaPtrPtr, ConstantInt::get(MS.IntptrTy, 4));
8055       OverflowAreaPtrPtr = IRB.CreateIntToPtr(OverflowAreaPtrPtr, MS.PtrTy);
8057       Value *OverflowAreaPtr = IRB.CreateLoad(MS.PtrTy, OverflowAreaPtrPtr);
8059       Value *OverflowAreaShadowPtr, *OverflowAreaOriginPtr;
8060       std::tie(OverflowAreaShadowPtr, OverflowAreaOriginPtr) =
8061           MSV.getShadowOriginPtr(OverflowAreaPtr, IRB, IRB.getInt8Ty(),
8064       Value *OverflowVAArgTLSCopyPtr =
8066       OverflowVAArgTLSCopyPtr =
8067           IRB.CreateAdd(OverflowVAArgTLSCopyPtr, RegSaveAreaSize);
8069       OverflowVAArgTLSCopyPtr =
8072           OverflowVAArgTLSCopyPtr, Alignment, OverflowAreaSize);
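// SystemZ (s390x) va_arg helper. The constants that open the next struct
// encode the z/Architecture ELF ABI: argument GPRs r2-r6 occupy bytes 16-56
// of the 160-byte register save area, argument FPRs f0/f2/f4/f6 occupy
// bytes 128-160, overflow arguments start at byte 160, and the 32-byte
// va_list tag keeps the overflow_arg_area pointer at offset 16 and the
// reg_save_area pointer at offset 24.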
8079 struct VarArgSystemZHelper : public VarArgHelperBase {
8080   static const unsigned SystemZGpOffset = 16;
8081   static const unsigned SystemZGpEndOffset = 56;
8082   static const unsigned SystemZFpOffset = 128;
8083   static const unsigned SystemZFpEndOffset = 160;
8084   static const unsigned SystemZMaxVrArgs = 8;
8085   static const unsigned SystemZRegSaveAreaSize = 160;
8086   static const unsigned SystemZOverflowOffset = 160;
8087   static const unsigned SystemZVAListTagSize = 32;
8088   static const unsigned SystemZOverflowArgAreaPtrOffset = 16;
8089   static const unsigned SystemZRegSaveAreaPtrOffset = 24;
8091   bool IsSoftFloatABI;
8092   AllocaInst *VAArgTLSCopy = nullptr;
8093   AllocaInst *VAArgTLSOriginCopy = nullptr;
8094   Value *VAArgOverflowSize = nullptr;
8096   enum class ArgKind {
8104   enum class ShadowExtension { None, Zero, Sign };
8106   VarArgSystemZHelper(Function &F, MemorySanitizer &MS,
8107                       MemorySanitizerVisitor &MSV)
8108       : VarArgHelperBase(F, MS, MSV, SystemZVAListTagSize),
8109         IsSoftFloatABI(F.getFnAttribute("use-soft-float").getValueAsBool()) {}
8111   ArgKind classifyArgument(Type *T) {
8118     if (T->isIntegerTy(128) || T->isFP128Ty())
8119       return ArgKind::Indirect;
8120     if (T->isFloatingPointTy())
8121       return IsSoftFloatABI ? ArgKind::GeneralPurpose : ArgKind::FloatingPoint;
8122     if (T->isIntegerTy() || T->isPointerTy())
8123       return ArgKind::GeneralPurpose;
8124     if (T->isVectorTy())
8125       return ArgKind::Vector;
8126     return ArgKind::Memory;
8129   ShadowExtension getShadowExtension(const CallBase &CB, unsigned ArgNo) {
8139       return ShadowExtension::Zero;
8143       return ShadowExtension::Sign;
8145     return ShadowExtension::None;
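// Illustration of the GapSize logic in visitCallBase below: s390x is
// big-endian, so a value narrower than its 8-byte slot occupies the slot's
// rightmost bytes. An i32 passed in a GPR with no sign/zero extension thus
// gets its shadow stored at GpOffset + 4 (GapSize = 8 - 4), whereas an
// extended argument gets a full 8 bytes of extended shadow at GpOffset.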
8148   void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
8149     unsigned GpOffset = SystemZGpOffset;
8150     unsigned FpOffset = SystemZFpOffset;
8151     unsigned VrIndex = 0;
8152     unsigned OverflowOffset = SystemZOverflowOffset;
8153     const DataLayout &DL = F.getDataLayout();
8159       ArgKind AK = classifyArgument(T);
8160       if (AK == ArgKind::Indirect) {
8162         AK = ArgKind::GeneralPurpose;
8164       if (AK == ArgKind::GeneralPurpose && GpOffset >= SystemZGpEndOffset)
8165         AK = ArgKind::Memory;
8166       if (AK == ArgKind::FloatingPoint && FpOffset >= SystemZFpEndOffset)
8167         AK = ArgKind::Memory;
8168       if (AK == ArgKind::Vector && (VrIndex >= SystemZMaxVrArgs || !IsFixed))
8169         AK = ArgKind::Memory;
8170       Value *ShadowBase = nullptr;
8171       Value *OriginBase = nullptr;
8172       ShadowExtension SE = ShadowExtension::None;
8174       case ArgKind::GeneralPurpose: {
8176         uint64_t ArgSize = 8;
8179           SE = getShadowExtension(CB, ArgNo);
8180           uint64_t GapSize = 0;
8181           if (SE == ShadowExtension::None) {
8182             uint64_t ArgAllocSize = DL.getTypeAllocSize(T);
8183             assert(ArgAllocSize <= ArgSize);
8184             GapSize = ArgSize - ArgAllocSize;
8186           ShadowBase = getShadowAddrForVAArgument(IRB, GpOffset + GapSize);
8187           if (MS.TrackOrigins)
8188             OriginBase = getOriginPtrForVAArgument(IRB, GpOffset + GapSize);
8190         GpOffset += ArgSize;
8196       case ArgKind::FloatingPoint: {
8198         uint64_t ArgSize = 8;
8205           ShadowBase = getShadowAddrForVAArgument(IRB, FpOffset);
8206           if (MS.TrackOrigins)
8207             OriginBase = getOriginPtrForVAArgument(IRB, FpOffset);
8209         FpOffset += ArgSize;
8215       case ArgKind::Vector: {
8222       case ArgKind::Memory: {
8227         uint64_t ArgAllocSize = DL.getTypeAllocSize(T);
8228         uint64_t ArgSize = alignTo(ArgAllocSize, 8);
8230           SE = getShadowExtension(CB, ArgNo);
8232               SE == ShadowExtension::None ? ArgSize - ArgAllocSize : 0;
8234               getShadowAddrForVAArgument(IRB, OverflowOffset + GapSize);
8235           if (MS.TrackOrigins)
8237               getOriginPtrForVAArgument(IRB, OverflowOffset + GapSize);
8238           OverflowOffset += ArgSize;
8245       case ArgKind::Indirect:
8248       if (ShadowBase == nullptr)
8250       Value *Shadow = MSV.getShadow(A);
8251       if (SE != ShadowExtension::None)
8252         Shadow = MSV.CreateShadowCast(IRB, Shadow, IRB.getInt64Ty(),
8253                                       SE == ShadowExtension::Sign);
8254       ShadowBase = IRB.CreateIntToPtr(ShadowBase, MS.PtrTy, "_msarg_va_s");
8256       if (MS.TrackOrigins) {
8257         Value *Origin = MSV.getOrigin(A);
8258         TypeSize StoreSize = DL.getTypeStoreSize(Shadow->getType());
8259         MSV.paintOrigin(IRB, Origin, OriginBase, StoreSize,
8263     Constant *OverflowSize = ConstantInt::get(
8264         IRB.getInt64Ty(), OverflowOffset - SystemZOverflowOffset);
8265     IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
8272         ConstantInt::get(MS.IntptrTy, SystemZRegSaveAreaPtrOffset)),
8275     Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
8277     std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
8278         MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(), Alignment,
8283     unsigned RegSaveAreaSize =
8284         IsSoftFloatABI ? SystemZGpEndOffset : SystemZRegSaveAreaSize;
8285     IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
8287     if (MS.TrackOrigins)
8288       IRB.CreateMemCpy(RegSaveAreaOriginPtr, Alignment, VAArgTLSOriginCopy,
8289                        Alignment, RegSaveAreaSize);
8298         ConstantInt::get(MS.IntptrTy, SystemZOverflowArgAreaPtrOffset)),
8300     Value *OverflowArgAreaPtr = IRB.CreateLoad(MS.PtrTy, OverflowArgAreaPtrPtr);
8301     Value *OverflowArgAreaShadowPtr, *OverflowArgAreaOriginPtr;
8303     std::tie(OverflowArgAreaShadowPtr, OverflowArgAreaOriginPtr) =
8304         MSV.getShadowOriginPtr(OverflowArgAreaPtr, IRB, IRB.getInt8Ty(),
8307                            SystemZOverflowOffset);
8308     IRB.CreateMemCpy(OverflowArgAreaShadowPtr, Alignment, SrcPtr, Alignment,
8310     if (MS.TrackOrigins) {
8312                                SystemZOverflowOffset);
8313       IRB.CreateMemCpy(OverflowArgAreaOriginPtr, Alignment, SrcPtr, Alignment,
8318   void finalizeInstrumentation() override {
8319     assert(!VAArgOverflowSize && !VAArgTLSCopy &&
8320            "finalizeInstrumentation called twice");
8321     if (!VAStartInstrumentationList.empty()) {
8328           IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, SystemZOverflowOffset),
8330       VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
8336           Intrinsic::umin, CopySize,
8340       if (MS.TrackOrigins) {
8341         VAArgTLSOriginCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
8350     for (CallInst *OrigInst : VAStartInstrumentationList) {
8351       NextNodeIRBuilder IRB(OrigInst);
8352       Value *VAListTag = OrigInst->getArgOperand(0);
8353       copyRegSaveArea(IRB, VAListTag);
8354       copyOverflowArea(IRB, VAListTag);
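// i386 va_arg helper. All variadic arguments are passed on the stack, and
// va_list is a single 4-byte pointer to the next argument (hence
// VAListTagSize = 4), so one shadow copy of the argument area suffices.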
8360 struct VarArgI386Helper : public VarArgHelperBase {
8361   AllocaInst *VAArgTLSCopy = nullptr;
8362   Value *VAArgSize = nullptr;
8364   VarArgI386Helper(Function &F, MemorySanitizer &MS,
8365                    MemorySanitizerVisitor &MSV)
8366       : VarArgHelperBase(F, MS, MSV, 4) {}
8368   void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
8369     const DataLayout &DL = F.getDataLayout();
8370     unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
8371     unsigned VAArgOffset = 0;
8374       bool IsByVal = CB.paramHasAttr(ArgNo, Attribute::ByVal);
8376         assert(A->getType()->isPointerTy());
8378         uint64_t ArgSize = DL.getTypeAllocSize(RealTy);
8380         if (ArgAlign < IntptrSize)
8381           ArgAlign = Align(IntptrSize);
8382         VAArgOffset = alignTo(VAArgOffset, ArgAlign);
8384         Value *Base = getShadowPtrForVAArgument(IRB, VAArgOffset, ArgSize);
8386           Value *AShadowPtr, *AOriginPtr;
8387           std::tie(AShadowPtr, AOriginPtr) =
8388               MSV.getShadowOriginPtr(A, IRB, IRB.getInt8Ty(),
8398         uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
8400         VAArgOffset = alignTo(VAArgOffset, ArgAlign);
8401         if (DL.isBigEndian()) {
8404           if (ArgSize < IntptrSize)
8405             VAArgOffset += (IntptrSize - ArgSize);
8408         Base = getShadowPtrForVAArgument(IRB, VAArgOffset, ArgSize);
8411         VAArgOffset += ArgSize;
8417     Constant *TotalVAArgSize = ConstantInt::get(MS.IntptrTy, VAArgOffset);
8420     IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
8423   void finalizeInstrumentation() override {
8424     assert(!VAArgSize && !VAArgTLSCopy &&
8425            "finalizeInstrumentation called twice");
8427     VAArgSize = IRB.CreateLoad(MS.IntptrTy, MS.VAArgOverflowSizeTLS);
8428     Value *CopySize = VAArgSize;
8430     if (!VAStartInstrumentationList.empty()) {
8433       VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
8439           Intrinsic::umin, CopySize,
8447     for (CallInst *OrigInst : VAStartInstrumentationList) {
8448       NextNodeIRBuilder IRB(OrigInst);
8449       Value *VAListTag = OrigInst->getArgOperand(0);
8450       Type *RegSaveAreaPtrTy = PointerType::getUnqual(*MS.C);
8451       Value *RegSaveAreaPtrPtr =
8453           PointerType::get(*MS.C, 0));
8454       Value *RegSaveAreaPtr =
8455           IRB.CreateLoad(RegSaveAreaPtrTy, RegSaveAreaPtrPtr);
8456       Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
8457       const DataLayout &DL = F.getDataLayout();
8458       unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
8460       std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
8461           MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
8463       IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
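// Fallback for the remaining targets whose va_list is, like i386's,
// essentially a pointer to consecutively stacked arguments (ARM32, RISC-V,
// MIPS and LoongArch64, via the aliases that follow the struct). Each
// argument's shadow slot is rounded up to the pointer width; on big-endian
// targets, sub-word arguments are shifted to the high end of their slot to
// match where the bytes actually live.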
8471 struct VarArgGenericHelper : public VarArgHelperBase {
8472   AllocaInst *VAArgTLSCopy = nullptr;
8473   Value *VAArgSize = nullptr;
8475   VarArgGenericHelper(Function &F, MemorySanitizer &MS,
8476                       MemorySanitizerVisitor &MSV, const unsigned VAListTagSize)
8477       : VarArgHelperBase(F, MS, MSV, VAListTagSize) {}
8479   void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
8480     unsigned VAArgOffset = 0;
8481     const DataLayout &DL = F.getDataLayout();
8482     unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
8487       uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
8488       if (DL.isBigEndian()) {
8491         if (ArgSize < IntptrSize)
8492           VAArgOffset += (IntptrSize - ArgSize);
8494       Value *Base = getShadowPtrForVAArgument(IRB, VAArgOffset, ArgSize);
8495       VAArgOffset += ArgSize;
8496       VAArgOffset = alignTo(VAArgOffset, IntptrSize);
8502     Constant *TotalVAArgSize = ConstantInt::get(MS.IntptrTy, VAArgOffset);
8505     IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
8508   void finalizeInstrumentation() override {
8509     assert(!VAArgSize && !VAArgTLSCopy &&
8510            "finalizeInstrumentation called twice");
8512     VAArgSize = IRB.CreateLoad(MS.IntptrTy, MS.VAArgOverflowSizeTLS);
8513     Value *CopySize = VAArgSize;
8515     if (!VAStartInstrumentationList.empty()) {
8518       VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
8524           Intrinsic::umin, CopySize,
8532     for (CallInst *OrigInst : VAStartInstrumentationList) {
8533       NextNodeIRBuilder IRB(OrigInst);
8534       Value *VAListTag = OrigInst->getArgOperand(0);
8535       Type *RegSaveAreaPtrTy = PointerType::getUnqual(*MS.C);
8536       Value *RegSaveAreaPtrPtr =
8538           PointerType::get(*MS.C, 0));
8539       Value *RegSaveAreaPtr =
8540           IRB.CreateLoad(RegSaveAreaPtrTy, RegSaveAreaPtrPtr);
8541       Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
8542       const DataLayout &DL = F.getDataLayout();
8543       unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
8545       std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
8546           MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
8548       IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
8556 using VarArgARM32Helper = VarArgGenericHelper;
8557 using VarArgRISCVHelper = VarArgGenericHelper;
8558 using VarArgMIPSHelper = VarArgGenericHelper;
8559 using VarArgLoongArch64Helper = VarArgGenericHelper;
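// Used when no target-specific or generic helper applies: va_arg shadow is
// simply not tracked on such targets.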
8562 struct VarArgNoOpHelper : public VarArgHelper {
8563   VarArgNoOpHelper(Function &F, MemorySanitizer &MS,
8564                    MemorySanitizerVisitor &MSV) {}
8566   void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {}
8568   void visitVAStartInst(VAStartInst &I) override {}
8570   void visitVACopyInst(VACopyInst &I) override {}
8572   void finalizeInstrumentation() override {}
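// Select the va_arg helper from the module's target triple; the trailing
// integer passed to the VarArgGenericHelper instantiations is the target's
// VAListTagSize in bytes.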
8578                                  MemorySanitizerVisitor &Visitor) {
8581   Triple TargetTriple(Func.getParent()->getTargetTriple());
8584     return new VarArgI386Helper(Func, Msan, Visitor);
8587     return new VarArgAMD64Helper(Func, Msan, Visitor);
8589   if (TargetTriple.isARM())
8590     return new VarArgARM32Helper(Func, Msan, Visitor, 4);
8593     return new VarArgAArch64Helper(Func, Msan, Visitor);
8596     return new VarArgSystemZHelper(Func, Msan, Visitor);
8601     return new VarArgPowerPC32Helper(Func, Msan, Visitor);
8604     return new VarArgPowerPC64Helper(Func, Msan, Visitor);
8607     return new VarArgRISCVHelper(Func, Msan, Visitor, 4);
8610     return new VarArgRISCVHelper(Func, Msan, Visitor, 8);
8613     return new VarArgMIPSHelper(Func, Msan, Visitor, 4);
8616     return new VarArgMIPSHelper(Func, Msan, Visitor, 8);
8619     return new VarArgLoongArch64Helper(Func, Msan, Visitor,
8622   return new VarArgNoOpHelper(Func, Msan, Visitor);
8629   if (F.hasFnAttribute(Attribute::DisableSanitizerInstrumentation))
8632   MemorySanitizerVisitor Visitor(F, *this, TLI);
8639   return Visitor.runOnFunction();