//===- AddressSanitizer.cpp - memory error detector -----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address basic correctness
// checker.
// Details of the algorithm:
// https://github.com/google/sanitizers/wiki/AddressSanitizerAlgorithm
//
// FIXME: This sanitizer does not yet handle scalable vectors
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Instrumentation/AddressSanitizer.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Comdat.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Debug.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iomanip>
#include <limits>
#include <sstream>
#include <string>
#include <tuple>

using namespace llvm;

#define DEBUG_TYPE "asan"

static const uint64_t kDefaultShadowScale = 3;
static const uint64_t kDefaultShadowOffset32 = 1ULL << 29;
static const uint64_t kDefaultShadowOffset64 = 1ULL << 44;
static const uint64_t kDynamicShadowSentinel =
    std::numeric_limits<uint64_t>::max();
static const uint64_t kSmallX86_64ShadowOffsetBase = 0x7FFFFFFF; // < 2G.
static const uint64_t kSmallX86_64ShadowOffsetAlignMask = ~0xFFFULL;
static const uint64_t kLinuxKasan_ShadowOffset64 = 0xdffffc0000000000;
static const uint64_t kPPC64_ShadowOffset64 = 1ULL << 44;
static const uint64_t kSystemZ_ShadowOffset64 = 1ULL << 52;
static const uint64_t kMIPS_ShadowOffsetN32 = 1ULL << 29;
static const uint64_t kMIPS32_ShadowOffset32 = 0x0aaa0000;
static const uint64_t kMIPS64_ShadowOffset64 = 1ULL << 37;
static const uint64_t kAArch64_ShadowOffset64 = 1ULL << 36;
static const uint64_t kLoongArch64_ShadowOffset64 = 1ULL << 46;
static const uint64_t kRISCV64_ShadowOffset64 = kDynamicShadowSentinel;
static const uint64_t kFreeBSD_ShadowOffset32 = 1ULL << 30;
static const uint64_t kFreeBSD_ShadowOffset64 = 1ULL << 46;
static const uint64_t kFreeBSDAArch64_ShadowOffset64 = 1ULL << 47;
static const uint64_t kFreeBSDKasan_ShadowOffset64 = 0xdffff7c000000000;
static const uint64_t kNetBSD_ShadowOffset32 = 1ULL << 30;
static const uint64_t kNetBSD_ShadowOffset64 = 1ULL << 46;
static const uint64_t kNetBSDKasan_ShadowOffset64 = 0xdfff900000000000;
static const uint64_t kPS_ShadowOffset64 = 1ULL << 40;
static const uint64_t kWindowsShadowOffset32 = 3ULL << 28;
static const uint64_t kWebAssemblyShadowOffset = 0;

// The shadow memory space is dynamically allocated.
static const uint64_t kWindowsShadowOffset64 = kDynamicShadowSentinel;

static const size_t kMinStackMallocSize = 1 << 6;  // 64B
static const size_t kMaxStackMallocSize = 1 << 16; // 64K
static const uintptr_t kCurrentStackFrameMagic = 0x41B58AB3;
static const uintptr_t kRetiredStackFrameMagic = 0x45E0360E;

const char kAsanModuleCtorName[] = "asan.module_ctor";
const char kAsanModuleDtorName[] = "asan.module_dtor";
static const uint64_t kAsanCtorAndDtorPriority = 1;
// On Emscripten, the system needs more than one priority for constructors.
static const uint64_t kAsanEmscriptenCtorAndDtorPriority = 50;
const char kAsanReportErrorTemplate[] = "__asan_report_";
const char kAsanRegisterGlobalsName[] = "__asan_register_globals";
const char kAsanUnregisterGlobalsName[] = "__asan_unregister_globals";
const char kAsanRegisterImageGlobalsName[] = "__asan_register_image_globals";
const char kAsanUnregisterImageGlobalsName[] =
    "__asan_unregister_image_globals";
const char kAsanRegisterElfGlobalsName[] = "__asan_register_elf_globals";
const char kAsanUnregisterElfGlobalsName[] = "__asan_unregister_elf_globals";
const char kAsanPoisonGlobalsName[] = "__asan_before_dynamic_init";
const char kAsanUnpoisonGlobalsName[] = "__asan_after_dynamic_init";
const char kAsanInitName[] = "__asan_init";
const char kAsanVersionCheckNamePrefix[] = "__asan_version_mismatch_check_v";
const char kAsanPtrCmp[] = "__sanitizer_ptr_cmp";
const char kAsanPtrSub[] = "__sanitizer_ptr_sub";
const char kAsanHandleNoReturnName[] = "__asan_handle_no_return";
static const int kMaxAsanStackMallocSizeClass = 10;
const char kAsanStackMallocNameTemplate[] = "__asan_stack_malloc_";
const char kAsanStackMallocAlwaysNameTemplate[] =
    "__asan_stack_malloc_always_";
const char kAsanStackFreeNameTemplate[] = "__asan_stack_free_";
const char kAsanGenPrefix[] = "___asan_gen_";
const char kODRGenPrefix[] = "__odr_asan_gen_";
const char kSanCovGenPrefix[] = "__sancov_gen_";
const char kAsanSetShadowPrefix[] = "__asan_set_shadow_";
const char kAsanPoisonStackMemoryName[] = "__asan_poison_stack_memory";
const char kAsanUnpoisonStackMemoryName[] = "__asan_unpoison_stack_memory";

// ASan version script has __asan_* wildcard. Triple underscore prevents a
// linker (gold) warning about attempting to export a local symbol.
const char kAsanGlobalsRegisteredFlagName[] = "___asan_globals_registered";

const char kAsanOptionDetectUseAfterReturn[] =
    "__asan_option_detect_stack_use_after_return";

const char kAsanShadowMemoryDynamicAddress[] =
    "__asan_shadow_memory_dynamic_address";

const char kAsanAllocaPoison[] = "__asan_alloca_poison";
const char kAsanAllocasUnpoison[] = "__asan_allocas_unpoison";

const char kAMDGPUAddressSharedName[] = "llvm.amdgcn.is.shared";
const char kAMDGPUAddressPrivateName[] = "llvm.amdgcn.is.private";
const char kAMDGPUBallotName[] = "llvm.amdgcn.ballot.i64";
const char kAMDGPUUnreachableName[] = "llvm.amdgcn.unreachable";

// Access sizes are powers of two: 1, 2, 4, 8, 16.
static const size_t kNumberOfAccessSizes = 5;

static const uint64_t kAllocaRzSize = 32;

// ASanAccessInfo implementation constants.
constexpr size_t kCompileKernelShift = 0;
constexpr size_t kCompileKernelMask = 0x1;
constexpr size_t kAccessSizeIndexShift = 1;
constexpr size_t kAccessSizeIndexMask = 0xf;
constexpr size_t kIsWriteShift = 5;
constexpr size_t kIsWriteMask = 0x1;
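// Worked example for the packing above (illustrative, not part of the ABI):
// a userspace 8-byte write has access-size index log2(8) == 3, so it packs as
//   (1 << kIsWriteShift) | (3 << kAccessSizeIndexShift) == 0x26,
// with the CompileKernel bit left at 0.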

// Command-line flags.

static cl::opt<bool> ClEnableKasan(
    "asan-kernel", cl::desc("Enable KernelAddressSanitizer instrumentation"),
    cl::Hidden, cl::init(false));

static cl::opt<bool> ClRecover(
    "asan-recover",
    cl::desc("Enable recovery mode (continue-after-error)."),
    cl::Hidden, cl::init(false));

static cl::opt<bool> ClInsertVersionCheck(
    "asan-guard-against-version-mismatch",
    cl::desc("Guard against compiler/runtime version mismatch."), cl::Hidden,
    cl::init(true));

// This flag may need to be replaced with -f[no-]asan-reads.
static cl::opt<bool> ClInstrumentReads("asan-instrument-reads",
                                       cl::desc("instrument read instructions"),
                                       cl::Hidden, cl::init(true));

static cl::opt<bool> ClInstrumentWrites(
    "asan-instrument-writes", cl::desc("instrument write instructions"),
    cl::Hidden, cl::init(true));

static cl::opt<bool>
    ClUseStackSafety("asan-use-stack-safety", cl::Hidden, cl::init(true),
                     cl::desc("Use Stack Safety analysis results"),
                     cl::Optional);

static cl::opt<bool> ClInstrumentAtomics(
    "asan-instrument-atomics",
    cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden,
    cl::init(true));

static cl::opt<bool>
    ClInstrumentByval("asan-instrument-byval",
                      cl::desc("instrument byval call arguments"), cl::Hidden,
                      cl::init(true));

static cl::opt<bool> ClAlwaysSlowPath(
    "asan-always-slow-path",
    cl::desc("use instrumentation with slow path for all accesses"), cl::Hidden,
    cl::init(false));

static cl::opt<bool> ClForceDynamicShadow(
    "asan-force-dynamic-shadow",
    cl::desc("Load shadow address into a local variable for each function"),
    cl::Hidden, cl::init(false));

static cl::opt<bool>
    ClWithIfunc("asan-with-ifunc",
                cl::desc("Access dynamic shadow through an ifunc global on "
                         "platforms that support this"),
                cl::Hidden, cl::init(true));

static cl::opt<bool> ClWithIfuncSuppressRemat(
    "asan-with-ifunc-suppress-remat",
    cl::desc("Suppress rematerialization of dynamic shadow address by passing "
             "it through inline asm in prologue."),
    cl::Hidden, cl::init(true));

// This flag limits the number of instructions to be instrumented
// in any given BB. Normally, this should be set to unlimited (INT_MAX),
// but due to http://llvm.org/bugs/show_bug.cgi?id=12652 we temporarily
// set it to 10000.
static cl::opt<int> ClMaxInsnsToInstrumentPerBB(
    "asan-max-ins-per-bb", cl::init(10000),
    cl::desc("maximal number of instructions to instrument in any given BB"),
    cl::Hidden);

// This flag may need to be replaced with -f[no]asan-stack.
static cl::opt<bool> ClStack("asan-stack", cl::desc("Handle stack memory"),
                             cl::Hidden, cl::init(true));
static cl::opt<uint32_t> ClMaxInlinePoisoningSize(
    "asan-max-inline-poisoning-size",
    cl::desc(
        "Inline shadow poisoning for blocks up to the given size in bytes."),
    cl::Hidden, cl::init(64));

static cl::opt<AsanDetectStackUseAfterReturnMode> ClUseAfterReturn(
    "asan-use-after-return",
    cl::desc("Sets the mode of detection for stack-use-after-return."),
    cl::values(
        clEnumValN(AsanDetectStackUseAfterReturnMode::Never, "never",
                   "Never detect stack use after return."),
        clEnumValN(
            AsanDetectStackUseAfterReturnMode::Runtime, "runtime",
            "Detect stack use after return if "
            "binary flag 'ASAN_OPTIONS=detect_stack_use_after_return' is set."),
        clEnumValN(AsanDetectStackUseAfterReturnMode::Always, "always",
                   "Always detect stack use after return.")),
    cl::Hidden, cl::init(AsanDetectStackUseAfterReturnMode::Runtime));

static cl::opt<bool> ClRedzoneByvalArgs("asan-redzone-byval-args",
                                        cl::desc("Create redzones for byval "
                                                 "arguments (extra copy "
                                                 "required)"),
                                        cl::Hidden, cl::init(true));

static cl::opt<bool> ClUseAfterScope("asan-use-after-scope",
                                     cl::desc("Check stack-use-after-scope"),
                                     cl::Hidden, cl::init(false));

// This flag may need to be replaced with -f[no]asan-globals.
static cl::opt<bool> ClGlobals("asan-globals",
                               cl::desc("Handle global objects"), cl::Hidden,
                               cl::init(true));

static cl::opt<bool> ClInitializers("asan-initialization-order",
                                    cl::desc("Handle C++ initializer order"),
                                    cl::Hidden, cl::init(true));

static cl::opt<bool> ClInvalidPointerPairs(
    "asan-detect-invalid-pointer-pair",
    cl::desc("Instrument <, <=, >, >=, - with pointer operands"), cl::Hidden,
    cl::init(false));

static cl::opt<bool> ClInvalidPointerCmp(
    "asan-detect-invalid-pointer-cmp",
    cl::desc("Instrument <, <=, >, >= with pointer operands"), cl::Hidden,
    cl::init(false));

static cl::opt<bool> ClInvalidPointerSub(
    "asan-detect-invalid-pointer-sub",
    cl::desc("Instrument - operations with pointer operands"), cl::Hidden,
    cl::init(false));

static cl::opt<unsigned> ClRealignStack(
    "asan-realign-stack",
    cl::desc("Realign stack to the value of this flag (power of two)"),
    cl::Hidden, cl::init(32));

static cl::opt<int> ClInstrumentationWithCallsThreshold(
    "asan-instrumentation-with-call-threshold",
    cl::desc("If the function being instrumented contains more than "
             "this number of memory accesses, use callbacks instead of "
             "inline checks (-1 means never use callbacks)."),
    cl::Hidden, cl::init(7000));

static cl::opt<std::string> ClMemoryAccessCallbackPrefix(
    "asan-memory-access-callback-prefix",
    cl::desc("Prefix for memory access callbacks"), cl::Hidden,
    cl::init("__asan_"));

static cl::opt<bool> ClKasanMemIntrinCallbackPrefix(
    "asan-kernel-mem-intrinsic-prefix",
    cl::desc("Use prefix for memory intrinsics in KASAN mode"), cl::Hidden,
    cl::init(false));

static cl::opt<bool>
    ClInstrumentDynamicAllocas("asan-instrument-dynamic-allocas",
                               cl::desc("instrument dynamic allocas"),
                               cl::Hidden, cl::init(true));

static cl::opt<bool> ClSkipPromotableAllocas(
    "asan-skip-promotable-allocas",
    cl::desc("Do not instrument promotable allocas"), cl::Hidden,
    cl::init(true));

static cl::opt<AsanCtorKind> ClConstructorKind(
    "asan-constructor-kind",
    cl::desc("Sets the ASan constructor kind"),
    cl::values(clEnumValN(AsanCtorKind::None, "none", "No constructors"),
               clEnumValN(AsanCtorKind::Global, "global",
                          "Use global constructors")),
    cl::init(AsanCtorKind::Global), cl::Hidden);

// These flags allow changing the shadow mapping. The shadow mapping looks like
//   Shadow = (Mem >> scale) + offset
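// Worked example: with the default scale of 3 and the static Linux/x86-64
// offset 0x7fff8000 computed below, the shadow byte for address 0x400000 is at
//   (0x400000 >> 3) + 0x7fff8000 == 0x80078000,
// and each shadow byte describes 2^3 == 8 bytes of application memory.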

static cl::opt<int> ClMappingScale("asan-mapping-scale",
                                   cl::desc("scale of asan shadow mapping"),
                                   cl::Hidden, cl::init(0));

static cl::opt<uint64_t>
    ClMappingOffset("asan-mapping-offset",
                    cl::desc("offset of asan shadow mapping [EXPERIMENTAL]"),
                    cl::Hidden, cl::init(0));

// Optimization flags. Not user visible, used mostly for testing
// and benchmarking the tool.

static cl::opt<bool> ClOpt("asan-opt", cl::desc("Optimize instrumentation"),
                           cl::Hidden, cl::init(true));

static cl::opt<bool> ClOptimizeCallbacks("asan-optimize-callbacks",
                                         cl::desc("Optimize callbacks"),
                                         cl::Hidden, cl::init(false));

static cl::opt<bool> ClOptSameTemp(
    "asan-opt-same-temp", cl::desc("Instrument the same temp just once"),
    cl::Hidden, cl::init(true));

static cl::opt<bool> ClOptGlobals("asan-opt-globals",
                                  cl::desc("Don't instrument scalar globals"),
                                  cl::Hidden, cl::init(true));

static cl::opt<bool> ClOptStack(
    "asan-opt-stack", cl::desc("Don't instrument scalar stack variables"),
    cl::Hidden, cl::init(false));

static cl::opt<bool> ClDynamicAllocaStack(
    "asan-stack-dynamic-alloca",
    cl::desc("Use dynamic alloca to represent stack variables"), cl::Hidden,
    cl::init(true));

static cl::opt<uint32_t> ClForceExperiment(
    "asan-force-experiment",
    cl::desc("Force optimization experiment (for testing)"), cl::Hidden,
    cl::init(0));

static cl::opt<bool>
    ClUsePrivateAlias("asan-use-private-alias",
                      cl::desc("Use private aliases for global variables"),
                      cl::Hidden, cl::init(true));

static cl::opt<bool>
    ClUseOdrIndicator("asan-use-odr-indicator",
                      cl::desc("Use odr indicators to improve ODR reporting"),
                      cl::Hidden, cl::init(true));

static cl::opt<bool>
    ClUseGlobalsGC("asan-globals-live-support",
                   cl::desc("Use linker features to support dead "
                            "code stripping of globals"),
                   cl::Hidden, cl::init(true));

// This is on by default even though there is a bug in gold:
// https://sourceware.org/bugzilla/show_bug.cgi?id=19002
static cl::opt<bool>
    ClWithComdat("asan-with-comdat",
                 cl::desc("Place ASan constructors in comdat sections"),
                 cl::Hidden, cl::init(true));

static cl::opt<AsanDtorKind> ClOverrideDestructorKind(
    "asan-destructor-kind",
    cl::desc("Sets the ASan destructor kind. The default is to use the value "
             "provided to the pass constructor"),
    cl::values(clEnumValN(AsanDtorKind::None, "none", "No destructors"),
               clEnumValN(AsanDtorKind::Global, "global",
                          "Use global destructors")),
    cl::init(AsanDtorKind::Invalid), cl::Hidden);

// Debug flags.

static cl::opt<int> ClDebug("asan-debug", cl::desc("debug"), cl::Hidden,
                            cl::init(0));

static cl::opt<int> ClDebugStack("asan-debug-stack", cl::desc("debug stack"),
                                 cl::Hidden, cl::init(0));

static cl::opt<std::string> ClDebugFunc("asan-debug-func", cl::Hidden,
                                        cl::desc("Debug func"));

static cl::opt<int> ClDebugMin("asan-debug-min", cl::desc("Debug min inst"),
                               cl::Hidden, cl::init(-1));

static cl::opt<int> ClDebugMax("asan-debug-max", cl::desc("Debug max inst"),
                               cl::Hidden, cl::init(-1));

STATISTIC(NumInstrumentedReads, "Number of instrumented reads");
STATISTIC(NumInstrumentedWrites, "Number of instrumented writes");
STATISTIC(NumOptimizedAccessesToGlobalVar,
          "Number of optimized accesses to global vars");
STATISTIC(NumOptimizedAccessesToStackVar,
          "Number of optimized accesses to stack vars");

namespace {

/// This struct defines the shadow mapping using the rule:
///   shadow = (mem >> Scale) ADD-or-OR Offset.
/// If InGlobal is true, then
///   extern char __asan_shadow[];
///   shadow = (mem >> Scale) + &__asan_shadow
struct ShadowMapping {
  int Scale;
  uint64_t Offset;
  bool OrShadowOffset;
  bool InGlobal;
};

} // end anonymous namespace

static ShadowMapping getShadowMapping(const Triple &TargetTriple, int LongSize,
                                      bool IsKasan) {
  bool IsAndroid = TargetTriple.isAndroid();
  bool IsIOS = TargetTriple.isiOS() || TargetTriple.isWatchOS() ||
               TargetTriple.isDriverKit();
  bool IsMacOS = TargetTriple.isMacOSX();
  bool IsFreeBSD = TargetTriple.isOSFreeBSD();
  bool IsNetBSD = TargetTriple.isOSNetBSD();
  bool IsPS = TargetTriple.isPS();
  bool IsLinux = TargetTriple.isOSLinux();
  bool IsPPC64 = TargetTriple.getArch() == Triple::ppc64 ||
                 TargetTriple.getArch() == Triple::ppc64le;
  bool IsSystemZ = TargetTriple.getArch() == Triple::systemz;
  bool IsX86_64 = TargetTriple.getArch() == Triple::x86_64;
  bool IsMIPSN32ABI = TargetTriple.isABIN32();
  bool IsMIPS32 = TargetTriple.isMIPS32();
  bool IsMIPS64 = TargetTriple.isMIPS64();
  bool IsArmOrThumb = TargetTriple.isARM() || TargetTriple.isThumb();
  bool IsAArch64 = TargetTriple.getArch() == Triple::aarch64 ||
                   TargetTriple.getArch() == Triple::aarch64_be;
  bool IsLoongArch64 = TargetTriple.isLoongArch64();
  bool IsRISCV64 = TargetTriple.getArch() == Triple::riscv64;
  bool IsWindows = TargetTriple.isOSWindows();
  bool IsFuchsia = TargetTriple.isOSFuchsia();
  bool IsAMDGPU = TargetTriple.isAMDGPU();
  bool IsHaiku = TargetTriple.isOSHaiku();
  bool IsWasm = TargetTriple.isWasm();

  ShadowMapping Mapping;

  Mapping.Scale = kDefaultShadowScale;
  if (ClMappingScale.getNumOccurrences() > 0) {
    Mapping.Scale = ClMappingScale;
  }

  if (LongSize == 32) {
    if (IsAndroid)
      Mapping.Offset = kDynamicShadowSentinel;
    else if (IsMIPSN32ABI)
      Mapping.Offset = kMIPS_ShadowOffsetN32;
    else if (IsMIPS32)
      Mapping.Offset = kMIPS32_ShadowOffset32;
    else if (IsFreeBSD)
      Mapping.Offset = kFreeBSD_ShadowOffset32;
    else if (IsNetBSD)
      Mapping.Offset = kNetBSD_ShadowOffset32;
    else if (IsIOS)
      Mapping.Offset = kDynamicShadowSentinel;
    else if (IsWindows)
      Mapping.Offset = kWindowsShadowOffset32;
    else if (IsWasm)
      Mapping.Offset = kWebAssemblyShadowOffset;
    else
      Mapping.Offset = kDefaultShadowOffset32;
  } else { // LongSize == 64
    // Fuchsia is always PIE, which means that the beginning of the address
    // space is always available.
    if (IsFuchsia)
      Mapping.Offset = 0;
    else if (IsPPC64)
      Mapping.Offset = kPPC64_ShadowOffset64;
    else if (IsSystemZ)
      Mapping.Offset = kSystemZ_ShadowOffset64;
    else if (IsFreeBSD && IsAArch64)
      Mapping.Offset = kFreeBSDAArch64_ShadowOffset64;
    else if (IsFreeBSD && !IsMIPS64) {
      if (IsKasan)
        Mapping.Offset = kFreeBSDKasan_ShadowOffset64;
      else
        Mapping.Offset = kFreeBSD_ShadowOffset64;
    } else if (IsNetBSD) {
      if (IsKasan)
        Mapping.Offset = kNetBSDKasan_ShadowOffset64;
      else
        Mapping.Offset = kNetBSD_ShadowOffset64;
    } else if (IsPS)
      Mapping.Offset = kPS_ShadowOffset64;
    else if (IsLinux && IsX86_64) {
      if (IsKasan)
        Mapping.Offset = kLinuxKasan_ShadowOffset64;
      else
        Mapping.Offset = (kSmallX86_64ShadowOffsetBase &
                          (kSmallX86_64ShadowOffsetAlignMask << Mapping.Scale));
    } else if (IsWindows && IsX86_64) {
      Mapping.Offset = kWindowsShadowOffset64;
    } else if (IsMIPS64)
      Mapping.Offset = kMIPS64_ShadowOffset64;
    else if (IsIOS)
      Mapping.Offset = kDynamicShadowSentinel;
    else if (IsMacOS && IsAArch64)
      Mapping.Offset = kDynamicShadowSentinel;
    else if (IsAArch64)
      Mapping.Offset = kAArch64_ShadowOffset64;
    else if (IsLoongArch64)
      Mapping.Offset = kLoongArch64_ShadowOffset64;
    else if (IsRISCV64)
      Mapping.Offset = kRISCV64_ShadowOffset64;
    else if (IsAMDGPU)
      Mapping.Offset = (kSmallX86_64ShadowOffsetBase &
                        (kSmallX86_64ShadowOffsetAlignMask << Mapping.Scale));
    else if (IsHaiku && IsX86_64)
      Mapping.Offset = (kSmallX86_64ShadowOffsetBase &
                        (kSmallX86_64ShadowOffsetAlignMask << Mapping.Scale));
    else
      Mapping.Offset = kDefaultShadowOffset64;
  }

  if (ClForceDynamicShadow) {
    Mapping.Offset = kDynamicShadowSentinel;
  }

  if (ClMappingOffset.getNumOccurrences() > 0) {
    Mapping.Offset = ClMappingOffset;
  }

  // OR-ing the shadow offset is more efficient (at least on x86) when the
  // offset is a power of two, but on ppc64 and loongarch64 we have to use add
  // since the shadow offset is not necessarily 1/8-th of the address space. On
  // SystemZ, we could OR the constant in a single instruction, but it's more
  // efficient to load it once and use indexed addressing.
  Mapping.OrShadowOffset = !IsAArch64 && !IsPPC64 && !IsSystemZ && !IsPS &&
                           !IsRISCV64 && !IsLoongArch64 &&
                           !(Mapping.Offset & (Mapping.Offset - 1)) &&
                           Mapping.Offset != kDynamicShadowSentinel;
  bool IsAndroidWithIfuncSupport =
      IsAndroid && !TargetTriple.isAndroidVersionLT(21);
  Mapping.InGlobal = ClWithIfunc && IsAndroidWithIfuncSupport && IsArmOrThumb;

  return Mapping;
}

namespace llvm {
void getAddressSanitizerParams(const Triple &TargetTriple, int LongSize,
                               bool IsKasan, uint64_t *ShadowBase,
                               int *MappingScale, bool *OrShadowOffset) {
  auto Mapping = getShadowMapping(TargetTriple, LongSize, IsKasan);
  *ShadowBase = Mapping.Offset;
  *MappingScale = Mapping.Scale;
  *OrShadowOffset = Mapping.OrShadowOffset;
}

void removeASanIncompatibleFnAttributes(Function &F, bool ReadsArgMem) {
  // Sanitizer checks read from shadow, which invalidates memory(argmem: *).
  //
  // This is not only true for sanitized functions, because AttrInfer can
  // infer those attributes on libc functions, which is not true if those
  // are instrumented (Android) or intercepted.
  //
  // We might want to model ASan shadow memory more opaquely to get rid of
  // this problem altogether, by hiding the shadow memory write in an
  // intrinsic, essentially like in the AArch64StackTagging pass. But that's
  // for another day.

  // The API is weird. `onlyReadsMemory` actually means "does not write", and
  // `onlyWritesMemory` actually means "does not read". So we reconstruct
  // "accesses memory" && "does not read" <=> "writes".
  bool Changed = false;
  if (!F.doesNotAccessMemory()) {
    bool WritesMemory = !F.onlyReadsMemory();
    bool ReadsMemory = !F.onlyWritesMemory();
    if ((WritesMemory && !ReadsMemory) || F.onlyAccessesArgMemory()) {
      F.removeFnAttr(Attribute::Memory);
      Changed = true;
    }
  }
  if (ReadsArgMem) {
    for (Argument &A : F.args()) {
      if (A.hasAttribute(Attribute::WriteOnly)) {
        A.removeAttr(Attribute::WriteOnly);
        Changed = true;
      }
    }
  }
  if (Changed) {
    // nobuiltin makes sure later passes don't restore assumptions about
    // the function.
    F.addFnAttr(Attribute::NoBuiltin);
  }
}

ASanAccessInfo::ASanAccessInfo(int32_t Packed)
    : Packed(Packed),
      AccessSizeIndex((Packed >> kAccessSizeIndexShift) & kAccessSizeIndexMask),
      IsWrite((Packed >> kIsWriteShift) & kIsWriteMask),
      CompileKernel((Packed >> kCompileKernelShift) & kCompileKernelMask) {}

ASanAccessInfo::ASanAccessInfo(bool IsWrite, bool CompileKernel,
                               uint8_t AccessSizeIndex)
    : Packed((IsWrite << kIsWriteShift) |
             (CompileKernel << kCompileKernelShift) |
             (AccessSizeIndex << kAccessSizeIndexShift)),
      AccessSizeIndex(AccessSizeIndex), IsWrite(IsWrite),
      CompileKernel(CompileKernel) {}

} // namespace llvm

static uint64_t getRedzoneSizeForScale(int MappingScale) {
  // Redzone used for stack and globals is at least 32 bytes.
  // For scales 6 and 7, the redzone has to be 64 and 128 bytes respectively.
  return std::max(32U, 1U << MappingScale);
}
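// E.g. for the helper above, the default scale of 3 keeps the 32-byte
// minimum, while scales 6 and 7 yield 64- and 128-byte redzones (1U << 6 and
// 1U << 7).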

static uint64_t GetCtorAndDtorPriority(Triple &TargetTriple) {
  if (TargetTriple.isOSEmscripten()) {
    return kAsanEmscriptenCtorAndDtorPriority;
  } else {
    return kAsanCtorAndDtorPriority;
  }
}

static Twine genName(StringRef suffix) {
  return Twine(kAsanGenPrefix) + suffix;
}

namespace {
/// Helper RAII class to post-process inserted asan runtime calls during a
/// pass on a single Function. Upon end of scope, detects and applies the
/// required funclet OpBundle.
class RuntimeCallInserter {
  Function *OwnerFn = nullptr;
  bool TrackInsertedCalls = false;
  SmallVector<CallInst *> InsertedCalls;

public:
  RuntimeCallInserter(Function &Fn) : OwnerFn(&Fn) {
    if (Fn.hasPersonalityFn()) {
      auto Personality = classifyEHPersonality(Fn.getPersonalityFn());
      if (isScopedEHPersonality(Personality))
        TrackInsertedCalls = true;
    }
  }

  ~RuntimeCallInserter() {
    if (InsertedCalls.empty())
      return;
    assert(TrackInsertedCalls && "Calls were wrongly tracked");

    DenseMap<BasicBlock *, ColorVector> BlockColors = colorEHFunclets(*OwnerFn);
    for (CallInst *CI : InsertedCalls) {
      BasicBlock *BB = CI->getParent();
      assert(BB && "Instruction doesn't belong to a BasicBlock");
      assert(BB->getParent() == OwnerFn &&
             "Instruction doesn't belong to the expected Function!");

      ColorVector &Colors = BlockColors[BB];
      // funclet opbundles are only valid in monochromatic BBs.
      // Note that unreachable BBs are seen as colorless by colorEHFunclets()
      // and will be DCE'ed later.
      if (Colors.empty())
        continue;
      if (Colors.size() != 1) {
        OwnerFn->getContext().emitError(
            "Instruction's BasicBlock is not monochromatic");
        continue;
      }

      BasicBlock *Color = Colors.front();
      BasicBlock::iterator EHPadIt = Color->getFirstNonPHIIt();

      if (EHPadIt != Color->end() && EHPadIt->isEHPad()) {
        // Replace CI with a clone with an added funclet OperandBundle.
        OperandBundleDef OB("funclet", &*EHPadIt);
        auto *NewCall = CallBase::addOperandBundle(CI, LLVMContext::OB_funclet,
                                                   OB, CI->getIterator());
        NewCall->copyMetadata(*CI);
        CI->replaceAllUsesWith(NewCall);
        CI->eraseFromParent();
      }
    }
  }

  CallInst *createRuntimeCall(IRBuilder<> &IRB, FunctionCallee Callee,
                              ArrayRef<Value *> Args = {},
                              const Twine &Name = "") {
    assert(IRB.GetInsertBlock()->getParent() == OwnerFn);

    CallInst *Inst = IRB.CreateCall(Callee, Args, Name, nullptr);
    if (TrackInsertedCalls)
      InsertedCalls.push_back(Inst);
    return Inst;
  }
};

/// AddressSanitizer: instrument the code in module to find memory bugs.
struct AddressSanitizer {
  AddressSanitizer(Module &M, const StackSafetyGlobalInfo *SSGI,
                   int InstrumentationWithCallsThreshold,
                   uint32_t MaxInlinePoisoningSize, bool CompileKernel = false,
                   bool Recover = false, bool UseAfterScope = false,
                   AsanDetectStackUseAfterReturnMode UseAfterReturn =
                       AsanDetectStackUseAfterReturnMode::Runtime)
      : M(M),
        CompileKernel(ClEnableKasan.getNumOccurrences() > 0 ? ClEnableKasan
                                                            : CompileKernel),
        Recover(ClRecover.getNumOccurrences() > 0 ? ClRecover : Recover),
        UseAfterScope(UseAfterScope || ClUseAfterScope),
        UseAfterReturn(ClUseAfterReturn.getNumOccurrences() ? ClUseAfterReturn
                                                            : UseAfterReturn),
        SSGI(SSGI),
        InstrumentationWithCallsThreshold(
            ClInstrumentationWithCallsThreshold.getNumOccurrences() > 0
                ? ClInstrumentationWithCallsThreshold
                : InstrumentationWithCallsThreshold),
        MaxInlinePoisoningSize(ClMaxInlinePoisoningSize.getNumOccurrences() > 0
                                   ? ClMaxInlinePoisoningSize
                                   : MaxInlinePoisoningSize) {
    C = &(M.getContext());
    DL = &M.getDataLayout();
    LongSize = M.getDataLayout().getPointerSizeInBits();
    IntptrTy = Type::getIntNTy(*C, LongSize);
    PtrTy = PointerType::getUnqual(*C);
    Int32Ty = Type::getInt32Ty(*C);
    TargetTriple = M.getTargetTriple();

    Mapping = getShadowMapping(TargetTriple, LongSize, this->CompileKernel);

    assert(this->UseAfterReturn != AsanDetectStackUseAfterReturnMode::Invalid);
  }

  TypeSize getAllocaSizeInBytes(const AllocaInst &AI) const {
    return *AI.getAllocationSize(AI.getDataLayout());
  }

  /// Check if we want (and can) handle this alloca.
  bool isInterestingAlloca(const AllocaInst &AI);

  bool ignoreAccess(Instruction *Inst, Value *Ptr);
  void getInterestingMemoryOperands(
      Instruction *I, SmallVectorImpl<InterestingMemoryOperand> &Interesting,
      const TargetTransformInfo *TTI);

  void instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis,
                     InterestingMemoryOperand &O, bool UseCalls,
                     const DataLayout &DL, RuntimeCallInserter &RTCI);
  void instrumentPointerComparisonOrSubtraction(Instruction *I,
                                                RuntimeCallInserter &RTCI);
  void instrumentAddress(Instruction *OrigIns, Instruction *InsertBefore,
                         Value *Addr, MaybeAlign Alignment,
                         uint32_t TypeStoreSize, bool IsWrite,
                         Value *SizeArgument, bool UseCalls, uint32_t Exp,
                         RuntimeCallInserter &RTCI);
  Instruction *instrumentAMDGPUAddress(Instruction *OrigIns,
                                       Instruction *InsertBefore, Value *Addr,
                                       uint32_t TypeStoreSize, bool IsWrite,
                                       Value *SizeArgument);
  Instruction *genAMDGPUReportBlock(IRBuilder<> &IRB, Value *Cond,
                                    bool Recover);
  void instrumentUnusualSizeOrAlignment(Instruction *I,
                                        Instruction *InsertBefore, Value *Addr,
                                        TypeSize TypeStoreSize, bool IsWrite,
                                        Value *SizeArgument, bool UseCalls,
                                        uint32_t Exp,
                                        RuntimeCallInserter &RTCI);
  void instrumentMaskedLoadOrStore(AddressSanitizer *Pass, const DataLayout &DL,
                                   Type *IntptrTy, Value *Mask, Value *EVL,
                                   Value *Stride, Instruction *I, Value *Addr,
                                   MaybeAlign Alignment, unsigned Granularity,
                                   Type *OpType, bool IsWrite,
                                   Value *SizeArgument, bool UseCalls,
                                   uint32_t Exp, RuntimeCallInserter &RTCI);
  Value *createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong,
                           Value *ShadowValue, uint32_t TypeStoreSize);
  Instruction *generateCrashCode(Instruction *InsertBefore, Value *Addr,
                                 bool IsWrite, size_t AccessSizeIndex,
                                 Value *SizeArgument, uint32_t Exp,
                                 RuntimeCallInserter &RTCI);
  void instrumentMemIntrinsic(MemIntrinsic *MI, RuntimeCallInserter &RTCI);
  Value *memToShadow(Value *Shadow, IRBuilder<> &IRB);
  bool suppressInstrumentationSiteForDebug(int &Instrumented);
  bool instrumentFunction(Function &F, const TargetLibraryInfo *TLI,
                          const TargetTransformInfo *TTI);
  bool maybeInsertAsanInitAtFunctionEntry(Function &F);
  bool maybeInsertDynamicShadowAtFunctionEntry(Function &F);
  void markEscapedLocalAllocas(Function &F);

private:
  friend struct FunctionStackPoisoner;

  void initializeCallbacks(const TargetLibraryInfo *TLI);

  bool LooksLikeCodeInBug11395(Instruction *I);
  bool GlobalIsLinkerInitialized(GlobalVariable *G);
  bool isSafeAccess(ObjectSizeOffsetVisitor &ObjSizeVis, Value *Addr,
                    TypeSize TypeStoreSize) const;

  /// Helper to cleanup per-function state.
  struct FunctionStateRAII {
    AddressSanitizer *Pass;

    FunctionStateRAII(AddressSanitizer *Pass) : Pass(Pass) {
      assert(Pass->ProcessedAllocas.empty() &&
             "last pass forgot to clear cache");
      assert(!Pass->LocalDynamicShadow);
    }

    ~FunctionStateRAII() {
      Pass->LocalDynamicShadow = nullptr;
      Pass->ProcessedAllocas.clear();
    }
  };

  Module &M;
  LLVMContext *C;
  const DataLayout *DL;
  Triple TargetTriple;
  int LongSize;
  bool CompileKernel;
  bool Recover;
  bool UseAfterScope;
  AsanDetectStackUseAfterReturnMode UseAfterReturn;
  Type *IntptrTy;
  Type *Int32Ty;
  PointerType *PtrTy;
  ShadowMapping Mapping;
  FunctionCallee AsanHandleNoReturnFunc;
  FunctionCallee AsanPtrCmpFunction, AsanPtrSubFunction;
  Constant *AsanShadowGlobal;

  // These arrays are indexed by AccessIsWrite, Experiment and log2(AccessSize).
  FunctionCallee AsanErrorCallback[2][2][kNumberOfAccessSizes];
  FunctionCallee AsanMemoryAccessCallback[2][2][kNumberOfAccessSizes];

  // These arrays are indexed by AccessIsWrite and Experiment.
  FunctionCallee AsanErrorCallbackSized[2][2];
  FunctionCallee AsanMemoryAccessCallbackSized[2][2];

  FunctionCallee AsanMemmove, AsanMemcpy, AsanMemset;
  Value *LocalDynamicShadow = nullptr;
  const StackSafetyGlobalInfo *SSGI;
  DenseMap<const AllocaInst *, bool> ProcessedAllocas;

  FunctionCallee AMDGPUAddressShared;
  FunctionCallee AMDGPUAddressPrivate;
  int InstrumentationWithCallsThreshold;
  uint32_t MaxInlinePoisoningSize;
};

class ModuleAddressSanitizer {
public:
  ModuleAddressSanitizer(Module &M, bool InsertVersionCheck,
                         bool CompileKernel = false, bool Recover = false,
                         bool UseGlobalsGC = true, bool UseOdrIndicator = true,
                         AsanDtorKind DestructorKind = AsanDtorKind::Global,
                         AsanCtorKind ConstructorKind = AsanCtorKind::Global)
      : M(M),
        CompileKernel(ClEnableKasan.getNumOccurrences() > 0 ? ClEnableKasan
                                                            : CompileKernel),
        InsertVersionCheck(ClInsertVersionCheck.getNumOccurrences() > 0
                               ? ClInsertVersionCheck
                               : InsertVersionCheck),
        Recover(ClRecover.getNumOccurrences() > 0 ? ClRecover : Recover),
        UseGlobalsGC(UseGlobalsGC && ClUseGlobalsGC && !this->CompileKernel),
        // Enable aliases as they should have no downside with ODR indicators.
        UsePrivateAlias(ClUsePrivateAlias.getNumOccurrences() > 0
                            ? ClUsePrivateAlias
                            : UseOdrIndicator),
        UseOdrIndicator(ClUseOdrIndicator.getNumOccurrences() > 0
                            ? ClUseOdrIndicator
                            : UseOdrIndicator),
        // Not a typo: ClWithComdat is almost completely pointless without
        // ClUseGlobalsGC (because then it only works on modules without
        // globals, which are rare); it is a prerequisite for ClUseGlobalsGC;
        // and both suffer from gold PR19002 for which UseGlobalsGC constructor
        // argument is designed as workaround. Therefore, disable both
        // ClWithComdat and ClUseGlobalsGC unless the frontend says it's ok to
        // do globals-gc.
        UseCtorComdat(UseGlobalsGC && ClWithComdat && !this->CompileKernel),
        DestructorKind(DestructorKind),
        ConstructorKind(ClConstructorKind.getNumOccurrences() > 0
                            ? ClConstructorKind
                            : ConstructorKind) {
    C = &(M.getContext());
    int LongSize = M.getDataLayout().getPointerSizeInBits();
    IntptrTy = Type::getIntNTy(*C, LongSize);
    PtrTy = PointerType::getUnqual(*C);
    TargetTriple = M.getTargetTriple();
    Mapping = getShadowMapping(TargetTriple, LongSize, this->CompileKernel);

    if (ClOverrideDestructorKind != AsanDtorKind::Invalid)
      this->DestructorKind = ClOverrideDestructorKind;
    assert(this->DestructorKind != AsanDtorKind::Invalid);
  }

  bool instrumentModule();

private:
  void initializeCallbacks();

  void instrumentGlobals(IRBuilder<> &IRB, bool *CtorComdat);
  void InstrumentGlobalsCOFF(IRBuilder<> &IRB,
                             ArrayRef<GlobalVariable *> ExtendedGlobals,
                             ArrayRef<Constant *> MetadataInitializers);
  void instrumentGlobalsELF(IRBuilder<> &IRB,
                            ArrayRef<GlobalVariable *> ExtendedGlobals,
                            ArrayRef<Constant *> MetadataInitializers,
                            const std::string &UniqueModuleId);
  void InstrumentGlobalsMachO(IRBuilder<> &IRB,
                              ArrayRef<GlobalVariable *> ExtendedGlobals,
                              ArrayRef<Constant *> MetadataInitializers);
  void
  InstrumentGlobalsWithMetadataArray(IRBuilder<> &IRB,
                                     ArrayRef<GlobalVariable *> ExtendedGlobals,
                                     ArrayRef<Constant *> MetadataInitializers);

  GlobalVariable *CreateMetadataGlobal(Constant *Initializer,
                                       StringRef OriginalName);
  void SetComdatForGlobalMetadata(GlobalVariable *G, GlobalVariable *Metadata,
                                  StringRef InternalSuffix);
  Instruction *CreateAsanModuleDtor();

  const GlobalVariable *getExcludedAliasedGlobal(const GlobalAlias &GA) const;
  bool shouldInstrumentGlobal(GlobalVariable *G) const;
  bool ShouldUseMachOGlobalsSection() const;
  StringRef getGlobalMetadataSection() const;
  void poisonOneInitializer(Function &GlobalInit);
  void createInitializerPoisonCalls();
  uint64_t getMinRedzoneSizeForGlobal() const {
    return getRedzoneSizeForScale(Mapping.Scale);
  }
  uint64_t getRedzoneSizeForGlobal(uint64_t SizeInBytes) const;
  int GetAsanVersion() const;
  GlobalVariable *getOrCreateModuleName();

  Module &M;
  bool CompileKernel;
  bool InsertVersionCheck;
  bool Recover;
  bool UseGlobalsGC;
  bool UsePrivateAlias;
  bool UseOdrIndicator;
  bool UseCtorComdat;
  AsanDtorKind DestructorKind;
  AsanCtorKind ConstructorKind;
  Type *IntptrTy;
  PointerType *PtrTy;
  LLVMContext *C;
  Triple TargetTriple;
  ShadowMapping Mapping;
  FunctionCallee AsanPoisonGlobals;
  FunctionCallee AsanUnpoisonGlobals;
  FunctionCallee AsanRegisterGlobals;
  FunctionCallee AsanUnregisterGlobals;
  FunctionCallee AsanRegisterImageGlobals;
  FunctionCallee AsanUnregisterImageGlobals;
  FunctionCallee AsanRegisterElfGlobals;
  FunctionCallee AsanUnregisterElfGlobals;

  Function *AsanCtorFunction = nullptr;
  Function *AsanDtorFunction = nullptr;
  GlobalVariable *ModuleName = nullptr;
};

// Stack poisoning does not play well with exception handling.
// When an exception is thrown, we essentially bypass the code
// that unpoisons the stack. This is why the run-time library has
// to intercept __cxa_throw (as well as longjmp, etc) and unpoison the entire
// stack in the interceptor. This however does not work inside the
// actual function which catches the exception. Most likely because the
// compiler hoists the load of the shadow value somewhere too high.
// This causes asan to report a non-existing bug on 453.povray.
// It sounds like an LLVM bug.
struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> {
  Function &F;
  AddressSanitizer &ASan;
  RuntimeCallInserter &RTCI;
  DIBuilder DIB;
  LLVMContext *C;
  Type *IntptrTy;
  Type *IntptrPtrTy;
  ShadowMapping Mapping;

  SmallVector<AllocaInst *, 16> AllocaVec;
  SmallVector<AllocaInst *, 16> StaticAllocasToMoveUp;
  SmallVector<Instruction *, 8> RetVec;

  FunctionCallee AsanStackMallocFunc[kMaxAsanStackMallocSizeClass + 1],
      AsanStackFreeFunc[kMaxAsanStackMallocSizeClass + 1];
  FunctionCallee AsanSetShadowFunc[0x100] = {};
  FunctionCallee AsanPoisonStackMemoryFunc, AsanUnpoisonStackMemoryFunc;
  FunctionCallee AsanAllocaPoisonFunc, AsanAllocasUnpoisonFunc;

  // Stores a place and arguments of poisoning/unpoisoning call for alloca.
  struct AllocaPoisonCall {
    IntrinsicInst *InsBefore;
    AllocaInst *AI;
    uint64_t Size;
    bool DoPoison;
  };
  SmallVector<AllocaPoisonCall, 8> DynamicAllocaPoisonCallVec;
  SmallVector<AllocaPoisonCall, 8> StaticAllocaPoisonCallVec;

  SmallVector<AllocaInst *, 1> DynamicAllocaVec;
  SmallVector<IntrinsicInst *, 1> StackRestoreVec;
  AllocaInst *DynamicAllocaLayout = nullptr;
  IntrinsicInst *LocalEscapeCall = nullptr;

  bool HasInlineAsm = false;
  bool HasReturnsTwiceCall = false;
  bool PoisonStack;

  FunctionStackPoisoner(Function &F, AddressSanitizer &ASan,
                        RuntimeCallInserter &RTCI)
      : F(F), ASan(ASan), RTCI(RTCI),
        DIB(*F.getParent(), /*AllowUnresolved*/ false), C(ASan.C),
        IntptrTy(ASan.IntptrTy),
        IntptrPtrTy(PointerType::get(IntptrTy->getContext(), 0)),
        Mapping(ASan.Mapping),
        PoisonStack(ClStack && !F.getParent()->getTargetTriple().isAMDGPU()) {}

  bool runOnFunction() {
    if (!PoisonStack)
      return false;

    if (ClRedzoneByvalArgs)
      copyArgsPassedByValToAllocas();

    // Collect alloca, ret, lifetime instructions etc.
    for (BasicBlock *BB : depth_first(&F.getEntryBlock())) visit(*BB);

    if (AllocaVec.empty() && DynamicAllocaVec.empty()) return false;

    initializeCallbacks(*F.getParent());

    processDynamicAllocas();
    processStaticAllocas();

    if (ClDebugStack) {
      LLVM_DEBUG(dbgs() << F);
    }
    return true;
  }

  // Arguments marked with the "byval" attribute are implicitly copied without
  // using an alloca instruction. To produce redzones for those arguments, we
  // copy them a second time into memory allocated with an alloca instruction.
  void copyArgsPassedByValToAllocas();

  // Finds all Alloca instructions and puts
  // poisoned red zones around all of them.
  // Then unpoison everything back before the function returns.
  void processStaticAllocas();
  void processDynamicAllocas();

  void createDynamicAllocasInitStorage();

  // ----------------------- Visitors.
  /// Collect all Ret instructions, or the musttail call instruction if it
  /// precedes the return instruction.
  void visitReturnInst(ReturnInst &RI) {
    if (CallInst *CI = RI.getParent()->getTerminatingMustTailCall())
      RetVec.push_back(CI);
    else
      RetVec.push_back(&RI);
  }

  /// Collect all Resume instructions.
  void visitResumeInst(ResumeInst &RI) { RetVec.push_back(&RI); }

  /// Collect all CleanupReturnInst instructions.
  void visitCleanupReturnInst(CleanupReturnInst &CRI) {
    RetVec.push_back(&CRI);
  }

  void unpoisonDynamicAllocasBeforeInst(Instruction *InstBefore,
                                        Value *SavedStack) {
    IRBuilder<> IRB(InstBefore);
    Value *DynamicAreaPtr = IRB.CreatePtrToInt(SavedStack, IntptrTy);
    // When we insert _asan_allocas_unpoison before @llvm.stackrestore, we
    // need to adjust extracted SP to compute the address of the most recent
    // alloca. We have a special @llvm.get.dynamic.area.offset intrinsic for
    // this purpose.
    if (!isa<ReturnInst>(InstBefore)) {
      Value *DynamicAreaOffset = IRB.CreateIntrinsic(
          Intrinsic::get_dynamic_area_offset, {IntptrTy}, {});

      DynamicAreaPtr = IRB.CreateAdd(IRB.CreatePtrToInt(SavedStack, IntptrTy),
                                     DynamicAreaOffset);
    }

    RTCI.createRuntimeCall(
        IRB, AsanAllocasUnpoisonFunc,
        {IRB.CreateLoad(IntptrTy, DynamicAllocaLayout), DynamicAreaPtr});
  }

  // Unpoison dynamic allocas redzones.
  void unpoisonDynamicAllocas() {
    for (Instruction *Ret : RetVec)
      unpoisonDynamicAllocasBeforeInst(Ret, DynamicAllocaLayout);

    for (Instruction *StackRestoreInst : StackRestoreVec)
      unpoisonDynamicAllocasBeforeInst(StackRestoreInst,
                                       StackRestoreInst->getOperand(0));
  }

  // Deploy and poison redzones around a dynamic alloca call. To do this, we
  // should replace this call with another one with changed parameters and
  // replace all its uses with the new address, so
  //   addr = alloca type, old_size, align
  // is replaced by
  //   new_size = (old_size + additional_size) * sizeof(type)
  //   tmp = alloca i8, new_size, max(align, 32)
  //   addr = tmp + 32 (first 32 bytes are for the left redzone).
  // Additional_size is added to make the new memory allocation contain not
  // only the requested memory, but also the left, partial and right redzones.
  void handleDynamicAllocaCall(AllocaInst *AI);

  /// Collect Alloca instructions we want (and can) handle.
  void visitAllocaInst(AllocaInst &AI) {
    // FIXME: Handle scalable vectors instead of ignoring them.
    const Type *AllocaType = AI.getAllocatedType();
    const auto *STy = dyn_cast<StructType>(AllocaType);
    if (!ASan.isInterestingAlloca(AI) || isa<ScalableVectorType>(AllocaType) ||
        (STy && STy->containsHomogeneousScalableVectorTypes())) {
      if (AI.isStaticAlloca()) {
        // Skip over allocas that are present *before* the first instrumented
        // alloca, we don't want to move those around.
        if (AllocaVec.empty())
          return;

        StaticAllocasToMoveUp.push_back(&AI);
      }
      return;
    }

    if (!AI.isStaticAlloca())
      DynamicAllocaVec.push_back(&AI);
    else
      AllocaVec.push_back(&AI);
  }

  /// Collect lifetime intrinsic calls to check for use-after-scope
  /// errors.
  void visitIntrinsicInst(IntrinsicInst &II) {
    Intrinsic::ID ID = II.getIntrinsicID();
    if (ID == Intrinsic::stackrestore) StackRestoreVec.push_back(&II);
    if (ID == Intrinsic::localescape) LocalEscapeCall = &II;
    if (!ASan.UseAfterScope)
      return;
    if (!II.isLifetimeStartOrEnd())
      return;
    // Find alloca instruction that corresponds to llvm.lifetime argument.
    AllocaInst *AI = dyn_cast<AllocaInst>(II.getArgOperand(0));
    // We're interested only in allocas we can handle.
    if (!AI || !ASan.isInterestingAlloca(*AI))
      return;

    std::optional<TypeSize> Size = AI->getAllocationSize(AI->getDataLayout());
    // Check that size is known and can be stored in IntptrTy.
    // TODO: Add support for scalable vectors if possible.
    if (!Size || Size->isScalable() ||
        !ConstantInt::isValueValidForType(IntptrTy, Size->getFixedValue()))
      return;

    bool DoPoison = (ID == Intrinsic::lifetime_end);
    AllocaPoisonCall APC = {&II, AI, *Size, DoPoison};
    if (AI->isStaticAlloca())
      StaticAllocaPoisonCallVec.push_back(APC);
    else if (ClInstrumentDynamicAllocas)
      DynamicAllocaPoisonCallVec.push_back(APC);
  }

  void visitCallBase(CallBase &CB) {
    if (CallInst *CI = dyn_cast<CallInst>(&CB)) {
      HasInlineAsm |= CI->isInlineAsm() && &CB != ASan.LocalDynamicShadow;
      HasReturnsTwiceCall |= CI->canReturnTwice();
    }
  }

  // ---------------------- Helpers.
  void initializeCallbacks(Module &M);

  // Copies bytes from ShadowBytes into shadow memory for indexes where
  // ShadowMask is not zero. If ShadowMask[i] is zero, we assume that
  // ShadowBytes[i] is constantly zero and doesn't need to be overwritten.
  void copyToShadow(ArrayRef<uint8_t> ShadowMask, ArrayRef<uint8_t> ShadowBytes,
                    IRBuilder<> &IRB, Value *ShadowBase);
  void copyToShadow(ArrayRef<uint8_t> ShadowMask, ArrayRef<uint8_t> ShadowBytes,
                    size_t Begin, size_t End, IRBuilder<> &IRB,
                    Value *ShadowBase);
  void copyToShadowInline(ArrayRef<uint8_t> ShadowMask,
                          ArrayRef<uint8_t> ShadowBytes, size_t Begin,
                          size_t End, IRBuilder<> &IRB, Value *ShadowBase);

  void poisonAlloca(Value *V, uint64_t Size, IRBuilder<> &IRB, bool DoPoison);

  Value *createAllocaForLayout(IRBuilder<> &IRB, const ASanStackFrameLayout &L,
                               bool Dynamic);
  PHINode *createPHI(IRBuilder<> &IRB, Value *Cond, Value *ValueIfTrue,
                     Instruction *ThenTerm, Value *ValueIfFalse);
};

} // end anonymous namespace

void AddressSanitizerPass::printPipeline(
    raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
  static_cast<PassInfoMixin<AddressSanitizerPass> *>(this)->printPipeline(
      OS, MapClassName2PassName);
  OS << '<';
  if (Options.CompileKernel)
    OS << "kernel;";
  if (Options.UseAfterScope)
    OS << "use-after-scope";
  OS << '>';
}

AddressSanitizerPass::AddressSanitizerPass(
    const AddressSanitizerOptions &Options, bool UseGlobalGC,
    bool UseOdrIndicator, AsanDtorKind DestructorKind,
    AsanCtorKind ConstructorKind)
    : Options(Options), UseGlobalGC(UseGlobalGC),
      UseOdrIndicator(UseOdrIndicator), DestructorKind(DestructorKind),
      ConstructorKind(ConstructorKind) {}

PreservedAnalyses AddressSanitizerPass::run(Module &M,
                                            ModuleAnalysisManager &MAM) {
  // Return early if nosanitize_address module flag is present for the module.
  // This implies that asan pass has already run before.
  if (checkIfAlreadyInstrumented(M, "nosanitize_address"))
    return PreservedAnalyses::all();

  ModuleAddressSanitizer ModuleSanitizer(
      M, Options.InsertVersionCheck, Options.CompileKernel, Options.Recover,
      UseGlobalGC, UseOdrIndicator, DestructorKind, ConstructorKind);
  bool Modified = false;
  auto &FAM = MAM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
  const StackSafetyGlobalInfo *const SSGI =
      ClUseStackSafety ? &MAM.getResult<StackSafetyGlobalAnalysis>(M) : nullptr;
  for (Function &F : M) {
    if (F.empty())
      continue;
    if (F.getLinkage() == GlobalValue::AvailableExternallyLinkage)
      continue;
    if (!ClDebugFunc.empty() && ClDebugFunc == F.getName())
      continue;
    if (F.getName().starts_with("__asan_"))
      continue;
    if (F.isPresplitCoroutine())
      continue;
    AddressSanitizer FunctionSanitizer(
        M, SSGI, Options.InstrumentationWithCallsThreshold,
        Options.MaxInlinePoisoningSize, Options.CompileKernel, Options.Recover,
        Options.UseAfterScope, Options.UseAfterReturn);
    const TargetLibraryInfo &TLI = FAM.getResult<TargetLibraryAnalysis>(F);
    const TargetTransformInfo &TTI = FAM.getResult<TargetIRAnalysis>(F);
    Modified |= FunctionSanitizer.instrumentFunction(F, &TLI, &TTI);
  }
  Modified |= ModuleSanitizer.instrumentModule();
  if (!Modified)
    return PreservedAnalyses::all();

  PreservedAnalyses PA = PreservedAnalyses::none();
  // GlobalsAA is considered stateless and does not get invalidated unless
  // explicitly invalidated; PreservedAnalyses::none() is not enough. Sanitizers
  // make changes that require GlobalsAA to be invalidated.
  PA.abandon<GlobalsAA>();
  return PA;
}

static size_t TypeSizeToSizeIndex(uint32_t TypeSize) {
  size_t Res = llvm::countr_zero(TypeSize / 8);
  assert(Res < kNumberOfAccessSizes);
  return Res;
}
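// E.g. for the helper above, TypeSizeToSizeIndex(8) == 0 for a 1-byte access
// and TypeSizeToSizeIndex(128) == 4 for a 16-byte access; TypeSize is in
// bits, so the index is log2(TypeSize / 8) and stays below
// kNumberOfAccessSizes.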

/// Check if \p G has been created by a trusted compiler pass.
static bool GlobalWasGeneratedByCompiler(GlobalVariable *G) {
  // Do not instrument @llvm.global_ctors, @llvm.used, etc.
  if (G->getName().starts_with("llvm.") ||
      // Do not instrument gcov counter arrays.
      G->getName().starts_with("__llvm_gcov_ctr") ||
      // Do not instrument rtti proxy symbols for function sanitizer.
      G->getName().starts_with("__llvm_rtti_proxy"))
    return true;

  // Do not instrument asan globals.
  if (G->getName().starts_with(kAsanGenPrefix) ||
      G->getName().starts_with(kSanCovGenPrefix) ||
      G->getName().starts_with(kODRGenPrefix))
    return true;

  return false;
}

static bool isUnsupportedAMDGPUAddrspace(Value *Addr) {
  Type *PtrTy = cast<PointerType>(Addr->getType()->getScalarType());
  unsigned int AddrSpace = PtrTy->getPointerAddressSpace();
  if (AddrSpace == 3 || AddrSpace == 5)
    return true;
  return false;
}

Value *AddressSanitizer::memToShadow(Value *Shadow, IRBuilder<> &IRB) {
  // Shadow >> scale
  Shadow = IRB.CreateLShr(Shadow, Mapping.Scale);
  if (Mapping.Offset == 0) return Shadow;
  // (Shadow >> scale) | offset
  Value *ShadowBase;
  if (LocalDynamicShadow)
    ShadowBase = LocalDynamicShadow;
  else
    ShadowBase = ConstantInt::get(IntptrTy, Mapping.Offset);
  if (Mapping.OrShadowOffset)
    return IRB.CreateOr(Shadow, ShadowBase);
  else
    return IRB.CreateAdd(Shadow, ShadowBase);
}
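// E.g. for the function above, with Scale == 3 and the static Linux/x86-64
// offset 0x7fff8000 this emits "(Shadow >> 3) + 0x7fff8000"; the OR form is
// only used when the offset is a power of two (see OrShadowOffset in
// getShadowMapping).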

// Instrument memset/memmove/memcpy
void AddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI,
                                              RuntimeCallInserter &RTCI) {
  InstrumentationIRBuilder IRB(MI);
  if (isa<MemTransferInst>(MI)) {
    RTCI.createRuntimeCall(
        IRB, isa<MemMoveInst>(MI) ? AsanMemmove : AsanMemcpy,
        {IRB.CreateAddrSpaceCast(MI->getOperand(0), PtrTy),
         IRB.CreateAddrSpaceCast(MI->getOperand(1), PtrTy),
         IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
  } else if (isa<MemSetInst>(MI)) {
    RTCI.createRuntimeCall(
        IRB, AsanMemset,
        {IRB.CreateAddrSpaceCast(MI->getOperand(0), PtrTy),
         IRB.CreateIntCast(MI->getOperand(1), IRB.getInt32Ty(), false),
         IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
  }
  MI->eraseFromParent();
}

/// Check if we want (and can) handle this alloca.
bool AddressSanitizer::isInterestingAlloca(const AllocaInst &AI) {
  auto [It, Inserted] = ProcessedAllocas.try_emplace(&AI);

  if (!Inserted)
    return It->getSecond();

  bool IsInteresting =
      (AI.getAllocatedType()->isSized() &&
       // alloca() may be called with 0 size, ignore it.
       ((!AI.isStaticAlloca()) || !getAllocaSizeInBytes(AI).isZero()) &&
       // We are only interested in allocas not promotable to registers.
       // Promotable allocas are common under -O0.
       (!ClSkipPromotableAllocas || !isAllocaPromotable(&AI)) &&
       // inalloca allocas are not treated as static, and we don't want
       // dynamic alloca instrumentation for them as well.
       !AI.isUsedWithInAlloca() &&
       // swifterror allocas are register promoted by ISel
       !AI.isSwiftError() &&
       // safe allocas are not interesting
       !(SSGI && SSGI->isSafe(AI)));

  It->second = IsInteresting;
  return IsInteresting;
}

bool AddressSanitizer::ignoreAccess(Instruction *Inst, Value *Ptr) {
  // Instrument accesses from different address spaces only for AMDGPU.
  Type *PtrTy = cast<PointerType>(Ptr->getType()->getScalarType());
  if (PtrTy->getPointerAddressSpace() != 0 &&
      !(TargetTriple.isAMDGPU() && !isUnsupportedAMDGPUAddrspace(Ptr)))
    return true;

  // Ignore swifterror addresses.
  // swifterror memory addresses are mem2reg promoted by instruction
  // selection. As such they cannot have regular uses like an instrumentation
  // function and it makes no sense to track them as memory.
  if (Ptr->isSwiftError())
    return true;

  // Treat memory accesses to promotable allocas as non-interesting since they
  // will not cause memory violations. This greatly speeds up the instrumented
  // executable at -O0.
  if (auto AI = dyn_cast_or_null<AllocaInst>(Ptr))
    if (ClSkipPromotableAllocas && !isInterestingAlloca(*AI))
      return true;

  if (SSGI != nullptr && SSGI->stackAccessIsSafe(*Inst) &&
      findAllocaForValue(Ptr))
    return true;

  return false;
}

void AddressSanitizer::getInterestingMemoryOperands(
    Instruction *I, SmallVectorImpl<InterestingMemoryOperand> &Interesting,
    const TargetTransformInfo *TTI) {
  // Do not instrument the load fetching the dynamic shadow address.
  if (LocalDynamicShadow == I)
    return;

  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    if (!ClInstrumentReads || ignoreAccess(I, LI->getPointerOperand()))
      return;
    Interesting.emplace_back(I, LI->getPointerOperandIndex(), false,
                             LI->getType(), LI->getAlign());
  } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
    if (!ClInstrumentWrites || ignoreAccess(I, SI->getPointerOperand()))
      return;
    Interesting.emplace_back(I, SI->getPointerOperandIndex(), true,
                             SI->getValueOperand()->getType(), SI->getAlign());
  } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
    if (!ClInstrumentAtomics || ignoreAccess(I, RMW->getPointerOperand()))
      return;
    Interesting.emplace_back(I, RMW->getPointerOperandIndex(), true,
                             RMW->getValOperand()->getType(), std::nullopt);
  } else if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) {
    if (!ClInstrumentAtomics || ignoreAccess(I, XCHG->getPointerOperand()))
      return;
    Interesting.emplace_back(I, XCHG->getPointerOperandIndex(), true,
                             XCHG->getCompareOperand()->getType(),
                             std::nullopt);
  } else if (auto CI = dyn_cast<CallInst>(I)) {
    switch (CI->getIntrinsicID()) {
    case Intrinsic::masked_load:
    case Intrinsic::masked_store:
    case Intrinsic::masked_gather:
    case Intrinsic::masked_scatter: {
      bool IsWrite = CI->getType()->isVoidTy();
      // Masked store has an initial operand for the value.
      unsigned OpOffset = IsWrite ? 1 : 0;
      if (IsWrite ? !ClInstrumentWrites : !ClInstrumentReads)
        return;

      auto BasePtr = CI->getOperand(OpOffset);
      if (ignoreAccess(I, BasePtr))
        return;
      Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();
      MaybeAlign Alignment = Align(1);
      // Otherwise no alignment guarantees. We probably got Undef.
      if (auto *Op = dyn_cast<ConstantInt>(CI->getOperand(1 + OpOffset)))
        Alignment = Op->getMaybeAlignValue();
      Value *Mask = CI->getOperand(2 + OpOffset);
      Interesting.emplace_back(I, OpOffset, IsWrite, Ty, Alignment, Mask);
      break;
    }
    case Intrinsic::masked_expandload:
    case Intrinsic::masked_compressstore: {
      bool IsWrite = CI->getIntrinsicID() == Intrinsic::masked_compressstore;
      unsigned OpOffset = IsWrite ? 1 : 0;
      if (IsWrite ? !ClInstrumentWrites : !ClInstrumentReads)
        return;
      auto BasePtr = CI->getOperand(OpOffset);
      if (ignoreAccess(I, BasePtr))
        return;
      MaybeAlign Alignment = BasePtr->getPointerAlignment(*DL);
      Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();

      IRBuilder IB(I);
      Value *Mask = CI->getOperand(1 + OpOffset);
      // Use the popcount of Mask as the effective vector length.
      Type *ExtTy = VectorType::get(IntptrTy, cast<VectorType>(Ty));
      Value *ExtMask = IB.CreateZExt(Mask, ExtTy);
      Value *EVL = IB.CreateAddReduce(ExtMask);
      Value *TrueMask = ConstantInt::get(Mask->getType(), 1);
      Interesting.emplace_back(I, OpOffset, IsWrite, Ty, Alignment, TrueMask,
                               EVL);
      break;
    }
    case Intrinsic::vp_load:
    case Intrinsic::vp_store:
    case Intrinsic::experimental_vp_strided_load:
    case Intrinsic::experimental_vp_strided_store: {
      auto *VPI = cast<VPIntrinsic>(CI);
      unsigned IID = CI->getIntrinsicID();
      bool IsWrite = CI->getType()->isVoidTy();
      if (IsWrite ? !ClInstrumentWrites : !ClInstrumentReads)
        return;
      unsigned PtrOpNo = *VPI->getMemoryPointerParamPos(IID);
      Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();
      MaybeAlign Alignment = VPI->getOperand(PtrOpNo)->getPointerAlignment(*DL);
      Value *Stride = nullptr;
      if (IID == Intrinsic::experimental_vp_strided_store ||
          IID == Intrinsic::experimental_vp_strided_load) {
        Stride = VPI->getOperand(PtrOpNo + 1);
        // Use the pointer alignment as the element alignment if the stride is
        // a multiple of the pointer alignment. Otherwise, the element
        // alignment should be Align(1).
        unsigned PointerAlign = Alignment.valueOrOne().value();
        if (!isa<ConstantInt>(Stride) ||
            cast<ConstantInt>(Stride)->getZExtValue() % PointerAlign != 0)
          Alignment = Align(1);
      }
      Interesting.emplace_back(I, PtrOpNo, IsWrite, Ty, Alignment,
                               VPI->getMaskParam(), VPI->getVectorLengthParam(),
                               Stride);
      break;
    }
    case Intrinsic::vp_gather:
    case Intrinsic::vp_scatter: {
      auto *VPI = cast<VPIntrinsic>(CI);
      unsigned IID = CI->getIntrinsicID();
      bool IsWrite = IID == Intrinsic::vp_scatter;
      if (IsWrite ? !ClInstrumentWrites : !ClInstrumentReads)
        return;
      unsigned PtrOpNo = *VPI->getMemoryPointerParamPos(IID);
      Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();
      MaybeAlign Alignment = VPI->getPointerAlignment();
      Interesting.emplace_back(I, PtrOpNo, IsWrite, Ty, Alignment,
                               VPI->getMaskParam(),
                               VPI->getVectorLengthParam());
      break;
    }
    default:
      if (auto *II = dyn_cast<IntrinsicInst>(I)) {
        MemIntrinsicInfo IntrInfo;
        if (TTI->getTgtMemIntrinsic(II, IntrInfo))
          Interesting = IntrInfo.InterestingOperands;
        return;
      }
      for (unsigned ArgNo = 0; ArgNo < CI->arg_size(); ArgNo++) {
        if (!ClInstrumentByval || !CI->isByValArgument(ArgNo) ||
            ignoreAccess(I, CI->getArgOperand(ArgNo)))
          continue;
        Type *Ty = CI->getParamByValType(ArgNo);
        Interesting.emplace_back(I, ArgNo, false, Ty, Align(1));
      }
    }
  }
}
1594
1595static bool isPointerOperand(Value *V) {
1596 return V->getType()->isPointerTy() || isa<PtrToIntInst>(V);
1597}
1598
1599// This is a rough heuristic; it may cause both false positives and
1600// false negatives. The proper implementation requires cooperation with
1601// the frontend.
1602 static bool isInterestingPointerComparison(Instruction *I) {
1603 if (ICmpInst *Cmp = dyn_cast<ICmpInst>(I)) {
1604 if (!Cmp->isRelational())
1605 return false;
1606 } else {
1607 return false;
1608 }
1609 return isPointerOperand(I->getOperand(0)) &&
1610 isPointerOperand(I->getOperand(1));
1611}
1612
1613// This is a rough heuristic; it may cause both false positives and
1614// false negatives. The proper implementation requires cooperation with
1615// the frontend.
1616 static bool isInterestingPointerSubtraction(Instruction *I) {
1617 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(I)) {
1618 if (BO->getOpcode() != Instruction::Sub)
1619 return false;
1620 } else {
1621 return false;
1622 }
1623 return isPointerOperand(I->getOperand(0)) &&
1624 isPointerOperand(I->getOperand(1));
1625}
1626
1627bool AddressSanitizer::GlobalIsLinkerInitialized(GlobalVariable *G) {
1628 // If a global variable does not have dynamic initialization we don't
1629 // have to instrument it. However, if a global does not have an initializer
1630 // at all, we assume it has a dynamic initializer (in another TU).
1631 if (!G->hasInitializer())
1632 return false;
1633
1634 if (G->hasSanitizerMetadata() && G->getSanitizerMetadata().IsDynInit)
1635 return false;
1636
1637 return true;
1638}
1639
1640void AddressSanitizer::instrumentPointerComparisonOrSubtraction(
1641 Instruction *I, RuntimeCallInserter &RTCI) {
1642 IRBuilder<> IRB(I);
1643 FunctionCallee F = isa<ICmpInst>(I) ? AsanPtrCmpFunction : AsanPtrSubFunction;
1644 Value *Param[2] = {I->getOperand(0), I->getOperand(1)};
1645 for (Value *&i : Param) {
1646 if (i->getType()->isPointerTy())
1647 i = IRB.CreatePointerCast(i, IntptrTy);
1648 }
1649 RTCI.createRuntimeCall(IRB, F, Param);
1650}
1651
1652static void doInstrumentAddress(AddressSanitizer *Pass, Instruction *I,
1653 Instruction *InsertBefore, Value *Addr,
1654 MaybeAlign Alignment, unsigned Granularity,
1655 TypeSize TypeStoreSize, bool IsWrite,
1656 Value *SizeArgument, bool UseCalls,
1657 uint32_t Exp, RuntimeCallInserter &RTCI) {
1658 // Instrument a 1-, 2-, 4-, 8-, or 16-byte access with a single check
1659 // if the data is properly aligned.
1660 if (!TypeStoreSize.isScalable()) {
1661 const auto FixedSize = TypeStoreSize.getFixedValue();
1662 switch (FixedSize) {
1663 case 8:
1664 case 16:
1665 case 32:
1666 case 64:
1667 case 128:
1668 if (!Alignment || *Alignment >= Granularity ||
1669 *Alignment >= FixedSize / 8)
1670 return Pass->instrumentAddress(I, InsertBefore, Addr, Alignment,
1671 FixedSize, IsWrite, nullptr, UseCalls,
1672 Exp, RTCI);
1673 }
1674 }
1675 Pass->instrumentUnusualSizeOrAlignment(I, InsertBefore, Addr, TypeStoreSize,
1676 IsWrite, nullptr, UseCalls, Exp, RTCI);
1677}
1678
1679void AddressSanitizer::instrumentMaskedLoadOrStore(
1680 AddressSanitizer *Pass, const DataLayout &DL, Type *IntptrTy, Value *Mask,
1681 Value *EVL, Value *Stride, Instruction *I, Value *Addr,
1682 MaybeAlign Alignment, unsigned Granularity, Type *OpType, bool IsWrite,
1683 Value *SizeArgument, bool UseCalls, uint32_t Exp,
1684 RuntimeCallInserter &RTCI) {
1685 auto *VTy = cast<VectorType>(OpType);
1686 TypeSize ElemTypeSize = DL.getTypeStoreSizeInBits(VTy->getScalarType());
1687 auto Zero = ConstantInt::get(IntptrTy, 0);
1688
1689 IRBuilder IB(I);
1690 Instruction *LoopInsertBefore = I;
1691 if (EVL) {
1692 // The end argument of SplitBlockAndInsertForEachLane is assumed to be
1693 // greater than zero, so we must check whether EVL is zero here.
1694 Type *EVLType = EVL->getType();
1695 Value *IsEVLZero = IB.CreateICmpNE(EVL, ConstantInt::get(EVLType, 0));
1696 LoopInsertBefore = SplitBlockAndInsertIfThen(IsEVLZero, I, false);
1697 IB.SetInsertPoint(LoopInsertBefore);
1698 // Cast EVL to IntptrTy.
1699 EVL = IB.CreateZExtOrTrunc(EVL, IntptrTy);
1700 // To avoid undefined behavior when extracting with an out-of-range index,
1701 // use the minimum of EVL and the element count as the trip count.
1702 Value *EC = IB.CreateElementCount(IntptrTy, VTy->getElementCount());
1703 EVL = IB.CreateBinaryIntrinsic(Intrinsic::umin, EVL, EC);
1704 } else {
1705 EVL = IB.CreateElementCount(IntptrTy, VTy->getElementCount());
1706 }
1707
1708 // Cast Stride to IntptrTy.
1709 if (Stride)
1710 Stride = IB.CreateZExtOrTrunc(Stride, IntptrTy);
1711
1712 SplitBlockAndInsertForEachLane(EVL, LoopInsertBefore->getIterator(),
1713 [&](IRBuilderBase &IRB, Value *Index) {
1714 Value *MaskElem = IRB.CreateExtractElement(Mask, Index);
1715 if (auto *MaskElemC = dyn_cast<ConstantInt>(MaskElem)) {
1716 if (MaskElemC->isZero())
1717 // No check
1718 return;
1719 // Unconditional check
1720 } else {
1721 // Conditional check
1722 Instruction *ThenTerm = SplitBlockAndInsertIfThen(
1723 MaskElem, &*IRB.GetInsertPoint(), false);
1724 IRB.SetInsertPoint(ThenTerm);
1725 }
1726
1727 Value *InstrumentedAddress;
1728 if (isa<VectorType>(Addr->getType())) {
1729 assert(
1730 cast<VectorType>(Addr->getType())->getElementType()->isPointerTy() &&
1731 "Expected vector of pointer.");
1732 InstrumentedAddress = IRB.CreateExtractElement(Addr, Index);
1733 } else if (Stride) {
1734 Index = IRB.CreateMul(Index, Stride);
1735 InstrumentedAddress = IRB.CreatePtrAdd(Addr, Index);
1736 } else {
1737 InstrumentedAddress = IRB.CreateGEP(VTy, Addr, {Zero, Index});
1738 }
1739 doInstrumentAddress(Pass, I, &*IRB.GetInsertPoint(), InstrumentedAddress,
1740 Alignment, Granularity, ElemTypeSize, IsWrite,
1741 SizeArgument, UseCalls, Exp, RTCI);
1742 });
1743}
1744
1745void AddressSanitizer::instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis,
1746 InterestingMemoryOperand &O, bool UseCalls,
1747 const DataLayout &DL,
1748 RuntimeCallInserter &RTCI) {
1749 Value *Addr = O.getPtr();
1750
1751 // Optimization experiments.
1752 // The experiments can be used to evaluate potential optimizations that remove
1753 // instrumentation (assess false negatives). Instead of completely removing
1754 // some instrumentation, you set Exp to a non-zero value (mask of optimization
1755 // experiments that want to remove instrumentation of this instruction).
1756 // If Exp is non-zero, this pass will emit special calls into the runtime
1757 // (e.g. __asan_report_exp_load1 instead of __asan_report_load1). These calls
1758 // make the runtime terminate the program in a special way (with a different
1759 // exit status). Then you run the new compiler on a buggy corpus, collect
1760 // the special terminations (ideally, you don't see them at all -- no false
1761 // negatives) and make a decision about the optimization.
1762 uint32_t Exp = ClForceExperiment;
1763
1764 if (ClOpt && ClOptGlobals) {
1765 // If initialization order checking is disabled, a simple access to a
1766 // dynamically initialized global is always valid.
1767 GlobalVariable *G = dyn_cast<GlobalVariable>(getUnderlyingObject(Addr));
1768 if (G && (!ClInitializers || GlobalIsLinkerInitialized(G)) &&
1769 isSafeAccess(ObjSizeVis, Addr, O.TypeStoreSize)) {
1770 NumOptimizedAccessesToGlobalVar++;
1771 return;
1772 }
1773 }
1774
1775 if (ClOpt && ClOptStack) {
1776 // A direct inbounds access to a stack variable is always valid.
1777 if (isa<AllocaInst>(getUnderlyingObject(Addr)) &&
1778 isSafeAccess(ObjSizeVis, Addr, O.TypeStoreSize)) {
1779 NumOptimizedAccessesToStackVar++;
1780 return;
1781 }
1782 }
1783
1784 if (O.IsWrite)
1785 NumInstrumentedWrites++;
1786 else
1787 NumInstrumentedReads++;
1788
1789 if (O.MaybeByteOffset) {
1790 Type *Ty = Type::getInt8Ty(*C);
1791 IRBuilder IB(O.getInsn());
1792
1793 Value *OffsetOp = O.MaybeByteOffset;
1794 if (TargetTriple.isRISCV()) {
1795 Type *OffsetTy = OffsetOp->getType();
1796 // RVV indexed loads/stores zero-extend offset operands which are narrower
1797 // than XLEN to XLEN.
1798 if (OffsetTy->getScalarType()->getIntegerBitWidth() <
1799 static_cast<unsigned>(LongSize)) {
1800 VectorType *OrigType = cast<VectorType>(OffsetTy);
1801 Type *ExtendTy = VectorType::get(IntptrTy, OrigType);
1802 OffsetOp = IB.CreateZExt(OffsetOp, ExtendTy);
1803 }
1804 }
1805 Addr = IB.CreateGEP(Ty, Addr, {OffsetOp});
1806 }
1807
1808 unsigned Granularity = 1 << Mapping.Scale;
1809 if (O.MaybeMask) {
1810 instrumentMaskedLoadOrStore(this, DL, IntptrTy, O.MaybeMask, O.MaybeEVL,
1811 O.MaybeStride, O.getInsn(), Addr, O.Alignment,
1812 Granularity, O.OpType, O.IsWrite, nullptr,
1813 UseCalls, Exp, RTCI);
1814 } else {
1815 doInstrumentAddress(this, O.getInsn(), O.getInsn(), Addr, O.Alignment,
1816 Granularity, O.TypeStoreSize, O.IsWrite, nullptr,
1817 UseCalls, Exp, RTCI);
1818 }
1819}
1820
1821Instruction *AddressSanitizer::generateCrashCode(Instruction *InsertBefore,
1822 Value *Addr, bool IsWrite,
1823 size_t AccessSizeIndex,
1824 Value *SizeArgument,
1825 uint32_t Exp,
1826 RuntimeCallInserter &RTCI) {
1827 InstrumentationIRBuilder IRB(InsertBefore);
1828 Value *ExpVal = Exp == 0 ? nullptr : ConstantInt::get(IRB.getInt32Ty(), Exp);
1829 CallInst *Call = nullptr;
1830 if (SizeArgument) {
1831 if (Exp == 0)
1832 Call = RTCI.createRuntimeCall(IRB, AsanErrorCallbackSized[IsWrite][0],
1833 {Addr, SizeArgument});
1834 else
1835 Call = RTCI.createRuntimeCall(IRB, AsanErrorCallbackSized[IsWrite][1],
1836 {Addr, SizeArgument, ExpVal});
1837 } else {
1838 if (Exp == 0)
1839 Call = RTCI.createRuntimeCall(
1840 IRB, AsanErrorCallback[IsWrite][0][AccessSizeIndex], Addr);
1841 else
1842 Call = RTCI.createRuntimeCall(
1843 IRB, AsanErrorCallback[IsWrite][1][AccessSizeIndex], {Addr, ExpVal});
1844 }
1845
1846 Call->setCannotMerge();
1847 return Call;
1848}
1849
1850Value *AddressSanitizer::createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong,
1851 Value *ShadowValue,
1852 uint32_t TypeStoreSize) {
1853 size_t Granularity = static_cast<size_t>(1) << Mapping.Scale;
1854 // Addr & (Granularity - 1)
1855 Value *LastAccessedByte =
1856 IRB.CreateAnd(AddrLong, ConstantInt::get(IntptrTy, Granularity - 1));
1857 // (Addr & (Granularity - 1)) + size - 1
1858 if (TypeStoreSize / 8 > 1)
1859 LastAccessedByte = IRB.CreateAdd(
1860 LastAccessedByte, ConstantInt::get(IntptrTy, TypeStoreSize / 8 - 1));
1861 // (uint8_t) ((Addr & (Granularity-1)) + size - 1)
1862 LastAccessedByte =
1863 IRB.CreateIntCast(LastAccessedByte, ShadowValue->getType(), false);
1864 // ((uint8_t) ((Addr & (Granularity-1)) + size - 1)) >= ShadowValue
1865 return IRB.CreateICmpSGE(LastAccessedByte, ShadowValue);
1866}
1867
1868Instruction *AddressSanitizer::instrumentAMDGPUAddress(
1869 Instruction *OrigIns, Instruction *InsertBefore, Value *Addr,
1870 uint32_t TypeStoreSize, bool IsWrite, Value *SizeArgument) {
1871 // Do not instrument unsupported addrspaces.
1872 if (isUnsupportedAMDGPUAddrspace(Addr))
1873 return nullptr;
1874 Type *PtrTy = cast<PointerType>(Addr->getType()->getScalarType());
1875 // Follow host instrumentation for global and constant addresses.
1876 if (PtrTy->getPointerAddressSpace() != 0)
1877 return InsertBefore;
1878 // Instrument generic addresses in supported address spaces.
1879 IRBuilder<> IRB(InsertBefore);
1880 Value *IsShared = IRB.CreateCall(AMDGPUAddressShared, {Addr});
1881 Value *IsPrivate = IRB.CreateCall(AMDGPUAddressPrivate, {Addr});
1882 Value *IsSharedOrPrivate = IRB.CreateOr(IsShared, IsPrivate);
1883 Value *Cmp = IRB.CreateNot(IsSharedOrPrivate);
1884 Value *AddrSpaceZeroLanding =
1885 SplitBlockAndInsertIfThen(Cmp, InsertBefore, false);
1886 InsertBefore = cast<Instruction>(AddrSpaceZeroLanding);
1887 return InsertBefore;
1888}
1889
1890Instruction *AddressSanitizer::genAMDGPUReportBlock(IRBuilder<> &IRB,
1891 Value *Cond, bool Recover) {
1892 Module &M = *IRB.GetInsertBlock()->getModule();
1893 Value *ReportCond = Cond;
1894 if (!Recover) {
1895 auto Ballot = M.getOrInsertFunction(kAMDGPUBallotName, IRB.getInt64Ty(),
1896 IRB.getInt1Ty());
1897 ReportCond = IRB.CreateIsNotNull(IRB.CreateCall(Ballot, {Cond}));
1898 }
1899
1900 auto *Trm =
1901 SplitBlockAndInsertIfThen(ReportCond, &*IRB.GetInsertPoint(), false,
1902 MDBuilder(*C).createUnlikelyBranchWeights());
1903 Trm->getParent()->setName("asan.report");
1904
1905 if (Recover)
1906 return Trm;
1907
1908 Trm = SplitBlockAndInsertIfThen(Cond, Trm, false);
1909 IRB.SetInsertPoint(Trm);
1910 return IRB.CreateCall(
1911 M.getOrInsertFunction(kAMDGPUUnreachableName, IRB.getVoidTy()), {});
1912}
1913
1914void AddressSanitizer::instrumentAddress(Instruction *OrigIns,
1915 Instruction *InsertBefore, Value *Addr,
1916 MaybeAlign Alignment,
1917 uint32_t TypeStoreSize, bool IsWrite,
1918 Value *SizeArgument, bool UseCalls,
1919 uint32_t Exp,
1920 RuntimeCallInserter &RTCI) {
1921 if (TargetTriple.isAMDGPU()) {
1922 InsertBefore = instrumentAMDGPUAddress(OrigIns, InsertBefore, Addr,
1923 TypeStoreSize, IsWrite, SizeArgument);
1924 if (!InsertBefore)
1925 return;
1926 }
1927
1928 InstrumentationIRBuilder IRB(InsertBefore);
1929 size_t AccessSizeIndex = TypeStoreSizeToSizeIndex(TypeStoreSize);
1930
1931 if (UseCalls && ClOptimizeCallbacks) {
1932 const ASanAccessInfo AccessInfo(IsWrite, CompileKernel, AccessSizeIndex);
1933 IRB.CreateIntrinsic(Intrinsic::asan_check_memaccess, {},
1934 {IRB.CreatePointerCast(Addr, PtrTy),
1935 ConstantInt::get(Int32Ty, AccessInfo.Packed)});
1936 return;
1937 }
1938
1939 Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
1940 if (UseCalls) {
1941 if (Exp == 0)
1942 RTCI.createRuntimeCall(
1943 IRB, AsanMemoryAccessCallback[IsWrite][0][AccessSizeIndex], AddrLong);
1944 else
1945 RTCI.createRuntimeCall(
1946 IRB, AsanMemoryAccessCallback[IsWrite][1][AccessSizeIndex],
1947 {AddrLong, ConstantInt::get(IRB.getInt32Ty(), Exp)});
1948 return;
1949 }
1950
1951 Type *ShadowTy =
1952 IntegerType::get(*C, std::max(8U, TypeStoreSize >> Mapping.Scale));
1953 Type *ShadowPtrTy = PointerType::get(*C, 0);
1954 Value *ShadowPtr = memToShadow(AddrLong, IRB);
1955 const uint64_t ShadowAlign =
1956 std::max<uint64_t>(Alignment.valueOrOne().value() >> Mapping.Scale, 1);
1957 Value *ShadowValue = IRB.CreateAlignedLoad(
1958 ShadowTy, IRB.CreateIntToPtr(ShadowPtr, ShadowPtrTy), Align(ShadowAlign));
1959
1960 Value *Cmp = IRB.CreateIsNotNull(ShadowValue);
1961 size_t Granularity = 1ULL << Mapping.Scale;
1962 Instruction *CrashTerm = nullptr;
1963
1964 bool GenSlowPath = (ClAlwaysSlowPath || (TypeStoreSize < 8 * Granularity));
1965
1966 if (TargetTriple.isAMDGCN()) {
1967 if (GenSlowPath) {
1968 auto *Cmp2 = createSlowPathCmp(IRB, AddrLong, ShadowValue, TypeStoreSize);
1969 Cmp = IRB.CreateAnd(Cmp, Cmp2);
1970 }
1971 CrashTerm = genAMDGPUReportBlock(IRB, Cmp, Recover);
1972 } else if (GenSlowPath) {
1973 // We use branch weights for the slow path check, to indicate that the slow
1974 // path is rarely taken. This seems to be the case for SPEC benchmarks.
1975 Instruction *CheckTerm = SplitBlockAndInsertIfThen(
1976 Cmp, InsertBefore, false, MDBuilder(*C).createUnlikelyBranchWeights());
1977 assert(cast<BranchInst>(CheckTerm)->isUnconditional());
1978 BasicBlock *NextBB = CheckTerm->getSuccessor(0);
1979 IRB.SetInsertPoint(CheckTerm);
1980 Value *Cmp2 = createSlowPathCmp(IRB, AddrLong, ShadowValue, TypeStoreSize);
1981 if (Recover) {
1982 CrashTerm = SplitBlockAndInsertIfThen(Cmp2, CheckTerm, false);
1983 } else {
1984 BasicBlock *CrashBlock =
1985 BasicBlock::Create(*C, "", NextBB->getParent(), NextBB);
1986 CrashTerm = new UnreachableInst(*C, CrashBlock);
1987 BranchInst *NewTerm = BranchInst::Create(CrashBlock, NextBB, Cmp2);
1988 ReplaceInstWithInst(CheckTerm, NewTerm);
1989 }
1990 } else {
1991 CrashTerm = SplitBlockAndInsertIfThen(Cmp, InsertBefore, !Recover);
1992 }
1993
1994 Instruction *Crash = generateCrashCode(
1995 CrashTerm, AddrLong, IsWrite, AccessSizeIndex, SizeArgument, Exp, RTCI);
1996 if (OrigIns->getDebugLoc())
1997 Crash->setDebugLoc(OrigIns->getDebugLoc());
1998}
1999
2000// Instrument unusual size or unusual alignment.
2001 // We cannot do it with a single check, so we do a 1-byte check for the
2002 // first and the last bytes. We call __asan_report_*_n(addr, real_size) to
2003 // be able to report the actual access size.
2004void AddressSanitizer::instrumentUnusualSizeOrAlignment(
2005 Instruction *I, Instruction *InsertBefore, Value *Addr,
2006 TypeSize TypeStoreSize, bool IsWrite, Value *SizeArgument, bool UseCalls,
2007 uint32_t Exp, RuntimeCallInserter &RTCI) {
2008 InstrumentationIRBuilder IRB(InsertBefore);
2009 Value *NumBits = IRB.CreateTypeSize(IntptrTy, TypeStoreSize);
2010 Value *Size = IRB.CreateLShr(NumBits, ConstantInt::get(IntptrTy, 3));
2011
2012 Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
2013 if (UseCalls) {
2014 if (Exp == 0)
2015 RTCI.createRuntimeCall(IRB, AsanMemoryAccessCallbackSized[IsWrite][0],
2016 {AddrLong, Size});
2017 else
2018 RTCI.createRuntimeCall(
2019 IRB, AsanMemoryAccessCallbackSized[IsWrite][1],
2020 {AddrLong, Size, ConstantInt::get(IRB.getInt32Ty(), Exp)});
2021 } else {
2022 Value *SizeMinusOne = IRB.CreateSub(Size, ConstantInt::get(IntptrTy, 1));
2023 Value *LastByte = IRB.CreateIntToPtr(
2024 IRB.CreateAdd(AddrLong, SizeMinusOne),
2025 Addr->getType());
2026 instrumentAddress(I, InsertBefore, Addr, {}, 8, IsWrite, Size, false, Exp,
2027 RTCI);
2028 instrumentAddress(I, InsertBefore, LastByte, {}, 8, IsWrite, Size, false,
2029 Exp, RTCI);
2030 }
2031}
2032
2033void ModuleAddressSanitizer::poisonOneInitializer(Function &GlobalInit) {
2034 // Set up the arguments to our poison/unpoison functions.
2035 IRBuilder<> IRB(&GlobalInit.front(),
2036 GlobalInit.front().getFirstInsertionPt());
2037
2038 // Add a call to poison all external globals before the given function starts.
2039 Value *ModuleNameAddr =
2040 ConstantExpr::getPointerCast(getOrCreateModuleName(), IntptrTy);
2041 IRB.CreateCall(AsanPoisonGlobals, ModuleNameAddr);
2042
2043 // Add calls to unpoison all globals before each return instruction.
2044 for (auto &BB : GlobalInit)
2045 if (ReturnInst *RI = dyn_cast<ReturnInst>(BB.getTerminator()))
2046 CallInst::Create(AsanUnpoisonGlobals, "", RI->getIterator());
2047}
2048
2049void ModuleAddressSanitizer::createInitializerPoisonCalls() {
2050 GlobalVariable *GV = M.getGlobalVariable("llvm.global_ctors");
2051 if (!GV)
2052 return;
2053
2054 ConstantArray *CA = dyn_cast<ConstantArray>(GV->getInitializer());
2055 if (!CA)
2056 return;
2057
2058 for (Use &OP : CA->operands()) {
2059 if (isa<ConstantAggregateZero>(OP)) continue;
2060 ConstantStruct *CS = cast<ConstantStruct>(OP);
2061
2062 // Must have a function or null ptr.
2063 if (Function *F = dyn_cast<Function>(CS->getOperand(1))) {
2064 if (F->getName() == kAsanModuleCtorName) continue;
2065 auto *Priority = cast<ConstantInt>(CS->getOperand(0));
2066 // Don't instrument CTORs that will run before asan.module_ctor.
2067 if (Priority->getLimitedValue() <= GetCtorAndDtorPriority(TargetTriple))
2068 continue;
2069 poisonOneInitializer(*F);
2070 }
2071 }
2072}
2073
2074const GlobalVariable *
2075ModuleAddressSanitizer::getExcludedAliasedGlobal(const GlobalAlias &GA) const {
2076 // In case this function should be expanded to include rules that do not just
2077 // apply when CompileKernel is true, either guard all existing rules with an
2078 // 'if (CompileKernel) { ... }' or be absolutely sure that all these rules
2079 // should also apply to user space.
2080 assert(CompileKernel && "Only expecting to be called when compiling kernel");
2081
2082 const Constant *C = GA.getAliasee();
2083
2084 // When compiling the kernel, globals that are aliased by symbols prefixed
2085 // by "__" are special and cannot be padded with a redzone.
2086 if (GA.getName().starts_with("__"))
2087 return dyn_cast<GlobalVariable>(C->stripPointerCastsAndAliases());
2088
2089 return nullptr;
2090}
2091
2092bool ModuleAddressSanitizer::shouldInstrumentGlobal(GlobalVariable *G) const {
2093 Type *Ty = G->getValueType();
2094 LLVM_DEBUG(dbgs() << "GLOBAL: " << *G << "\n");
2095
2096 if (G->hasSanitizerMetadata() && G->getSanitizerMetadata().NoAddress)
2097 return false;
2098 if (!Ty->isSized()) return false;
2099 if (!G->hasInitializer()) return false;
2100 // Globals in address space 1 and 4 are supported for AMDGPU.
2101 if (G->getAddressSpace() &&
2102 !(TargetTriple.isAMDGPU() && !isUnsupportedAMDGPUAddrspace(G)))
2103 return false;
2104 if (GlobalWasGeneratedByCompiler(G)) return false; // Our own globals.
2105 // Two problems with thread-locals:
2106 // - The address of the main thread's copy can't be computed at link-time.
2107 // - Need to poison all copies, not just the main thread's one.
2108 if (G->isThreadLocal()) return false;
2109 // For now, just ignore this Global if the alignment is large.
2110 if (G->getAlign() && *G->getAlign() > getMinRedzoneSizeForGlobal()) return false;
2111
2112 // For non-COFF targets, only instrument globals known to be defined by this
2113 // TU.
2114 // FIXME: We can instrument comdat globals on ELF if we are using the
2115 // GC-friendly metadata scheme.
2116 if (!TargetTriple.isOSBinFormatCOFF()) {
2117 if (!G->hasExactDefinition() || G->hasComdat())
2118 return false;
2119 } else {
2120 // On COFF, don't instrument non-ODR linkages.
2121 if (G->isInterposable())
2122 return false;
2123 // If the global has AvailableExternally linkage, then it is not in this
2124 // module, which means it does not need to be instrumented.
2125 if (G->hasAvailableExternallyLinkage())
2126 return false;
2127 }
2128
2129 // If a comdat is present, it must have a selection kind that implies ODR
2130 // semantics: no duplicates, any, or exact match.
2131 if (Comdat *C = G->getComdat()) {
2132 switch (C->getSelectionKind()) {
2133 case Comdat::Any:
2134 case Comdat::ExactMatch:
2135 case Comdat::NoDeduplicate:
2136 break;
2137 case Comdat::Largest:
2138 case Comdat::SameSize:
2139 return false;
2140 }
2141 }
2142
2143 if (G->hasSection()) {
2144 // The kernel uses explicit sections for mostly special global variables
2145 // that we should not instrument. E.g. the kernel may rely on their layout
2146 // without redzones, or remove them at link time ("discard.*"), etc.
2147 if (CompileKernel)
2148 return false;
2149
2150 StringRef Section = G->getSection();
2151
2152 // Globals from llvm.metadata aren't emitted, do not instrument them.
2153 if (Section == "llvm.metadata") return false;
2154 // Do not instrument globals from special LLVM sections.
2155 if (Section.contains("__llvm") || Section.contains("__LLVM"))
2156 return false;
2157
2158 // Do not instrument function pointers to initialization and termination
2159 // routines: dynamic linker will not properly handle redzones.
2160 if (Section.starts_with(".preinit_array") ||
2161 Section.starts_with(".init_array") ||
2162 Section.starts_with(".fini_array")) {
2163 return false;
2164 }
2165
2166 // Do not instrument user-defined sections (with names resembling
2167 // valid C identifiers)
2168 if (TargetTriple.isOSBinFormatELF()) {
2169 if (llvm::all_of(Section,
2170 [](char c) { return llvm::isAlnum(c) || c == '_'; }))
2171 return false;
2172 }
2173
2174 // On COFF, if the section name contains '$', it is highly likely that the
2175 // user is using section sorting to create an array of globals similar to
2176 // the way initialization callbacks are registered in .init_array and
2177 // .CRT$XCU. The ATL also registers things in .ATL$__[azm]. Adding redzones
2178 // to such globals is counterproductive, because the intent is that they
2179 // will form an array, and out-of-bounds accesses are expected.
2180 // See https://github.com/google/sanitizers/issues/305
2181 // and http://msdn.microsoft.com/en-US/en-en/library/bb918180(v=vs.120).aspx
2182 if (TargetTriple.isOSBinFormatCOFF() && Section.contains('$')) {
2183 LLVM_DEBUG(dbgs() << "Ignoring global in sorted section (contains '$'): "
2184 << *G << "\n");
2185 return false;
2186 }
2187
2188 if (TargetTriple.isOSBinFormatMachO()) {
2189 StringRef ParsedSegment, ParsedSection;
2190 unsigned TAA = 0, StubSize = 0;
2191 bool TAAParsed;
2192 cantFail(MCSectionMachO::ParseSectionSpecifier(
2193 Section, ParsedSegment, ParsedSection, TAA, TAAParsed, StubSize));
2194
2195 // Ignore the globals from the __OBJC section. The ObjC runtime assumes
2196 // those conform to /usr/lib/objc/runtime.h, so we can't add redzones to
2197 // them.
2198 if (ParsedSegment == "__OBJC" ||
2199 (ParsedSegment == "__DATA" && ParsedSection.starts_with("__objc_"))) {
2200 LLVM_DEBUG(dbgs() << "Ignoring ObjC runtime global: " << *G << "\n");
2201 return false;
2202 }
2203 // See https://github.com/google/sanitizers/issues/32
2204 // Constant CFString instances are compiled in the following way:
2205 // -- the string buffer is emitted into
2206 // __TEXT,__cstring,cstring_literals
2207 // -- the constant NSConstantString structure referencing that buffer
2208 // is placed into __DATA,__cfstring
2209 // Therefore there's no point in placing redzones into __DATA,__cfstring.
2210 // Moreover, it causes the linker to crash on OS X 10.7
2211 if (ParsedSegment == "__DATA" && ParsedSection == "__cfstring") {
2212 LLVM_DEBUG(dbgs() << "Ignoring CFString: " << *G << "\n");
2213 return false;
2214 }
2215 // The linker merges the contents of cstring_literals and removes the
2216 // trailing zeroes.
2217 if (ParsedSegment == "__TEXT" && (TAA & MachO::S_CSTRING_LITERALS)) {
2218 LLVM_DEBUG(dbgs() << "Ignoring a cstring literal: " << *G << "\n");
2219 return false;
2220 }
2221 }
2222 }
2223
2224 if (CompileKernel) {
2225 // Globals that prefixed by "__" are special and cannot be padded with a
2226 // redzone.
2227 if (G->getName().starts_with("__"))
2228 return false;
2229 }
2230
2231 return true;
2232}
2233
2234// On Mach-O platforms, we emit global metadata in a separate section of the
2235// binary in order to allow the linker to properly dead strip. This is only
2236// supported on recent versions of ld64.
2237bool ModuleAddressSanitizer::ShouldUseMachOGlobalsSection() const {
2238 if (!TargetTriple.isOSBinFormatMachO())
2239 return false;
2240
2241 if (TargetTriple.isMacOSX() && !TargetTriple.isMacOSXVersionLT(10, 11))
2242 return true;
2243 if (TargetTriple.isiOS() /* or tvOS */ && !TargetTriple.isOSVersionLT(9))
2244 return true;
2245 if (TargetTriple.isWatchOS() && !TargetTriple.isOSVersionLT(2))
2246 return true;
2247 if (TargetTriple.isDriverKit())
2248 return true;
2249 if (TargetTriple.isXROS())
2250 return true;
2251
2252 return false;
2253}
2254
2255StringRef ModuleAddressSanitizer::getGlobalMetadataSection() const {
2256 switch (TargetTriple.getObjectFormat()) {
2257 case Triple::COFF: return ".ASAN$GL";
2258 case Triple::ELF: return "asan_globals";
2259 case Triple::MachO: return "__DATA,__asan_globals,regular";
2260 case Triple::Wasm:
2261 case Triple::GOFF:
2262 case Triple::SPIRV:
2263 case Triple::XCOFF:
2266 "ModuleAddressSanitizer not implemented for object file format");
2268 break;
2269 }
2270 llvm_unreachable("unsupported object format");
2271}
2272
2273void ModuleAddressSanitizer::initializeCallbacks() {
2274 IRBuilder<> IRB(*C);
2275
2276 // Declare our poisoning and unpoisoning functions.
2277 AsanPoisonGlobals =
2278 M.getOrInsertFunction(kAsanPoisonGlobalsName, IRB.getVoidTy(), IntptrTy);
2279 AsanUnpoisonGlobals =
2280 M.getOrInsertFunction(kAsanUnpoisonGlobalsName, IRB.getVoidTy());
2281
2282 // Declare functions that register/unregister globals.
2283 AsanRegisterGlobals = M.getOrInsertFunction(
2284 kAsanRegisterGlobalsName, IRB.getVoidTy(), IntptrTy, IntptrTy);
2285 AsanUnregisterGlobals = M.getOrInsertFunction(
2286 kAsanUnregisterGlobalsName, IRB.getVoidTy(), IntptrTy, IntptrTy);
2287
2288 // Declare the functions that find globals in a shared object and then invoke
2289 // the (un)register function on them.
2290 AsanRegisterImageGlobals = M.getOrInsertFunction(
2291 kAsanRegisterImageGlobalsName, IRB.getVoidTy(), IntptrTy);
2292 AsanUnregisterImageGlobals = M.getOrInsertFunction(
2293 kAsanUnregisterImageGlobalsName, IRB.getVoidTy(), IntptrTy);
2294
2295 AsanRegisterElfGlobals =
2296 M.getOrInsertFunction(kAsanRegisterElfGlobalsName, IRB.getVoidTy(),
2297 IntptrTy, IntptrTy, IntptrTy);
2298 AsanUnregisterElfGlobals =
2299 M.getOrInsertFunction(kAsanUnregisterElfGlobalsName, IRB.getVoidTy(),
2300 IntptrTy, IntptrTy, IntptrTy);
2301}
2302
2303// Put the metadata and the instrumented global in the same group. This ensures
2304// that the metadata is discarded if the instrumented global is discarded.
2305void ModuleAddressSanitizer::SetComdatForGlobalMetadata(
2306 GlobalVariable *G, GlobalVariable *Metadata, StringRef InternalSuffix) {
2307 Module &M = *G->getParent();
2308 Comdat *C = G->getComdat();
2309 if (!C) {
2310 if (!G->hasName()) {
2311 // If G is unnamed, it must be internal. Give it an artificial name
2312 // so we can put it in a comdat.
2313 assert(G->hasLocalLinkage());
2314 G->setName(genName("anon_global"));
2315 }
2316
2317 if (!InternalSuffix.empty() && G->hasLocalLinkage()) {
2318 std::string Name = std::string(G->getName());
2319 Name += InternalSuffix;
2320 C = M.getOrInsertComdat(Name);
2321 } else {
2322 C = M.getOrInsertComdat(G->getName());
2323 }
2324
2325 // Make this IMAGE_COMDAT_SELECT_NODUPLICATES on COFF. Also upgrade private
2326 // linkage to internal linkage so that a symbol table entry is emitted. This
2327 // is necessary in order to create the comdat group.
2328 if (TargetTriple.isOSBinFormatCOFF()) {
2329 C->setSelectionKind(Comdat::NoDeduplicate);
2330 if (G->hasPrivateLinkage())
2331 G->setLinkage(GlobalValue::InternalLinkage);
2332 }
2333 G->setComdat(C);
2334 }
2335
2336 assert(G->hasComdat());
2337 Metadata->setComdat(G->getComdat());
2338}
2339
2340// Create a separate metadata global and put it in the appropriate ASan
2341// global registration section.
2342 GlobalVariable *
2343 ModuleAddressSanitizer::CreateMetadataGlobal(Constant *Initializer,
2344 StringRef OriginalName) {
2345 auto Linkage = TargetTriple.isOSBinFormatMachO()
2346 ? GlobalVariable::InternalLinkage
2347 : GlobalVariable::PrivateLinkage;
2348 GlobalVariable *Metadata = new GlobalVariable(
2349 M, Initializer->getType(), false, Linkage, Initializer,
2350 Twine("__asan_global_") + GlobalValue::dropLLVMManglingEscape(OriginalName));
2351 Metadata->setSection(getGlobalMetadataSection());
2352 // Place metadata in a large section for x86-64 ELF binaries to mitigate
2353 // relocation pressure.
2354 setGlobalVariableLargeSection(TargetTriple, *Metadata);
2355 return Metadata;
2356}
2357
2358Instruction *ModuleAddressSanitizer::CreateAsanModuleDtor() {
2359 AsanDtorFunction = Function::createWithDefaultAttr(
2360 FunctionType::get(Type::getVoidTy(*C), false),
2361 GlobalValue::InternalLinkage, 0, kAsanModuleDtorName, &M);
2362 AsanDtorFunction->addFnAttr(Attribute::NoUnwind);
2363 // Ensure Dtor cannot be discarded, even if in a comdat.
2364 appendToUsed(M, {AsanDtorFunction});
2365 BasicBlock *AsanDtorBB = BasicBlock::Create(*C, "", AsanDtorFunction);
2366
2367 return ReturnInst::Create(*C, AsanDtorBB);
2368}
2369
2370void ModuleAddressSanitizer::InstrumentGlobalsCOFF(
2371 IRBuilder<> &IRB, ArrayRef<GlobalVariable *> ExtendedGlobals,
2372 ArrayRef<Constant *> MetadataInitializers) {
2373 assert(ExtendedGlobals.size() == MetadataInitializers.size());
2374 auto &DL = M.getDataLayout();
2375
2376 SmallVector<GlobalValue *, 16> MetadataGlobals(ExtendedGlobals.size());
2377 for (size_t i = 0; i < ExtendedGlobals.size(); i++) {
2378 Constant *Initializer = MetadataInitializers[i];
2379 GlobalVariable *G = ExtendedGlobals[i];
2380 GlobalVariable *Metadata = CreateMetadataGlobal(Initializer, G->getName());
2381 MDNode *MD = MDNode::get(M.getContext(), ValueAsMetadata::get(G));
2382 Metadata->setMetadata(LLVMContext::MD_associated, MD);
2383 MetadataGlobals[i] = Metadata;
2384
2385 // The MSVC linker always inserts padding when linking incrementally. We
2386 // cope with that by aligning each struct to its size, which must be a power
2387 // of two.
2388 unsigned SizeOfGlobalStruct = DL.getTypeAllocSize(Initializer->getType());
2389 assert(isPowerOf2_32(SizeOfGlobalStruct) &&
2390 "global metadata will not be padded appropriately");
2391 Metadata->setAlignment(assumeAligned(SizeOfGlobalStruct));
2392
2393 SetComdatForGlobalMetadata(G, Metadata, "");
2394 }
2395
2396 // Update llvm.compiler.used, adding the new metadata globals. This is
2397 // needed so that during LTO these variables stay alive.
2398 if (!MetadataGlobals.empty())
2399 appendToCompilerUsed(M, MetadataGlobals);
2400}
2401
2402void ModuleAddressSanitizer::instrumentGlobalsELF(
2403 IRBuilder<> &IRB, ArrayRef<GlobalVariable *> ExtendedGlobals,
2404 ArrayRef<Constant *> MetadataInitializers,
2405 const std::string &UniqueModuleId) {
2406 assert(ExtendedGlobals.size() == MetadataInitializers.size());
2407
2408 // Putting globals in a comdat changes the semantics and can potentially
2409 // cause false negative ODR violations at link time. If ODR indicators are
2410 // used, we keep the comdat sections, as link-time ODR violations will be
2411 // detected on the ODR indicator symbols.
2412 bool UseComdatForGlobalsGC = UseOdrIndicator && !UniqueModuleId.empty();
2413
2414 SmallVector<GlobalValue *, 16> MetadataGlobals(ExtendedGlobals.size());
2415 for (size_t i = 0; i < ExtendedGlobals.size(); i++) {
2416 GlobalVariable *G = ExtendedGlobals[i];
2417 GlobalVariable *Metadata =
2418 CreateMetadataGlobal(MetadataInitializers[i], G->getName());
2419 MDNode *MD = MDNode::get(M.getContext(), ValueAsMetadata::get(G));
2420 Metadata->setMetadata(LLVMContext::MD_associated, MD);
2421 MetadataGlobals[i] = Metadata;
2422
2423 if (UseComdatForGlobalsGC)
2424 SetComdatForGlobalMetadata(G, Metadata, UniqueModuleId);
2425 }
2426
2427 // Update llvm.compiler.used, adding the new metadata globals. This is
2428 // needed so that during LTO these variables stay alive.
2429 if (!MetadataGlobals.empty())
2430 appendToCompilerUsed(M, MetadataGlobals);
2431
2432 // RegisteredFlag serves two purposes. First, we can pass it to dladdr()
2433 // to look up the loaded image that contains it. Second, we can store in it
2434 // whether registration has already occurred, to prevent duplicate
2435 // registration.
2436 //
2437 // Common linkage ensures that there is only one global per shared library.
2438 GlobalVariable *RegisteredFlag = new GlobalVariable(
2439 M, IntptrTy, false, GlobalVariable::CommonLinkage,
2440 ConstantInt::get(IntptrTy, 0), kAsanGlobalsRegisteredFlagName);
2441 RegisteredFlag->setVisibility(GlobalVariable::HiddenVisibility);
2442
2443 // Create start and stop symbols.
2444 GlobalVariable *StartELFMetadata = new GlobalVariable(
2445 M, IntptrTy, false, GlobalVariable::ExternalWeakLinkage, nullptr,
2446 "__start_" + getGlobalMetadataSection());
2447 StartELFMetadata->setVisibility(GlobalVariable::HiddenVisibility);
2448 GlobalVariable *StopELFMetadata = new GlobalVariable(
2449 M, IntptrTy, false, GlobalVariable::ExternalWeakLinkage, nullptr,
2450 "__stop_" + getGlobalMetadataSection());
2451 StopELFMetadata->setVisibility(GlobalVariable::HiddenVisibility);
2452
2453 // Create a call to register the globals with the runtime.
2454 if (ConstructorKind == AsanCtorKind::Global)
2455 IRB.CreateCall(AsanRegisterElfGlobals,
2456 {IRB.CreatePointerCast(RegisteredFlag, IntptrTy),
2457 IRB.CreatePointerCast(StartELFMetadata, IntptrTy),
2458 IRB.CreatePointerCast(StopELFMetadata, IntptrTy)});
2459
2460 // We also need to unregister globals at the end, e.g., when a shared library
2461 // gets closed.
2462 if (DestructorKind != AsanDtorKind::None && !MetadataGlobals.empty()) {
2463 IRBuilder<> IrbDtor(CreateAsanModuleDtor());
2464 IrbDtor.CreateCall(AsanUnregisterElfGlobals,
2465 {IRB.CreatePointerCast(RegisteredFlag, IntptrTy),
2466 IRB.CreatePointerCast(StartELFMetadata, IntptrTy),
2467 IRB.CreatePointerCast(StopELFMetadata, IntptrTy)});
2468 }
2469}
2470
2471void ModuleAddressSanitizer::InstrumentGlobalsMachO(
2472 IRBuilder<> &IRB, ArrayRef<GlobalVariable *> ExtendedGlobals,
2473 ArrayRef<Constant *> MetadataInitializers) {
2474 assert(ExtendedGlobals.size() == MetadataInitializers.size());
2475
2476 // On recent Mach-O platforms, use a structure which binds the liveness of
2477 // the global variable to the metadata struct. Keep the list of "Liveness" GV
2478 // created to be added to llvm.compiler.used
2479 StructType *LivenessTy = StructType::get(IntptrTy, IntptrTy);
2480 SmallVector<GlobalValue *, 16> LivenessGlobals(ExtendedGlobals.size());
2481
2482 for (size_t i = 0; i < ExtendedGlobals.size(); i++) {
2483 Constant *Initializer = MetadataInitializers[i];
2484 GlobalVariable *G = ExtendedGlobals[i];
2485 GlobalVariable *Metadata = CreateMetadataGlobal(Initializer, G->getName());
2486
2487 // On recent Mach-O platforms, we emit the global metadata in a way that
2488 // allows the linker to properly strip dead globals.
2489 auto LivenessBinder =
2490 ConstantStruct::get(LivenessTy, Initializer->getAggregateElement(0u),
2491 ConstantExpr::getPointerCast(Metadata, IntptrTy));
2492 GlobalVariable *Liveness = new GlobalVariable(
2493 M, LivenessTy, false, GlobalVariable::InternalLinkage, LivenessBinder,
2494 Twine("__asan_binder_") + G->getName());
2495 Liveness->setSection("__DATA,__asan_liveness,regular,live_support");
2496 LivenessGlobals[i] = Liveness;
2497 }
2498
2499 // Update llvm.compiler.used, adding the new liveness globals. This is
2500 // needed so that during LTO these variables stay alive. The alternative
2501 // would be to have the linker handle the LTO symbols, but libLTO's
2502 // current API does not expose access to the section for each symbol.
2503 if (!LivenessGlobals.empty())
2504 appendToCompilerUsed(M, LivenessGlobals);
2505
2506 // RegisteredFlag serves two purposes. First, we can pass it to dladdr()
2507 // to look up the loaded image that contains it. Second, we can store in it
2508 // whether registration has already occurred, to prevent duplicate
2509 // registration.
2510 //
2511 // Common linkage ensures that there is only one global per shared library.
2512 GlobalVariable *RegisteredFlag = new GlobalVariable(
2513 M, IntptrTy, false, GlobalVariable::CommonLinkage,
2514 ConstantInt::get(IntptrTy, 0), kAsanGlobalsRegisteredFlagName);
2515 RegisteredFlag->setVisibility(GlobalVariable::HiddenVisibility);
2516
2517 if (ConstructorKind == AsanCtorKind::Global)
2518 IRB.CreateCall(AsanRegisterImageGlobals,
2519 {IRB.CreatePointerCast(RegisteredFlag, IntptrTy)});
2520
2521 // We also need to unregister globals at the end, e.g., when a shared library
2522 // gets closed.
2523 if (DestructorKind != AsanDtorKind::None) {
2524 IRBuilder<> IrbDtor(CreateAsanModuleDtor());
2525 IrbDtor.CreateCall(AsanUnregisterImageGlobals,
2526 {IRB.CreatePointerCast(RegisteredFlag, IntptrTy)});
2527 }
2528}
2529
2530void ModuleAddressSanitizer::InstrumentGlobalsWithMetadataArray(
2531 IRBuilder<> &IRB, ArrayRef<GlobalVariable *> ExtendedGlobals,
2532 ArrayRef<Constant *> MetadataInitializers) {
2533 assert(ExtendedGlobals.size() == MetadataInitializers.size());
2534 unsigned N = ExtendedGlobals.size();
2535 assert(N > 0);
2536
2537 // On platforms that don't have a custom metadata section, we emit an array
2538 // of global metadata structures.
2539 ArrayType *ArrayOfGlobalStructTy =
2540 ArrayType::get(MetadataInitializers[0]->getType(), N);
2541 auto AllGlobals = new GlobalVariable(
2542 M, ArrayOfGlobalStructTy, false, GlobalVariable::InternalLinkage,
2543 ConstantArray::get(ArrayOfGlobalStructTy, MetadataInitializers), "");
2544 if (Mapping.Scale > 3)
2545 AllGlobals->setAlignment(Align(1ULL << Mapping.Scale));
2546
2547 if (ConstructorKind == AsanCtorKind::Global)
2548 IRB.CreateCall(AsanRegisterGlobals,
2549 {IRB.CreatePointerCast(AllGlobals, IntptrTy),
2550 ConstantInt::get(IntptrTy, N)});
2551
2552 // We also need to unregister globals at the end, e.g., when a shared library
2553 // gets closed.
2554 if (DestructorKind != AsanDtorKind::None) {
2555 IRBuilder<> IrbDtor(CreateAsanModuleDtor());
2556 IrbDtor.CreateCall(AsanUnregisterGlobals,
2557 {IRB.CreatePointerCast(AllGlobals, IntptrTy),
2558 ConstantInt::get(IntptrTy, N)});
2559 }
2560}
2561
2562// This function replaces all global variables with new variables that have
2563// trailing redzones. It also creates a function that poisons
2564// redzones and inserts this function into llvm.global_ctors.
2565// Sets *CtorComdat to true if the global registration code emitted into the
2566// asan constructor is comdat-compatible.
2567void ModuleAddressSanitizer::instrumentGlobals(IRBuilder<> &IRB,
2568 bool *CtorComdat) {
2569 // Build set of globals that are aliased by some GA, where
2570 // getExcludedAliasedGlobal(GA) returns the relevant GlobalVariable.
2571 SmallPtrSet<const GlobalVariable *, 16> AliasedGlobalExclusions;
2572 if (CompileKernel) {
2573 for (auto &GA : M.aliases()) {
2574 if (const GlobalVariable *GV = getExcludedAliasedGlobal(GA))
2575 AliasedGlobalExclusions.insert(GV);
2576 }
2577 }
2578
2579 SmallVector<GlobalVariable *, 16> GlobalsToChange;
2580 for (auto &G : M.globals()) {
2581 if (!AliasedGlobalExclusions.count(&G) && shouldInstrumentGlobal(&G))
2582 GlobalsToChange.push_back(&G);
2583 }
2584
2585 size_t n = GlobalsToChange.size();
2586 auto &DL = M.getDataLayout();
2587
2588 // A global is described by a structure
2589 // size_t beg;
2590 // size_t size;
2591 // size_t size_with_redzone;
2592 // const char *name;
2593 // const char *module_name;
2594 // size_t has_dynamic_init;
2595 // size_t padding_for_windows_msvc_incremental_link;
2596 // size_t odr_indicator;
2597 // We initialize an array of such structures and pass it to a run-time call.
2598 StructType *GlobalStructTy =
2599 StructType::get(IntptrTy, IntptrTy, IntptrTy, IntptrTy, IntptrTy,
2600 IntptrTy, IntptrTy, IntptrTy);
2601 SmallVector<GlobalVariable *, 16> NewGlobals(n);
2602 SmallVector<Constant *, 16> Initializers(n);
2603
2604 for (size_t i = 0; i < n; i++) {
2605 GlobalVariable *G = GlobalsToChange[i];
2606
2607 GlobalValue::SanitizerMetadata MD;
2608 if (G->hasSanitizerMetadata())
2609 MD = G->getSanitizerMetadata();
2610
2611 // The runtime library tries demangling symbol names in the descriptor but
2612 // functionality like __cxa_demangle may be unavailable (e.g.
2613 // -static-libstdc++). So we demangle the symbol names here.
2614 std::string NameForGlobal = G->getName().str();
2615 GlobalVariable *Name =
2616 createPrivateGlobalForString(M, llvm::demangle(NameForGlobal),
2617 /*AllowMerging*/ true, genName("global"));
2618
2619 Type *Ty = G->getValueType();
2620 const uint64_t SizeInBytes = DL.getTypeAllocSize(Ty);
2621 const uint64_t RightRedzoneSize = getRedzoneSizeForGlobal(SizeInBytes);
2622 Type *RightRedZoneTy = ArrayType::get(IRB.getInt8Ty(), RightRedzoneSize);
2623
2624 StructType *NewTy = StructType::get(Ty, RightRedZoneTy);
2625 Constant *NewInitializer = ConstantStruct::get(
2626 NewTy, G->getInitializer(), Constant::getNullValue(RightRedZoneTy));
2627
2628 // Create a new global variable with enough space for a redzone.
2629 GlobalValue::LinkageTypes Linkage = G->getLinkage();
2630 if (G->isConstant() && Linkage == GlobalValue::PrivateLinkage)
2631 Linkage = GlobalValue::InternalLinkage;
2632 GlobalVariable *NewGlobal = new GlobalVariable(
2633 M, NewTy, G->isConstant(), Linkage, NewInitializer, "", G,
2634 G->getThreadLocalMode(), G->getAddressSpace());
2635 NewGlobal->copyAttributesFrom(G);
2636 NewGlobal->setComdat(G->getComdat());
2637 NewGlobal->setAlignment(Align(getMinRedzoneSizeForGlobal()));
2638 // Don't fold globals with redzones. ODR violation detection and redzone
2639 // poisoning implicitly create a dependence on the global's address, so it
2640 // is no longer valid for the global to be marked unnamed_addr.
2641 NewGlobal->setUnnamedAddr(GlobalValue::UnnamedAddr::None);
2642
2643 // Move null-terminated C strings to "__asan_cstring" section on Darwin.
2644 if (TargetTriple.isOSBinFormatMachO() && !G->hasSection() &&
2645 G->isConstant()) {
2646 auto Seq = dyn_cast<ConstantDataSequential>(G->getInitializer());
2647 if (Seq && Seq->isCString())
2648 NewGlobal->setSection("__TEXT,__asan_cstring,regular");
2649 }
2650
2651 // Transfer the debug info and type metadata. The payload starts at offset
2652 // zero so we can copy the metadata over as is.
2653 NewGlobal->copyMetadata(G, 0);
2654
2655 Value *Indices2[2];
2656 Indices2[0] = IRB.getInt32(0);
2657 Indices2[1] = IRB.getInt32(0);
2658
2659 G->replaceAllUsesWith(
2660 ConstantExpr::getGetElementPtr(NewTy, NewGlobal, Indices2, true));
2661 NewGlobal->takeName(G);
2662 G->eraseFromParent();
2663 NewGlobals[i] = NewGlobal;
2664
2665 Constant *ODRIndicator = ConstantPointerNull::get(PtrTy);
2666 GlobalValue *InstrumentedGlobal = NewGlobal;
2667
2668 bool CanUsePrivateAliases =
2669 TargetTriple.isOSBinFormatELF() || TargetTriple.isOSBinFormatMachO() ||
2670 TargetTriple.isOSBinFormatWasm();
2671 if (CanUsePrivateAliases && UsePrivateAlias) {
2672 // Create local alias for NewGlobal to avoid crash on ODR between
2673 // instrumented and non-instrumented libraries.
2674 InstrumentedGlobal =
2675 GlobalAlias::create(GlobalValue::PrivateLinkage, "", NewGlobal);
2676 }
2677
2678 // ODR should not happen for local linkage.
2679 if (NewGlobal->hasLocalLinkage()) {
2680 ODRIndicator =
2681 ConstantExpr::getIntToPtr(ConstantInt::get(IntptrTy, -1), PtrTy);
2682 } else if (UseOdrIndicator) {
2683 // With local aliases, we need to provide another externally visible
2684 // symbol __odr_asan_XXX to detect ODR violation.
2685 auto *ODRIndicatorSym =
2686 new GlobalVariable(M, IRB.getInt8Ty(), false, Linkage,
2687 ConstantInt::get(IRB.getInt8Ty(), 0),
2688 kODRGenPrefix + NameForGlobal, nullptr,
2689 NewGlobal->getThreadLocalMode());
2690
2691 // Set meaningful attributes for indicator symbol.
2692 ODRIndicatorSym->setVisibility(NewGlobal->getVisibility());
2693 ODRIndicatorSym->setDLLStorageClass(NewGlobal->getDLLStorageClass());
2694 ODRIndicatorSym->setAlignment(Align(1));
2695 ODRIndicator = ODRIndicatorSym;
2696 }
2697
2698 Constant *Initializer = ConstantStruct::get(
2699 GlobalStructTy,
2700 ConstantExpr::getPointerCast(InstrumentedGlobal, IntptrTy),
2701 ConstantInt::get(IntptrTy, SizeInBytes),
2702 ConstantInt::get(IntptrTy, SizeInBytes + RightRedzoneSize),
2703 ConstantExpr::getPointerCast(Name, IntptrTy),
2704 ConstantExpr::getPointerCast(getOrCreateModuleName(), IntptrTy),
2705 ConstantInt::get(IntptrTy, MD.IsDynInit),
2706 Constant::getNullValue(IntptrTy),
2707 ConstantExpr::getPointerCast(ODRIndicator, IntptrTy));
2708
2709 LLVM_DEBUG(dbgs() << "NEW GLOBAL: " << *NewGlobal << "\n");
2710
2711 Initializers[i] = Initializer;
2712 }
2713
2714 // Add instrumented globals to the llvm.compiler.used list to prevent LTO
2715 // from ConstantMerge'ing them.
2716 SmallVector<GlobalValue *, 16> GlobalsToAddToUsedList;
2717 for (size_t i = 0; i < n; i++) {
2718 GlobalVariable *G = NewGlobals[i];
2719 if (G->getName().empty()) continue;
2720 GlobalsToAddToUsedList.push_back(G);
2721 }
2722 appendToCompilerUsed(M, ArrayRef<GlobalValue *>(GlobalsToAddToUsedList));
2723
2724 if (UseGlobalsGC && TargetTriple.isOSBinFormatELF()) {
2725 // Use COMDAT and register globals even if n == 0 to ensure that (a) the
2726 // linkage unit will only have one module constructor, and (b) the register
2727 // function will be called. The module destructor is not created when n ==
2728 // 0.
2729 *CtorComdat = true;
2730 instrumentGlobalsELF(IRB, NewGlobals, Initializers, getUniqueModuleId(&M));
2731 } else if (n == 0) {
2732 // When UseGlobalsGC is false, COMDAT can still be used if n == 0, because
2733 // all compile units will have identical module constructor/destructor.
2734 *CtorComdat = TargetTriple.isOSBinFormatELF();
2735 } else {
2736 *CtorComdat = false;
2737 if (UseGlobalsGC && TargetTriple.isOSBinFormatCOFF()) {
2738 InstrumentGlobalsCOFF(IRB, NewGlobals, Initializers);
2739 } else if (UseGlobalsGC && ShouldUseMachOGlobalsSection()) {
2740 InstrumentGlobalsMachO(IRB, NewGlobals, Initializers);
2741 } else {
2742 InstrumentGlobalsWithMetadataArray(IRB, NewGlobals, Initializers);
2743 }
2744 }
2745
2746 // Create calls for poisoning before initializers run and unpoisoning after.
2747 if (ClInitializers)
2748 createInitializerPoisonCalls();
2749
2750 LLVM_DEBUG(dbgs() << M);
2751}
2752
2753uint64_t
2754ModuleAddressSanitizer::getRedzoneSizeForGlobal(uint64_t SizeInBytes) const {
2755 constexpr uint64_t kMaxRZ = 1 << 18;
2756 const uint64_t MinRZ = getMinRedzoneSizeForGlobal();
2757
2758 uint64_t RZ = 0;
2759 if (SizeInBytes <= MinRZ / 2) {
2760 // Reduce redzone size for small size objects, e.g. int, char[1]. MinRZ is
2761 // at least 32 bytes, optimize when SizeInBytes is less than or equal to
2762 // half of MinRZ.
2763 RZ = MinRZ - SizeInBytes;
2764 } else {
2765 // Calculate RZ, where MinRZ <= RZ <= MaxRZ, and RZ ~ 1/4 * SizeInBytes.
2766 RZ = std::clamp((SizeInBytes / MinRZ / 4) * MinRZ, MinRZ, kMaxRZ);
2767
2768 // Round up to multiple of MinRZ.
2769 if (SizeInBytes % MinRZ)
2770 RZ += MinRZ - (SizeInBytes % MinRZ);
2771 }
2772
2773 assert((RZ + SizeInBytes) % MinRZ == 0);
2774
2775 return RZ;
2776}
2777
2778int ModuleAddressSanitizer::GetAsanVersion() const {
2779 int LongSize = M.getDataLayout().getPointerSizeInBits();
2780 bool isAndroid = M.getTargetTriple().isAndroid();
2781 int Version = 8;
2782 // 32-bit Android is one version ahead because of the switch to dynamic
2783 // shadow.
2784 Version += (LongSize == 32 && isAndroid);
2785 return Version;
2786}
2787
2788GlobalVariable *ModuleAddressSanitizer::getOrCreateModuleName() {
2789 if (!ModuleName) {
2790 // We shouldn't merge identical module names, as this string serves as a
2791 // unique module ID at runtime.
2792 ModuleName =
2793 createPrivateGlobalForString(M, M.getModuleIdentifier(),
2794 /*AllowMerging*/ false, genName("module"));
2795 }
2796 return ModuleName;
2797}
2798
2799bool ModuleAddressSanitizer::instrumentModule() {
2800 initializeCallbacks();
2801
2802 for (Function &F : M)
2803 removeASanIncompatibleFnAttributes(F, /*ReadsArgMem=*/false);
2804
2805 // Create a module constructor. A destructor is created lazily because not
2806 // all platforms and not all modules need it.
2807 if (ConstructorKind == AsanCtorKind::Global) {
2808 if (CompileKernel) {
2809 // The kernel always builds with its own runtime, and therefore does not
2810 // need the init and version check calls.
2811 AsanCtorFunction = createSanitizerCtor(M, kAsanModuleCtorName);
2812 } else {
2813 std::string AsanVersion = std::to_string(GetAsanVersion());
2814 std::string VersionCheckName =
2815 InsertVersionCheck ? (kAsanVersionCheckNamePrefix + AsanVersion) : "";
2816 std::tie(AsanCtorFunction, std::ignore) =
2817 createSanitizerCtorAndInitFunctions(
2818 M, kAsanModuleCtorName, kAsanInitName, /*InitArgTypes=*/{},
2819 /*InitArgs=*/{}, VersionCheckName);
2820 }
2821 }
2822
2823 bool CtorComdat = true;
2824 if (ClGlobals) {
2825 assert(AsanCtorFunction || ConstructorKind == AsanCtorKind::None);
2826 if (AsanCtorFunction) {
2827 IRBuilder<> IRB(AsanCtorFunction->getEntryBlock().getTerminator());
2828 instrumentGlobals(IRB, &CtorComdat);
2829 } else {
2830 IRBuilder<> IRB(*C);
2831 instrumentGlobals(IRB, &CtorComdat);
2832 }
2833 }
2834
2835 const uint64_t Priority = GetCtorAndDtorPriority(TargetTriple);
2836
2837 // Put the constructor and destructor in comdat if both
2838 // (1) global instrumentation is not TU-specific and
2839 // (2) the target is ELF.
2840 if (UseCtorComdat && TargetTriple.isOSBinFormatELF() && CtorComdat) {
2841 if (AsanCtorFunction) {
2842 AsanCtorFunction->setComdat(M.getOrInsertComdat(kAsanModuleCtorName));
2843 appendToGlobalCtors(M, AsanCtorFunction, Priority, AsanCtorFunction);
2844 }
2845 if (AsanDtorFunction) {
2846 AsanDtorFunction->setComdat(M.getOrInsertComdat(kAsanModuleDtorName));
2847 appendToGlobalDtors(M, AsanDtorFunction, Priority, AsanDtorFunction);
2848 }
2849 } else {
2850 if (AsanCtorFunction)
2851 appendToGlobalCtors(M, AsanCtorFunction, Priority);
2852 if (AsanDtorFunction)
2853 appendToGlobalDtors(M, AsanDtorFunction, Priority);
2854 }
2855
2856 return true;
2857}
2858
2859void AddressSanitizer::initializeCallbacks(const TargetLibraryInfo *TLI) {
2860 IRBuilder<> IRB(*C);
2861 // Create __asan_report* callbacks.
2862 // IsWrite, TypeSize and Exp are encoded in the function name.
2863 for (int Exp = 0; Exp < 2; Exp++) {
2864 for (size_t AccessIsWrite = 0; AccessIsWrite <= 1; AccessIsWrite++) {
2865 const std::string TypeStr = AccessIsWrite ? "store" : "load";
2866 const std::string ExpStr = Exp ? "exp_" : "";
2867 const std::string EndingStr = Recover ? "_noabort" : "";
2868
2869 SmallVector<Type *, 3> Args2 = {IntptrTy, IntptrTy};
2870 SmallVector<Type *, 2> Args1{1, IntptrTy};
2871 AttributeList AL2;
2872 AttributeList AL1;
2873 if (Exp) {
2874 Type *ExpType = Type::getInt32Ty(*C);
2875 Args2.push_back(ExpType);
2876 Args1.push_back(ExpType);
2877 if (auto AK = TLI->getExtAttrForI32Param(false)) {
2878 AL2 = AL2.addParamAttribute(*C, 2, AK);
2879 AL1 = AL1.addParamAttribute(*C, 1, AK);
2880 }
2881 }
2882 AsanErrorCallbackSized[AccessIsWrite][Exp] = M.getOrInsertFunction(
2883 kAsanReportErrorTemplate + ExpStr + TypeStr + "_n" + EndingStr,
2884 FunctionType::get(IRB.getVoidTy(), Args2, false), AL2);
2885
2886 AsanMemoryAccessCallbackSized[AccessIsWrite][Exp] = M.getOrInsertFunction(
2887 ClMemoryAccessCallbackPrefix + ExpStr + TypeStr + "N" + EndingStr,
2888 FunctionType::get(IRB.getVoidTy(), Args2, false), AL2);
2889
2890 for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes;
2891 AccessSizeIndex++) {
2892 const std::string Suffix = TypeStr + itostr(1ULL << AccessSizeIndex);
2893 AsanErrorCallback[AccessIsWrite][Exp][AccessSizeIndex] =
2894 M.getOrInsertFunction(
2895 kAsanReportErrorTemplate + ExpStr + Suffix + EndingStr,
2896 FunctionType::get(IRB.getVoidTy(), Args1, false), AL1);
2897
2898 AsanMemoryAccessCallback[AccessIsWrite][Exp][AccessSizeIndex] =
2899 M.getOrInsertFunction(
2900 ClMemoryAccessCallbackPrefix + ExpStr + Suffix + EndingStr,
2901 FunctionType::get(IRB.getVoidTy(), Args1, false), AL1);
2902 }
2903 }
2904 }
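// With the default prefixes, the loops above assemble runtime names such as
// __asan_report_load4, __asan_report_exp_store8, __asan_report_load_n and
// the non-reporting access callbacks __asan_load4, __asan_storeN, etc., with
// "_noabort" appended in recovery mode.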
2905
2906 const std::string MemIntrinCallbackPrefix =
2907 (CompileKernel && !ClKasanMemIntrinCallbackPrefix)
2908 ? std::string("")
2909 : ClMemoryAccessCallbackPrefix;
2910 AsanMemmove = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memmove",
2911 PtrTy, PtrTy, PtrTy, IntptrTy);
2912 AsanMemcpy = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memcpy", PtrTy,
2913 PtrTy, PtrTy, IntptrTy);
2914 AsanMemset = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memset",
2915 TLI->getAttrList(C, {1}, /*Signed=*/false),
2916 PtrTy, PtrTy, IRB.getInt32Ty(), IntptrTy);
2917
2918 AsanHandleNoReturnFunc =
2919 M.getOrInsertFunction(kAsanHandleNoReturnName, IRB.getVoidTy());
2920
2921 AsanPtrCmpFunction =
2922 M.getOrInsertFunction(kAsanPtrCmp, IRB.getVoidTy(), IntptrTy, IntptrTy);
2923 AsanPtrSubFunction =
2924 M.getOrInsertFunction(kAsanPtrSub, IRB.getVoidTy(), IntptrTy, IntptrTy);
2925 if (Mapping.InGlobal)
2926 AsanShadowGlobal = M.getOrInsertGlobal("__asan_shadow",
2927 ArrayType::get(IRB.getInt8Ty(), 0));
2928
2929 AMDGPUAddressShared =
2930 M.getOrInsertFunction(kAMDGPUAddressSharedName, IRB.getInt1Ty(), PtrTy);
2931 AMDGPUAddressPrivate =
2932 M.getOrInsertFunction(kAMDGPUAddressPrivateName, IRB.getInt1Ty(), PtrTy);
2933}
2934
2935bool AddressSanitizer::maybeInsertAsanInitAtFunctionEntry(Function &F) {
2936 // For each NSObject descendant having a +load method, this method is invoked
2937 // by the ObjC runtime before any of the static constructors is called.
2938 // Therefore we need to instrument such methods with a call to __asan_init
2939 // at the beginning in order to initialize our runtime before any access to
2940 // the shadow memory.
2941 // We cannot just ignore these methods, because they may call other
2942 // instrumented functions.
2943 if (F.getName().contains(" load]")) {
2944 FunctionCallee AsanInitFunction =
2945 declareSanitizerInitFunction(*F.getParent(), kAsanInitName, {});
2946 IRBuilder<> IRB(&F.front(), F.front().begin());
2947 IRB.CreateCall(AsanInitFunction, {});
2948 return true;
2949 }
2950 return false;
2951}
2952
2953bool AddressSanitizer::maybeInsertDynamicShadowAtFunctionEntry(Function &F) {
2954 // Generate code only when dynamic addressing is needed.
2955 if (Mapping.Offset != kDynamicShadowSentinel)
2956 return false;
2957
2958 IRBuilder<> IRB(&F.front().front());
2959 if (Mapping.InGlobal) {
2960 if (ClWithIfuncSuppressRemat) {
2961 // An empty inline asm with input reg == output reg.
2962 // An opaque pointer-to-int cast, basically.
2963 InlineAsm *Asm = InlineAsm::get(
2964 FunctionType::get(IntptrTy, {AsanShadowGlobal->getType()}, false),
2965 StringRef(""), StringRef("=r,0"),
2966 /*hasSideEffects=*/false);
2967 LocalDynamicShadow =
2968 IRB.CreateCall(Asm, {AsanShadowGlobal}, ".asan.shadow");
2969 } else {
2970 LocalDynamicShadow =
2971 IRB.CreatePointerCast(AsanShadowGlobal, IntptrTy, ".asan.shadow");
2972 }
2973 } else {
2974 Value *GlobalDynamicAddress = F.getParent()->getOrInsertGlobal(
2975 kAsanShadowMemoryDynamicAddress, IntptrTy);
2976 LocalDynamicShadow = IRB.CreateLoad(IntptrTy, GlobalDynamicAddress);
2977 }
2978 return true;
2979}
2980
2981void AddressSanitizer::markEscapedLocalAllocas(Function &F) {
2982 // Find the one possible call to llvm.localescape and pre-mark allocas passed
2983 // to it as uninteresting. This assumes we haven't started processing allocas
2984 // yet. This check is done up front because iterating the use list in
2985 // isInterestingAlloca would be algorithmically slower.
2986 assert(ProcessedAllocas.empty() && "must process localescape before allocas");
2987
2988 // Try to get the declaration of llvm.localescape. If it's not in the module,
2989 // we can exit early.
2990 if (!F.getParent()->getFunction("llvm.localescape")) return;
2991
2992 // Look for a call to llvm.localescape in the entry block. It can't be in
2993 // any other block.
2994 for (Instruction &I : F.getEntryBlock()) {
2995 IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I);
2996 if (II && II->getIntrinsicID() == Intrinsic::localescape) {
2997 // We found a call. Mark all the allocas passed in as uninteresting.
2998 for (Value *Arg : II->args()) {
2999 AllocaInst *AI = dyn_cast<AllocaInst>(Arg->stripPointerCasts());
3000 assert(AI && AI->isStaticAlloca() &&
3001 "non-static alloca arg to localescape");
3002 ProcessedAllocas[AI] = false;
3003 }
3004 break;
3005 }
3006 }
3007}
3008
3009bool AddressSanitizer::suppressInstrumentationSiteForDebug(int &Instrumented) {
3010 bool ShouldInstrument =
3011 ClDebugMin < 0 || ClDebugMax < 0 ||
3012 (Instrumented >= ClDebugMin && Instrumented <= ClDebugMax);
3013 Instrumented++;
3014 return !ShouldInstrument;
3015}
3016
3017bool AddressSanitizer::instrumentFunction(Function &F,
3018 const TargetLibraryInfo *TLI,
3019 const TargetTransformInfo *TTI) {
3020 bool FunctionModified = false;
3021
3022 // Do not apply any instrumentation for naked functions.
3023 if (F.hasFnAttribute(Attribute::Naked))
3024 return FunctionModified;
3025
3026 // If needed, insert __asan_init before checking for SanitizeAddress attr.
3027 // This function needs to be called even if the function body is not
3028 // instrumented.
3029 if (maybeInsertAsanInitAtFunctionEntry(F))
3030 FunctionModified = true;
3031
3032 // Leave if the function doesn't need instrumentation.
3033 if (!F.hasFnAttribute(Attribute::SanitizeAddress)) return FunctionModified;
3034
3035 if (F.hasFnAttribute(Attribute::DisableSanitizerInstrumentation))
3036 return FunctionModified;
3037
3038 LLVM_DEBUG(dbgs() << "ASAN instrumenting:\n" << F << "\n");
3039
3040 initializeCallbacks(TLI);
3041
3042 FunctionStateRAII CleanupObj(this);
3043
3044 RuntimeCallInserter RTCI(F);
3045
3046 FunctionModified |= maybeInsertDynamicShadowAtFunctionEntry(F);
3047
3048 // We can't instrument allocas used with llvm.localescape. Only static allocas
3049 // can be passed to that intrinsic.
3050 markEscapedLocalAllocas(F);
3051
3052 // We want to instrument every address only once per basic block (unless there
3053 // are calls between uses).
3054 SmallPtrSet<Value *, 16> TempsToInstrument;
3055 SmallVector<InterestingMemoryOperand, 16> OperandsToInstrument;
3056 SmallVector<MemIntrinsic *, 16> IntrinToInstrument;
3057 SmallVector<Instruction *, 8> NoReturnCalls;
3058 SmallVector<BasicBlock *, 16> AllBlocks;
3059 SmallVector<Instruction *, 16> PointerComparisonsOrSubtracts;
3060
3061 // Fill the set of memory operations to instrument.
3062 for (auto &BB : F) {
3063 AllBlocks.push_back(&BB);
3064 TempsToInstrument.clear();
3065 int NumInsnsPerBB = 0;
3066 for (auto &Inst : BB) {
3067 if (LooksLikeCodeInBug11395(&Inst)) return false;
3068 // Skip instructions inserted by another instrumentation.
3069 if (Inst.hasMetadata(LLVMContext::MD_nosanitize))
3070 continue;
3071 SmallVector<InterestingMemoryOperand, 1> InterestingOperands;
3072 getInterestingMemoryOperands(&Inst, InterestingOperands, TTI);
3073
3074 if (!InterestingOperands.empty()) {
3075 for (auto &Operand : InterestingOperands) {
3076 if (ClOpt && ClOptSameTemp) {
3077 Value *Ptr = Operand.getPtr();
3078 // If we have a mask, skip instrumentation if we've already
3079 // instrumented the full object. But don't add to TempsToInstrument
3080 // because we might get another load/store with a different mask.
3081 if (Operand.MaybeMask) {
3082 if (TempsToInstrument.count(Ptr))
3083 continue; // We've seen this (whole) temp in the current BB.
3084 } else {
3085 if (!TempsToInstrument.insert(Ptr).second)
3086 continue; // We've seen this temp in the current BB.
3087 }
3088 }
3089 OperandsToInstrument.push_back(Operand);
3090 NumInsnsPerBB++;
3091 }
3092 } else if (((ClInvalidPointerPairs || ClInvalidPointerCmp) &&
3093 isInterestingPointerComparison(&Inst)) ||
3094 ((ClInvalidPointerPairs || ClInvalidPointerSub) &&
3095 isInterestingPointerSubtraction(&Inst))) {
3096 PointerComparisonsOrSubtracts.push_back(&Inst);
3097 } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(&Inst)) {
3098 // ok, take it.
3099 IntrinToInstrument.push_back(MI);
3100 NumInsnsPerBB++;
3101 } else {
3102 if (auto *CB = dyn_cast<CallBase>(&Inst)) {
3103 // A call inside BB.
3104 TempsToInstrument.clear();
3105 if (CB->doesNotReturn())
3106 NoReturnCalls.push_back(CB);
3107 }
3108 if (CallInst *CI = dyn_cast<CallInst>(&Inst))
3109 maybeMarkSanitizerLibraryCallNoBuiltin(CI, TLI);
3110 }
3111 if (NumInsnsPerBB >= ClMaxInsnsToInstrumentPerBB) break;
3112 }
3113 }
3114
3115 bool UseCalls = (InstrumentationWithCallsThreshold >= 0 &&
3116 OperandsToInstrument.size() + IntrinToInstrument.size() >
3117 (unsigned)InstrumentationWithCallsThreshold);
3118 const DataLayout &DL = F.getDataLayout();
3119 ObjectSizeOffsetVisitor ObjSizeVis(DL, TLI, F.getContext());
3120
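// Example: with the default -asan-instrumentation-with-call-threshold=7000,
// a function containing more than 7000 interesting accesses is instrumented
// with out-of-line __asan_load*/__asan_store* callbacks instead of inline
// shadow checks, trading some speed for bounded code size.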
3121 // Instrument.
3122 int NumInstrumented = 0;
3123 for (auto &Operand : OperandsToInstrument) {
3124 if (!suppressInstrumentationSiteForDebug(NumInstrumented))
3125 instrumentMop(ObjSizeVis, Operand, UseCalls,
3126 F.getDataLayout(), RTCI);
3127 FunctionModified = true;
3128 }
3129 for (auto *Inst : IntrinToInstrument) {
3130 if (!suppressInstrumentationSiteForDebug(NumInstrumented))
3131 instrumentMemIntrinsic(Inst, RTCI);
3132 FunctionModified = true;
3133 }
3134
3135 FunctionStackPoisoner FSP(F, *this, RTCI);
3136 bool ChangedStack = FSP.runOnFunction();
3137
3138 // We must unpoison the stack before NoReturn calls (throw, _exit, etc).
3139 // See e.g. https://github.com/google/sanitizers/issues/37
3140 for (auto *CI : NoReturnCalls) {
3141 IRBuilder<> IRB(CI);
3142 RTCI.createRuntimeCall(IRB, AsanHandleNoReturnFunc, {});
3143 }
3144
3145 for (auto *Inst : PointerComparisonsOrSubtracts) {
3146 instrumentPointerComparisonOrSubtraction(Inst, RTCI);
3147 FunctionModified = true;
3148 }
3149
3150 if (ChangedStack || !NoReturnCalls.empty())
3151 FunctionModified = true;
3152
3153 LLVM_DEBUG(dbgs() << "ASAN done instrumenting: " << FunctionModified << " "
3154 << F << "\n");
3155
3156 return FunctionModified;
3157}
3158
3159// Workaround for bug 11395: we don't want to instrument stack in functions
3160// with large assembly blobs (32-bit only), otherwise reg alloc may crash.
3161// FIXME: remove once the bug 11395 is fixed.
3162bool AddressSanitizer::LooksLikeCodeInBug11395(Instruction *I) {
3163 if (LongSize != 32) return false;
3164 CallInst *CI = dyn_cast<CallInst>(I);
3165 if (!CI || !CI->isInlineAsm()) return false;
3166 if (CI->arg_size() <= 5)
3167 return false;
3168 // We have inline assembly with quite a few arguments.
3169 return true;
3170}
3171
3172void FunctionStackPoisoner::initializeCallbacks(Module &M) {
3173 IRBuilder<> IRB(*C);
3174 if (ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode::Always ||
3175 ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode::Runtime) {
3176 const char *MallocNameTemplate =
3177 ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode::Always
3178 ? kAsanStackMallocAlwaysNameTemplate
3179 : kAsanStackMallocNameTemplate;
3180 for (int Index = 0; Index <= kMaxAsanStackMallocSizeClass; Index++) {
3181 std::string Suffix = itostr(Index);
3182 AsanStackMallocFunc[Index] = M.getOrInsertFunction(
3183 MallocNameTemplate + Suffix, IntptrTy, IntptrTy);
3184 AsanStackFreeFunc[Index] =
3185 M.getOrInsertFunction(kAsanStackFreeNameTemplate + Suffix,
3186 IRB.getVoidTy(), IntptrTy, IntptrTy);
3187 }
3188 }
3189 if (ASan.UseAfterScope) {
3190 AsanPoisonStackMemoryFunc = M.getOrInsertFunction(
3191 kAsanPoisonStackMemoryName, IRB.getVoidTy(), IntptrTy, IntptrTy);
3192 AsanUnpoisonStackMemoryFunc = M.getOrInsertFunction(
3193 kAsanUnpoisonStackMemoryName, IRB.getVoidTy(), IntptrTy, IntptrTy);
3194 }
3195
3196 for (size_t Val : {0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0xf1, 0xf2,
3197 0xf3, 0xf5, 0xf8}) {
3198 std::ostringstream Name;
3199 Name << kAsanSetShadowPrefix;
3200 Name << std::setw(2) << std::setfill('0') << std::hex << Val;
3201 AsanSetShadowFunc[Val] =
3202 M.getOrInsertFunction(Name.str(), IRB.getVoidTy(), IntptrTy, IntptrTy);
3203 }
3204
3205 AsanAllocaPoisonFunc = M.getOrInsertFunction(
3206 kAsanAllocaPoison, IRB.getVoidTy(), IntptrTy, IntptrTy);
3207 AsanAllocasUnpoisonFunc = M.getOrInsertFunction(
3208 kAsanAllocasUnpoison, IRB.getVoidTy(), IntptrTy, IntptrTy);
3209}
3210
3211void FunctionStackPoisoner::copyToShadowInline(ArrayRef<uint8_t> ShadowMask,
3212 ArrayRef<uint8_t> ShadowBytes,
3213 size_t Begin, size_t End,
3214 IRBuilder<> &IRB,
3215 Value *ShadowBase) {
3216 if (Begin >= End)
3217 return;
3218
3219 const size_t LargestStoreSizeInBytes =
3220 std::min<size_t>(sizeof(uint64_t), ASan.LongSize / 8);
3221
3222 const bool IsLittleEndian = F.getDataLayout().isLittleEndian();
3223
3224 // Poison the given range in shadow using the largest store size, without
3225 // leading and trailing zeros in ShadowMask. Zeros never change, so they need
3226 // neither poisoning nor unpoisoning. Still, we don't mind if some of them
3227 // get into the middle of a store.
3228 for (size_t i = Begin; i < End;) {
3229 if (!ShadowMask[i]) {
3230 assert(!ShadowBytes[i]);
3231 ++i;
3232 continue;
3233 }
3234
3235 size_t StoreSizeInBytes = LargestStoreSizeInBytes;
3236 // Fit store size into the range.
3237 while (StoreSizeInBytes > End - i)
3238 StoreSizeInBytes /= 2;
3239
3240 // Minimize store size by trimming trailing zeros.
3241 for (size_t j = StoreSizeInBytes - 1; j && !ShadowMask[i + j]; --j) {
3242 while (j <= StoreSizeInBytes / 2)
3243 StoreSizeInBytes /= 2;
3244 }
3245
3246 uint64_t Val = 0;
3247 for (size_t j = 0; j < StoreSizeInBytes; j++) {
3248 if (IsLittleEndian)
3249 Val |= (uint64_t)ShadowBytes[i + j] << (8 * j);
3250 else
3251 Val = (Val << 8) | ShadowBytes[i + j];
3252 }
3253
3254 Value *Ptr = IRB.CreateAdd(ShadowBase, ConstantInt::get(IntptrTy, i));
3255 Value *Poison = IRB.getIntN(StoreSizeInBytes * 8, Val);
3256 IRB.CreateAlignedStore(
3257 Poison, IRB.CreateIntToPtr(Ptr, IRB.getPtrTy()),
3258 Align(1));
3259
3260 i += StoreSizeInBytes;
3261 }
3262}
3263
3264void FunctionStackPoisoner::copyToShadow(ArrayRef<uint8_t> ShadowMask,
3265 ArrayRef<uint8_t> ShadowBytes,
3266 IRBuilder<> &IRB, Value *ShadowBase) {
3267 copyToShadow(ShadowMask, ShadowBytes, 0, ShadowMask.size(), IRB, ShadowBase);
3268}
3269
3270void FunctionStackPoisoner::copyToShadow(ArrayRef<uint8_t> ShadowMask,
3271 ArrayRef<uint8_t> ShadowBytes,
3272 size_t Begin, size_t End,
3273 IRBuilder<> &IRB, Value *ShadowBase) {
3274 assert(ShadowMask.size() == ShadowBytes.size());
3275 size_t Done = Begin;
3276 for (size_t i = Begin, j = Begin + 1; i < End; i = j++) {
3277 if (!ShadowMask[i]) {
3278 assert(!ShadowBytes[i]);
3279 continue;
3280 }
3281 uint8_t Val = ShadowBytes[i];
3282 if (!AsanSetShadowFunc[Val])
3283 continue;
3284
3285 // Skip same values.
3286 for (; j < End && ShadowMask[j] && Val == ShadowBytes[j]; ++j) {
3287 }
3288
3289 if (j - i >= ASan.MaxInlinePoisoningSize) {
3290 copyToShadowInline(ShadowMask, ShadowBytes, Done, i, IRB, ShadowBase);
3291 RTCI.createRuntimeCall(
3292 IRB, AsanSetShadowFunc[Val],
3293 {IRB.CreateAdd(ShadowBase, ConstantInt::get(IntptrTy, i)),
3294 ConstantInt::get(IntptrTy, j - i)});
3295 Done = j;
3296 }
3297 }
3298
3299 copyToShadowInline(ShadowMask, ShadowBytes, Done, End, IRB, ShadowBase);
3300}
3301
3302 // Fake stack allocator (asan_fake_stack.h) has 11 size classes, one for each
3303 // power of 2 from kMinStackMallocSize up to kMaxAsanStackMallocSizeClass.
3304static int StackMallocSizeClass(uint64_t LocalStackSize) {
3305 assert(LocalStackSize <= kMaxStackMallocSize);
3306 uint64_t MaxSize = kMinStackMallocSize;
3307 for (int i = 0;; i++, MaxSize *= 2)
3308 if (LocalStackSize <= MaxSize) return i;
3309 llvm_unreachable("impossible LocalStackSize");
3310}
3311
3312void FunctionStackPoisoner::copyArgsPassedByValToAllocas() {
3313 Instruction *CopyInsertPoint = &F.front().front();
3314 if (CopyInsertPoint == ASan.LocalDynamicShadow) {
3315 // Insert after the dynamic shadow location is determined
3316 CopyInsertPoint = CopyInsertPoint->getNextNode();
3317 assert(CopyInsertPoint);
3318 }
3319 IRBuilder<> IRB(CopyInsertPoint);
3320 const DataLayout &DL = F.getDataLayout();
3321 for (Argument &Arg : F.args()) {
3322 if (Arg.hasByValAttr()) {
3323 Type *Ty = Arg.getParamByValType();
3324 const Align Alignment =
3325 DL.getValueOrABITypeAlignment(Arg.getParamAlign(), Ty);
3326
3327 AllocaInst *AI = IRB.CreateAlloca(
3328 Ty, nullptr,
3329 (Arg.hasName() ? Arg.getName() : "Arg" + Twine(Arg.getArgNo())) +
3330 ".byval");
3331 AI->setAlignment(Alignment);
3332 Arg.replaceAllUsesWith(AI);
3333
3334 uint64_t AllocSize = DL.getTypeAllocSize(Ty);
3335 IRB.CreateMemCpy(AI, Alignment, &Arg, Alignment, AllocSize);
3336 }
3337 }
3338}
3339
3340PHINode *FunctionStackPoisoner::createPHI(IRBuilder<> &IRB, Value *Cond,
3341 Value *ValueIfTrue,
3342 Instruction *ThenTerm,
3343 Value *ValueIfFalse) {
3344 PHINode *PHI = IRB.CreatePHI(IntptrTy, 2);
3345 BasicBlock *CondBlock = cast<Instruction>(Cond)->getParent();
3346 PHI->addIncoming(ValueIfFalse, CondBlock);
3347 BasicBlock *ThenBlock = ThenTerm->getParent();
3348 PHI->addIncoming(ValueIfTrue, ThenBlock);
3349 return PHI;
3350}
3351
3352Value *FunctionStackPoisoner::createAllocaForLayout(
3353 IRBuilder<> &IRB, const ASanStackFrameLayout &L, bool Dynamic) {
3354 AllocaInst *Alloca;
3355 if (Dynamic) {
3356 Alloca = IRB.CreateAlloca(IRB.getInt8Ty(),
3357 ConstantInt::get(IRB.getInt64Ty(), L.FrameSize),
3358 "MyAlloca");
3359 } else {
3360 Alloca = IRB.CreateAlloca(ArrayType::get(IRB.getInt8Ty(), L.FrameSize),
3361 nullptr, "MyAlloca");
3362 assert(Alloca->isStaticAlloca());
3363 }
3364 assert((ClRealignStack & (ClRealignStack - 1)) == 0);
3365 uint64_t FrameAlignment = std::max(L.FrameAlignment, uint64_t(ClRealignStack));
3366 Alloca->setAlignment(Align(FrameAlignment));
3367 return IRB.CreatePointerCast(Alloca, IntptrTy);
3368}
3369
3370void FunctionStackPoisoner::createDynamicAllocasInitStorage() {
3371 BasicBlock &FirstBB = *F.begin();
3372 IRBuilder<> IRB(dyn_cast<Instruction>(FirstBB.begin()));
3373 DynamicAllocaLayout = IRB.CreateAlloca(IntptrTy, nullptr);
3374 IRB.CreateStore(Constant::getNullValue(IntptrTy), DynamicAllocaLayout);
3375 DynamicAllocaLayout->setAlignment(Align(32));
3376}
3377
3378void FunctionStackPoisoner::processDynamicAllocas() {
3379 if (!ClInstrumentDynamicAllocas || DynamicAllocaVec.empty()) {
3380 assert(DynamicAllocaPoisonCallVec.empty());
3381 return;
3382 }
3383
3384 // Insert poison calls for lifetime intrinsics for dynamic allocas.
3385 for (const auto &APC : DynamicAllocaPoisonCallVec) {
3386 assert(APC.InsBefore);
3387 assert(APC.AI);
3388 assert(ASan.isInterestingAlloca(*APC.AI));
3389 assert(!APC.AI->isStaticAlloca());
3390
3391 IRBuilder<> IRB(APC.InsBefore);
3392 poisonAlloca(APC.AI, APC.Size, IRB, APC.DoPoison);
3393 // Dynamic allocas will be unpoisoned unconditionally below in
3394 // unpoisonDynamicAllocas.
3395 // Flag that we need to unpoison static allocas.
3396 }
3397
3398 // Handle dynamic allocas.
3399 createDynamicAllocasInitStorage();
3400 for (auto &AI : DynamicAllocaVec)
3401 handleDynamicAllocaCall(AI);
3402 unpoisonDynamicAllocas();
3403}
3404
3405/// Collect instructions in the entry block after \p InsBefore which initialize
3406/// permanent storage for a function argument. These instructions must remain in
3407/// the entry block so that uninitialized values do not appear in backtraces. An
3408/// added benefit is that this conserves spill slots. This does not move stores
3409/// before instrumented / "interesting" allocas.
3410 static void findStoresToUninstrumentedArgAllocas(
3411 AddressSanitizer &ASan, Instruction &InsBefore,
3412 SmallVectorImpl<Instruction *> &InitInsts) {
3413 Instruction *Start = InsBefore.getNextNode();
3414 for (Instruction *It = Start; It; It = It->getNextNode()) {
3415 // Argument initialization looks like:
3416 // 1) store <Argument>, <Alloca> OR
3417 // 2) <CastArgument> = cast <Argument> to ...
3418 // store <CastArgument> to <Alloca>
3419 // Do not consider any other kind of instruction.
3420 //
3421 // Note: This covers all known cases, but may not be exhaustive. An
3422 // alternative to pattern-matching stores is to DFS over all Argument uses:
3423 // this might be more general, but is probably much more complicated.
3424 if (isa<AllocaInst>(It) || isa<CastInst>(It))
3425 continue;
3426 if (auto *Store = dyn_cast<StoreInst>(It)) {
3427 // The store destination must be an alloca that isn't interesting for
3428 // ASan to instrument. These are moved up before InsBefore, and they're
3429 // not interesting because allocas for arguments can be mem2reg'd.
3430 auto *Alloca = dyn_cast<AllocaInst>(Store->getPointerOperand());
3431 if (!Alloca || ASan.isInterestingAlloca(*Alloca))
3432 continue;
3433
3434 Value *Val = Store->getValueOperand();
3435 bool IsDirectArgInit = isa<Argument>(Val);
3436 bool IsArgInitViaCast =
3437 isa<CastInst>(Val) &&
3438 isa<Argument>(cast<CastInst>(Val)->getOperand(0)) &&
3439 // Check that the cast appears directly before the store. Otherwise
3440 // moving the cast before InsBefore may break the IR.
3441 Val == It->getPrevNode();
3442 bool IsArgInit = IsDirectArgInit || IsArgInitViaCast;
3443 if (!IsArgInit)
3444 continue;
3445
3446 if (IsArgInitViaCast)
3447 InitInsts.push_back(cast<Instruction>(Val));
3448 InitInsts.push_back(Store);
3449 continue;
3450 }
3451
3452 // Do not reorder past unknown instructions: argument initialization should
3453 // only involve casts and stores.
3454 return;
3455 }
3456}
3457
3458 static StringRef getAllocaName(AllocaInst *AI) {
3459 // Alloca could have been renamed for uniqueness. Its true name will have been
3460 // recorded as an annotation.
3461 if (AI->hasMetadata(LLVMContext::MD_annotation)) {
3462 MDTuple *AllocaAnnotations =
3463 cast<MDTuple>(AI->getMetadata(LLVMContext::MD_annotation));
3464 for (auto &Annotation : AllocaAnnotations->operands()) {
3465 if (!isa<MDTuple>(Annotation))
3466 continue;
3467 auto AnnotationTuple = cast<MDTuple>(Annotation);
3468 for (unsigned Index = 0; Index < AnnotationTuple->getNumOperands();
3469 Index++) {
3470 // All annotations are strings
3471 auto MetadataString =
3472 cast<MDString>(AnnotationTuple->getOperand(Index));
3473 if (MetadataString->getString() == "alloca_name_altered")
3474 return cast<MDString>(AnnotationTuple->getOperand(Index + 1))
3475 ->getString();
3476 }
3477 }
3478 }
3479 return AI->getName();
3480}
3481
3482void FunctionStackPoisoner::processStaticAllocas() {
3483 if (AllocaVec.empty()) {
3484 assert(StaticAllocaPoisonCallVec.empty());
3485 return;
3486 }
3487
3488 int StackMallocIdx = -1;
3489 DebugLoc EntryDebugLocation;
3490 if (auto SP = F.getSubprogram())
3491 EntryDebugLocation =
3492 DILocation::get(SP->getContext(), SP->getScopeLine(), 0, SP);
3493
3494 Instruction *InsBefore = AllocaVec[0];
3495 IRBuilder<> IRB(InsBefore);
3496
3497 // Make sure non-instrumented allocas stay in the entry block. Otherwise,
3498 // debug info is broken, because only entry-block allocas are treated as
3499 // regular stack slots.
3500 auto InsBeforeB = InsBefore->getParent();
3501 assert(InsBeforeB == &F.getEntryBlock());
3502 for (auto *AI : StaticAllocasToMoveUp)
3503 if (AI->getParent() == InsBeforeB)
3504 AI->moveBefore(InsBefore->getIterator());
3505
3506 // Move stores of arguments into entry-block allocas as well. This prevents
3507 // extra stack slots from being generated (to house the argument values until
3508 // they can be stored into the allocas). This also prevents uninitialized
3509 // values from being shown in backtraces.
3510 SmallVector<Instruction *, 8> ArgInitInsts;
3511 findStoresToUninstrumentedArgAllocas(ASan, *InsBefore, ArgInitInsts);
3512 for (Instruction *ArgInitInst : ArgInitInsts)
3513 ArgInitInst->moveBefore(InsBefore->getIterator());
3514
3515 // If we have a call to llvm.localescape, keep it in the entry block.
3516 if (LocalEscapeCall)
3517 LocalEscapeCall->moveBefore(InsBefore->getIterator());
3518
3519 SmallVector<ASanStackVariableDescription, 16> SVD;
3520 SVD.reserve(AllocaVec.size());
3521 for (AllocaInst *AI : AllocaVec) {
3522 ASanStackVariableDescription D = {
3523 getAllocaName(AI),
3524 ASan.getAllocaSizeInBytes(*AI),
3525 0,
3526 AI->getAlign().value(),
3527 AI,
3528 0,
3529 0};
3530 SVD.push_back(D);
3531 }
3532
3533 // Minimal header size (left redzone) is 4 pointers,
3534 // i.e. 32 bytes on 64-bit platforms and 16 bytes on 32-bit platforms.
3535 uint64_t Granularity = 1ULL << Mapping.Scale;
3536 uint64_t MinHeaderSize = std::max((uint64_t)ASan.LongSize / 2, Granularity);
3537 const ASanStackFrameLayout &L =
3538 ComputeASanStackFrameLayout(SVD, Granularity, MinHeaderSize);
3539
3540 // Build AllocaToSVDMap for ASanStackVariableDescription lookup.
3541 DenseMap<const AllocaInst *, ASanStackVariableDescription *> AllocaToSVDMap;
3542 for (auto &Desc : SVD)
3543 AllocaToSVDMap[Desc.AI] = &Desc;
3544
3545 // Update SVD with information from lifetime intrinsics.
3546 for (const auto &APC : StaticAllocaPoisonCallVec) {
3547 assert(APC.InsBefore);
3548 assert(APC.AI);
3549 assert(ASan.isInterestingAlloca(*APC.AI));
3550 assert(APC.AI->isStaticAlloca());
3551
3552 ASanStackVariableDescription &Desc = *AllocaToSVDMap[APC.AI];
3553 Desc.LifetimeSize = Desc.Size;
3554 if (const DILocation *FnLoc = EntryDebugLocation.get()) {
3555 if (const DILocation *LifetimeLoc = APC.InsBefore->getDebugLoc().get()) {
3556 if (LifetimeLoc->getFile() == FnLoc->getFile())
3557 if (unsigned Line = LifetimeLoc->getLine())
3558 Desc.Line = std::min(Desc.Line ? Desc.Line : Line, Line);
3559 }
3560 }
3561 }
3562
3563 auto DescriptionString = ComputeASanStackFrameDescription(SVD);
3564 LLVM_DEBUG(dbgs() << DescriptionString << " --- " << L.FrameSize << "\n");
3565 uint64_t LocalStackSize = L.FrameSize;
3566 bool DoStackMalloc =
3567 ASan.UseAfterReturn != AsanDetectStackUseAfterReturnMode::Never &&
3568 !ASan.CompileKernel && LocalStackSize <= kMaxStackMallocSize;
3569 bool DoDynamicAlloca = ClDynamicAllocaStack;
3570 // Don't do dynamic alloca or stack malloc if:
3571 // 1) There is inline asm: too often it makes assumptions on which registers
3572 // are available.
3573 // 2) There is a returns_twice call (typically setjmp), which is
3574 // optimization-hostile, and doesn't play well with introduced indirect
3575 // register-relative calculation of local variable addresses.
3576 DoDynamicAlloca &= !HasInlineAsm && !HasReturnsTwiceCall;
3577 DoStackMalloc &= !HasInlineAsm && !HasReturnsTwiceCall;
3578
3579 Value *StaticAlloca =
3580 DoDynamicAlloca ? nullptr : createAllocaForLayout(IRB, L, false);
3581
3582 Value *FakeStack;
3583 Value *LocalStackBase;
3584 Value *LocalStackBaseAlloca;
3585 uint8_t DIExprFlags = DIExpression::ApplyOffset;
3586
3587 if (DoStackMalloc) {
3588 LocalStackBaseAlloca =
3589 IRB.CreateAlloca(IntptrTy, nullptr, "asan_local_stack_base");
3590 if (ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode::Runtime) {
3591 // void *FakeStack = __asan_option_detect_stack_use_after_return
3592 // ? __asan_stack_malloc_N(LocalStackSize)
3593 // : nullptr;
3594 // void *LocalStackBase = (FakeStack) ? FakeStack :
3595 // alloca(LocalStackSize);
3596 Constant *OptionDetectUseAfterReturn = F.getParent()->getOrInsertGlobal(
3597 kAsanOptionDetectUseAfterReturn, IRB.getInt32Ty());
3598 Value *UseAfterReturnIsEnabled = IRB.CreateICmpNE(
3599 IRB.CreateLoad(IRB.getInt32Ty(), OptionDetectUseAfterReturn),
3600 Constant::getNullValue(IRB.getInt32Ty()));
3601 Instruction *Term =
3602 SplitBlockAndInsertIfThen(UseAfterReturnIsEnabled, InsBefore, false);
3603 IRBuilder<> IRBIf(Term);
3604 StackMallocIdx = StackMallocSizeClass(LocalStackSize);
3605 assert(StackMallocIdx <= kMaxAsanStackMallocSizeClass);
3606 Value *FakeStackValue =
3607 RTCI.createRuntimeCall(IRBIf, AsanStackMallocFunc[StackMallocIdx],
3608 ConstantInt::get(IntptrTy, LocalStackSize));
3609 IRB.SetInsertPoint(InsBefore);
3610 FakeStack = createPHI(IRB, UseAfterReturnIsEnabled, FakeStackValue, Term,
3611 ConstantInt::get(IntptrTy, 0));
3612 } else {
3613 // assert(ASan.UseAfterReturn == AsanDetectStackUseAfterReturnMode::Always)
3614 // void *FakeStack = __asan_stack_malloc_N(LocalStackSize);
3615 // void *LocalStackBase = (FakeStack) ? FakeStack :
3616 // alloca(LocalStackSize);
3617 StackMallocIdx = StackMallocSizeClass(LocalStackSize);
3618 FakeStack =
3619 RTCI.createRuntimeCall(IRB, AsanStackMallocFunc[StackMallocIdx],
3620 ConstantInt::get(IntptrTy, LocalStackSize));
3621 }
3622 Value *NoFakeStack =
3623 IRB.CreateICmpEQ(FakeStack, Constant::getNullValue(IntptrTy));
3624 Instruction *Term =
3625 SplitBlockAndInsertIfThen(NoFakeStack, InsBefore, false);
3626 IRBuilder<> IRBIf(Term);
3627 Value *AllocaValue =
3628 DoDynamicAlloca ? createAllocaForLayout(IRBIf, L, true) : StaticAlloca;
3629
3630 IRB.SetInsertPoint(InsBefore);
3631 LocalStackBase = createPHI(IRB, NoFakeStack, AllocaValue, Term, FakeStack);
3632 IRB.CreateStore(LocalStackBase, LocalStackBaseAlloca);
3633 DIExprFlags |= DIExpression::DerefBefore;
3634 } else {
3635 // void *FakeStack = nullptr;
3636 // void *LocalStackBase = alloca(LocalStackSize);
3637 FakeStack = ConstantInt::get(IntptrTy, 0);
3638 LocalStackBase =
3639 DoDynamicAlloca ? createAllocaForLayout(IRB, L, true) : StaticAlloca;
3640 LocalStackBaseAlloca = LocalStackBase;
3641 }
3642
3643 // It shouldn't matter whether we pass an `alloca` or a `ptrtoint` as the
3644 // dbg.declare address operand, but passing a `ptrtoint` seems to confuse
3645 // later passes and can result in dropped variable coverage in debug info.
3646 Value *LocalStackBaseAllocaPtr =
3647 isa<PtrToIntInst>(LocalStackBaseAlloca)
3648 ? cast<PtrToIntInst>(LocalStackBaseAlloca)->getPointerOperand()
3649 : LocalStackBaseAlloca;
3650 assert(isa<AllocaInst>(LocalStackBaseAllocaPtr) &&
3651 "Variable descriptions relative to ASan stack base will be dropped");
3652
3653 // Replace Alloca instructions with base+offset.
3654 SmallVector<Value *> NewAllocaPtrs;
3655 for (const auto &Desc : SVD) {
3656 AllocaInst *AI = Desc.AI;
3657 replaceDbgDeclare(AI, LocalStackBaseAllocaPtr, DIB, DIExprFlags,
3658 Desc.Offset);
3659 Value *NewAllocaPtr = IRB.CreateIntToPtr(
3660 IRB.CreateAdd(LocalStackBase, ConstantInt::get(IntptrTy, Desc.Offset)),
3661 AI->getType());
3662 AI->replaceAllUsesWith(NewAllocaPtr);
3663 NewAllocaPtrs.push_back(NewAllocaPtr);
3664 }
3665
3666 // The left-most redzone has enough space for at least 4 pointers.
3667 // Write the Magic value to redzone[0].
3668 Value *BasePlus0 = IRB.CreateIntToPtr(LocalStackBase, IntptrPtrTy);
3669 IRB.CreateStore(ConstantInt::get(IntptrTy, kCurrentStackFrameMagic),
3670 BasePlus0);
3671 // Write the frame description constant to redzone[1].
3672 Value *BasePlus1 = IRB.CreateIntToPtr(
3673 IRB.CreateAdd(LocalStackBase,
3674 ConstantInt::get(IntptrTy, ASan.LongSize / 8)),
3675 IntptrPtrTy);
3676 GlobalVariable *StackDescriptionGlobal =
3677 createPrivateGlobalForString(*F.getParent(), DescriptionString,
3678 /*AllowMerging*/ true, genName("stack"));
3679 Value *Description = IRB.CreatePointerCast(StackDescriptionGlobal, IntptrTy);
3680 IRB.CreateStore(Description, BasePlus1);
3681 // Write the PC to redzone[2].
3682 Value *BasePlus2 = IRB.CreateIntToPtr(
3683 IRB.CreateAdd(LocalStackBase,
3684 ConstantInt::get(IntptrTy, 2 * ASan.LongSize / 8)),
3685 IntptrPtrTy);
3686 IRB.CreateStore(IRB.CreatePointerCast(&F, IntptrTy), BasePlus2);
3687
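// The description written to redzone[1] encodes the frame layout as a string
// of the form "<NumVars> <Offset> <Size> <NameLen> <Name> ...", one entry per
// variable; the runtime parses it to name stack variables in error reports.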
3688 const auto &ShadowAfterScope = GetShadowBytesAfterScope(SVD, L);
3689
3690 // Poison the stack red zones at the entry.
3691 Value *ShadowBase = ASan.memToShadow(LocalStackBase, IRB);
3692 // As the mask we must use the most-poisoned case: red zones and after-scope.
3693 // As the bytes we can use either the same or just the red zones.
3694 copyToShadow(ShadowAfterScope, ShadowAfterScope, IRB, ShadowBase);
3695
3696 if (!StaticAllocaPoisonCallVec.empty()) {
3697 const auto &ShadowInScope = GetShadowBytes(SVD, L);
3698
3699 // Poison static allocas near lifetime intrinsics.
3700 for (const auto &APC : StaticAllocaPoisonCallVec) {
3701 const ASanStackVariableDescription &Desc = *AllocaToSVDMap[APC.AI];
3702 assert(Desc.Offset % L.Granularity == 0);
3703 size_t Begin = Desc.Offset / L.Granularity;
3704 size_t End = Begin + (APC.Size + L.Granularity - 1) / L.Granularity;
3705
3706 IRBuilder<> IRB(APC.InsBefore);
3707 copyToShadow(ShadowAfterScope,
3708 APC.DoPoison ? ShadowAfterScope : ShadowInScope, Begin, End,
3709 IRB, ShadowBase);
3710 }
3711 }
3712
3713 // Remove lifetime markers now that these are no longer allocas.
3714 for (Value *NewAllocaPtr : NewAllocaPtrs) {
3715 for (User *U : make_early_inc_range(NewAllocaPtr->users())) {
3716 auto *I = cast<Instruction>(U);
3717 if (I->isLifetimeStartOrEnd())
3718 I->eraseFromParent();
3719 }
3720 }
3721
3722 SmallVector<uint8_t, 64> ShadowClean(ShadowAfterScope.size(), 0);
3723 SmallVector<uint8_t, 64> ShadowAfterReturn;
3724
3725 // (Un)poison the stack before all ret instructions.
3726 for (Instruction *Ret : RetVec) {
3727 IRBuilder<> IRBRet(Ret);
3728 // Mark the current frame as retired.
3729 IRBRet.CreateStore(ConstantInt::get(IntptrTy, kRetiredStackFrameMagic),
3730 BasePlus0);
3731 if (DoStackMalloc) {
3732 assert(StackMallocIdx >= 0);
3733 // if FakeStack != 0 // LocalStackBase == FakeStack
3734 // // In use-after-return mode, poison the whole stack frame.
3735 // if StackMallocIdx <= 4
3736 // // For small sizes inline the whole thing:
3737 // memset(ShadowBase, kAsanStackAfterReturnMagic, ShadowSize);
3738 // **SavedFlagPtr(FakeStack) = 0
3739 // else
3740 // __asan_stack_free_N(FakeStack, LocalStackSize)
3741 // else
3742 // <This is not a fake stack; unpoison the redzones>
3743 Value *Cmp =
3744 IRBRet.CreateICmpNE(FakeStack, Constant::getNullValue(IntptrTy));
3745 Instruction *ThenTerm, *ElseTerm;
3746 SplitBlockAndInsertIfThenElse(Cmp, Ret, &ThenTerm, &ElseTerm);
3747
3748 IRBuilder<> IRBPoison(ThenTerm);
3749 if (ASan.MaxInlinePoisoningSize != 0 && StackMallocIdx <= 4) {
3750 int ClassSize = kMinStackMallocSize << StackMallocIdx;
3751 ShadowAfterReturn.resize(ClassSize / L.Granularity,
3752 kAsanStackUseAfterReturnMagic);
3753 copyToShadow(ShadowAfterReturn, ShadowAfterReturn, IRBPoison,
3754 ShadowBase);
3755 Value *SavedFlagPtrPtr = IRBPoison.CreateAdd(
3756 FakeStack,
3757 ConstantInt::get(IntptrTy, ClassSize - ASan.LongSize / 8));
3758 Value *SavedFlagPtr = IRBPoison.CreateLoad(
3759 IntptrTy, IRBPoison.CreateIntToPtr(SavedFlagPtrPtr, IntptrPtrTy));
3760 IRBPoison.CreateStore(
3761 Constant::getNullValue(IRBPoison.getInt8Ty()),
3762 IRBPoison.CreateIntToPtr(SavedFlagPtr, IRBPoison.getPtrTy()));
3763 } else {
3764 // For larger frames call __asan_stack_free_*.
3765 RTCI.createRuntimeCall(
3766 IRBPoison, AsanStackFreeFunc[StackMallocIdx],
3767 {FakeStack, ConstantInt::get(IntptrTy, LocalStackSize)});
3768 }
3769
3770 IRBuilder<> IRBElse(ElseTerm);
3771 copyToShadow(ShadowAfterScope, ShadowClean, IRBElse, ShadowBase);
3772 } else {
3773 copyToShadow(ShadowAfterScope, ShadowClean, IRBRet, ShadowBase);
3774 }
3775 }
3776
3777 // We are done. Remove the old unused alloca instructions.
3778 for (auto *AI : AllocaVec)
3779 AI->eraseFromParent();
3780}
3781
3782void FunctionStackPoisoner::poisonAlloca(Value *V, uint64_t Size,
3783 IRBuilder<> &IRB, bool DoPoison) {
3784 // For now just insert the call to ASan runtime.
3785 Value *AddrArg = IRB.CreatePointerCast(V, IntptrTy);
3786 Value *SizeArg = ConstantInt::get(IntptrTy, Size);
3787 RTCI.createRuntimeCall(
3788 IRB, DoPoison ? AsanPoisonStackMemoryFunc : AsanUnpoisonStackMemoryFunc,
3789 {AddrArg, SizeArg});
3790}
3791
3792// Handling llvm.lifetime intrinsics for a given %alloca:
3793// (1) collect all llvm.lifetime.xxx(%size, %value) describing the alloca.
3794// (2) if %size is constant, poison memory for llvm.lifetime.end (to detect
3795// invalid accesses) and unpoison it for llvm.lifetime.start (the memory
3796// could be poisoned by previous llvm.lifetime.end instruction, as the
3797// variable may go in and out of scope several times, e.g. in loops).
3798// (3) if we poisoned at least one %alloca in a function,
3799// unpoison the whole stack frame at function exit.
3800void FunctionStackPoisoner::handleDynamicAllocaCall(AllocaInst *AI) {
3801 IRBuilder<> IRB(AI);
3802
3803 const Align Alignment = std::max(Align(kAllocaRzSize), AI->getAlign());
3804 const uint64_t AllocaRedzoneMask = kAllocaRzSize - 1;
3805
3806 Value *Zero = Constant::getNullValue(IntptrTy);
3807 Value *AllocaRzSize = ConstantInt::get(IntptrTy, kAllocaRzSize);
3808 Value *AllocaRzMask = ConstantInt::get(IntptrTy, AllocaRedzoneMask);
3809
3810 // Since we need to extend the alloca with additional memory to place the
3811 // redzones, and the alloca allocates ArraySize elements of ElementSize
3812 // bytes each, compute the allocated memory size in bytes as
3813 // OldSize = ArraySize * ElementSize.
3814 const unsigned ElementSize =
3815 F.getDataLayout().getTypeAllocSize(AI->getAllocatedType());
3816 Value *OldSize =
3817 IRB.CreateMul(IRB.CreateIntCast(AI->getArraySize(), IntptrTy, false),
3818 ConstantInt::get(IntptrTy, ElementSize));
3819
3820 // PartialSize = OldSize % 32
3821 Value *PartialSize = IRB.CreateAnd(OldSize, AllocaRzMask);
3822
3823 // Misalign = kAllocaRzSize - PartialSize;
3824 Value *Misalign = IRB.CreateSub(AllocaRzSize, PartialSize);
3825
3826 // PartialPadding = Misalign != kAllocaRzSize ? Misalign : 0;
3827 Value *Cond = IRB.CreateICmpNE(Misalign, AllocaRzSize);
3828 Value *PartialPadding = IRB.CreateSelect(Cond, Misalign, Zero);
3829
3830 // AdditionalChunkSize = Alignment + PartialPadding + kAllocaRzSize
3831 // Alignment is added to locate left redzone, PartialPadding for possible
3832 // partial redzone and kAllocaRzSize for right redzone respectively.
3833 Value *AdditionalChunkSize = IRB.CreateAdd(
3834 ConstantInt::get(IntptrTy, Alignment.value() + kAllocaRzSize),
3835 PartialPadding);
3836
3837 Value *NewSize = IRB.CreateAdd(OldSize, AdditionalChunkSize);
3838
3839 // Insert new alloca with new NewSize and Alignment params.
3840 AllocaInst *NewAlloca = IRB.CreateAlloca(IRB.getInt8Ty(), NewSize);
3841 NewAlloca->setAlignment(Alignment);
3842
3843 // NewAddress = Address + Alignment
3844 Value *NewAddress =
3845 IRB.CreateAdd(IRB.CreatePtrToInt(NewAlloca, IntptrTy),
3846 ConstantInt::get(IntptrTy, Alignment.value()));
3847
3848 // Insert an __asan_alloca_poison call for the newly created alloca.
3849 RTCI.createRuntimeCall(IRB, AsanAllocaPoisonFunc, {NewAddress, OldSize});
3850
3851 // Store the last alloca's address in DynamicAllocaLayout. We'll need it
3852 // to unpoison the dynamic allocas on function exit.
3853 IRB.CreateStore(IRB.CreatePtrToInt(NewAlloca, IntptrTy), DynamicAllocaLayout);
3854
3855 Value *NewAddressPtr = IRB.CreateIntToPtr(NewAddress, AI->getType());
3856
3857 // Remove lifetime markers now that this is no longer an alloca.
3858 for (User *U : make_early_inc_range(AI->users())) {
3859 auto *I = cast<Instruction>(U);
3860 if (I->isLifetimeStartOrEnd())
3861 I->eraseFromParent();
3862 }
3863
3864 // Replace all uses of the address returned by the old alloca with NewAddressPtr.
3865 AI->replaceAllUsesWith(NewAddressPtr);
3866
3867 // We are done. Erase old alloca from parent.
3868 AI->eraseFromParent();
3869}
3870
3871// isSafeAccess returns true if Addr is always inbounds with respect to its
3872// base object. For example, it is a field access or an array access with
3873// constant inbounds index.
3874bool AddressSanitizer::isSafeAccess(ObjectSizeOffsetVisitor &ObjSizeVis,
3875 Value *Addr, TypeSize TypeStoreSize) const {
3876 if (TypeStoreSize.isScalable())
3877 // TODO: We can use vscale_range to convert a scalable value to an
3878 // upper bound on the access size.
3879 return false;
3880
3881 SizeOffsetAPInt SizeOffset = ObjSizeVis.compute(Addr);
3882 if (!SizeOffset.bothKnown())
3883 return false;
3884
3885 uint64_t Size = SizeOffset.Size.getZExtValue();
3886 int64_t Offset = SizeOffset.Offset.getSExtValue();
3887
3888 // Three checks are required to ensure safety:
3889 // . Offset >= 0 (since the offset is given from the base ptr)
3890 // . Size >= Offset (unsigned)
3891 // . Size - Offset >= NeededSize (unsigned)
3892 return Offset >= 0 && Size >= uint64_t(Offset) &&
3893 Size - uint64_t(Offset) >= TypeStoreSize / 8;
3894}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static cl::opt< bool > ClUseStackSafety("stack-tagging-use-stack-safety", cl::Hidden, cl::init(true), cl::desc("Use Stack Safety analysis results"))
Rewrite undef for PHI
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static void findStoresToUninstrumentedArgAllocas(AddressSanitizer &ASan, Instruction &InsBefore, SmallVectorImpl< Instruction * > &InitInsts)
Collect instructions in the entry block after InsBefore which initialize permanent storage for a func...
static void doInstrumentAddress(AddressSanitizer *Pass, Instruction *I, Instruction *InsertBefore, Value *Addr, MaybeAlign Alignment, unsigned Granularity, TypeSize TypeStoreSize, bool IsWrite, Value *SizeArgument, bool UseCalls, uint32_t Exp, RuntimeCallInserter &RTCI)
static const uint64_t kDefaultShadowScale
const char kAMDGPUUnreachableName[]
constexpr size_t kAccessSizeIndexMask
static cl::opt< int > ClDebugMin("asan-debug-min", cl::desc("Debug min inst"), cl::Hidden, cl::init(-1))
static cl::opt< bool > ClUsePrivateAlias("asan-use-private-alias", cl::desc("Use private aliases for global variables"), cl::Hidden, cl::init(true))
static const uint64_t kPS_ShadowOffset64
static const uint64_t kFreeBSD_ShadowOffset32
constexpr size_t kIsWriteShift
static const uint64_t kSmallX86_64ShadowOffsetAlignMask
static bool isInterestingPointerSubtraction(Instruction *I)
const char kAMDGPUAddressSharedName[]
const char kAsanStackFreeNameTemplate[]
constexpr size_t kCompileKernelMask
static cl::opt< bool > ClForceDynamicShadow("asan-force-dynamic-shadow", cl::desc("Load shadow address into a local variable for each function"), cl::Hidden, cl::init(false))
const char kAsanOptionDetectUseAfterReturn[]
static cl::opt< std::string > ClMemoryAccessCallbackPrefix("asan-memory-access-callback-prefix", cl::desc("Prefix for memory access callbacks"), cl::Hidden, cl::init("__asan_"))
static const uint64_t kRISCV64_ShadowOffset64
static cl::opt< bool > ClInsertVersionCheck("asan-guard-against-version-mismatch", cl::desc("Guard against compiler/runtime version mismatch."), cl::Hidden, cl::init(true))
const char kAsanSetShadowPrefix[]
static cl::opt< AsanDtorKind > ClOverrideDestructorKind("asan-destructor-kind", cl::desc("Sets the ASan destructor kind. The default is to use the value " "provided to the pass constructor"), cl::values(clEnumValN(AsanDtorKind::None, "none", "No destructors"), clEnumValN(AsanDtorKind::Global, "global", "Use global destructors")), cl::init(AsanDtorKind::Invalid), cl::Hidden)
static Twine genName(StringRef suffix)
static cl::opt< bool > ClInstrumentWrites("asan-instrument-writes", cl::desc("instrument write instructions"), cl::Hidden, cl::init(true))
const char kAsanPtrCmp[]
static uint64_t GetCtorAndDtorPriority(Triple &TargetTriple)
const char kAsanStackMallocNameTemplate[]
static cl::opt< bool > ClInstrumentByval("asan-instrument-byval", cl::desc("instrument byval call arguments"), cl::Hidden, cl::init(true))
const char kAsanInitName[]
static cl::opt< bool > ClGlobals("asan-globals", cl::desc("Handle global objects"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClRedzoneByvalArgs("asan-redzone-byval-args", cl::desc("Create redzones for byval " "arguments (extra copy " "required)"), cl::Hidden, cl::init(true))
static const uint64_t kWindowsShadowOffset64
const char kAsanGenPrefix[]
constexpr size_t kIsWriteMask
static uint64_t getRedzoneSizeForScale(int MappingScale)
static const uint64_t kDefaultShadowOffset64
static cl::opt< bool > ClOptimizeCallbacks("asan-optimize-callbacks", cl::desc("Optimize callbacks"), cl::Hidden, cl::init(false))
const char kAsanUnregisterGlobalsName[]
static const uint64_t kAsanCtorAndDtorPriority
const char kAsanUnpoisonGlobalsName[]
static cl::opt< bool > ClWithIfuncSuppressRemat("asan-with-ifunc-suppress-remat", cl::desc("Suppress rematerialization of dynamic shadow address by passing " "it through inline asm in prologue."), cl::Hidden, cl::init(true))
static cl::opt< int > ClDebugStack("asan-debug-stack", cl::desc("debug stack"), cl::Hidden, cl::init(0))
const char kAsanUnregisterElfGlobalsName[]
static bool isUnsupportedAMDGPUAddrspace(Value *Addr)
const char kAsanRegisterImageGlobalsName[]
static const uint64_t kWebAssemblyShadowOffset
static cl::opt< bool > ClOpt("asan-opt", cl::desc("Optimize instrumentation"), cl::Hidden, cl::init(true))
static const uint64_t kAllocaRzSize
const char kODRGenPrefix[]
static const uint64_t kSystemZ_ShadowOffset64
static const uint64_t kDefaultShadowOffset32
const char kAsanShadowMemoryDynamicAddress[]
static cl::opt< bool > ClUseOdrIndicator("asan-use-odr-indicator", cl::desc("Use odr indicators to improve ODR reporting"), cl::Hidden, cl::init(true))
static bool GlobalWasGeneratedByCompiler(GlobalVariable *G)
Check if G has been created by a trusted compiler pass.
const char kAsanStackMallocAlwaysNameTemplate[]
static cl::opt< bool > ClInvalidPointerCmp("asan-detect-invalid-pointer-cmp", cl::desc("Instrument <, <=, >, >= with pointer operands"), cl::Hidden, cl::init(false))
static const uint64_t kAsanEmscriptenCtorAndDtorPriority
static cl::opt< int > ClInstrumentationWithCallsThreshold("asan-instrumentation-with-call-threshold", cl::desc("If the function being instrumented contains more than " "this number of memory accesses, use callbacks instead of " "inline checks (-1 means never use callbacks)."), cl::Hidden, cl::init(7000))
static cl::opt< int > ClDebugMax("asan-debug-max", cl::desc("Debug max inst"), cl::Hidden, cl::init(-1))
static cl::opt< bool > ClInvalidPointerSub("asan-detect-invalid-pointer-sub", cl::desc("Instrument - operations with pointer operands"), cl::Hidden, cl::init(false))
static const uint64_t kFreeBSD_ShadowOffset64
static cl::opt< uint32_t > ClForceExperiment("asan-force-experiment", cl::desc("Force optimization experiment (for testing)"), cl::Hidden, cl::init(0))
const char kSanCovGenPrefix[]
static const uint64_t kFreeBSDKasan_ShadowOffset64
const char kAsanModuleDtorName[]
static const uint64_t kDynamicShadowSentinel
static bool isInterestingPointerComparison(Instruction *I)
static cl::opt< bool > ClStack("asan-stack", cl::desc("Handle stack memory"), cl::Hidden, cl::init(true))
static const uint64_t kMIPS64_ShadowOffset64
static const uint64_t kLinuxKasan_ShadowOffset64
static int StackMallocSizeClass(uint64_t LocalStackSize)
static cl::opt< uint32_t > ClMaxInlinePoisoningSize("asan-max-inline-poisoning-size", cl::desc("Inline shadow poisoning for blocks up to the given size in bytes."), cl::Hidden, cl::init(64))
static cl::opt< bool > ClInstrumentAtomics("asan-instrument-atomics", cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClUseAfterScope("asan-use-after-scope", cl::desc("Check stack-use-after-scope"), cl::Hidden, cl::init(false))
constexpr size_t kAccessSizeIndexShift
static cl::opt< int > ClMappingScale("asan-mapping-scale", cl::desc("scale of asan shadow mapping"), cl::Hidden, cl::init(0))
const char kAsanPoisonStackMemoryName[]
static cl::opt< bool > ClEnableKasan("asan-kernel", cl::desc("Enable KernelAddressSanitizer instrumentation"), cl::Hidden, cl::init(false))
static cl::opt< std::string > ClDebugFunc("asan-debug-func", cl::Hidden, cl::desc("Debug func"))
static cl::opt< bool > ClUseGlobalsGC("asan-globals-live-support", cl::desc("Use linker features to support dead " "code stripping of globals"), cl::Hidden, cl::init(true))
static const size_t kNumberOfAccessSizes
const char kAsanUnpoisonStackMemoryName[]
static const uint64_t kLoongArch64_ShadowOffset64
const char kAsanRegisterGlobalsName[]
static cl::opt< bool > ClInstrumentDynamicAllocas("asan-instrument-dynamic-allocas", cl::desc("instrument dynamic allocas"), cl::Hidden, cl::init(true))
const char kAsanModuleCtorName[]
const char kAsanGlobalsRegisteredFlagName[]
static const size_t kMaxStackMallocSize
static cl::opt< bool > ClRecover("asan-recover", cl::desc("Enable recovery mode (continue-after-error)."), cl::Hidden, cl::init(false))
static cl::opt< bool > ClOptSameTemp("asan-opt-same-temp", cl::desc("Instrument the same temp just once"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClDynamicAllocaStack("asan-stack-dynamic-alloca", cl::desc("Use dynamic alloca to represent stack variables"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClOptStack("asan-opt-stack", cl::desc("Don't instrument scalar stack variables"), cl::Hidden, cl::init(false))
static const uint64_t kMIPS_ShadowOffsetN32
const char kAsanUnregisterImageGlobalsName[]
static cl::opt< AsanDetectStackUseAfterReturnMode > ClUseAfterReturn("asan-use-after-return", cl::desc("Sets the mode of detection for stack-use-after-return."), cl::values(clEnumValN(AsanDetectStackUseAfterReturnMode::Never, "never", "Never detect stack use after return."), clEnumValN(AsanDetectStackUseAfterReturnMode::Runtime, "runtime", "Detect stack use after return if " "binary flag 'ASAN_OPTIONS=detect_stack_use_after_return' is set."), clEnumValN(AsanDetectStackUseAfterReturnMode::Always, "always", "Always detect stack use after return.")), cl::Hidden, cl::init(AsanDetectStackUseAfterReturnMode::Runtime))
static cl::opt< bool > ClOptGlobals("asan-opt-globals", cl::desc("Don't instrument scalar globals"), cl::Hidden, cl::init(true))
static const uintptr_t kCurrentStackFrameMagic
static ShadowMapping getShadowMapping(const Triple &TargetTriple, int LongSize, bool IsKasan)
static const uint64_t kPPC64_ShadowOffset64
static cl::opt< AsanCtorKind > ClConstructorKind("asan-constructor-kind", cl::desc("Sets the ASan constructor kind"), cl::values(clEnumValN(AsanCtorKind::None, "none", "No constructors"), clEnumValN(AsanCtorKind::Global, "global", "Use global constructors")), cl::init(AsanCtorKind::Global), cl::Hidden)
static const int kMaxAsanStackMallocSizeClass
static const uint64_t kMIPS32_ShadowOffset32
static cl::opt< bool > ClAlwaysSlowPath("asan-always-slow-path", cl::desc("use instrumentation with slow path for all accesses"), cl::Hidden, cl::init(false))
static const uint64_t kNetBSD_ShadowOffset32
static const uint64_t kFreeBSDAArch64_ShadowOffset64
static const uint64_t kSmallX86_64ShadowOffsetBase
static cl::opt< bool > ClInitializers("asan-initialization-order", cl::desc("Handle C++ initializer order"), cl::Hidden, cl::init(true))
static const uint64_t kNetBSD_ShadowOffset64
const char kAsanPtrSub[]
static cl::opt< unsigned > ClRealignStack("asan-realign-stack", cl::desc("Realign stack to the value of this flag (power of two)"), cl::Hidden, cl::init(32))
static const uint64_t kWindowsShadowOffset32
static cl::opt< bool > ClInstrumentReads("asan-instrument-reads", cl::desc("instrument read instructions"), cl::Hidden, cl::init(true))
static size_t TypeStoreSizeToSizeIndex(uint32_t TypeSize)
const char kAsanAllocaPoison[]
constexpr size_t kCompileKernelShift
static cl::opt< bool > ClWithIfunc("asan-with-ifunc", cl::desc("Access dynamic shadow through an ifunc global on " "platforms that support this"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClKasanMemIntrinCallbackPrefix("asan-kernel-mem-intrinsic-prefix", cl::desc("Use prefix for memory intrinsics in KASAN mode"), cl::Hidden, cl::init(false))
const char kAsanVersionCheckNamePrefix[]
const char kAMDGPUAddressPrivateName[]
static const uint64_t kNetBSDKasan_ShadowOffset64
const char kAMDGPUBallotName[]
const char kAsanRegisterElfGlobalsName[]
static cl::opt< uint64_t > ClMappingOffset("asan-mapping-offset", cl::desc("offset of asan shadow mapping [EXPERIMENTAL]"), cl::Hidden, cl::init(0))
const char kAsanReportErrorTemplate[]
static cl::opt< bool > ClWithComdat("asan-with-comdat", cl::desc("Place ASan constructors in comdat sections"), cl::Hidden, cl::init(true))
static StringRef getAllocaName(AllocaInst *AI)
static cl::opt< bool > ClSkipPromotableAllocas("asan-skip-promotable-allocas", cl::desc("Do not instrument promotable allocas"), cl::Hidden, cl::init(true))
static cl::opt< int > ClMaxInsnsToInstrumentPerBB("asan-max-ins-per-bb", cl::init(10000), cl::desc("maximal number of instructions to instrument in any given BB"), cl::Hidden)
static const uintptr_t kRetiredStackFrameMagic
static cl::opt< bool > ClUseStackSafety("asan-use-stack-safety", cl::Hidden, cl::init(true), cl::Hidden, cl::desc("Use Stack Safety analysis results"), cl::Optional)
const char kAsanPoisonGlobalsName[]
const char kAsanHandleNoReturnName[]
static const size_t kMinStackMallocSize
static cl::opt< int > ClDebug("asan-debug", cl::desc("debug"), cl::Hidden, cl::init(0))
const char kAsanAllocasUnpoison[]
static const uint64_t kAArch64_ShadowOffset64
static cl::opt< bool > ClInvalidPointerPairs("asan-detect-invalid-pointer-pair", cl::desc("Instrument <, <=, >, >=, - with pointer operands"), cl::Hidden, cl::init(false))
Function Alias Analysis false
This file contains the simple types necessary to represent the attributes associated with functions a...
static bool isPointerOperand(Value *I, User *U)
static const Function * getParent(const Value *V)
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
This file contains the declarations for the subclasses of Constant, which represent the different fla...
DXIL Finalize Linkage
dxil translate DXIL Translate Metadata
This file defines the DenseMap class.
This file builds on the ADT/GraphTraits.h file to build generic depth first graph iterator.
static bool runOnFunction(Function &F, bool PostInlining)
This is the interface for a simple mod/ref and alias analysis over globals.
IRTranslator LLVM IR MI
Module.h This file contains the declarations for the Module class.
This defines the Use class.
std::pair< Instruction::BinaryOps, Value * > OffsetOp
Find all possible pairs (BinOp, RHS) that BinOp V, RHS can be simplified.
static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT, AssumptionCache *AC)
Definition Lint.cpp:539
#define F(x, y, z)
Definition MD5.cpp:55
#define I(x, y, z)
Definition MD5.cpp:58
#define G(x, y, z)
Definition MD5.cpp:56
Machine Check Debug Module
This file contains the declarations for metadata subclasses.
uint64_t IntrinsicInst * II
FunctionAnalysisManager FAM
ModuleAnalysisManager MAM
if(PassOpts->AAPipeline)
const SmallVectorImpl< MachineOperand > & Cond
void visit(MachineFunction &MF, MachineBasicBlock &Start, std::function< void(MachineBasicBlock *)> op)
#define OP(OPC)
Definition Instruction.h:46
Shrink Wrap Pass
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition Statistic.h:171
This file contains some functions that are useful when dealing with strings.
#define LLVM_DEBUG(...)
Definition Debug.h:114
static SymbolRef::Type getType(const Symbol *Sym)
Definition TapiFile.cpp:39
This pass exposes codegen information to IR-level passes.
uint64_t getZExtValue() const
Get zero extended value.
Definition APInt.h:1540
int64_t getSExtValue() const
Get sign extended value.
Definition APInt.h:1562
LLVM_ABI AddressSanitizerPass(const AddressSanitizerOptions &Options, bool UseGlobalGC=true, bool UseOdrIndicator=true, AsanDtorKind DestructorKind=AsanDtorKind::Global, AsanCtorKind ConstructorKind=AsanCtorKind::Global)
LLVM_ABI PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM)
LLVM_ABI void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
an instruction to allocate memory on the stack
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
LLVM_ABI bool isStaticAlloca() const
Return true if this alloca is in the entry block of the function and is a constant size.
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
PointerType * getType() const
Overload to return most specific pointer type.
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
bool isUsedWithInAlloca() const
Return true if this alloca is used as an inalloca argument to a call.
LLVM_ABI std::optional< TypeSize > getAllocationSize(const DataLayout &DL) const
Get allocation size in bytes.
void setAlignment(Align Align)
const Value * getArraySize() const
Get the number of elements allocated.
This class represents an incoming formal argument to a Function.
Definition Argument.h:32
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:41
size_t size() const
size - Get the array size.
Definition ArrayRef.h:147
Class to represent array types.
static LLVM_ABI ArrayType * get(Type *ElementType, uint64_t NumElements)
This static method is the primary way to construct an ArrayType.
An instruction that atomically checks whether a specified value is in a memory location,...
an instruction that atomically reads a memory location, combines it with another value,...
LLVM Basic Block Representation.
Definition BasicBlock.h:62
iterator begin()
Instruction iterator methods.
Definition BasicBlock.h:459
LLVM_ABI const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
const Function * getParent() const
Return the enclosing method, or null if none.
Definition BasicBlock.h:213
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
Definition BasicBlock.h:206
InstListType::iterator iterator
Instruction iterators...
Definition BasicBlock.h:170
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition BasicBlock.h:233
LLVM_ABI const Module * getModule() const
Return the module owning the function this basic block belongs to, or nullptr if the function does no...
Conditional or Unconditional Branch instruction.
static BranchInst * Create(BasicBlock *IfTrue, InsertPosition InsertBefore=nullptr)
bool isInlineAsm() const
Check if this call is an inline asm statement.
void setCannotMerge()
static LLVM_ABI CallBase * addOperandBundle(CallBase *CB, uint32_t ID, OperandBundleDef OB, InsertPosition InsertPt=nullptr)
Create a clone of CB with operand bundle OB added.
bool doesNotReturn() const
Determine if the call cannot return.
unsigned arg_size() const
This class represents a function call, abstracting a target machine's calling convention.
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
@ Largest
The linker will choose the largest COMDAT.
Definition Comdat.h:39
@ SameSize
The data referenced by the COMDAT must be the same size.
Definition Comdat.h:41
@ Any
The linker may choose any COMDAT.
Definition Comdat.h:37
@ NoDeduplicate
No deduplication is performed.
Definition Comdat.h:40
@ ExactMatch
The data referenced by the COMDAT must be the same.
Definition Comdat.h:38
ConstantArray - Constant Array Declarations.
Definition Constants.h:433
static LLVM_ABI Constant * get(ArrayType *T, ArrayRef< Constant * > V)
static LLVM_ABI Constant * getIntToPtr(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static LLVM_ABI Constant * getPointerCast(Constant *C, Type *Ty)
Create a BitCast, AddrSpaceCast, or a PtrToInt cast constant expression.
static Constant * getGetElementPtr(Type *Ty, Constant *C, ArrayRef< Constant * > IdxList, GEPNoWrapFlags NW=GEPNoWrapFlags::none(), std::optional< ConstantRange > InRange=std::nullopt, Type *OnlyIfReducedTy=nullptr)
Getelementptr form.
Definition Constants.h:1274
static LLVM_ABI bool isValueValidForType(Type *Ty, uint64_t V)
This static method returns true if the type Ty is big enough to represent the value V.
static LLVM_ABI ConstantPointerNull * get(PointerType *T)
Static factory methods - Return objects of the specified value.
static LLVM_ABI Constant * get(StructType *T, ArrayRef< Constant * > V)
This is an important base class in LLVM.
Definition Constant.h:43
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
LLVM_ABI Constant * getAggregateElement(unsigned Elt) const
For aggregates (struct/array/vector) return the constant that corresponds to the specified element if...
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:63
A debug info location.
Definition DebugLoc.h:124
LLVM_ABI DILocation * get() const
Get the underlying DILocation.
Definition DebugLoc.cpp:50
A handy container for a FunctionType+Callee-pointer pair, which can be passed around as a single enti...
static LLVM_ABI FunctionType * get(Type *Result, ArrayRef< Type * > Params, bool isVarArg)
This static method is the primary way of constructing a FunctionType.
const BasicBlock & front() const
Definition Function.h:858
static Function * createWithDefaultAttr(FunctionType *Ty, LinkageTypes Linkage, unsigned AddrSpace, const Twine &N="", Module *M=nullptr)
Creates a function with some attributes recorded in llvm.module.flags and the LLVMContext applied.
Definition Function.cpp:380
bool hasPersonalityFn() const
Check whether this function has a personality function.
Definition Function.h:903
const Constant * getAliasee() const
Definition GlobalAlias.h:87
static LLVM_ABI GlobalAlias * create(Type *Ty, unsigned AddressSpace, LinkageTypes Linkage, const Twine &Name, Constant *Aliasee, Module *Parent)
If a parent module is specified, the alias is automatically inserted into the end of the specified mo...
Definition Globals.cpp:597
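As a usage sketch (assuming GV is an existing GlobalVariable), a pass can publish a global under a module-local alias; the helper and the ".alias" suffix are illustrative:

  #include "llvm/IR/GlobalAlias.h"
  #include "llvm/IR/GlobalVariable.h"
  #include "llvm/IR/Module.h"
  using namespace llvm;

  static GlobalAlias *makePrivateAlias(Module &M, GlobalVariable *GV) {
    // Because a parent module is given, the alias is appended to M's list.
    return GlobalAlias::create(GV->getValueType(), /*AddressSpace=*/0,
                               GlobalValue::PrivateLinkage,
                               GV->getName() + ".alias", GV, &M);
  }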
LLVM_ABI void copyMetadata(const GlobalObject *Src, unsigned Offset)
Copy metadata from Src, adjusting offsets by Offset.
LLVM_ABI void setComdat(Comdat *C)
Definition Globals.cpp:214
LLVM_ABI void setSection(StringRef S)
Change the section for this global.
Definition Globals.cpp:275
VisibilityTypes getVisibility() const
void setUnnamedAddr(UnnamedAddr Val)
bool hasLocalLinkage() const
static StringRef dropLLVMManglingEscape(StringRef Name)
If the given string begins with the GlobalValue name mangling escape character '\1', drop it.
ThreadLocalMode getThreadLocalMode() const
@ HiddenVisibility
The GV is hidden.
Definition GlobalValue.h:69
void setVisibility(VisibilityTypes V)
LinkageTypes
An enumeration for the kinds of linkage for global values.
Definition GlobalValue.h:52
@ PrivateLinkage
Like Internal, but omit from symbol table.
Definition GlobalValue.h:61
@ CommonLinkage
Tentative definitions.
Definition GlobalValue.h:63
@ InternalLinkage
Rename collisions when linking (static functions).
Definition GlobalValue.h:60
@ AvailableExternallyLinkage
Available for inspection, not emission.
Definition GlobalValue.h:54
@ ExternalWeakLinkage
ExternalWeak linkage description.
Definition GlobalValue.h:62
DLLStorageClassTypes getDLLStorageClass() const
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
LLVM_ABI void copyAttributesFrom(const GlobalVariable *Src)
copyAttributesFrom - copy all additional attributes (those not needed to create a GlobalVariable) from the GlobalVariable Src to this one.
Definition Globals.cpp:552
void setAlignment(Align Align)
Sets the alignment attribute of the GlobalVariable.
Analysis pass providing a never-invalidated alias analysis result.
This instruction compares its operands according to the predicate given to the constructor.
Common base class shared among various IRBuilders.
Definition IRBuilder.h:114
AllocaInst * CreateAlloca(Type *Ty, unsigned AddrSpace, Value *ArraySize=nullptr, const Twine &Name="")
Definition IRBuilder.h:1830
IntegerType * getInt1Ty()
Fetch the type representing a single bit.
Definition IRBuilder.h:547
LoadInst * CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align, const char *Name)
Definition IRBuilder.h:1864
CallInst * CreateMemCpy(Value *Dst, MaybeAlign DstAlign, Value *Src, MaybeAlign SrcAlign, uint64_t Size, bool isVolatile=false, const AAMDNodes &AAInfo=AAMDNodes())
Create and insert a memcpy between the specified pointers.
Definition IRBuilder.h:687
Value * CreatePointerCast(Value *V, Type *DestTy, const Twine &Name="")
Definition IRBuilder.h:2251
Value * CreateICmpSGE(Value *LHS, Value *RHS, const Twine &Name="")
Definition IRBuilder.h:2357
LLVM_ABI Value * CreateSelect(Value *C, Value *True, Value *False, const Twine &Name="", Instruction *MDFrom=nullptr)
BasicBlock::iterator GetInsertPoint() const
Definition IRBuilder.h:202
Value * CreateIntToPtr(Value *V, Type *DestTy, const Twine &Name="")
Definition IRBuilder.h:2199
Value * CreateLShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
Definition IRBuilder.h:1513
IntegerType * getInt32Ty()
Fetch the type representing a 32-bit integer.
Definition IRBuilder.h:562
Value * CreatePtrAdd(Value *Ptr, Value *Offset, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
Definition IRBuilder.h:2036
BasicBlock * GetInsertBlock() const
Definition IRBuilder.h:201
IntegerType * getInt64Ty()
Fetch the type representing a 64-bit integer.
Definition IRBuilder.h:567
Value * CreateICmpNE(Value *LHS, Value *RHS, const Twine &Name="")
Definition IRBuilder.h:2333
Value * CreateGEP(Type *Ty, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
Definition IRBuilder.h:1923
LLVM_ABI CallInst * CreateIntrinsic(Intrinsic::ID ID, ArrayRef< Type * > Types, ArrayRef< Value * > Args, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with Args, mangled using Types.
ConstantInt * getInt32(uint32_t C)
Get a constant 32-bit value.
Definition IRBuilder.h:522
PHINode * CreatePHI(Type *Ty, unsigned NumReservedValues, const Twine &Name="")
Definition IRBuilder.h:2494
Value * CreateNot(Value *V, const Twine &Name="")
Definition IRBuilder.h:1805
Value * CreateICmpEQ(Value *LHS, Value *RHS, const Twine &Name="")
Definition IRBuilder.h:2329
Value * CreateSub(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition IRBuilder.h:1420
ConstantInt * getIntN(unsigned N, uint64_t C)
Get a constant N-bit value, zero extended or truncated from a 64-bit value.
Definition IRBuilder.h:533
LoadInst * CreateLoad(Type *Ty, Value *Ptr, const char *Name)
Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of converting the string to 'bool' for the isVolatile parameter.
Definition IRBuilder.h:1847
Value * CreateAnd(Value *LHS, Value *RHS, const Twine &Name="")
Definition IRBuilder.h:1551
StoreInst * CreateStore(Value *Val, Value *Ptr, bool isVolatile=false)
Definition IRBuilder.h:1860
Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition IRBuilder.h:1403
Value * CreatePtrToInt(Value *V, Type *DestTy, const Twine &Name="")
Definition IRBuilder.h:2194
Value * CreateIsNotNull(Value *Arg, const Twine &Name="")
Return a boolean value testing if Arg != 0.
Definition IRBuilder.h:2651
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args={}, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition IRBuilder.h:2508
LLVM_ABI Value * CreateTypeSize(Type *Ty, TypeSize Size)
Create an expression which evaluates to the number of units in Size at runtime.
Value * CreateIntCast(Value *V, Type *DestTy, bool isSigned, const Twine &Name="")
Definition IRBuilder.h:2277
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
Definition IRBuilder.h:207
Type * getVoidTy()
Fetch the type representing void.
Definition IRBuilder.h:600
StoreInst * CreateAlignedStore(Value *Val, Value *Ptr, MaybeAlign Align, bool isVolatile=false)
Definition IRBuilder.h:1883
Value * CreateOr(Value *LHS, Value *RHS, const Twine &Name="", bool IsDisjoint=false)
Definition IRBuilder.h:1573
IntegerType * getInt8Ty()
Fetch the type representing an 8-bit integer.
Definition IRBuilder.h:552
Value * CreateAddrSpaceCast(Value *V, Type *DestTy, const Twine &Name="")
Definition IRBuilder.h:2209
Value * CreateMul(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition IRBuilder.h:1437
This provides a uniform API for creating instructions and inserting them into a basic block: either at the end of a BasicBlock, or at a specific iterator location in a block.
Definition IRBuilder.h:2780
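Taken together, the builder calls listed above are enough to express the classic shadow-address computation Shadow = (Addr >> Scale) + Offset. A minimal sketch, assuming a 64-bit target and caller-supplied mapping parameters (the helper and value names are illustrative):

  #include "llvm/IR/Constants.h"
  #include "llvm/IR/DerivedTypes.h"
  #include "llvm/IR/IRBuilder.h"
  using namespace llvm;

  static Value *emitMemToShadow(IRBuilder<> &IRB, Value *Addr,
                                uint64_t Scale, uint64_t Offset) {
    Type *IntptrTy = IRB.getInt64Ty();        // assumes 64-bit pointers
    Value *AddrInt = IRB.CreatePtrToInt(Addr, IntptrTy, "addr.int");
    Value *Index = IRB.CreateLShr(AddrInt, ConstantInt::get(IntptrTy, Scale));
    Value *ShadowInt = IRB.CreateAdd(Index, ConstantInt::get(IntptrTy, Offset));
    return IRB.CreateIntToPtr(ShadowInt, PointerType::get(IRB.getInt8Ty(), 0),
                              "shadow.ptr");
  }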
static LLVM_ABI InlineAsm * get(FunctionType *Ty, StringRef AsmString, StringRef Constraints, bool hasSideEffects, bool isAlignStack=false, AsmDialect asmDialect=AD_ATT, bool canThrow=false)
InlineAsm::get - Return the specified uniqued inline asm string.
Definition InlineAsm.cpp:43
Base class for instruction visitors.
Definition InstVisitor.h:78
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
bool hasMetadata() const
Return true if this instruction has any metadata attached to it.
LLVM_ABI void moveBefore(InstListType::iterator InsertPos)
Unlink this instruction from its current basic block and insert it into the basic block that MovePos lives in, right before MovePos.
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
LLVM_ABI BasicBlock * getSuccessor(unsigned Idx) const LLVM_READONLY
Return the specified successor. This instruction must be a terminator.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
LLVM_ABI const DataLayout & getDataLayout() const
Get the data layout of the module this instruction belongs to.
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition Type.cpp:319
A wrapper class for inspecting calls to intrinsic functions.
An instruction for reading from memory.
static Error ParseSectionSpecifier(StringRef Spec, StringRef &Segment, StringRef &Section, unsigned &TAA, bool &TAAParsed, unsigned &StubSize)
Parse the section specifier indicated by "Spec".
LLVM_ABI MDNode * createUnlikelyBranchWeights()
Return metadata containing two branch weights, with significant bias towards the false destination.
Definition MDBuilder.cpp:48
Metadata node.
Definition Metadata.h:1077
ArrayRef< MDOperand > operands() const
Definition Metadata.h:1439
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition Metadata.h:1561
Tuple of metadata.
Definition Metadata.h:1489
This is the common base class for memset/memcpy/memmove.
Root of the metadata hierarchy.
Definition Metadata.h:63
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
Evaluate the size and offset of an object pointed to by a Value* statically.
LLVM_ABI SizeOffsetAPInt compute(Value *V)
Pass interface - Implemented by all 'passes'.
Definition Pass.h:99
static PointerType * getUnqual(Type *ElementType)
This constructs a pointer to an object of the specified type in the default address space (address space zero).
static LLVM_ABI PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
A set of analyses that are preserved following a run of a transformation pass.
Definition Analysis.h:112
static PreservedAnalyses none()
Convenience factory function for the empty preserved set.
Definition Analysis.h:115
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition Analysis.h:118
PreservedAnalyses & abandon()
Mark an analysis as abandoned.
Definition Analysis.h:171
Return a value (possibly void), from a function.
static ReturnInst * Create(LLVMContext &C, Value *retVal=nullptr, InsertPosition InsertBefore=nullptr)
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
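A short usage sketch: SmallPtrSet is the idiomatic "visited" set in a pass, with insert() doubling as the membership test (the returned bool is true only on first insertion). The helper name is illustrative:

  #include "llvm/ADT/SmallPtrSet.h"
  #include "llvm/IR/Instruction.h"
  using namespace llvm;

  static bool shouldProcess(SmallPtrSet<const Instruction *, 16> &Seen,
                            const Instruction *I) {
    return Seen.insert(I).second; // true only the first time I is seen
  }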
This class consists of common code factored out of the SmallVector class to reduce code duplication based on the SmallVector 'N' template parameter.
reference emplace_back(ArgTypes &&... Args)
void reserve(size_type N)
void resize(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
This pass performs the global (interprocedural) stack safety analysis (new pass manager).
bool stackAccessIsSafe(const Instruction &I) const
bool isSafe(const AllocaInst &AI) const
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e. a character array and a length, which need not be null terminated.
Definition StringRef.h:55
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
Definition StringRef.h:261
constexpr bool empty() const
empty - Check if the string is empty.
Definition StringRef.h:143
Class to represent struct types.
static LLVM_ABI StructType * get(LLVMContext &Context, ArrayRef< Type * > Elements, bool isPacked=false)
This static method is the primary way to create a literal StructType.
Definition Type.cpp:414
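A hedged sketch of pairing StructType::get with ConstantStruct::get (documented earlier) to build a literal struct constant; the two-field layout is illustrative, not the real global-description record ASan emits:

  #include "llvm/IR/Constants.h"
  #include "llvm/IR/DerivedTypes.h"
  #include "llvm/IR/LLVMContext.h"
  using namespace llvm;

  static Constant *makeDescriptor(LLVMContext &Ctx, uint64_t Beg,
                                  uint64_t Size) {
    Type *Int64 = Type::getInt64Ty(Ctx);
    StructType *Ty = StructType::get(Ctx, {Int64, Int64}); // literal type
    return ConstantStruct::get(
        Ty, {ConstantInt::get(Int64, Beg), ConstantInt::get(Int64, Size)});
  }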
Analysis pass providing the TargetTransformInfo.
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
AttributeList getAttrList(LLVMContext *C, ArrayRef< unsigned > ArgNos, bool Signed, bool Ret=false, AttributeList AL=AttributeList()) const
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
EltTy front() const
unsigned size() const
Triple - Helper class for working with autoconf configuration names.
Definition Triple.h:47
bool isAndroidVersionLT(unsigned Major) const
Definition Triple.h:821
bool isThumb() const
Tests whether the target is Thumb (little and big endian).
Definition Triple.h:909
bool isDriverKit() const
Is this an Apple DriverKit triple.
Definition Triple.h:600
bool isOSNetBSD() const
Definition Triple.h:630
bool isAndroid() const
Tests whether the target is Android.
Definition Triple.h:819
bool isABIN32() const
Definition Triple.h:1134
bool isMIPS64() const
Tests whether the target is MIPS 64-bit (little and big endian).
Definition Triple.h:1030
ArchType getArch() const
Get the parsed architecture type of this triple.
Definition Triple.h:411
bool isLoongArch64() const
Tests whether the target is 64-bit LoongArch.
Definition Triple.h:1019
bool isMIPS32() const
Tests whether the target is MIPS 32-bit (little and big endian).
Definition Triple.h:1025
bool isOSWindows() const
Tests whether the OS is Windows.
Definition Triple.h:679
@ UnknownObjectFormat
Definition Triple.h:318
bool isARM() const
Tests whether the target is ARM (little and big endian).
Definition Triple.h:914
bool isOSLinux() const
Tests whether the OS is Linux.
Definition Triple.h:728
bool isAMDGPU() const
Definition Triple.h:906
bool isMacOSX() const
Is this a Mac OS X triple.
Definition Triple.h:566
bool isOSFreeBSD() const
Definition Triple.h:638
bool isOSEmscripten() const
Tests whether the OS is Emscripten.
Definition Triple.h:748
bool isWatchOS() const
Is this an Apple watchOS triple.
Definition Triple.h:585
bool isiOS() const
Is this an iOS triple.
Definition Triple.h:575
bool isPS() const
Tests whether the target is the PS4 or PS5 platform.
Definition Triple.h:816
bool isWasm() const
Tests whether the target is wasm (32- and 64-bit).
Definition Triple.h:1118
bool isOSFuchsia() const
Definition Triple.h:642
bool isOSHaiku() const
Tests whether the OS is Haiku.
Definition Triple.h:669
Twine - A lightweight data structure for efficiently representing the concatenation of temporary values as strings.
Definition Twine.h:82
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
LLVM_ABI unsigned getIntegerBitWidth() const
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
Definition Type.cpp:297
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
static LLVM_ABI Type * getVoidTy(LLVMContext &C)
Definition Type.cpp:281
static LLVM_ABI IntegerType * getInt8Ty(LLVMContext &C)
Definition Type.cpp:295
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition Type.h:352
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition Type.h:311
This function has undefined behavior.
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
op_range operands()
Definition User.h:292
Value * getOperand(unsigned i) const
Definition User.h:232
static LLVM_ABI ValueAsMetadata * get(Value *V)
Definition Metadata.cpp:502
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
LLVM_ABI void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition Value.cpp:546
iterator_range< user_iterator > users()
Definition Value.h:426
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
LLVM_ABI void takeName(Value *V)
Transfer the name from V to this value.
Definition Value.cpp:396
Base class of all SIMD vector types.
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
constexpr ScalarTy getFixedValue() const
Definition TypeSize.h:200
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition TypeSize.h:169
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
Definition ilist_node.h:34
self_iterator getIterator()
Definition ilist_node.h:123
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition ilist_node.h:348
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
This file contains the declaration of the Comdat class, which represents a single COMDAT in LLVM.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
void getInterestingMemoryOperands(Module &M, Instruction *I, SmallVectorImpl< InterestingMemoryOperand > &Interesting)
Get all the memory operands from the instruction that need to be instrumented.
void instrumentAddress(Module &M, IRBuilder<> &IRB, Instruction *OrigIns, Instruction *InsertBefore, Value *Addr, Align Alignment, TypeSize TypeStoreSize, bool IsWrite, Value *SizeArgument, bool UseCalls, bool Recover, int AsanScale, int AsanOffset)
Instrument the memory operand Addr.
uint64_t getRedzoneSizeForGlobal(int AsanScale, uint64_t SizeInBytes)
Given SizeInBytes of the Value to be instrumented, returns the redzone size corresponding to it.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
unsigned ID
LLVM IR allows arbitrary numbers to be used as calling convention identifiers.
Definition CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ BasicBlock
Various leaf nodes.
Definition ISDOpcodes.h:81
@ S_CSTRING_LITERALS
S_CSTRING_LITERALS - Section with literal C strings.
Definition MachO.h:131
@ OB
OB - OneByte - Set if this instruction has a one byte opcode.
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to the ValuesClass constructor.
initializer< Ty > init(const Ty &Val)
uint64_t getAllocaSizeInBytes(const AllocaInst &AI)
Context & getContext() const
Definition BasicBlock.h:99
friend class Instruction
Iterator for Instructions in a BasicBlock.
Definition BasicBlock.h:73
This is an optimization pass for GlobalISel generic memory operations.
LLVM_ABI void ReplaceInstWithInst(BasicBlock *BB, BasicBlock::iterator &BI, Instruction *I)
Replace the instruction specified by BI with the instruction specified by I.
@ Offset
Definition DWP.cpp:477
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1705
LLVM_ABI SmallVector< uint8_t, 64 > GetShadowBytesAfterScope(const SmallVectorImpl< ASanStackVariableDescription > &Vars, const ASanStackFrameLayout &Layout)
LLVM_ABI GlobalVariable * createPrivateGlobalForString(Module &M, StringRef Str, bool AllowMerging, Twine NamePrefix="")
LLVM_ABI AllocaInst * findAllocaForValue(Value *V, bool OffsetZero=false)
Returns the unique alloca that the value comes from, or nullptr.
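A small sketch of the typical query (the helper name is illustrative, and the header is assumed to be llvm/Analysis/ValueTracking.h, where this function currently lives):

  #include "llvm/Analysis/ValueTracking.h"
  #include "llvm/IR/Instructions.h"
  using namespace llvm;

  static bool isKnownStackObject(Value *Ptr) {
    // Walks casts, GEPs, and PHIs; nullptr when no unique alloca is found.
    return findAllocaForValue(Ptr) != nullptr;
  }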
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:649
@ Done
Definition Threading.h:60
LLVM_ABI Function * createSanitizerCtor(Module &M, StringRef CtorName)
Creates a sanitizer constructor function.
AsanDetectStackUseAfterReturnMode
Mode of ASan's stack use-after-return detection.
@ Always
Always detect stack use after return.
@ Never
Never detect stack use after return.
@ Runtime
Detect stack use after return unless disabled at runtime (ASAN_OPTIONS=detect_stack_use_after_return=0).
LLVM_ABI DenseMap< BasicBlock *, ColorVector > colorEHFunclets(Function &F)
If an EH funclet personality is in use (see isFuncletEHPersonality), this will recompute which blocks are in which funclet.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting iteration.
Definition STLExtras.h:634
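A sketch of the pattern this enables: erasing instructions while walking a block, without invalidating the loop's iterator (the lifetime-marker filter is just an example use):

  #include "llvm/ADT/STLExtras.h"
  #include "llvm/IR/BasicBlock.h"
  #include "llvm/IR/IntrinsicInst.h"
  using namespace llvm;

  static void dropLifetimeMarkers(BasicBlock &BB) {
    for (Instruction &I : make_early_inc_range(BB))
      if (auto *II = dyn_cast<IntrinsicInst>(&I))
        if (II->isLifetimeStartOrEnd())
          II->eraseFromParent(); // safe: the iterator already advanced
  }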
InnerAnalysisManagerProxy< FunctionAnalysisManager, Module > FunctionAnalysisManagerModuleProxy
Provide the FunctionAnalysisManager to Module proxy.
LLVM_ABI bool isAllocaPromotable(const AllocaInst *AI)
Return true if this alloca is legal for promotion.
LLVM_ABI SmallString< 64 > ComputeASanStackFrameDescription(const SmallVectorImpl< ASanStackVariableDescription > &Vars)
LLVM_ABI SmallVector< uint8_t, 64 > GetShadowBytes(const SmallVectorImpl< ASanStackVariableDescription > &Vars, const ASanStackFrameLayout &Layout)
int countr_zero(T Val)
Count the number of 0's from the least significant bit to the most significant, stopping at the first 1.
Definition bit.h:186
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:759
LLVM_ABI FunctionCallee declareSanitizerInitFunction(Module &M, StringRef InitName, ArrayRef< Type * > InitArgTypes, bool Weak=false)
LLVM_ABI std::string getUniqueModuleId(Module *M)
Produce a unique identifier for this module by taking the MD5 sum of the names of the module's strong external symbols that are not comdat members.
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition MathExtras.h:288
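Together with countr_zero above, this is how a power-of-two granularity becomes a shift amount (a mapping scale); a small worked example with an illustrative helper name:

  #include "llvm/ADT/bit.h"
  #include "llvm/Support/MathExtras.h"
  #include <cassert>
  #include <cstdint>

  static unsigned scaleFromGranularity(uint32_t Granularity) {
    assert(llvm::isPowerOf2_32(Granularity) && "granularity must be 2^k");
    return llvm::countr_zero(Granularity); // e.g. 8 -> 3, 16 -> 4
  }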
LLVM_ABI std::pair< Function *, FunctionCallee > createSanitizerCtorAndInitFunctions(Module &M, StringRef CtorName, StringRef InitName, ArrayRef< Type * > InitArgTypes, ArrayRef< Value * > InitArgs, StringRef VersionCheckName=StringRef(), bool Weak=false)
Creates a sanitizer constructor function and calls the sanitizer's init function from it.
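A hedged sketch of the usual wiring, combined with appendToGlobalCtors (documented further down); the "mytool" names are illustrative, and priority 1 merely mimics what sanitizers pick so the runtime initializes before user code:

  #include "llvm/IR/Module.h"
  #include "llvm/Transforms/Utils/ModuleUtils.h"
  using namespace llvm;

  static void installModuleCtor(Module &M) {
    auto [Ctor, InitFn] = createSanitizerCtorAndInitFunctions(
        M, "mytool.module_ctor", "__mytool_init",
        /*InitArgTypes=*/{}, /*InitArgs=*/{});
    (void)InitFn; // the generated ctor already calls __mytool_init
    appendToGlobalCtors(M, Ctor, /*Priority=*/1);
  }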
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
LLVM_ABI void SplitBlockAndInsertIfThenElse(Value *Cond, BasicBlock::iterator SplitBefore, Instruction **ThenTerm, Instruction **ElseTerm, MDNode *BranchWeights=nullptr, DomTreeUpdater *DTU=nullptr, LoopInfo *LI=nullptr)
SplitBlockAndInsertIfThenElse is similar to SplitBlockAndInsertIfThen, but also creates the ElseBlock.
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:167
bool isAlnum(char C)
Checks whether character C is either a decimal digit or an uppercase or lowercase letter as classified by the "C" locale.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference sizeof(SmallVector<T, 0>).
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type arguments.
Definition Casting.h:548
AsanDtorKind
Types of ASan module destructors supported.
@ Invalid
Not a valid destructor Kind.
@ Global
Append to llvm.global_dtors.
@ None
Do not emit any destructors for ASan.
LLVM_ABI ASanStackFrameLayout ComputeASanStackFrameLayout(SmallVectorImpl< ASanStackVariableDescription > &Vars, uint64_t Granularity, uint64_t MinHeaderSize)
void cantFail(Error Err, const char *Msg=nullptr)
Report a fatal error if Err is a failure value.
Definition Error.h:769
IRBuilder(LLVMContext &, FolderTy, InserterTy, MDNode *, ArrayRef< OperandBundleDef >) -> IRBuilder< FolderTy, InserterTy >
OperandBundleDefT< Value * > OperandBundleDef
Definition AutoUpgrade.h:34
LLVM_ABI void appendToCompilerUsed(Module &M, ArrayRef< GlobalValue * > Values)
Adds global values to the llvm.compiler.used list.
static const int kAsanStackUseAfterReturnMagic
LLVM_ABI void setGlobalVariableLargeSection(const Triple &TargetTriple, GlobalVariable &GV)
void removeASanIncompatibleFnAttributes(Function &F, bool ReadsArgMem)
Remove memory attributes that are incompatible with the instrumentation added by AddressSanitizer and HWAddressSanitizer.
@ Dynamic
Denotes mode unknown at compile time.
ArrayRef(const T &OneElt) -> ArrayRef< T >
LLVM_ABI void appendToGlobalCtors(Module &M, Function *F, int Priority, Constant *Data=nullptr)
Append F to the list of global ctors of module M with the given Priority.
TinyPtrVector< BasicBlock * > ColorVector
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:565
Align assumeAligned(uint64_t Value)
Treats the value 0 as a 1, so Align is always at least 1.
Definition Alignment.h:100
iterator_range< df_iterator< T > > depth_first(const T &G)
LLVM_ABI Instruction * SplitBlockAndInsertIfThen(Value *Cond, BasicBlock::iterator SplitBefore, bool Unreachable, MDNode *BranchWeights=nullptr, DomTreeUpdater *DTU=nullptr, LoopInfo *LI=nullptr, BasicBlock *ThenBlock=nullptr)
Split the containing block at the specified instruction - everything before SplitBefore stays in the old block, and the rest of the instructions in the BB are moved to a new block.
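A hedged sketch of the slow-path pattern this utility enables together with createUnlikelyBranchWeights (documented earlier): branch to a rarely-taken block that calls a no-return report function. Cmp (an i1) and ReportFn are assumed to be provided by the caller:

  #include "llvm/IR/IRBuilder.h"
  #include "llvm/IR/MDBuilder.h"
  #include "llvm/Transforms/Utils/BasicBlockUtils.h"
  using namespace llvm;

  static void emitSlowPath(Instruction *InsertPt, Value *Cmp,
                           FunctionCallee ReportFn, Value *Addr) {
    MDBuilder MDB(InsertPt->getContext());
    // The new "then" block ends in unreachable: the report never returns.
    Instruction *ThenTerm = SplitBlockAndInsertIfThen(
        Cmp, InsertPt->getIterator(), /*Unreachable=*/true,
        MDB.createUnlikelyBranchWeights());
    IRBuilder<> IRB(ThenTerm);
    IRB.CreateCall(ReportFn, {Addr});
  }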
LLVM_ABI const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=MaxLookupSearchDepth)
This method strips off any GEP address adjustments, pointer casts, or llvm.threadlocal.address from the specified value, returning the original object being addressed.
AsanCtorKind
Types of ASan module constructors supported.
LLVM_ABI void maybeMarkSanitizerLibraryCallNoBuiltin(CallInst *CI, const TargetLibraryInfo *TLI)
Given a CallInst, check if it calls a string function known to CodeGen, and mark it with NoBuiltin if so.
Definition Local.cpp:3832
LLVM_ABI void appendToUsed(Module &M, ArrayRef< GlobalValue * > Values)
Adds global values to the llvm.used list.
LLVM_ABI void appendToGlobalDtors(Module &M, Function *F, int Priority, Constant *Data=nullptr)
Same as appendToGlobalCtors(), but for global dtors.
LLVM_ABI bool checkIfAlreadyInstrumented(Module &M, StringRef Flag)
Check if the module has the flag attached; if not, add the flag.
void getAddressSanitizerParams(const Triple &TargetTriple, int LongSize, bool IsKasan, uint64_t *ShadowBase, int *MappingScale, bool *OrShadowOffset)
DEMANGLE_ABI std::string demangle(std::string_view MangledName)
Attempt to demangle a string using different demangling schemes.
Definition Demangle.cpp:20
std::string itostr(int64_t X)
LLVM_ABI void SplitBlockAndInsertForEachLane(ElementCount EC, Type *IndexTy, BasicBlock::iterator InsertBefore, std::function< void(IRBuilderBase &, Value *)> Func)
Utility function for performing a given action on each lane of a vector with EC elements.
AnalysisManager< Module > ModuleAnalysisManager
Convenience typedef for the Module analysis manager.
Definition MIRParser.h:39
LLVM_ABI bool replaceDbgDeclare(Value *Address, Value *NewAddress, DIBuilder &Builder, uint8_t DIExprFlags, int Offset)
Replaces a dbg.declare record when the address it describes is replaced with a new value.
Definition Local.cpp:1942
const uint8_t AccessSizeIndex
LLVM_ABI ASanAccessInfo(int32_t Packed)
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
constexpr uint64_t value() const
This is a hole in the type system and should not be abused.
Definition Alignment.h:77
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition Alignment.h:106
Align valueOrOne() const
For convenience, returns a valid alignment or 1 if undefined.
Definition Alignment.h:130
Information about a load/store intrinsic defined by the target.
SmallVector< InterestingMemoryOperand, 1 > InterestingOperands
A CRTP mix-in to automatically provide informational APIs needed for passes.
Definition PassManager.h:70
SizeOffsetAPInt - Used by ObjectSizeOffsetVisitor, which works with APInts.