#define DEBUG_TYPE "tsan"

static cl::opt<bool> ClInstrumentMemoryAccesses(
    "tsan-instrument-memory-accesses", cl::init(true),
    cl::desc("Instrument memory accesses"), cl::Hidden);
static cl::opt<bool> ClInstrumentFuncEntryExit(
    "tsan-instrument-func-entry-exit", cl::init(true),
    cl::desc("Instrument function entry and exit"), cl::Hidden);
static cl::opt<bool> ClHandleCxxExceptions(
    "tsan-handle-cxx-exceptions", cl::init(true),
    cl::desc("Handle C++ exceptions (insert cleanup blocks for unwinding)"),
    cl::Hidden);
static cl::opt<bool> ClInstrumentAtomics("tsan-instrument-atomics",
                                         cl::init(true),
                                         cl::desc("Instrument atomics"),
                                         cl::Hidden);
static cl::opt<bool> ClInstrumentMemIntrinsics(
    "tsan-instrument-memintrinsics", cl::init(true),
    cl::desc("Instrument memintrinsics (memset/memcpy/memmove)"), cl::Hidden);
static cl::opt<bool> ClDistinguishVolatile(
    "tsan-distinguish-volatile", cl::init(false),
    cl::desc("Emit special instrumentation for accesses to volatiles"),
    cl::Hidden);
static cl::opt<bool> ClInstrumentReadBeforeWrite(
    "tsan-instrument-read-before-write", cl::init(false),
    cl::desc("Do not eliminate read instrumentation for read-before-writes"),
    cl::Hidden);
static cl::opt<bool> ClCompoundReadBeforeWrite(
    "tsan-compound-read-before-write", cl::init(false),
    cl::desc("Emit special compound instrumentation for reads-before-writes"),
    cl::Hidden);
STATISTIC(NumInstrumentedReads, "Number of instrumented reads");
STATISTIC(NumInstrumentedWrites, "Number of instrumented writes");
STATISTIC(NumOmittedReadsBeforeWrite,
          "Number of reads ignored due to following writes");
STATISTIC(NumAccessesWithBadSize, "Number of accesses with bad size");
STATISTIC(NumInstrumentedVtableWrites, "Number of vtable ptr writes");
STATISTIC(NumInstrumentedVtableReads, "Number of vtable ptr reads");
STATISTIC(NumOmittedReadsFromConstantGlobals,
          "Number of reads from constant globals");
STATISTIC(NumOmittedReadsFromVtable, "Number of vtable reads");
STATISTIC(NumOmittedNonCaptured, "Number of accesses ignored due to capturing");
/// ThreadSanitizer: instrument the code in a module to find races.
struct ThreadSanitizer {
  ThreadSanitizer() {
    // Check options and warn the user.
    if (ClInstrumentReadBeforeWrite && ClCompoundReadBeforeWrite)
      errs()
          << "warning: Option -tsan-compound-read-before-write has no effect "
             "when -tsan-instrument-read-before-write is set.\n";
  }

  bool sanitizeFunction(Function &F, const TargetLibraryInfo &TLI);

  // Internal instruction wrapper that carries extra information gathered by
  // prior analysis.
  struct InstructionInfo {
    // Instrumentation emitted for this instruction is for a compounded set of
    // read and write operations in the same basic block.
    static constexpr unsigned kCompoundRW = (1U << 0);

    explicit InstructionInfo(Instruction *Inst) : Inst(Inst) {}

    Instruction *Inst;
    unsigned Flags = 0;
  };

  void initialize(Module &M, const TargetLibraryInfo &TLI);
  bool instrumentLoadOrStore(const InstructionInfo &II, const DataLayout &DL);
  bool instrumentAtomic(Instruction *I, const DataLayout &DL);
  bool instrumentMemIntrinsic(Instruction *I);
  void chooseInstructionsToInstrument(SmallVectorImpl<Instruction *> &Local,
                                      SmallVectorImpl<InstructionInfo> &All,
                                      const DataLayout &DL);
  bool addrPointsToConstantData(Value *Addr);
  int getMemoryAccessFuncIndex(Type *OrigTy, Value *Addr, const DataLayout &DL);
  void InsertRuntimeIgnores(Function &F);

  Type *IntptrTy;
  // Callbacks into the run-time library, resolved in initialize(); access
  // sizes are powers of two: 1, 2, 4, 8 and 16 bytes.
  FunctionCallee TsanFuncEntry;
  FunctionCallee TsanFuncExit;
  FunctionCallee TsanIgnoreBegin;
  FunctionCallee TsanIgnoreEnd;
  static const size_t kNumberOfAccessSizes = 5;
  FunctionCallee TsanRead[kNumberOfAccessSizes];
  FunctionCallee TsanWrite[kNumberOfAccessSizes];
  // ... the unaligned, volatile, compound, atomic and memintrinsic callbacks
  // are declared the same way, each as an array [kNumberOfAccessSizes].
};
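
// With kNumberOfAccessSizes == 5, table index i covers accesses of (1 << i)
// bytes, i.e. 1, 2, 4, 8 and 16 bytes; getMemoryAccessFuncIndex() maps a
// type's store size back to this index and returns -1 for any other size
// (counted in NumAccessesWithBadSize).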
static void insertModuleCtor(Module &M) {
  getOrCreateSanitizerCtorAndInitFunctions(
      M, kTsanModuleCtorName, kTsanInitName, /*InitArgTypes=*/{},
      /*InitArgs=*/{},
      // This callback is invoked when the functions are created the first
      // time. Hook them into the global ctors list in that case.
      [&](Function *Ctor, FunctionCallee) { appendToGlobalCtors(M, Ctor, 0); });
}

PreservedAnalyses ThreadSanitizerPass::run(Function &F,
                                           FunctionAnalysisManager &FAM) {
  ThreadSanitizer TSan;
  if (TSan.sanitizeFunction(F, FAM.getResult<TargetLibraryAnalysis>(F)))
    return PreservedAnalyses::none();
  return PreservedAnalyses::all();
}

void ThreadSanitizer::initialize(Module &M, const TargetLibraryInfo &TLI) {
  const DataLayout &DL = M.getDataLayout();
  LLVMContext &Ctx = M.getContext();
  IntptrTy = DL.getIntPtrType(Ctx);
  IRBuilder<> IRB(Ctx);
  AttributeList Attr;
  Attr = Attr.addFnAttribute(Ctx, Attribute::NoUnwind);

  // Initialize the callbacks.
  TsanFuncEntry = M.getOrInsertFunction("__tsan_func_entry", Attr,
                                        IRB.getVoidTy(), IRB.getPtrTy());
  TsanFuncExit =
      M.getOrInsertFunction("__tsan_func_exit", Attr, IRB.getVoidTy());
  TsanIgnoreBegin = M.getOrInsertFunction("__tsan_ignore_thread_begin", Attr,
                                          IRB.getVoidTy());
  TsanIgnoreEnd =
      M.getOrInsertFunction("__tsan_ignore_thread_end", Attr, IRB.getVoidTy());
  IntegerType *OrdTy = IRB.getInt32Ty();
  for (size_t i = 0; i < kNumberOfAccessSizes; ++i) {
    const unsigned ByteSize = 1U << i;
    const unsigned BitSize = ByteSize * 8;
    std::string ByteSizeStr = utostr(ByteSize);
    std::string BitSizeStr = utostr(BitSize);
    SmallString<32> ReadName("__tsan_read" + ByteSizeStr);
    TsanRead[i] = M.getOrInsertFunction(ReadName, Attr, IRB.getVoidTy(),
                                        IRB.getPtrTy());

    SmallString<32> WriteName("__tsan_write" + ByteSizeStr);
    TsanWrite[i] = M.getOrInsertFunction(WriteName, Attr, IRB.getVoidTy(),
                                         IRB.getPtrTy());

    SmallString<64> UnalignedReadName("__tsan_unaligned_read" + ByteSizeStr);
    TsanUnalignedRead[i] = M.getOrInsertFunction(
        UnalignedReadName, Attr, IRB.getVoidTy(), IRB.getPtrTy());

    SmallString<64> UnalignedWriteName("__tsan_unaligned_write" + ByteSizeStr);
    TsanUnalignedWrite[i] = M.getOrInsertFunction(
        UnalignedWriteName, Attr, IRB.getVoidTy(), IRB.getPtrTy());

    SmallString<64> VolatileReadName("__tsan_volatile_read" + ByteSizeStr);
    TsanVolatileRead[i] = M.getOrInsertFunction(
        VolatileReadName, Attr, IRB.getVoidTy(), IRB.getPtrTy());

    SmallString<64> VolatileWriteName("__tsan_volatile_write" + ByteSizeStr);
    TsanVolatileWrite[i] = M.getOrInsertFunction(
        VolatileWriteName, Attr, IRB.getVoidTy(), IRB.getPtrTy());

    SmallString<64> UnalignedVolatileReadName("__tsan_unaligned_volatile_read" +
                                              ByteSizeStr);
    TsanUnalignedVolatileRead[i] = M.getOrInsertFunction(
        UnalignedVolatileReadName, Attr, IRB.getVoidTy(), IRB.getPtrTy());

    SmallString<64> UnalignedVolatileWriteName(
        "__tsan_unaligned_volatile_write" + ByteSizeStr);
    TsanUnalignedVolatileWrite[i] = M.getOrInsertFunction(
        UnalignedVolatileWriteName, Attr, IRB.getVoidTy(), IRB.getPtrTy());

    SmallString<64> CompoundRWName("__tsan_read_write" + ByteSizeStr);
    TsanCompoundRW[i] = M.getOrInsertFunction(
        CompoundRWName, Attr, IRB.getVoidTy(), IRB.getPtrTy());

    SmallString<64> UnalignedCompoundRWName("__tsan_unaligned_read_write" +
                                            ByteSizeStr);
    TsanUnalignedCompoundRW[i] = M.getOrInsertFunction(
        UnalignedCompoundRWName, Attr, IRB.getVoidTy(), IRB.getPtrTy());

    Type *Ty = Type::getIntNTy(Ctx, BitSize);
    Type *PtrTy = PointerType::get(Ty, 0);
    SmallString<32> AtomicLoadName("__tsan_atomic" + BitSizeStr + "_load");
    TsanAtomicLoad[i] =
        M.getOrInsertFunction(AtomicLoadName,
                              TLI.getAttrList(&Ctx, {1}, /*Signed=*/true,
                                              /*Ret=*/BitSize <= 32, Attr),
                              Ty, PtrTy, OrdTy);

    // Args of type Ty need extension only when BitSize is 32 or less.
    using Idxs = std::vector<unsigned>;
    Idxs Idxs2Or12((BitSize <= 32) ? Idxs({1, 2}) : Idxs({2}));
    Idxs Idxs34Or1234((BitSize <= 32) ? Idxs({1, 2, 3, 4}) : Idxs({3, 4}));
    SmallString<32> AtomicStoreName("__tsan_atomic" + BitSizeStr + "_store");
    TsanAtomicStore[i] = M.getOrInsertFunction(
        AtomicStoreName,
        TLI.getAttrList(&Ctx, Idxs2Or12, /*Signed=*/true, /*Ret=*/false, Attr),
        IRB.getVoidTy(), PtrTy, Ty, OrdTy);

    for (unsigned Op = AtomicRMWInst::FIRST_BINOP;
         Op <= AtomicRMWInst::LAST_BINOP; ++Op) {
      TsanAtomicRMW[Op][i] = nullptr;
      const char *NamePart = nullptr;
      if (Op == AtomicRMWInst::Xchg)
        NamePart = "_exchange";
      else if (Op == AtomicRMWInst::Add)
        NamePart = "_fetch_add";
      else if (Op == AtomicRMWInst::Sub)
        NamePart = "_fetch_sub";
      else if (Op == AtomicRMWInst::And)
        NamePart = "_fetch_and";
      else if (Op == AtomicRMWInst::Or)
        NamePart = "_fetch_or";
      else if (Op == AtomicRMWInst::Xor)
        NamePart = "_fetch_xor";
      else if (Op == AtomicRMWInst::Nand)
        NamePart = "_fetch_nand";
      else
        continue;
      SmallString<32> RMWName("__tsan_atomic" + BitSizeStr + NamePart);
      TsanAtomicRMW[Op][i] = M.getOrInsertFunction(
          RMWName,
          TLI.getAttrList(&Ctx, Idxs2Or12, /*Signed=*/true,
                          /*Ret=*/BitSize <= 32, Attr),
          Ty, PtrTy, Ty, OrdTy);
    }

    SmallString<32> AtomicCASName("__tsan_atomic" + BitSizeStr +
                                  "_compare_exchange_val");
    TsanAtomicCAS[i] = M.getOrInsertFunction(
        AtomicCASName,
        TLI.getAttrList(&Ctx, Idxs34Or1234, /*Signed=*/true,
                        /*Ret=*/BitSize <= 32, Attr),
        Ty, PtrTy, Ty, Ty, OrdTy, OrdTy);
  }
  TsanVptrUpdate =
      M.getOrInsertFunction("__tsan_vptr_update", Attr, IRB.getVoidTy(),
                            IRB.getPtrTy(), IRB.getPtrTy());
  TsanVptrLoad = M.getOrInsertFunction("__tsan_vptr_read", Attr,
                                       IRB.getVoidTy(), IRB.getPtrTy());
  TsanAtomicThreadFence = M.getOrInsertFunction(
      "__tsan_atomic_thread_fence",
      TLI.getAttrList(&Ctx, {0}, /*Signed=*/true, /*Ret=*/false, Attr),
      IRB.getVoidTy(), OrdTy);
  TsanAtomicSignalFence = M.getOrInsertFunction(
      "__tsan_atomic_signal_fence",
      TLI.getAttrList(&Ctx, {0}, /*Signed=*/true, /*Ret=*/false, Attr),
      IRB.getVoidTy(), OrdTy);

  MemmoveFn =
      M.getOrInsertFunction("__tsan_memmove", Attr, IRB.getPtrTy(),
                            IRB.getPtrTy(), IRB.getPtrTy(), IntptrTy);
  MemcpyFn =
      M.getOrInsertFunction("__tsan_memcpy", Attr, IRB.getPtrTy(),
                            IRB.getPtrTy(), IRB.getPtrTy(), IntptrTy);
  MemsetFn = M.getOrInsertFunction(
      "__tsan_memset",
      TLI.getAttrList(&Ctx, {1}, /*Signed=*/true, /*Ret=*/false, Attr),
      IRB.getPtrTy(), IRB.getPtrTy(), IRB.getInt32Ty(), IntptrTy);
}
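
// The callbacks declared above follow a uniform naming scheme: plain access
// hooks are keyed by the access size in bytes (__tsan_read4,
// __tsan_unaligned_write8, __tsan_volatile_read2, ...), whereas the atomic
// hooks are keyed by bit width (__tsan_atomic32_load,
// __tsan_atomic64_compare_exchange_val, ...).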
static bool isVtableAccess(Instruction *I) {
  if (MDNode *Tag = I->getMetadata(LLVMContext::MD_tbaa))
    return Tag->isTBAAVtableAccess();
  return false;
}

// Do not instrument known races/"benign races" that come from compiler
// instrumentation. The user has no way of suppressing them.
static bool shouldInstrumentReadWriteFromAddress(const Module *M, Value *Addr) {
  // Peel off GEPs and BitCasts.
  Addr = Addr->stripInBoundsOffsets();

  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Addr)) {
    if (GV->hasSection()) {
      // Do not instrument globals placed in special sections such as the PGO
      // counters section.
      StringRef SectionName = GV->getSection();
      auto OF = M->getTargetTriple().getObjectFormat();
      if (SectionName.ends_with(
              getInstrProfSectionName(IPSK_cnts, OF, /*AddSegmentInfo=*/false)))
        return false;
    }
  }

  // Do not instrument accesses from different address spaces; we cannot deal
  // with them.
  if (Addr) {
    Type *PtrTy = cast<PointerType>(Addr->getType()->getScalarType());
    if (PtrTy->getPointerAddressSpace() != 0)
      return false;
  }
  return true;
}

bool ThreadSanitizer::addrPointsToConstantData(Value *Addr) {
  // If this is a GEP, just analyze its pointer operand.
  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Addr))
    Addr = GEP->getPointerOperand();

  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Addr)) {
    if (GV->isConstant()) {
      // Reads from constant globals cannot race with any writes.
      NumOmittedReadsFromConstantGlobals++;
      return true;
    }
  } else if (LoadInst *L = dyn_cast<LoadInst>(Addr)) {
    if (isVtableAccess(L)) {
      // Reads from a vtable pointer cannot race with any writes.
      NumOmittedReadsFromVtable++;
      return true;
    }
  }
  return false;
}
// Instrumenting some of the accesses may be proven redundant; currently
// handled: read-before-write (within the same BB, no calls between) and
// accesses to addressable-but-not-captured allocas. 'Local' holds the
// loads/stores of one call-free region, 'All' receives the instructions that
// will actually be instrumented.
void ThreadSanitizer::chooseInstructionsToInstrument(
    SmallVectorImpl<Instruction *> &Local,
    SmallVectorImpl<InstructionInfo> &All, const DataLayout &DL) {
  DenseMap<Value *, size_t> WriteTargets; // Map of addresses to index in All
  // Iterate from the end.
  for (Instruction *I : reverse(Local)) {
    const bool IsWrite = isa<StoreInst>(*I);
    Value *Addr = IsWrite ? cast<StoreInst>(I)->getPointerOperand()
                          : cast<LoadInst>(I)->getPointerOperand();

    if (!shouldInstrumentReadWriteFromAddress(I->getModule(), Addr))
      continue;

    if (!IsWrite) {
      const auto WriteEntry = WriteTargets.find(Addr);
      if (!ClInstrumentReadBeforeWrite && WriteEntry != WriteTargets.end()) {
        auto &WI = All[WriteEntry->second];
        // If we distinguish volatile accesses and if either the read or write
        // is volatile, do not omit any instrumentation.
        const bool AnyVolatile =
            ClDistinguishVolatile && (cast<LoadInst>(I)->isVolatile() ||
                                      cast<StoreInst>(WI.Inst)->isVolatile());
        if (!AnyVolatile) {
          // We will not instrument this read, but still instrument the write;
          // optionally turn that write into a compound access.
          if (ClCompoundReadBeforeWrite)
            WI.Flags |= InstructionInfo::kCompoundRW;
          NumOmittedReadsBeforeWrite++;
          continue;
        }
      }
      // Addr points to constant data -- it cannot race with any writes.
      if (addrPointsToConstantData(Addr))
        continue;
    }

    // An addressable but not captured variable cannot be referenced from
    // another thread, so it cannot participate in a data race.
    if (isa<AllocaInst>(getUnderlyingObject(Addr)) &&
        !PointerMayBeCaptured(Addr, /*ReturnCaptures=*/true,
                              /*StoreCaptures=*/true)) {
      NumOmittedNonCaptured++;
      continue;
    }

    // Instrument this instruction.
    All.emplace_back(I);
    if (IsWrite) {
      // For read-before-write and compound instrumentation we only need one
      // write target; any previous entry can be overridden.
      WriteTargets[Addr] = All.size() - 1;
    }
  }
  Local.clear();
}
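
// Sketch of the effect (not lifted from a test): for a region such as
//   %v = load i32, ptr %p
//   %s = add i32 %v, 1
//   store i32 %s, ptr %p
// the load's instrumentation is dropped (NumOmittedReadsBeforeWrite) and,
// with -tsan-compound-read-before-write, the store is later emitted as a
// single compound __tsan_read_write4 call instead of separate read and write
// callbacks.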
static bool isTsanAtomic(const Instruction *I) {
  // TODO: Ascertain whether aggregate loads/stores or vectors are possible.
  auto SSID = getAtomicSyncScopeID(I);
  if (!SSID)
    return false;
  if (isa<LoadInst>(I) || isa<StoreInst>(I))
    return *SSID != SyncScope::SingleThread;
  return true;
}
void ThreadSanitizer::InsertRuntimeIgnores(Function &F) {
  InstrumentationIRBuilder IRB(&F.getEntryBlock(), F.getEntryBlock().begin());
  IRB.CreateCall(TsanIgnoreBegin);
  EscapeEnumerator EE(F, "tsan_ignore_cleanup", ClHandleCxxExceptions);
  while (IRBuilder<> *AtExit = EE.Next()) {
    InstrumentationIRBuilder::ensureDebugInfo(*AtExit, F);
    AtExit->CreateCall(TsanIgnoreEnd);
  }
}
bool ThreadSanitizer::sanitizeFunction(Function &F,
                                       const TargetLibraryInfo &TLI) {
  // This is required to prevent instrumenting the call to __tsan_init from
  // within the module constructor.
  if (F.getName() == kTsanModuleCtorName)
    return false;
  // Naked functions can not have prologue/epilogue
  // (__tsan_func_entry/__tsan_func_exit) generated, so don't instrument them
  // at all.
  if (F.hasFnAttribute(Attribute::Naked))
    return false;
  // __attribute__(disable_sanitizer_instrumentation) prevents all kinds of
  // instrumentation.
  if (F.hasFnAttribute(Attribute::DisableSanitizerInstrumentation))
    return false;

  initialize(*F.getParent(), TLI);
  SmallVector<InstructionInfo, 8> AllLoadsAndStores;
  SmallVector<Instruction *, 8> LocalLoadsAndStores;
  SmallVector<Instruction *, 8> AtomicAccesses;
  SmallVector<Instruction *, 8> MemIntrinCalls;
  bool Res = false;
  bool HasCalls = false;
  bool SanitizeFunction = F.hasFnAttribute(Attribute::SanitizeThread);
  const DataLayout &DL = F.getDataLayout();

  // Traverse all instructions, collect loads/stores/returns, check for calls.
  for (auto &BB : F) {
    for (auto &Inst : BB) {
      // Skip instructions inserted by another instrumentation.
      if (Inst.hasMetadata(LLVMContext::MD_nosanitize))
        continue;
      if (isTsanAtomic(&Inst))
        AtomicAccesses.push_back(&Inst);
      else if (isa<LoadInst>(Inst) || isa<StoreInst>(Inst))
        LocalLoadsAndStores.push_back(&Inst);
      else if ((isa<CallInst>(Inst) && !isa<DbgInfoIntrinsic>(Inst)) ||
               isa<InvokeInst>(Inst)) {
        if (CallInst *CI = dyn_cast<CallInst>(&Inst))
          maybeMarkSanitizerLibraryCallNoBuiltin(CI, &TLI);
        if (isa<MemIntrinsic>(Inst))
          MemIntrinCalls.push_back(&Inst);
        HasCalls = true;
        chooseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores,
                                       DL);
      }
    }
    chooseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores, DL);
  }

  // Instrument memory accesses only if we want to report bugs in the function.
  if (ClInstrumentMemoryAccesses && SanitizeFunction)
    for (const auto &II : AllLoadsAndStores) {
      Res |= instrumentLoadOrStore(II, DL);
    }

  // Instrument atomic memory accesses in any case (they can be used to
  // implement synchronization).
  if (ClInstrumentAtomics)
    for (auto *Inst : AtomicAccesses) {
      Res |= instrumentAtomic(Inst, DL);
    }

  if (ClInstrumentMemIntrinsics && SanitizeFunction)
    for (auto *Inst : MemIntrinCalls) {
      Res |= instrumentMemIntrinsic(Inst);
    }

  if (F.hasFnAttribute("sanitize_thread_no_checking_at_run_time")) {
    assert(!F.hasFnAttribute(Attribute::SanitizeThread));
    if (HasCalls)
      InsertRuntimeIgnores(F);
  }

  // Instrument function entry/exit points if there were instrumented accesses.
  if ((Res || HasCalls) && ClInstrumentFuncEntryExit) {
    InstrumentationIRBuilder IRB(&F.getEntryBlock(), F.getEntryBlock().begin());
    Value *ReturnAddress =
        IRB.CreateIntrinsic(Intrinsic::returnaddress, {}, IRB.getInt32(0));
    IRB.CreateCall(TsanFuncEntry, ReturnAddress);

    EscapeEnumerator EE(F, "tsan_cleanup", ClHandleCxxExceptions);
    while (IRBuilder<> *AtExit = EE.Next()) {
      InstrumentationIRBuilder::ensureDebugInfo(*AtExit, F);
      AtExit->CreateCall(TsanFuncExit, {});
    }
    Res = true;
  }
  return Res;
}
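
// Roughly, a sanitized function thus gains
//   %ra = call ptr @llvm.returnaddress(i32 0)
//   call void @__tsan_func_entry(ptr %ra)
// at its entry and a matching call to @__tsan_func_exit() on every return
// (and, with -tsan-handle-cxx-exceptions, on every unwind path found by
// EscapeEnumerator).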
bool ThreadSanitizer::instrumentLoadOrStore(const InstructionInfo &II,
                                            const DataLayout &DL) {
  InstrumentationIRBuilder IRB(II.Inst);
  const bool IsWrite = isa<StoreInst>(*II.Inst);
  Value *Addr = IsWrite ? cast<StoreInst>(II.Inst)->getPointerOperand()
                        : cast<LoadInst>(II.Inst)->getPointerOperand();
  Type *OrigTy = getLoadStoreType(II.Inst);

  // swifterror memory addresses are mem2reg promoted by instruction
  // selection. As such they cannot have regular uses like an instrumentation
  // function and it makes no sense to track them as memory.
  if (Addr->isSwiftError())
    return false;

  int Idx = getMemoryAccessFuncIndex(OrigTy, Addr, DL);
  if (Idx < 0)
    return false;
  if (IsWrite && isVtableAccess(II.Inst)) {
    LLVM_DEBUG(dbgs() << "  VPTR : " << *II.Inst << "\n");
    Value *StoredValue = cast<StoreInst>(II.Inst)->getValueOperand();
    // StoredValue may be a vector type if we are storing several vptrs at
    // once. In this case, just take the first element of the vector since
    // this is enough to find vptr races.
    if (isa<VectorType>(StoredValue->getType()))
      StoredValue = IRB.CreateExtractElement(
          StoredValue, ConstantInt::get(IRB.getInt32Ty(), 0));
    if (StoredValue->getType()->isIntegerTy())
      StoredValue = IRB.CreateIntToPtr(StoredValue, IRB.getPtrTy());
    // Call TsanVptrUpdate.
    IRB.CreateCall(TsanVptrUpdate, {Addr, StoredValue});
    NumInstrumentedVtableWrites++;
    return true;
  }
  if (!IsWrite && isVtableAccess(II.Inst)) {
    IRB.CreateCall(TsanVptrLoad, Addr);
    NumInstrumentedVtableReads++;
    return true;
  }

  const Align Alignment = IsWrite ? cast<StoreInst>(II.Inst)->getAlign()
                                  : cast<LoadInst>(II.Inst)->getAlign();
  const bool IsCompoundRW =
      ClCompoundReadBeforeWrite && (II.Flags & InstructionInfo::kCompoundRW);
  const bool IsVolatile = ClDistinguishVolatile &&
                          (IsWrite ? cast<StoreInst>(II.Inst)->isVolatile()
                                   : cast<LoadInst>(II.Inst)->isVolatile());
  assert((!IsVolatile || !IsCompoundRW) && "Compound volatile invalid!");

  const uint32_t TypeSize = DL.getTypeStoreSizeInBits(OrigTy);
  FunctionCallee OnAccessFunc = nullptr;
  if (Alignment >= Align(8) || (Alignment.value() % (TypeSize / 8)) == 0) {
    if (IsCompoundRW)
      OnAccessFunc = TsanCompoundRW[Idx];
    else if (IsVolatile)
      OnAccessFunc = IsWrite ? TsanVolatileWrite[Idx] : TsanVolatileRead[Idx];
    else
      OnAccessFunc = IsWrite ? TsanWrite[Idx] : TsanRead[Idx];
  } else {
    if (IsCompoundRW)
      OnAccessFunc = TsanUnalignedCompoundRW[Idx];
    else if (IsVolatile)
      OnAccessFunc = IsWrite ? TsanUnalignedVolatileWrite[Idx]
                             : TsanUnalignedVolatileRead[Idx];
    else
      OnAccessFunc = IsWrite ? TsanUnalignedWrite[Idx] : TsanUnalignedRead[Idx];
  }
  IRB.CreateCall(OnAccessFunc, Addr);
  if (IsCompoundRW || IsWrite)
    NumInstrumentedWrites++;
  if (IsCompoundRW || !IsWrite)
    NumInstrumentedReads++;
  return true;
}
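
// For example (a sketch): an aligned, non-volatile `store i32 %v, ptr %p,
// align 4` gets `call void @__tsan_write4(ptr %p)` inserted in front of it; an
// under-aligned or volatile access dispatches to the matching
// __tsan_unaligned_* / __tsan_volatile_* callback, and a compounded
// read-before-write uses the __tsan_(unaligned_)read_write* entry points.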
// We do not instrument memset/memmove/memcpy intrinsics (too complicated);
// instead we simply replace them with regular function calls, which are then
// intercepted by the run-time.
bool ThreadSanitizer::instrumentMemIntrinsic(Instruction *I) {
  InstrumentationIRBuilder IRB(I);
  if (MemSetInst *M = dyn_cast<MemSetInst>(I)) {
    Value *Cast1 = IRB.CreateIntCast(M->getArgOperand(1), IRB.getInt32Ty(),
                                     false);
    Value *Cast2 = IRB.CreateIntCast(M->getArgOperand(2), IntptrTy, false);
    IRB.CreateCall(MemsetFn, {M->getArgOperand(0), Cast1, Cast2});
    I->eraseFromParent();
  } else if (MemTransferInst *M = dyn_cast<MemTransferInst>(I)) {
    IRB.CreateCall(
        isa<MemCpyInst>(M) ? MemcpyFn : MemmoveFn,
        {M->getArgOperand(0), M->getArgOperand(1),
         IRB.CreateIntCast(M->getArgOperand(2), IntptrTy, false)});
    I->eraseFromParent();
  }
  return false;
}
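
// Sketch of the rewrite (not taken from a test):
//   call void @llvm.memset.p0.i64(ptr %dst, i8 0, i64 %n, i1 false)
// becomes
//   call ptr @__tsan_memset(ptr %dst, i32 0, i64 %n)
// so the runtime interceptor observes the whole range access; memcpy and
// memmove are forwarded to __tsan_memcpy / __tsan_memmove the same way.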
bool ThreadSanitizer::instrumentAtomic(Instruction *I, const DataLayout &DL) {
  InstrumentationIRBuilder IRB(I);
  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    Value *Addr = LI->getPointerOperand();
    Type *OrigTy = LI->getType();
    int Idx = getMemoryAccessFuncIndex(OrigTy, Addr, DL);
    if (Idx < 0)
      return false;
    Value *Args[] = {Addr, createOrdering(&IRB, LI->getOrdering())};
    Value *C = IRB.CreateCall(TsanAtomicLoad[Idx], Args);
    Value *Cast = IRB.CreateBitOrPointerCast(C, OrigTy);
    I->replaceAllUsesWith(Cast);
  } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
    Value *Addr = SI->getPointerOperand();
    int Idx =
        getMemoryAccessFuncIndex(SI->getValueOperand()->getType(), Addr, DL);
    if (Idx < 0)
      return false;
    const unsigned ByteSize = 1U << Idx;
    const unsigned BitSize = ByteSize * 8;
    Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
    Value *Args[] = {Addr,
                     IRB.CreateBitOrPointerCast(SI->getValueOperand(), Ty),
                     createOrdering(&IRB, SI->getOrdering())};
    IRB.CreateCall(TsanAtomicStore[Idx], Args);
    SI->eraseFromParent();
  } else if (AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I)) {
    Value *Addr = RMWI->getPointerOperand();
    int Idx =
        getMemoryAccessFuncIndex(RMWI->getValOperand()->getType(), Addr, DL);
    if (Idx < 0)
      return false;
    FunctionCallee F = TsanAtomicRMW[RMWI->getOperation()][Idx];
    if (!F)
      return false;
    const unsigned ByteSize = 1U << Idx;
    const unsigned BitSize = ByteSize * 8;
    Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
    Value *Val = RMWI->getValOperand();
    Value *Args[] = {Addr, IRB.CreateBitOrPointerCast(Val, Ty),
                     createOrdering(&IRB, RMWI->getOrdering())};
    Value *C = IRB.CreateCall(F, Args);
    I->replaceAllUsesWith(IRB.CreateBitOrPointerCast(C, Val->getType()));
    I->eraseFromParent();
  } else if (AtomicCmpXchgInst *CASI = dyn_cast<AtomicCmpXchgInst>(I)) {
    Value *Addr = CASI->getPointerOperand();
    Type *OrigOldValTy = CASI->getNewValOperand()->getType();
    int Idx = getMemoryAccessFuncIndex(OrigOldValTy, Addr, DL);
    if (Idx < 0)
      return false;
    const unsigned ByteSize = 1U << Idx;
    const unsigned BitSize = ByteSize * 8;
    Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
    Value *CmpOperand =
        IRB.CreateBitOrPointerCast(CASI->getCompareOperand(), Ty);
    Value *NewOperand =
        IRB.CreateBitOrPointerCast(CASI->getNewValOperand(), Ty);
    Value *Args[] = {Addr, CmpOperand, NewOperand,
                     createOrdering(&IRB, CASI->getSuccessOrdering()),
                     createOrdering(&IRB, CASI->getFailureOrdering())};
    CallInst *C = IRB.CreateCall(TsanAtomicCAS[Idx], Args);
    Value *Success = IRB.CreateICmpEQ(C, CmpOperand);
    Value *OldVal = C;
    if (Ty != OrigOldValTy) {
      // The value is a pointer, so we need to cast the return value.
      OldVal = IRB.CreateIntToPtr(C, OrigOldValTy);
    }

    Value *Res =
        IRB.CreateInsertValue(PoisonValue::get(CASI->getType()), OldVal, 0);
    Res = IRB.CreateInsertValue(Res, Success, 1);

    I->replaceAllUsesWith(Res);
    I->eraseFromParent();
  } else if (FenceInst *FI = dyn_cast<FenceInst>(I)) {
    Value *Args[] = {createOrdering(&IRB, FI->getOrdering())};
    FunctionCallee F = FI->getSyncScopeID() == SyncScope::SingleThread
                           ? TsanAtomicSignalFence
                           : TsanAtomicThreadFence;
    IRB.CreateCall(F, Args);
    FI->eraseFromParent();
  }
  return true;
}
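
// Example (a sketch): `%old = atomicrmw add ptr %p, i32 1 seq_cst` becomes
//   %old = call i32 @__tsan_atomic32_fetch_add(ptr %p, i32 1, i32 <order>)
// where the trailing i32 encodes the memory ordering via createOrdering();
// cmpxchg is lowered to __tsan_atomic<N>_compare_exchange_val and its
// {value, success} struct result is rebuilt with insertvalue as above.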
int ThreadSanitizer::getMemoryAccessFuncIndex(Type *OrigTy, Value *Addr,
                                              const DataLayout &DL) {
  assert(OrigTy->isSized());
  if (OrigTy->isScalableTy()) {
    // FIXME: support vscale.
    return -1;
  }
  uint32_t TypeSize = DL.getTypeStoreSizeInBits(OrigTy);
  if (TypeSize != 8 && TypeSize != 16 && TypeSize != 32 && TypeSize != 64 &&
      TypeSize != 128) {
    NumAccessesWithBadSize++;
    // Ignore all unusual sizes.
    return -1;
  }
  size_t Idx = llvm::countr_zero(TypeSize / 8);
  assert(Idx < kNumberOfAccessSizes);
  return Idx;
}
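
// E.g. an i64 access has a store size of 8 bytes, so Idx = countr_zero(8) = 3
// and the "*8" callbacks are selected; any size other than 1/2/4/8/16 bytes
// bumps NumAccessesWithBadSize and the access is left uninstrumented.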