LLVM 20.0.0git
TailRecursionElimination.cpp
Go to the documentation of this file.
1//===- TailRecursionElimination.cpp - Eliminate Tail Calls ----------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file transforms calls of the current function (self recursion) followed
10// by a return instruction with a branch to the entry of the function, creating
11// a loop. This pass also implements the following extensions to the basic
12// algorithm:
13//
14// 1. Trivial instructions between the call and return do not prevent the
15// transformation from taking place, though currently the analysis cannot
16// support moving any really useful instructions (only dead ones).
17// 2. This pass transforms functions that are prevented from being tail
18// recursive by an associative and commutative expression to use an
19// accumulator variable, thus compiling the typical naive factorial or
20// 'fib' implementation into efficient code.
21// 3. TRE is performed if the function returns void, if the return
22// returns the result returned by the call, or if the function returns a
23// run-time constant on all exits from the function. It is possible, though
24// unlikely, that the return returns something else (like constant 0), and
25// can still be TRE'd. It can be TRE'd if ALL OTHER return instructions in
26// the function return the exact same value.
27// 4. If it can prove that callees do not access their caller stack frame,
28// they are marked as eligible for tail call elimination (by the code
29// generator).
30//
31// There are several improvements that could be made:
32//
33// 1. If the function has any alloca instructions, these instructions will be
34// moved out of the entry block of the function, causing them to be
35// evaluated each time through the tail recursion. Safely keeping allocas
36// in the entry block requires analysis to proves that the tail-called
37// function does not read or write the stack object.
38// 2. Tail recursion is only performed if the call immediately precedes the
39// return instruction. It's possible that there could be a jump between
40// the call and the return.
41// 3. There can be intervening operations between the call and the return that
42// prevent the TRE from occurring. For example, there could be GEP's and
43// stores to memory that will not be read or written by the call. This
44// requires some substantial analysis (such as with DSA) to prove safe to
45// move ahead of the call, but doing so could allow many more TREs to be
46// performed, for example in TreeAdd/TreeAlloc from the treeadd benchmark.
47// 4. The algorithm we use to detect if callees access their caller stack
48// frames is very primitive.
49//
50//===----------------------------------------------------------------------===//
51
#include "llvm/Transforms/Scalar/TailRecursionElimination.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/DomTreeUpdater.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/PostDominators.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
82using namespace llvm;
83
84#define DEBUG_TYPE "tailcallelim"
85
86STATISTIC(NumEliminated, "Number of tail calls removed");
87STATISTIC(NumRetDuped, "Number of return duplicated");
88STATISTIC(NumAccumAdded, "Number of accumulators introduced");
89
90/// Scan the specified function for alloca instructions.
91/// If it contains any dynamic allocas, returns false.
92static bool canTRE(Function &F) {
93 // TODO: We don't do TRE if dynamic allocas are used.
94 // Dynamic allocas allocate stack space which should be
95 // deallocated before new iteration started. That is
96 // currently not implemented.
98 auto *AI = dyn_cast<AllocaInst>(&I);
99 return !AI || AI->isStaticAlloca();
100 });
101}
102
103namespace {
104struct AllocaDerivedValueTracker {
105 // Start at a root value and walk its use-def chain to mark calls that use the
106 // value or a derived value in AllocaUsers, and places where it may escape in
107 // EscapePoints.
108 void walk(Value *Root) {
109 SmallVector<Use *, 32> Worklist;
111
112 auto AddUsesToWorklist = [&](Value *V) {
113 for (auto &U : V->uses()) {
114 if (!Visited.insert(&U).second)
115 continue;
116 Worklist.push_back(&U);
117 }
118 };
119
120 AddUsesToWorklist(Root);
121
122 while (!Worklist.empty()) {
123 Use *U = Worklist.pop_back_val();
124 Instruction *I = cast<Instruction>(U->getUser());
125
126 switch (I->getOpcode()) {
127 case Instruction::Call:
128 case Instruction::Invoke: {
129 auto &CB = cast<CallBase>(*I);
130 // If the alloca-derived argument is passed byval it is not an escape
131 // point, or a use of an alloca. Calling with byval copies the contents
132 // of the alloca into argument registers or stack slots, which exist
133 // beyond the lifetime of the current frame.
134 if (CB.isArgOperand(U) && CB.isByValArgument(CB.getArgOperandNo(U)))
135 continue;
136 bool IsNocapture =
137 CB.isDataOperand(U) && CB.doesNotCapture(CB.getDataOperandNo(U));
138 callUsesLocalStack(CB, IsNocapture);
139 if (IsNocapture) {
140 // If the alloca-derived argument is passed in as nocapture, then it
141 // can't propagate to the call's return. That would be capturing.
142 continue;
143 }
144 break;
145 }
146 case Instruction::Load: {
147 // The result of a load is not alloca-derived (unless an alloca has
148 // otherwise escaped, but this is a local analysis).
149 continue;
150 }
151 case Instruction::Store: {
152 if (U->getOperandNo() == 0)
153 EscapePoints.insert(I);
154 continue; // Stores have no users to analyze.
155 }
156 case Instruction::BitCast:
157 case Instruction::GetElementPtr:
158 case Instruction::PHI:
159 case Instruction::Select:
160 case Instruction::AddrSpaceCast:
161 break;
162 default:
163 EscapePoints.insert(I);
164 break;
165 }
166
167 AddUsesToWorklist(I);
168 }
169 }
170
171 void callUsesLocalStack(CallBase &CB, bool IsNocapture) {
172 // Add it to the list of alloca users.
173 AllocaUsers.insert(&CB);
174
175 // If it's nocapture then it can't capture this alloca.
176 if (IsNocapture)
177 return;
178
179 // If it can write to memory, it can leak the alloca value.
180 if (!CB.onlyReadsMemory())
181 EscapePoints.insert(&CB);
182 }
183
186};
187}
188
190 if (F.callsFunctionThatReturnsTwice())
191 return false;
192
193 // The local stack holds all alloca instructions and all byval arguments.
194 AllocaDerivedValueTracker Tracker;
195 for (Argument &Arg : F.args()) {
196 if (Arg.hasByValAttr())
197 Tracker.walk(&Arg);
198 }
199 for (auto &BB : F) {
200 for (auto &I : BB)
201 if (AllocaInst *AI = dyn_cast<AllocaInst>(&I))
202 Tracker.walk(AI);
203 }
204
205 bool Modified = false;
206
207 // Track whether a block is reachable after an alloca has escaped. Blocks that
208 // contain the escaping instruction will be marked as being visited without an
209 // escaped alloca, since that is how the block began.
210 enum VisitType {
211 UNVISITED,
212 UNESCAPED,
213 ESCAPED
214 };
216
217 // We propagate the fact that an alloca has escaped from block to successor.
218 // Visit the blocks that are propagating the escapedness first. To do this, we
219 // maintain two worklists.
220 SmallVector<BasicBlock *, 32> WorklistUnescaped, WorklistEscaped;
221
222 // We may enter a block and visit it thinking that no alloca has escaped yet,
223 // then see an escape point and go back around a loop edge and come back to
224 // the same block twice. Because of this, we defer setting tail on calls when
225 // we first encounter them in a block. Every entry in this list does not
226 // statically use an alloca via use-def chain analysis, but may find an alloca
227 // through other means if the block turns out to be reachable after an escape
228 // point.
229 SmallVector<CallInst *, 32> DeferredTails;
230
231 BasicBlock *BB = &F.getEntryBlock();
232 VisitType Escaped = UNESCAPED;
233 do {
234 for (auto &I : *BB) {
235 if (Tracker.EscapePoints.count(&I))
236 Escaped = ESCAPED;
237
238 CallInst *CI = dyn_cast<CallInst>(&I);
239 // A PseudoProbeInst has the IntrInaccessibleMemOnly tag hence it is
240 // considered accessing memory and will be marked as a tail call if we
241 // don't bail out here.
242 if (!CI || CI->isTailCall() || isa<DbgInfoIntrinsic>(&I) ||
243 isa<PseudoProbeInst>(&I))
244 continue;
245
246 // Bail out for intrinsic stackrestore call because it can modify
247 // unescaped allocas.
248 if (auto *II = dyn_cast<IntrinsicInst>(CI))
249 if (II->getIntrinsicID() == Intrinsic::stackrestore)
250 continue;
251
252 // Special-case operand bundles "clang.arc.attachedcall", "ptrauth", and
253 // "kcfi".
254 bool IsNoTail = CI->isNoTailCall() ||
258
259 if (!IsNoTail && CI->doesNotAccessMemory()) {
260 // A call to a readnone function whose arguments are all things computed
261 // outside this function can be marked tail. Even if you stored the
262 // alloca address into a global, a readnone function can't load the
263 // global anyhow.
264 //
265 // Note that this runs whether we know an alloca has escaped or not. If
266 // it has, then we can't trust Tracker.AllocaUsers to be accurate.
267 bool SafeToTail = true;
268 for (auto &Arg : CI->args()) {
269 if (isa<Constant>(Arg.getUser()))
270 continue;
271 if (Argument *A = dyn_cast<Argument>(Arg.getUser()))
272 if (!A->hasByValAttr())
273 continue;
274 SafeToTail = false;
275 break;
276 }
277 if (SafeToTail) {
278 using namespace ore;
279 ORE->emit([&]() {
280 return OptimizationRemark(DEBUG_TYPE, "tailcall-readnone", CI)
281 << "marked as tail call candidate (readnone)";
282 });
283 CI->setTailCall();
284 Modified = true;
285 continue;
286 }
287 }
288
289 if (!IsNoTail && Escaped == UNESCAPED && !Tracker.AllocaUsers.count(CI))
290 DeferredTails.push_back(CI);
291 }
292
293 for (auto *SuccBB : successors(BB)) {
294 auto &State = Visited[SuccBB];
295 if (State < Escaped) {
296 State = Escaped;
297 if (State == ESCAPED)
298 WorklistEscaped.push_back(SuccBB);
299 else
300 WorklistUnescaped.push_back(SuccBB);
301 }
302 }
303
304 if (!WorklistEscaped.empty()) {
305 BB = WorklistEscaped.pop_back_val();
306 Escaped = ESCAPED;
307 } else {
308 BB = nullptr;
309 while (!WorklistUnescaped.empty()) {
310 auto *NextBB = WorklistUnescaped.pop_back_val();
311 if (Visited[NextBB] == UNESCAPED) {
312 BB = NextBB;
313 Escaped = UNESCAPED;
314 break;
315 }
316 }
317 }
318 } while (BB);
319
320 for (CallInst *CI : DeferredTails) {
321 if (Visited[CI->getParent()] != ESCAPED) {
322 // If the escape point was part way through the block, calls after the
323 // escape point wouldn't have been put into DeferredTails.
324 LLVM_DEBUG(dbgs() << "Marked as tail call candidate: " << *CI << "\n");
325 CI->setTailCall();
326 Modified = true;
327 }
328 }
329
330 return Modified;
331}
332
333/// Return true if it is safe to move the specified
334/// instruction from after the call to before the call, assuming that all
335/// instructions between the call and this instruction are movable.
336///
338 if (isa<DbgInfoIntrinsic>(I))
339 return true;
340
341 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
342 if (II->getIntrinsicID() == Intrinsic::lifetime_end &&
343 llvm::findAllocaForValue(II->getArgOperand(1)))
344 return true;
345
346 // FIXME: We can move load/store/call/free instructions above the call if the
347 // call does not mod/ref the memory location being processed.
348 if (I->mayHaveSideEffects()) // This also handles volatile loads.
349 return false;
350
351 if (LoadInst *L = dyn_cast<LoadInst>(I)) {
352 // Loads may always be moved above calls without side effects.
353 if (CI->mayHaveSideEffects()) {
354 // Non-volatile loads may be moved above a call with side effects if it
355 // does not write to memory and the load provably won't trap.
356 // Writes to memory only matter if they may alias the pointer
357 // being loaded from.
358 const DataLayout &DL = L->getDataLayout();
359 if (isModSet(AA->getModRefInfo(CI, MemoryLocation::get(L))) ||
360 !isSafeToLoadUnconditionally(L->getPointerOperand(), L->getType(),
361 L->getAlign(), DL, L))
362 return false;
363 }
364 }
365
366 // Otherwise, if this is a side-effect free instruction, check to make sure
367 // that it does not use the return value of the call. If it doesn't use the
368 // return value of the call, it must only use things that are defined before
369 // the call, or movable instructions between the call and the instruction
370 // itself.
371 return !is_contained(I->operands(), CI);
372}
373
375 if (!I->isAssociative() || !I->isCommutative())
376 return false;
377
378 assert(I->getNumOperands() >= 2 &&
379 "Associative/commutative operations should have at least 2 args!");
380
381 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
382 // Accumulators must have an identity.
383 if (!ConstantExpr::getIntrinsicIdentity(II->getIntrinsicID(), I->getType()))
384 return false;
385 }
386
387 // Exactly one operand should be the result of the call instruction.
388 if ((I->getOperand(0) == CI && I->getOperand(1) == CI) ||
389 (I->getOperand(0) != CI && I->getOperand(1) != CI))
390 return false;
391
392 // The only user of this instruction we allow is a single return instruction.
393 if (!I->hasOneUse() || !isa<ReturnInst>(I->user_back()))
394 return false;
395
396 return true;
397}
398
400 while (isa<DbgInfoIntrinsic>(I))
401 ++I;
402 return &*I;
403}
404
405namespace {
406class TailRecursionEliminator {
407 Function &F;
409 AliasAnalysis *AA;
411 DomTreeUpdater &DTU;
412
413 // The below are shared state we want to have available when eliminating any
414 // calls in the function. There values should be populated by
415 // createTailRecurseLoopHeader the first time we find a call we can eliminate.
416 BasicBlock *HeaderBB = nullptr;
417 SmallVector<PHINode *, 8> ArgumentPHIs;
418
419 // PHI node to store our return value.
420 PHINode *RetPN = nullptr;
421
422 // i1 PHI node to track if we have a valid return value stored in RetPN.
423 PHINode *RetKnownPN = nullptr;
424
425 // Vector of select instructions we insereted. These selects use RetKnownPN
426 // to either propagate RetPN or select a new return value.
428
429 // The below are shared state needed when performing accumulator recursion.
430 // There values should be populated by insertAccumulator the first time we
431 // find an elimination that requires an accumulator.
432
433 // PHI node to store our current accumulated value.
434 PHINode *AccPN = nullptr;
435
436 // The instruction doing the accumulating.
437 Instruction *AccumulatorRecursionInstr = nullptr;
438
439 TailRecursionEliminator(Function &F, const TargetTransformInfo *TTI,
441 DomTreeUpdater &DTU)
442 : F(F), TTI(TTI), AA(AA), ORE(ORE), DTU(DTU) {}
443
444 CallInst *findTRECandidate(BasicBlock *BB);
445
446 void createTailRecurseLoopHeader(CallInst *CI);
447
448 void insertAccumulator(Instruction *AccRecInstr);
449
450 bool eliminateCall(CallInst *CI);
451
452 void cleanupAndFinalize();
453
454 bool processBlock(BasicBlock &BB);
455
456 void copyByValueOperandIntoLocalTemp(CallInst *CI, int OpndIdx);
457
458 void copyLocalTempOfByValueOperandIntoArguments(CallInst *CI, int OpndIdx);
459
460public:
461 static bool eliminate(Function &F, const TargetTransformInfo *TTI,
463 DomTreeUpdater &DTU);
464};
465} // namespace
466
467CallInst *TailRecursionEliminator::findTRECandidate(BasicBlock *BB) {
468 Instruction *TI = BB->getTerminator();
469
470 if (&BB->front() == TI) // Make sure there is something before the terminator.
471 return nullptr;
472
473 // Scan backwards from the return, checking to see if there is a tail call in
474 // this block. If so, set CI to it.
475 CallInst *CI = nullptr;
476 BasicBlock::iterator BBI(TI);
477 while (true) {
478 CI = dyn_cast<CallInst>(BBI);
479 if (CI && CI->getCalledFunction() == &F)
480 break;
481
482 if (BBI == BB->begin())
483 return nullptr; // Didn't find a potential tail call.
484 --BBI;
485 }
486
487 assert((!CI->isTailCall() || !CI->isNoTailCall()) &&
488 "Incompatible call site attributes(Tail,NoTail)");
489 if (!CI->isTailCall())
490 return nullptr;
491
492 // As a special case, detect code like this:
493 // double fabs(double f) { return __builtin_fabs(f); } // a 'fabs' call
494 // and disable this xform in this case, because the code generator will
495 // lower the call to fabs into inline code.
496 if (BB == &F.getEntryBlock() &&
497 firstNonDbg(BB->front().getIterator()) == CI &&
498 firstNonDbg(std::next(BB->begin())) == TI && CI->getCalledFunction() &&
500 // A single-block function with just a call and a return. Check that
501 // the arguments match.
502 auto I = CI->arg_begin(), E = CI->arg_end();
503 Function::arg_iterator FI = F.arg_begin(), FE = F.arg_end();
504 for (; I != E && FI != FE; ++I, ++FI)
505 if (*I != &*FI) break;
506 if (I == E && FI == FE)
507 return nullptr;
508 }
509
510 return CI;
511}
512
/// Create the loop header used by all eliminated calls: split off a fresh
/// entry block that branches into the old entry (renamed "tailrecurse"),
/// hoist static allocas into the new entry, and build one PHI per function
/// argument (plus return-tracking PHIs for non-void functions). Populates
/// HeaderBB, ArgumentPHIs, RetPN and RetKnownPN; runs once, on the first
/// eliminated call.
void TailRecursionEliminator::createTailRecurseLoopHeader(CallInst *CI) {
  HeaderBB = &F.getEntryBlock();
  BasicBlock *NewEntry = BasicBlock::Create(F.getContext(), "", &F, HeaderBB);
  NewEntry->takeName(HeaderBB);
  HeaderBB->setName("tailrecurse");
  BranchInst::Create(HeaderBB, NewEntry);
  // If the new branch preserves the debug location of CI, it could result in
  // misleading stepping, if CI is located in a conditional branch.
  // So, here we don't give any debug location to the new branch.

  // Move all fixed sized allocas from HeaderBB to NewEntry, so they are
  // allocated once rather than on every trip around the new loop.
  for (BasicBlock::iterator OEBI = HeaderBB->begin(), E = HeaderBB->end(),
                            NEBI = NewEntry->begin();
       OEBI != E;)
    if (AllocaInst *AI = dyn_cast<AllocaInst>(OEBI++))
      if (isa<ConstantInt>(AI->getArraySize()))
        AI->moveBefore(&*NEBI);

  // Now that we have created a new block, which jumps to the entry
  // block, insert a PHI node for each argument of the function.
  // For now, we initialize each PHI to only have the real arguments
  // which are passed in.
  BasicBlock::iterator InsertPos = HeaderBB->begin();
  for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I) {
    PHINode *PN = PHINode::Create(I->getType(), 2, I->getName() + ".tr");
    PN->insertBefore(InsertPos);
    I->replaceAllUsesWith(PN); // Everyone use the PHI node now!
    PN->addIncoming(&*I, NewEntry);
    ArgumentPHIs.push_back(PN);
  }

  // If the function doesn't return void, create the RetPN and RetKnownPN PHI
  // nodes to track our return value. We initialize RetPN with poison and
  // RetKnownPN with false since we can't know our return value at function
  // entry.
  Type *RetType = F.getReturnType();
  if (!RetType->isVoidTy()) {
    Type *BoolType = Type::getInt1Ty(F.getContext());
    RetPN = PHINode::Create(RetType, 2, "ret.tr");
    RetPN->insertBefore(InsertPos);
    RetKnownPN = PHINode::Create(BoolType, 2, "ret.known.tr");
    RetKnownPN->insertBefore(InsertPos);

    RetPN->addIncoming(PoisonValue::get(RetType), NewEntry);
    RetKnownPN->addIncoming(ConstantInt::getFalse(BoolType), NewEntry);
  }

  // The entry block was changed from HeaderBB to NewEntry.
  // The forward DominatorTree needs to be recalculated when the EntryBB is
  // changed. In this corner-case we recalculate the entire tree.
  DTU.recalculate(*NewEntry->getParent());
}
565
566void TailRecursionEliminator::insertAccumulator(Instruction *AccRecInstr) {
567 assert(!AccPN && "Trying to insert multiple accumulators");
568
569 AccumulatorRecursionInstr = AccRecInstr;
570
571 // Start by inserting a new PHI node for the accumulator.
572 pred_iterator PB = pred_begin(HeaderBB), PE = pred_end(HeaderBB);
573 AccPN = PHINode::Create(F.getReturnType(), std::distance(PB, PE) + 1,
574 "accumulator.tr");
575 AccPN->insertBefore(HeaderBB->begin());
576
577 // Loop over all of the predecessors of the tail recursion block. For the
578 // real entry into the function we seed the PHI with the identity constant for
579 // the accumulation operation. For any other existing branches to this block
580 // (due to other tail recursions eliminated) the accumulator is not modified.
581 // Because we haven't added the branch in the current block to HeaderBB yet,
582 // it will not show up as a predecessor.
583 for (pred_iterator PI = PB; PI != PE; ++PI) {
584 BasicBlock *P = *PI;
585 if (P == &F.getEntryBlock()) {
586 Constant *Identity =
587 ConstantExpr::getIdentity(AccRecInstr, AccRecInstr->getType());
588 AccPN->addIncoming(Identity, P);
589 } else {
590 AccPN->addIncoming(AccPN, P);
591 }
592 }
593
594 ++NumAccumAdded;
595}
596
597// Creates a copy of contents of ByValue operand of the specified
598// call instruction into the newly created temporarily variable.
599void TailRecursionEliminator::copyByValueOperandIntoLocalTemp(CallInst *CI,
600 int OpndIdx) {
601 Type *AggTy = CI->getParamByValType(OpndIdx);
602 assert(AggTy);
603 const DataLayout &DL = F.getDataLayout();
604
605 // Get alignment of byVal operand.
606 Align Alignment(CI->getParamAlign(OpndIdx).valueOrOne());
607
608 // Create alloca for temporarily byval operands.
609 // Put alloca into the entry block.
610 Value *NewAlloca = new AllocaInst(
611 AggTy, DL.getAllocaAddrSpace(), nullptr, Alignment,
612 CI->getArgOperand(OpndIdx)->getName(), F.getEntryBlock().begin());
613
614 IRBuilder<> Builder(CI);
615 Value *Size = Builder.getInt64(DL.getTypeAllocSize(AggTy));
616
617 // Copy data from byvalue operand into the temporarily variable.
618 Builder.CreateMemCpy(NewAlloca, /*DstAlign*/ Alignment,
619 CI->getArgOperand(OpndIdx),
620 /*SrcAlign*/ Alignment, Size);
621 CI->setArgOperand(OpndIdx, NewAlloca);
622}
623
624// Creates a copy from temporarily variable(keeping value of ByVal argument)
625// into the corresponding function argument location.
626void TailRecursionEliminator::copyLocalTempOfByValueOperandIntoArguments(
627 CallInst *CI, int OpndIdx) {
628 Type *AggTy = CI->getParamByValType(OpndIdx);
629 assert(AggTy);
630 const DataLayout &DL = F.getDataLayout();
631
632 // Get alignment of byVal operand.
633 Align Alignment(CI->getParamAlign(OpndIdx).valueOrOne());
634
635 IRBuilder<> Builder(CI);
636 Value *Size = Builder.getInt64(DL.getTypeAllocSize(AggTy));
637
638 // Copy data from the temporarily variable into corresponding
639 // function argument location.
640 Builder.CreateMemCpy(F.getArg(OpndIdx), /*DstAlign*/ Alignment,
641 CI->getArgOperand(OpndIdx),
642 /*SrcAlign*/ Alignment, Size);
643}
644
/// Attempt to turn the tail-recursive call CI (whose block ends in a return)
/// into a branch back to the loop header. Returns true on success. Handles
/// hoisting movable instructions, accumulator recursion, byval argument
/// copying, and all PHI/DTU bookkeeping.
bool TailRecursionEliminator::eliminateCall(CallInst *CI) {
  ReturnInst *Ret = cast<ReturnInst>(CI->getParent()->getTerminator());

  // Ok, we found a potential tail call. We can currently only transform the
  // tail call if all of the instructions between the call and the return are
  // movable to above the call itself, leaving the call next to the return.
  // Check that this is the case now.
  Instruction *AccRecInstr = nullptr;
  BasicBlock::iterator BBI(CI);
  for (++BBI; &*BBI != Ret; ++BBI) {
    if (canMoveAboveCall(&*BBI, CI, AA))
      continue;

    // If we can't move the instruction above the call, it might be because it
    // is an associative and commutative operation that could be transformed
    // using accumulator recursion elimination. Check to see if this is the
    // case, and if so, remember which instruction accumulates for later.
    // Only one accumulator per function is supported (AccPN already set).
    if (AccPN || !canTransformAccumulatorRecursion(&*BBI, CI))
      return false; // We cannot eliminate the tail recursion!

    // Yes, this is accumulator recursion. Remember which instruction
    // accumulates.
    AccRecInstr = &*BBI;
  }

  BasicBlock *BB = Ret->getParent();

  using namespace ore;
  ORE->emit([&]() {
    return OptimizationRemark(DEBUG_TYPE, "tailcall-recursion", CI)
           << "transforming tail recursion into loop";
  });

  // OK! We can transform this tail call. If this is the first one found,
  // create the new entry block, allowing us to branch back to the old entry.
  if (!HeaderBB)
    createTailRecurseLoopHeader(CI);

  // Copy values of ByVal operands into local temporarily variables.
  for (unsigned I = 0, E = CI->arg_size(); I != E; ++I) {
    if (CI->isByValArgument(I))
      copyByValueOperandIntoLocalTemp(CI, I);
  }

  // Ok, now that we know we have a pseudo-entry block WITH all of the
  // required PHI nodes, add entries into the PHI node for the actual
  // parameters passed into the tail-recursive call.
  for (unsigned I = 0, E = CI->arg_size(); I != E; ++I) {
    if (CI->isByValArgument(I)) {
      copyLocalTempOfByValueOperandIntoArguments(CI, I);
      // When eliminating a tail call, we modify the values of the arguments.
      // Therefore, if the byval parameter has a readonly attribute, we have to
      // remove it. It is safe because, from the perspective of a caller, the
      // byval parameter is always treated as "readonly," even if the readonly
      // attribute is removed.
      F.removeParamAttr(I, Attribute::ReadOnly);
      ArgumentPHIs[I]->addIncoming(F.getArg(I), BB);
    } else
      ArgumentPHIs[I]->addIncoming(CI->getArgOperand(I), BB);
  }

  if (AccRecInstr) {
    insertAccumulator(AccRecInstr);

    // Rewrite the accumulator recursion instruction so that it does not use
    // the result of the call anymore, instead, use the PHI node we just
    // inserted.
    AccRecInstr->setOperand(AccRecInstr->getOperand(0) != CI, AccPN);
  }

  // Update our return value tracking
  if (RetPN) {
    if (Ret->getReturnValue() == CI || AccRecInstr) {
      // Defer selecting a return value
      RetPN->addIncoming(RetPN, BB);
      RetKnownPN->addIncoming(RetKnownPN, BB);
    } else {
      // We found a return value we want to use, insert a select instruction to
      // select it if we don't already know what our return value will be and
      // store the result in our return value PHI node.
      SelectInst *SI =
          SelectInst::Create(RetKnownPN, RetPN, Ret->getReturnValue(),
                             "current.ret.tr", Ret->getIterator());
      RetSelects.push_back(SI);

      RetPN->addIncoming(SI, BB);
      RetKnownPN->addIncoming(ConstantInt::getTrue(RetKnownPN->getType()), BB);
    }

    if (AccPN)
      AccPN->addIncoming(AccRecInstr ? AccRecInstr : AccPN, BB);
  }

  // Now that all of the PHI nodes are in place, remove the call and
  // ret instructions, replacing them with an unconditional branch.
  BranchInst *NewBI = BranchInst::Create(HeaderBB, Ret->getIterator());
  NewBI->setDebugLoc(CI->getDebugLoc());

  Ret->eraseFromParent();  // Remove return.
  CI->eraseFromParent();   // Remove call.
  // Record the new back-edge in the (post)dominator trees.
  DTU.applyUpdates({{DominatorTree::Insert, BB, HeaderBB}});
  ++NumEliminated;
  return true;
}
749
/// After all blocks have been processed: simplify trivial argument PHIs, and
/// either delete the unused return-tracking PHIs or rewrite every remaining
/// return (inserting accumulator copies / selects) so the stored return value
/// is produced correctly.
void TailRecursionEliminator::cleanupAndFinalize() {
  // If we eliminated any tail recursions, it's possible that we inserted some
  // silly PHI nodes which just merge an initial value (the incoming operand)
  // with themselves. Check to see if we did and clean up our mess if so. This
  // occurs when a function passes an argument straight through to its tail
  // call.
  for (PHINode *PN : ArgumentPHIs) {
    // If the PHI Node is a dynamic constant, replace it with the value it is.
    if (Value *PNV = simplifyInstruction(PN, F.getDataLayout())) {
      PN->replaceAllUsesWith(PNV);
      PN->eraseFromParent();
    }
  }

  if (RetPN) {
    if (RetSelects.empty()) {
      // If we didn't insert any select instructions, then we know we didn't
      // store a return value and we can remove the PHI nodes we inserted.
      RetPN->dropAllReferences();
      RetPN->eraseFromParent();

      RetKnownPN->dropAllReferences();
      RetKnownPN->eraseFromParent();

      if (AccPN) {
        // We need to insert a copy of our accumulator instruction before any
        // return in the function, and return its result instead.
        Instruction *AccRecInstr = AccumulatorRecursionInstr;
        for (BasicBlock &BB : F) {
          ReturnInst *RI = dyn_cast<ReturnInst>(BB.getTerminator());
          if (!RI)
            continue;

          // Clone the accumulating op, substituting this return's value for
          // the operand that is not the accumulator PHI.
          Instruction *AccRecInstrNew = AccRecInstr->clone();
          AccRecInstrNew->setName("accumulator.ret.tr");
          AccRecInstrNew->setOperand(AccRecInstr->getOperand(0) == AccPN,
                                     RI->getOperand(0));
          AccRecInstrNew->insertBefore(RI);
          AccRecInstrNew->dropLocation();
          RI->setOperand(0, AccRecInstrNew);
        }
      }
    } else {
      // We need to insert a select instruction before any return left in the
      // function to select our stored return value if we have one.
      for (BasicBlock &BB : F) {
        ReturnInst *RI = dyn_cast<ReturnInst>(BB.getTerminator());
        if (!RI)
          continue;

        SelectInst *SI =
            SelectInst::Create(RetKnownPN, RetPN, RI->getOperand(0),
                               "current.ret.tr", RI->getIterator());
        RetSelects.push_back(SI);
        RI->setOperand(0, SI);
      }

      if (AccPN) {
        // We need to insert a copy of our accumulator instruction before any
        // of the selects we inserted, and select its result instead.
        Instruction *AccRecInstr = AccumulatorRecursionInstr;
        for (SelectInst *SI : RetSelects) {
          // As above: clone the op and feed it the select's "unknown" value.
          Instruction *AccRecInstrNew = AccRecInstr->clone();
          AccRecInstrNew->setName("accumulator.ret.tr");
          AccRecInstrNew->setOperand(AccRecInstr->getOperand(0) == AccPN,
                                     SI->getFalseValue());
          AccRecInstrNew->insertBefore(SI);
          AccRecInstrNew->dropLocation();
          SI->setFalseValue(AccRecInstrNew);
        }
      }
    }
  }
}
824
825bool TailRecursionEliminator::processBlock(BasicBlock &BB) {
826 Instruction *TI = BB.getTerminator();
827
828 if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
829 if (BI->isConditional())
830 return false;
831
832 BasicBlock *Succ = BI->getSuccessor(0);
833 ReturnInst *Ret = dyn_cast<ReturnInst>(Succ->getFirstNonPHIOrDbg(true));
834
835 if (!Ret)
836 return false;
837
838 CallInst *CI = findTRECandidate(&BB);
839
840 if (!CI)
841 return false;
842
843 LLVM_DEBUG(dbgs() << "FOLDING: " << *Succ
844 << "INTO UNCOND BRANCH PRED: " << BB);
845 FoldReturnIntoUncondBranch(Ret, Succ, &BB, &DTU);
846 ++NumRetDuped;
847
848 // If all predecessors of Succ have been eliminated by
849 // FoldReturnIntoUncondBranch, delete it. It is important to empty it,
850 // because the ret instruction in there is still using a value which
851 // eliminateCall will attempt to remove. This block can only contain
852 // instructions that can't have uses, therefore it is safe to remove.
853 if (pred_empty(Succ))
854 DTU.deleteBB(Succ);
855
856 eliminateCall(CI);
857 return true;
858 } else if (isa<ReturnInst>(TI)) {
859 CallInst *CI = findTRECandidate(&BB);
860
861 if (CI)
862 return eliminateCall(CI);
863 }
864
865 return false;
866}
867
868bool TailRecursionEliminator::eliminate(Function &F,
870 AliasAnalysis *AA,
872 DomTreeUpdater &DTU) {
873 if (F.getFnAttribute("disable-tail-calls").getValueAsBool())
874 return false;
875
876 bool MadeChange = false;
877 MadeChange |= markTails(F, ORE);
878
879 // If this function is a varargs function, we won't be able to PHI the args
880 // right, so don't even try to convert it...
881 if (F.getFunctionType()->isVarArg())
882 return MadeChange;
883
884 if (!canTRE(F))
885 return MadeChange;
886
887 // Change any tail recursive calls to loops.
888 TailRecursionEliminator TRE(F, TTI, AA, ORE, DTU);
889
890 for (BasicBlock &BB : F)
891 MadeChange |= TRE.processBlock(BB);
892
893 TRE.cleanupAndFinalize();
894
895 return MadeChange;
896}
897
898namespace {
899struct TailCallElim : public FunctionPass {
900 static char ID; // Pass identification, replacement for typeid
901 TailCallElim() : FunctionPass(ID) {
903 }
904
905 void getAnalysisUsage(AnalysisUsage &AU) const override {
912 }
913
914 bool runOnFunction(Function &F) override {
915 if (skipFunction(F))
916 return false;
917
918 auto *DTWP = getAnalysisIfAvailable<DominatorTreeWrapperPass>();
919 auto *DT = DTWP ? &DTWP->getDomTree() : nullptr;
920 auto *PDTWP = getAnalysisIfAvailable<PostDominatorTreeWrapperPass>();
921 auto *PDT = PDTWP ? &PDTWP->getPostDomTree() : nullptr;
922 // There is no noticable performance difference here between Lazy and Eager
923 // UpdateStrategy based on some test results. It is feasible to switch the
924 // UpdateStrategy to Lazy if we find it profitable later.
925 DomTreeUpdater DTU(DT, PDT, DomTreeUpdater::UpdateStrategy::Eager);
926
927 return TailRecursionEliminator::eliminate(
928 F, &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F),
929 &getAnalysis<AAResultsWrapperPass>().getAAResults(),
930 &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE(), DTU);
931 }
932};
933}
934
935char TailCallElim::ID = 0;
936INITIALIZE_PASS_BEGIN(TailCallElim, "tailcallelim", "Tail Call Elimination",
937 false, false)
942
943// Public interface to the TailCallElimination pass
945 return new TailCallElim();
946}
947
950
956 // There is no noticable performance difference here between Lazy and Eager
957 // UpdateStrategy based on some test results. It is feasible to switch the
958 // UpdateStrategy to Lazy if we find it profitable later.
959 DomTreeUpdater DTU(DT, PDT, DomTreeUpdater::UpdateStrategy::Eager);
960 bool Changed = TailRecursionEliminator::eliminate(F, &TTI, &AA, &ORE, DTU);
961
962 if (!Changed)
963 return PreservedAnalyses::all();
967 return PA;
968}
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Expand Atomic instructions
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
#define LLVM_DEBUG(X)
Definition: Debug.h:101
uint64_t Size
This is the interface for a simple mod/ref and alias analysis over globals.
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
Module.h This file contains the declarations for the Module class.
uint64_t IntrinsicInst * II
#define P(N)
PassBuilder PB(Machine, PassOpts->PTO, std::nullopt, &PIC)
#define INITIALIZE_PASS_DEPENDENCY(depName)
Definition: PassSupport.h:55
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
Definition: PassSupport.h:57
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
Definition: PassSupport.h:52
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file contains some templates that are useful if you are working with the STL at all.
This file defines the SmallPtrSet class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition: Statistic.h:167
static bool canTRE(Function &F)
Scan the specified function for alloca instructions.
static bool canMoveAboveCall(Instruction *I, CallInst *CI, AliasAnalysis *AA)
Return true if it is safe to move the specified instruction from after the call to before the call,...
Tail Call Elimination
static Instruction * firstNonDbg(BasicBlock::iterator I)
#define DEBUG_TYPE
static bool canTransformAccumulatorRecursion(Instruction *I, CallInst *CI)
static bool markTails(Function &F, OptimizationRemarkEmitter *ORE)
This pass exposes codegen information to IR-level passes.
A manager for alias analyses.
A wrapper pass to provide the legacy pass manager access to a suitably prepared AAResults object.
ModRefInfo getModRefInfo(const Instruction *I, const std::optional< MemoryLocation > &OptLoc)
Check whether or not an instruction may read or write the optionally specified memory location.
an instruction to allocate memory on the stack
Definition: Instructions.h:61
A container for analyses that lazily runs them and caches their results.
Definition: PassManager.h:253
PassT::Result * getCachedResult(IRUnitT &IR) const
Get the cached result of an analysis pass for a given IR unit.
Definition: PassManager.h:424
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Definition: PassManager.h:405
Represent the analysis usage information of a pass.
AnalysisUsage & addRequired()
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
This class represents an incoming formal argument to a Function.
Definition: Argument.h:31
LLVM Basic Block Representation.
Definition: BasicBlock.h:61
iterator begin()
Instruction iterator methods.
Definition: BasicBlock.h:448
const Instruction & front() const
Definition: BasicBlock.h:471
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
Definition: BasicBlock.h:212
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:219
const Instruction * getFirstNonPHIOrDbg(bool SkipPseudoOp=true) const
Returns a pointer to the first instruction in this block that is not a PHINode or a debug intrinsic,...
Definition: BasicBlock.cpp:386
InstListType::iterator iterator
Instruction iterators...
Definition: BasicBlock.h:177
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition: BasicBlock.h:239
Conditional or Unconditional Branch instruction.
static BranchInst * Create(BasicBlock *IfTrue, InsertPosition InsertBefore=nullptr)
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Definition: InstrTypes.h:1236
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
Definition: InstrTypes.h:1465
bool doesNotAccessMemory(unsigned OpNo) const
Definition: InstrTypes.h:1810
User::op_iterator arg_begin()
Return the iterator pointing to the beginning of the argument list.
Definition: InstrTypes.h:1385
bool isByValArgument(unsigned ArgNo) const
Determine whether this argument is passed by value.
Definition: InstrTypes.h:1774
MaybeAlign getParamAlign(unsigned ArgNo) const
Extract the alignment for a call or parameter (0=unknown).
Definition: InstrTypes.h:1838
bool onlyReadsMemory(unsigned OpNo) const
Definition: InstrTypes.h:1816
Type * getParamByValType(unsigned ArgNo) const
Extract the byval type for a call or parameter.
Definition: InstrTypes.h:1856
bool hasOperandBundlesOtherThan(ArrayRef< uint32_t > IDs) const
Return true if this operand bundle user contains operand bundles with tags other than those specified...
Definition: InstrTypes.h:2217
Value * getArgOperand(unsigned i) const
Definition: InstrTypes.h:1410
void setArgOperand(unsigned i, Value *v)
Definition: InstrTypes.h:1415
User::op_iterator arg_end()
Return the iterator pointing to the end of the argument list.
Definition: InstrTypes.h:1391
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
Definition: InstrTypes.h:1401
unsigned arg_size() const
Definition: InstrTypes.h:1408
This class represents a function call, abstracting a target machine's calling convention.
bool isNoTailCall() const
bool isTailCall() const
void setTailCall(bool IsTc=true)
static Constant * getIdentity(Instruction *I, Type *Ty, bool AllowRHSConstant=false, bool NSZ=false)
Return the identity constant for a binary or intrinsic Instruction.
Definition: Constants.cpp:2727
static Constant * getIntrinsicIdentity(Intrinsic::ID, Type *Ty)
Definition: Constants.cpp:2710
static ConstantInt * getTrue(LLVMContext &Context)
Definition: Constants.cpp:850
static ConstantInt * getFalse(LLVMContext &Context)
Definition: Constants.cpp:857
This is an important base class in LLVM.
Definition: Constant.h:42
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:109
Analysis pass which computes a DominatorTree.
Definition: Dominators.h:279
Legacy analysis pass which computes a DominatorTree.
Definition: Dominators.h:317
FunctionPass class - This class is used to implement most global optimizations.
Definition: Pass.h:310
virtual bool runOnFunction(Function &F)=0
runOnFunction - Virtual method overriden by subclasses to do the per-function processing of the pass.
bool skipFunction(const Function &F) const
Optional passes call this function to check whether the pass should be skipped.
Definition: Pass.cpp:178
Legacy wrapper pass to provide the GlobalsAAResult object.
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition: IRBuilder.h:2674
Instruction * clone() const
Create a copy of 'this' instruction that is identical in all ways except the following:
void dropLocation()
Drop the instruction's debug location.
Definition: DebugInfo.cpp:967
void insertBefore(Instruction *InsertPos)
Insert an unlinked instruction into a basic block immediately before the specified instruction.
Definition: Instruction.cpp:97
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
Definition: Instruction.h:466
InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
Definition: Instruction.cpp:92
bool mayHaveSideEffects() const LLVM_READONLY
Return true if the instruction may have side effects.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
Definition: Instruction.h:463
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:48
An instruction for reading from memory.
Definition: Instructions.h:174
static MemoryLocation get(const LoadInst *LI)
Return a location with information about the memory reference by the given instruction.
OptimizationRemarkEmitter legacy analysis pass.
The optimization diagnostic interface.
void emit(DiagnosticInfoOptimizationBase &OptDiag)
Output the remark via the diagnostic handler and to the optimization record file.
Diagnostic information for applied optimization remarks.
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
static PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
virtual void getAnalysisUsage(AnalysisUsage &) const
getAnalysisUsage - This function should be overriden by passes that need analysis information to do t...
Definition: Pass.cpp:98
static PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
Definition: Constants.cpp:1852
Analysis pass which computes a PostDominatorTree.
A set of analyses that are preserved following a run of a transformation pass.
Definition: Analysis.h:111
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition: Analysis.h:117
void preserve()
Mark an analysis as preserved.
Definition: Analysis.h:131
Return a value (possibly void), from a function.
This class represents the LLVM 'select' instruction.
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", InsertPosition InsertBefore=nullptr, Instruction *MDFrom=nullptr)
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:368
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:503
bool empty() const
Definition: SmallVector.h:95
void push_back(const T &Elt)
Definition: SmallVector.h:427
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1210
Analysis pass providing the TargetTransformInfo.
Wrapper pass for TargetTransformInfo.
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
bool isLoweredToCall(const Function *F) const
Test whether calls to a function lower to actual program function calls.
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
static IntegerType * getInt1Ty(LLVMContext &C)
bool isVoidTy() const
Return true if this is 'void'.
Definition: Type.h:139
A Use represents the edge between a Value definition and its users.
Definition: Use.h:43
void setOperand(unsigned i, Value *Val)
Definition: User.h:174
Value * getOperand(unsigned i) const
Definition: User.h:169
LLVM Value Representation.
Definition: Value.h:74
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
void setName(const Twine &Name)
Change the name of the value.
Definition: Value.cpp:377
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition: Value.cpp:534
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:309
void takeName(Value *V)
Transfer the name from V to this value.
Definition: Value.cpp:383
const ParentTy * getParent() const
Definition: ilist_node.h:32
self_iterator getIterator()
Definition: ilist_node.h:132
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition: CallingConv.h:24
@ Tail
Attempts to make calls as fast as possible while guaranteeing that tail call optimization can always b...
Definition: CallingConv.h:76
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
pred_iterator pred_end(BasicBlock *BB)
Definition: CFG.h:114
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1722
FunctionPass * createTailCallEliminationPass()
AllocaInst * findAllocaForValue(Value *V, bool OffsetZero=false)
Returns unique alloca where the value comes from, or nullptr.
auto successors(const MachineBasicBlock *BB)
void initializeTailCallElimPass(PassRegistry &)
ReturnInst * FoldReturnIntoUncondBranch(ReturnInst *RI, BasicBlock *BB, BasicBlock *Pred, DomTreeUpdater *DTU=nullptr)
This method duplicates the specified return instruction into a predecessor which ends in an unconditi...
Value * simplifyInstruction(Instruction *I, const SimplifyQuery &Q)
See if we can compute a simplified version of this instruction.
pred_iterator pred_begin(BasicBlock *BB)
Definition: CFG.h:110
bool isModSet(const ModRefInfo MRI)
Definition: ModRef.h:48
bool isSafeToLoadUnconditionally(Value *V, Align Alignment, const APInt &Size, const DataLayout &DL, Instruction *ScanFrom, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr)
Return true if we know that executing a load from this value cannot trap.
Definition: Loads.cpp:372
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition: STLExtras.h:1879
bool pred_empty(const BasicBlock *BB)
Definition: CFG.h:118
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
Align valueOrOne() const
For convenience, returns a valid alignment or 1 if undefined.
Definition: Alignment.h:141
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)