clang 3.7.0
CGExprScalar.cpp
1 //===--- CGExprScalar.cpp - Emit LLVM Code for Scalar Exprs ---------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This contains code to emit Expr nodes with scalar LLVM types as LLVM code.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "CodeGenFunction.h"
15 #include "CGCXXABI.h"
16 #include "CGDebugInfo.h"
17 #include "CGObjCRuntime.h"
18 #include "CodeGenModule.h"
19 #include "TargetInfo.h"
20 #include "clang/AST/ASTContext.h"
21 #include "clang/AST/DeclObjC.h"
22 #include "clang/AST/RecordLayout.h"
23 #include "clang/AST/StmtVisitor.h"
24 #include "clang/Basic/TargetInfo.h"
25 #include "clang/Frontend/CodeGenOptions.h"
26 #include "llvm/IR/CFG.h"
27 #include "llvm/IR/Constants.h"
28 #include "llvm/IR/DataLayout.h"
29 #include "llvm/IR/Function.h"
30 #include "llvm/IR/GlobalVariable.h"
31 #include "llvm/IR/Intrinsics.h"
32 #include "llvm/IR/Module.h"
33 #include <cstdarg>
34 
35 using namespace clang;
36 using namespace CodeGen;
37 using llvm::Value;
38 
39 //===----------------------------------------------------------------------===//
40 // Scalar Expression Emitter
41 //===----------------------------------------------------------------------===//
42 
43 namespace {
44 struct BinOpInfo {
45  Value *LHS;
46  Value *RHS;
47  QualType Ty; // Computation Type.
48  BinaryOperator::Opcode Opcode; // Opcode of BinOp to perform
49  bool FPContractable;
50  const Expr *E; // Entire expr, for error unsupported. May not be binop.
51 };
52 
53 static bool MustVisitNullValue(const Expr *E) {
54  // If a null pointer expression's type is the C++0x nullptr_t, then
55  // it's not necessarily a simple constant and it must be evaluated
56  // for its potential side effects.
57  return E->getType()->isNullPtrType();
58 }
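// For example (illustrative, not from this file): given a function declared
// as 'std::nullptr_t make_null();', the initializer in 'int *p = make_null();'
// must still emit the call for its side effects, even though the resulting
// pointer value is known to be null.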
59 
60 class ScalarExprEmitter
61  : public StmtVisitor<ScalarExprEmitter, Value*> {
62  CodeGenFunction &CGF;
63  CGBuilderTy &Builder;
64  bool IgnoreResultAssign;
65  llvm::LLVMContext &VMContext;
66 public:
67 
68  ScalarExprEmitter(CodeGenFunction &cgf, bool ira=false)
69  : CGF(cgf), Builder(CGF.Builder), IgnoreResultAssign(ira),
70  VMContext(cgf.getLLVMContext()) {
71  }
72 
73  //===--------------------------------------------------------------------===//
74  // Utilities
75  //===--------------------------------------------------------------------===//
76 
77  bool TestAndClearIgnoreResultAssign() {
78  bool I = IgnoreResultAssign;
79  IgnoreResultAssign = false;
80  return I;
81  }
82 
83  llvm::Type *ConvertType(QualType T) { return CGF.ConvertType(T); }
84  LValue EmitLValue(const Expr *E) { return CGF.EmitLValue(E); }
85  LValue EmitCheckedLValue(const Expr *E, CodeGenFunction::TypeCheckKind TCK) {
86  return CGF.EmitCheckedLValue(E, TCK);
87  }
88 
89  void EmitBinOpCheck(ArrayRef<std::pair<Value *, SanitizerMask>> Checks,
90  const BinOpInfo &Info);
91 
92  Value *EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
93  return CGF.EmitLoadOfLValue(LV, Loc).getScalarVal();
94  }
95 
96  void EmitLValueAlignmentAssumption(const Expr *E, Value *V) {
97  const AlignValueAttr *AVAttr = nullptr;
98  if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
99  const ValueDecl *VD = DRE->getDecl();
100 
101  if (VD->getType()->isReferenceType()) {
102  if (const auto *TTy =
103  dyn_cast<TypedefType>(VD->getType().getNonReferenceType()))
104  AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();
105  } else {
106  // Assumptions for function parameters are emitted at the start of the
107  // function, so there is no need to repeat that here.
108  if (isa<ParmVarDecl>(VD))
109  return;
110 
111  AVAttr = VD->getAttr<AlignValueAttr>();
112  }
113  }
114 
115  if (!AVAttr)
116  if (const auto *TTy =
117  dyn_cast<TypedefType>(E->getType()))
118  AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();
119 
120  if (!AVAttr)
121  return;
122 
123  Value *AlignmentValue = CGF.EmitScalarExpr(AVAttr->getAlignment());
124  llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(AlignmentValue);
125  CGF.EmitAlignmentAssumption(V, AlignmentCI->getZExtValue());
126  }
127 
128  /// EmitLoadOfLValue - Given an expression with complex type that represents a
129  /// value l-value, this method emits the address of the l-value, then loads
130  /// and returns the result.
131  Value *EmitLoadOfLValue(const Expr *E) {
132  Value *V = EmitLoadOfLValue(EmitCheckedLValue(E, CodeGenFunction::TCK_Load),
133  E->getExprLoc());
134 
135  EmitLValueAlignmentAssumption(E, V);
136  return V;
137  }
138 
139  /// EmitConversionToBool - Convert the specified expression value to a
140  /// boolean (i1) truth value. This is equivalent to "Val != 0".
141  Value *EmitConversionToBool(Value *Src, QualType DstTy);
142 
143  /// \brief Emit a check that a conversion to or from a floating-point type
144  /// does not overflow.
145  void EmitFloatConversionCheck(Value *OrigSrc, QualType OrigSrcType,
146  Value *Src, QualType SrcType,
147  QualType DstType, llvm::Type *DstTy);
148 
149  /// EmitScalarConversion - Emit a conversion from the specified type to the
150  /// specified destination type, both of which are LLVM scalar types.
151  Value *EmitScalarConversion(Value *Src, QualType SrcTy, QualType DstTy);
152 
153  /// EmitComplexToScalarConversion - Emit a conversion from the specified
154  /// complex type to the specified destination type, where the destination type
155  /// is an LLVM scalar type.
156  Value *EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src,
157  QualType SrcTy, QualType DstTy);
158 
159  /// EmitNullValue - Emit a value that corresponds to null for the given type.
160  Value *EmitNullValue(QualType Ty);
161 
162  /// EmitFloatToBoolConversion - Perform an FP to boolean conversion.
163  Value *EmitFloatToBoolConversion(Value *V) {
164  // Compare against 0.0 for fp scalars.
165  llvm::Value *Zero = llvm::Constant::getNullValue(V->getType());
166  return Builder.CreateFCmpUNE(V, Zero, "tobool");
167  }
168 
169  /// EmitPointerToBoolConversion - Perform a pointer to boolean conversion.
170  Value *EmitPointerToBoolConversion(Value *V) {
171  Value *Zero = llvm::ConstantPointerNull::get(
172  cast<llvm::PointerType>(V->getType()));
173  return Builder.CreateICmpNE(V, Zero, "tobool");
174  }
175 
176  Value *EmitIntToBoolConversion(Value *V) {
177  // Because of the type rules of C, we often end up computing a
178  // logical value, then zero extending it to int, then wanting it
179  // as a logical value again. Optimize this common case.
180  if (llvm::ZExtInst *ZI = dyn_cast<llvm::ZExtInst>(V)) {
181  if (ZI->getOperand(0)->getType() == Builder.getInt1Ty()) {
182  Value *Result = ZI->getOperand(0);
183  // If there aren't any more uses, zap the instruction to save space.
184  // Note that there can be more uses, for example if this
185  // is the result of an assignment.
186  if (ZI->use_empty())
187  ZI->eraseFromParent();
188  return Result;
189  }
190  }
191 
192  return Builder.CreateIsNotNull(V, "tobool");
193  }
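// Illustration: in C, 'x < y' has type int, so the i1 comparison result is
// zero-extended to i32; when that value is immediately used as a truth value
// again (e.g. '_Bool b = (x < y);'), the original i1 is reused instead of
// re-comparing the i32 against zero.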
194 
195  //===--------------------------------------------------------------------===//
196  // Visitor Methods
197  //===--------------------------------------------------------------------===//
198 
199  Value *Visit(Expr *E) {
200  ApplyDebugLocation DL(CGF, E);
201  return StmtVisitor<ScalarExprEmitter, Value*>::Visit(E);
202  }
203 
204  Value *VisitStmt(Stmt *S) {
205  S->dump(CGF.getContext().getSourceManager());
206  llvm_unreachable("Stmt can't have complex result type!");
207  }
208  Value *VisitExpr(Expr *S);
209 
210  Value *VisitParenExpr(ParenExpr *PE) {
211  return Visit(PE->getSubExpr());
212  }
213  Value *VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
214  return Visit(E->getReplacement());
215  }
216  Value *VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
217  return Visit(GE->getResultExpr());
218  }
219 
220  // Leaves.
221  Value *VisitIntegerLiteral(const IntegerLiteral *E) {
222  return Builder.getInt(E->getValue());
223  }
224  Value *VisitFloatingLiteral(const FloatingLiteral *E) {
225  return llvm::ConstantFP::get(VMContext, E->getValue());
226  }
227  Value *VisitCharacterLiteral(const CharacterLiteral *E) {
228  return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
229  }
230  Value *VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *E) {
231  return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
232  }
233  Value *VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) {
234  return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
235  }
236  Value *VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *E) {
237  return EmitNullValue(E->getType());
238  }
239  Value *VisitGNUNullExpr(const GNUNullExpr *E) {
240  return EmitNullValue(E->getType());
241  }
242  Value *VisitOffsetOfExpr(OffsetOfExpr *E);
243  Value *VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *E);
244  Value *VisitAddrLabelExpr(const AddrLabelExpr *E) {
245  llvm::Value *V = CGF.GetAddrOfLabel(E->getLabel());
246  return Builder.CreateBitCast(V, ConvertType(E->getType()));
247  }
248 
249  Value *VisitSizeOfPackExpr(SizeOfPackExpr *E) {
250  return llvm::ConstantInt::get(ConvertType(E->getType()),E->getPackLength());
251  }
252 
253  Value *VisitPseudoObjectExpr(PseudoObjectExpr *E) {
254  return CGF.EmitPseudoObjectRValue(E).getScalarVal();
255  }
256 
257  Value *VisitOpaqueValueExpr(OpaqueValueExpr *E) {
258  if (E->isGLValue())
259  return EmitLoadOfLValue(CGF.getOpaqueLValueMapping(E), E->getExprLoc());
260 
261  // Otherwise, assume the mapping is the scalar directly.
262  return CGF.getOpaqueRValueMapping(E).getScalarVal();
263  }
264 
265  // l-values.
266  Value *VisitDeclRefExpr(DeclRefExpr *E) {
267  if (CodeGenFunction::ConstantEmission result = CGF.tryEmitAsConstant(E)) {
268  if (result.isReference())
269  return EmitLoadOfLValue(result.getReferenceLValue(CGF, E),
270  E->getExprLoc());
271  return result.getValue();
272  }
273  return EmitLoadOfLValue(E);
274  }
275 
276  Value *VisitObjCSelectorExpr(ObjCSelectorExpr *E) {
277  return CGF.EmitObjCSelectorExpr(E);
278  }
279  Value *VisitObjCProtocolExpr(ObjCProtocolExpr *E) {
280  return CGF.EmitObjCProtocolExpr(E);
281  }
282  Value *VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
283  return EmitLoadOfLValue(E);
284  }
285  Value *VisitObjCMessageExpr(ObjCMessageExpr *E) {
286  if (E->getMethodDecl() &&
287  E->getMethodDecl()->getReturnType()->isReferenceType())
288  return EmitLoadOfLValue(E);
289  return CGF.EmitObjCMessageExpr(E).getScalarVal();
290  }
291 
292  Value *VisitObjCIsaExpr(ObjCIsaExpr *E) {
293  LValue LV = CGF.EmitObjCIsaExpr(E);
294  Value *V = CGF.EmitLoadOfLValue(LV, E->getExprLoc()).getScalarVal();
295  return V;
296  }
297 
298  Value *VisitArraySubscriptExpr(ArraySubscriptExpr *E);
299  Value *VisitShuffleVectorExpr(ShuffleVectorExpr *E);
300  Value *VisitConvertVectorExpr(ConvertVectorExpr *E);
301  Value *VisitMemberExpr(MemberExpr *E);
302  Value *VisitExtVectorElementExpr(Expr *E) { return EmitLoadOfLValue(E); }
303  Value *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
304  return EmitLoadOfLValue(E);
305  }
306 
307  Value *VisitInitListExpr(InitListExpr *E);
308 
309  Value *VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) {
310  return EmitNullValue(E->getType());
311  }
312  Value *VisitExplicitCastExpr(ExplicitCastExpr *E) {
313  if (E->getType()->isVariablyModifiedType())
314  CGF.EmitVariablyModifiedType(E->getType());
315 
316  if (CGDebugInfo *DI = CGF.getDebugInfo())
317  DI->EmitExplicitCastType(E->getType());
318 
319  return VisitCastExpr(E);
320  }
321  Value *VisitCastExpr(CastExpr *E);
322 
323  Value *VisitCallExpr(const CallExpr *E) {
324  if (E->getCallReturnType(CGF.getContext())->isReferenceType())
325  return EmitLoadOfLValue(E);
326 
327  Value *V = CGF.EmitCallExpr(E).getScalarVal();
328 
329  EmitLValueAlignmentAssumption(E, V);
330  return V;
331  }
332 
333  Value *VisitStmtExpr(const StmtExpr *E);
334 
335  // Unary Operators.
336  Value *VisitUnaryPostDec(const UnaryOperator *E) {
337  LValue LV = EmitLValue(E->getSubExpr());
338  return EmitScalarPrePostIncDec(E, LV, false, false);
339  }
340  Value *VisitUnaryPostInc(const UnaryOperator *E) {
341  LValue LV = EmitLValue(E->getSubExpr());
342  return EmitScalarPrePostIncDec(E, LV, true, false);
343  }
344  Value *VisitUnaryPreDec(const UnaryOperator *E) {
345  LValue LV = EmitLValue(E->getSubExpr());
346  return EmitScalarPrePostIncDec(E, LV, false, true);
347  }
348  Value *VisitUnaryPreInc(const UnaryOperator *E) {
349  LValue LV = EmitLValue(E->getSubExpr());
350  return EmitScalarPrePostIncDec(E, LV, true, true);
351  }
352 
353  llvm::Value *EmitIncDecConsiderOverflowBehavior(const UnaryOperator *E,
354  llvm::Value *InVal,
355  bool IsInc);
356 
357  llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
358  bool isInc, bool isPre);
359 
360 
361  Value *VisitUnaryAddrOf(const UnaryOperator *E) {
362  if (isa<MemberPointerType>(E->getType())) // never sugared
363  return CGF.CGM.getMemberPointerConstant(E);
364 
365  return EmitLValue(E->getSubExpr()).getAddress();
366  }
367  Value *VisitUnaryDeref(const UnaryOperator *E) {
368  if (E->getType()->isVoidType())
369  return Visit(E->getSubExpr()); // the actual value should be unused
370  return EmitLoadOfLValue(E);
371  }
372  Value *VisitUnaryPlus(const UnaryOperator *E) {
373  // This differs from gcc, though, most likely due to a bug in gcc.
374  TestAndClearIgnoreResultAssign();
375  return Visit(E->getSubExpr());
376  }
377  Value *VisitUnaryMinus (const UnaryOperator *E);
378  Value *VisitUnaryNot (const UnaryOperator *E);
379  Value *VisitUnaryLNot (const UnaryOperator *E);
380  Value *VisitUnaryReal (const UnaryOperator *E);
381  Value *VisitUnaryImag (const UnaryOperator *E);
382  Value *VisitUnaryExtension(const UnaryOperator *E) {
383  return Visit(E->getSubExpr());
384  }
385 
386  // C++
387  Value *VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E) {
388  return EmitLoadOfLValue(E);
389  }
390 
391  Value *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
392  return Visit(DAE->getExpr());
393  }
394  Value *VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
395  CodeGenFunction::CXXDefaultInitExprScope Scope(CGF);
396  return Visit(DIE->getExpr());
397  }
398  Value *VisitCXXThisExpr(CXXThisExpr *TE) {
399  return CGF.LoadCXXThis();
400  }
401 
402  Value *VisitExprWithCleanups(ExprWithCleanups *E) {
403  CGF.enterFullExpression(E);
404  CodeGenFunction::RunCleanupsScope Scope(CGF);
405  return Visit(E->getSubExpr());
406  }
407  Value *VisitCXXNewExpr(const CXXNewExpr *E) {
408  return CGF.EmitCXXNewExpr(E);
409  }
410  Value *VisitCXXDeleteExpr(const CXXDeleteExpr *E) {
411  CGF.EmitCXXDeleteExpr(E);
412  return nullptr;
413  }
414 
415  Value *VisitTypeTraitExpr(const TypeTraitExpr *E) {
416  return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
417  }
418 
419  Value *VisitArrayTypeTraitExpr(const ArrayTypeTraitExpr *E) {
420  return llvm::ConstantInt::get(Builder.getInt32Ty(), E->getValue());
421  }
422 
423  Value *VisitExpressionTraitExpr(const ExpressionTraitExpr *E) {
424  return llvm::ConstantInt::get(Builder.getInt1Ty(), E->getValue());
425  }
426 
427  Value *VisitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E) {
428  // C++ [expr.pseudo]p1:
429  // The result shall only be used as the operand for the function call
430  // operator (), and the result of such a call has type void. The only
431  // effect is the evaluation of the postfix-expression before the dot or
432  // arrow.
433  CGF.EmitScalarExpr(E->getBase());
434  return nullptr;
435  }
436 
437  Value *VisitCXXNullPtrLiteralExpr(const CXXNullPtrLiteralExpr *E) {
438  return EmitNullValue(E->getType());
439  }
440 
441  Value *VisitCXXThrowExpr(const CXXThrowExpr *E) {
442  CGF.EmitCXXThrowExpr(E);
443  return nullptr;
444  }
445 
446  Value *VisitCXXNoexceptExpr(const CXXNoexceptExpr *E) {
447  return Builder.getInt1(E->getValue());
448  }
449 
450  // Binary Operators.
451  Value *EmitMul(const BinOpInfo &Ops) {
452  if (Ops.Ty->isSignedIntegerOrEnumerationType()) {
453  switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
454  case LangOptions::SOB_Defined:
455  return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
456  case LangOptions::SOB_Undefined:
457  if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
458  return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
459  // Fall through.
460  case LangOptions::SOB_Trapping:
461  return EmitOverflowCheckedBinOp(Ops);
462  }
463  }
464 
465  if (Ops.Ty->isUnsignedIntegerType() &&
466  CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow))
467  return EmitOverflowCheckedBinOp(Ops);
468 
469  if (Ops.LHS->getType()->isFPOrFPVectorTy())
470  return Builder.CreateFMul(Ops.LHS, Ops.RHS, "mul");
471  return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
472  }
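// In practice the three SignedOverflowBehavior cases correspond to -fwrapv
// (SOB_Defined: plain 'mul'), the default (SOB_Undefined: 'mul nsw', or an
// overflow-checked multiply when -fsanitize=signed-integer-overflow is on),
// and -ftrapv (SOB_Trapping: always overflow-checked).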
473  /// Create a binary op that checks for overflow.
474  /// Currently only supports +, - and *.
475  Value *EmitOverflowCheckedBinOp(const BinOpInfo &Ops);
476 
477  // Check for undefined division and modulus behaviors.
478  void EmitUndefinedBehaviorIntegerDivAndRemCheck(const BinOpInfo &Ops,
479  llvm::Value *Zero,bool isDiv);
480  // Common helper for getting how wide LHS of shift is.
481  static Value *GetWidthMinusOneValue(Value* LHS,Value* RHS);
482  Value *EmitDiv(const BinOpInfo &Ops);
483  Value *EmitRem(const BinOpInfo &Ops);
484  Value *EmitAdd(const BinOpInfo &Ops);
485  Value *EmitSub(const BinOpInfo &Ops);
486  Value *EmitShl(const BinOpInfo &Ops);
487  Value *EmitShr(const BinOpInfo &Ops);
488  Value *EmitAnd(const BinOpInfo &Ops) {
489  return Builder.CreateAnd(Ops.LHS, Ops.RHS, "and");
490  }
491  Value *EmitXor(const BinOpInfo &Ops) {
492  return Builder.CreateXor(Ops.LHS, Ops.RHS, "xor");
493  }
494  Value *EmitOr (const BinOpInfo &Ops) {
495  return Builder.CreateOr(Ops.LHS, Ops.RHS, "or");
496  }
497 
498  BinOpInfo EmitBinOps(const BinaryOperator *E);
499  LValue EmitCompoundAssignLValue(const CompoundAssignOperator *E,
500  Value *(ScalarExprEmitter::*F)(const BinOpInfo &),
501  Value *&Result);
502 
503  Value *EmitCompoundAssign(const CompoundAssignOperator *E,
504  Value *(ScalarExprEmitter::*F)(const BinOpInfo &));
505 
506  // Binary operators and binary compound assignment operators.
507 #define HANDLEBINOP(OP) \
508  Value *VisitBin ## OP(const BinaryOperator *E) { \
509  return Emit ## OP(EmitBinOps(E)); \
510  } \
511  Value *VisitBin ## OP ## Assign(const CompoundAssignOperator *E) { \
512  return EmitCompoundAssign(E, &ScalarExprEmitter::Emit ## OP); \
513  }
514  HANDLEBINOP(Mul)
515  HANDLEBINOP(Div)
516  HANDLEBINOP(Rem)
517  HANDLEBINOP(Add)
518  HANDLEBINOP(Sub)
519  HANDLEBINOP(Shl)
520  HANDLEBINOP(Shr)
521  HANDLEBINOP(And)
522  HANDLEBINOP(Xor)
523  HANDLEBINOP(Or)
524 #undef HANDLEBINOP
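// For instance, HANDLEBINOP(Add) above expands to roughly:
//   Value *VisitBinAdd(const BinaryOperator *E) {
//     return EmitAdd(EmitBinOps(E));
//   }
//   Value *VisitBinAddAssign(const CompoundAssignOperator *E) {
//     return EmitCompoundAssign(E, &ScalarExprEmitter::EmitAdd);
//   }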
525 
526  // Comparisons.
527  Value *EmitCompare(const BinaryOperator *E, unsigned UICmpOpc,
528  unsigned SICmpOpc, unsigned FCmpOpc);
529 #define VISITCOMP(CODE, UI, SI, FP) \
530  Value *VisitBin##CODE(const BinaryOperator *E) { \
531  return EmitCompare(E, llvm::ICmpInst::UI, llvm::ICmpInst::SI, \
532  llvm::FCmpInst::FP); }
533  VISITCOMP(LT, ICMP_ULT, ICMP_SLT, FCMP_OLT)
534  VISITCOMP(GT, ICMP_UGT, ICMP_SGT, FCMP_OGT)
535  VISITCOMP(LE, ICMP_ULE, ICMP_SLE, FCMP_OLE)
536  VISITCOMP(GE, ICMP_UGE, ICMP_SGE, FCMP_OGE)
537  VISITCOMP(EQ, ICMP_EQ , ICMP_EQ , FCMP_OEQ)
538  VISITCOMP(NE, ICMP_NE , ICMP_NE , FCMP_UNE)
539 #undef VISITCOMP
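// For instance, VISITCOMP(LT, ICMP_ULT, ICMP_SLT, FCMP_OLT) expands to
// roughly:
//   Value *VisitBinLT(const BinaryOperator *E) {
//     return EmitCompare(E, llvm::ICmpInst::ICMP_ULT,
//                        llvm::ICmpInst::ICMP_SLT, llvm::FCmpInst::FCMP_OLT);
//   }
// and EmitCompare then picks the unsigned, signed, or floating-point
// predicate based on the operand types.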
540 
541  Value *VisitBinAssign (const BinaryOperator *E);
542 
543  Value *VisitBinLAnd (const BinaryOperator *E);
544  Value *VisitBinLOr (const BinaryOperator *E);
545  Value *VisitBinComma (const BinaryOperator *E);
546 
547  Value *VisitBinPtrMemD(const Expr *E) { return EmitLoadOfLValue(E); }
548  Value *VisitBinPtrMemI(const Expr *E) { return EmitLoadOfLValue(E); }
549 
550  // Other Operators.
551  Value *VisitBlockExpr(const BlockExpr *BE);
552  Value *VisitAbstractConditionalOperator(const AbstractConditionalOperator *);
553  Value *VisitChooseExpr(ChooseExpr *CE);
554  Value *VisitVAArgExpr(VAArgExpr *VE);
555  Value *VisitObjCStringLiteral(const ObjCStringLiteral *E) {
556  return CGF.EmitObjCStringLiteral(E);
557  }
558  Value *VisitObjCBoxedExpr(ObjCBoxedExpr *E) {
559  return CGF.EmitObjCBoxedExpr(E);
560  }
561  Value *VisitObjCArrayLiteral(ObjCArrayLiteral *E) {
562  return CGF.EmitObjCArrayLiteral(E);
563  }
564  Value *VisitObjCDictionaryLiteral(ObjCDictionaryLiteral *E) {
565  return CGF.EmitObjCDictionaryLiteral(E);
566  }
567  Value *VisitAsTypeExpr(AsTypeExpr *CE);
568  Value *VisitAtomicExpr(AtomicExpr *AE);
569 };
570 } // end anonymous namespace.
571 
572 //===----------------------------------------------------------------------===//
573 // Utilities
574 //===----------------------------------------------------------------------===//
575 
576 /// EmitConversionToBool - Convert the specified expression value to a
577 /// boolean (i1) truth value. This is equivalent to "Val != 0".
578 Value *ScalarExprEmitter::EmitConversionToBool(Value *Src, QualType SrcType) {
579  assert(SrcType.isCanonical() && "EmitScalarConversion strips typedefs");
580 
581  if (SrcType->isRealFloatingType())
582  return EmitFloatToBoolConversion(Src);
583 
584  if (const MemberPointerType *MPT = dyn_cast<MemberPointerType>(SrcType))
585  return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, Src, MPT);
586 
587  assert((SrcType->isIntegerType() || isa<llvm::PointerType>(Src->getType())) &&
588  "Unknown scalar type to convert");
589 
590  if (isa<llvm::IntegerType>(Src->getType()))
591  return EmitIntToBoolConversion(Src);
592 
593  assert(isa<llvm::PointerType>(Src->getType()));
594  return EmitPointerToBoolConversion(Src);
595 }
596 
597 void ScalarExprEmitter::EmitFloatConversionCheck(Value *OrigSrc,
598  QualType OrigSrcType,
599  Value *Src, QualType SrcType,
600  QualType DstType,
601  llvm::Type *DstTy) {
602  CodeGenFunction::SanitizerScope SanScope(&CGF);
603  using llvm::APFloat;
604  using llvm::APSInt;
605 
606  llvm::Type *SrcTy = Src->getType();
607 
608  llvm::Value *Check = nullptr;
609  if (llvm::IntegerType *IntTy = dyn_cast<llvm::IntegerType>(SrcTy)) {
610  // Integer to floating-point. This can fail for unsigned short -> __half
611  // or unsigned __int128 -> float.
612  assert(DstType->isFloatingType());
613  bool SrcIsUnsigned = OrigSrcType->isUnsignedIntegerOrEnumerationType();
614 
615  APFloat LargestFloat =
616  APFloat::getLargest(CGF.getContext().getFloatTypeSemantics(DstType));
617  APSInt LargestInt(IntTy->getBitWidth(), SrcIsUnsigned);
618 
619  bool IsExact;
620  if (LargestFloat.convertToInteger(LargestInt, APFloat::rmTowardZero,
621  &IsExact) != APFloat::opOK)
622  // The range of representable values of this floating point type includes
623  // all values of this integer type. Don't need an overflow check.
624  return;
625 
626  llvm::Value *Max = llvm::ConstantInt::get(VMContext, LargestInt);
627  if (SrcIsUnsigned)
628  Check = Builder.CreateICmpULE(Src, Max);
629  else {
630  llvm::Value *Min = llvm::ConstantInt::get(VMContext, -LargestInt);
631  llvm::Value *GE = Builder.CreateICmpSGE(Src, Min);
632  llvm::Value *LE = Builder.CreateICmpSLE(Src, Max);
633  Check = Builder.CreateAnd(GE, LE);
634  }
635  } else {
636  const llvm::fltSemantics &SrcSema =
637  CGF.getContext().getFloatTypeSemantics(OrigSrcType);
638  if (isa<llvm::IntegerType>(DstTy)) {
639  // Floating-point to integer. This has undefined behavior if the source is
640  // +-Inf, NaN, or doesn't fit into the destination type (after truncation
641  // to an integer).
642  unsigned Width = CGF.getContext().getIntWidth(DstType);
643  bool Unsigned = DstType->isUnsignedIntegerOrEnumerationType();
644 
645  APSInt Min = APSInt::getMinValue(Width, Unsigned);
646  APFloat MinSrc(SrcSema, APFloat::uninitialized);
647  if (MinSrc.convertFromAPInt(Min, !Unsigned, APFloat::rmTowardZero) &
648  APFloat::opOverflow)
649  // Don't need an overflow check for lower bound. Just check for
650  // -Inf/NaN.
651  MinSrc = APFloat::getInf(SrcSema, true);
652  else
653  // Find the largest value which is too small to represent (before
654  // truncation toward zero).
655  MinSrc.subtract(APFloat(SrcSema, 1), APFloat::rmTowardNegative);
656 
657  APSInt Max = APSInt::getMaxValue(Width, Unsigned);
658  APFloat MaxSrc(SrcSema, APFloat::uninitialized);
659  if (MaxSrc.convertFromAPInt(Max, !Unsigned, APFloat::rmTowardZero) &
660  APFloat::opOverflow)
661  // Don't need an overflow check for upper bound. Just check for
662  // +Inf/NaN.
663  MaxSrc = APFloat::getInf(SrcSema, false);
664  else
665  // Find the smallest value which is too large to represent (before
666  // truncation toward zero).
667  MaxSrc.add(APFloat(SrcSema, 1), APFloat::rmTowardPositive);
668 
669  // If we're converting from __half, convert the range to float to match
670  // the type of src.
671  if (OrigSrcType->isHalfType()) {
672  const llvm::fltSemantics &Sema =
673  CGF.getContext().getFloatTypeSemantics(SrcType);
674  bool IsInexact;
675  MinSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
676  MaxSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
677  }
678 
679  llvm::Value *GE =
680  Builder.CreateFCmpOGT(Src, llvm::ConstantFP::get(VMContext, MinSrc));
681  llvm::Value *LE =
682  Builder.CreateFCmpOLT(Src, llvm::ConstantFP::get(VMContext, MaxSrc));
683  Check = Builder.CreateAnd(GE, LE);
684  } else {
685  // FIXME: Maybe split this sanitizer out from float-cast-overflow.
686  //
687  // Floating-point to floating-point. This has undefined behavior if the
688  // source is not in the range of representable values of the destination
689  // type. The C and C++ standards are spectacularly unclear here. We
690  // diagnose finite out-of-range conversions, but allow infinities and NaNs
691  // to convert to the corresponding value in the smaller type.
692  //
693  // C11 Annex F gives all such conversions defined behavior for IEC 60559
694  // conforming implementations. Unfortunately, LLVM's fptrunc instruction
695  // does not.
696 
697  // Converting from a lower rank to a higher rank can never have
698  // undefined behavior, since higher-rank types must have a superset
699  // of values of lower-rank types.
700  if (CGF.getContext().getFloatingTypeOrder(OrigSrcType, DstType) != 1)
701  return;
702 
703  assert(!OrigSrcType->isHalfType() &&
704  "should not check conversion from __half, it has the lowest rank");
705 
706  const llvm::fltSemantics &DstSema =
707  CGF.getContext().getFloatTypeSemantics(DstType);
708  APFloat MinBad = APFloat::getLargest(DstSema, false);
709  APFloat MaxBad = APFloat::getInf(DstSema, false);
710 
711  bool IsInexact;
712  MinBad.convert(SrcSema, APFloat::rmTowardZero, &IsInexact);
713  MaxBad.convert(SrcSema, APFloat::rmTowardZero, &IsInexact);
714 
715  Value *AbsSrc = CGF.EmitNounwindRuntimeCall(
716  CGF.CGM.getIntrinsic(llvm::Intrinsic::fabs, Src->getType()), Src);
717  llvm::Value *GE =
718  Builder.CreateFCmpOGT(AbsSrc, llvm::ConstantFP::get(VMContext, MinBad));
719  llvm::Value *LE =
720  Builder.CreateFCmpOLT(AbsSrc, llvm::ConstantFP::get(VMContext, MaxBad));
721  Check = Builder.CreateNot(Builder.CreateAnd(GE, LE));
722  }
723  }
724 
725  // FIXME: Provide a SourceLocation.
726  llvm::Constant *StaticArgs[] = {
727  CGF.EmitCheckTypeDescriptor(OrigSrcType),
728  CGF.EmitCheckTypeDescriptor(DstType)
729  };
730  CGF.EmitCheck(std::make_pair(Check, SanitizerKind::FloatCastOverflow),
731  "float_cast_overflow", StaticArgs, OrigSrc);
732 }
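// Worked example of the float-to-integer case above: for a conversion from
// 'float' to 'signed char' (Width = 8, signed), Min = -128 and Max = 127, so
// MinSrc becomes -129.0 and MaxSrc becomes 128.0, and the emitted check is
// 'Src > -129.0 && Src < 128.0' (ordered compares, so NaN fails as well).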
733 
734 /// EmitScalarConversion - Emit a conversion from the specified type to the
735 /// specified destination type, both of which are LLVM scalar types.
736 Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
737  QualType DstType) {
738  SrcType = CGF.getContext().getCanonicalType(SrcType);
739  DstType = CGF.getContext().getCanonicalType(DstType);
740  if (SrcType == DstType) return Src;
741 
742  if (DstType->isVoidType()) return nullptr;
743 
744  llvm::Value *OrigSrc = Src;
745  QualType OrigSrcType = SrcType;
746  llvm::Type *SrcTy = Src->getType();
747 
748  // Handle conversions to bool first, they are special: comparisons against 0.
749  if (DstType->isBooleanType())
750  return EmitConversionToBool(Src, SrcType);
751 
752  llvm::Type *DstTy = ConvertType(DstType);
753 
754  // Cast from half through float if half isn't a native type.
755  if (SrcType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
756  // Cast to FP using the intrinsic if the half type itself isn't supported.
757  if (DstTy->isFloatingPointTy()) {
758  if (!CGF.getContext().getLangOpts().HalfArgsAndReturns)
759  return Builder.CreateCall(
760  CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16, DstTy),
761  Src);
762  } else {
763  // Cast to other types through float, using either the intrinsic or FPExt,
764  // depending on whether the half type itself is supported
765  // (as opposed to operations on half, available with NativeHalfType).
766  if (!CGF.getContext().getLangOpts().HalfArgsAndReturns) {
767  Src = Builder.CreateCall(
768  CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16,
769  CGF.CGM.FloatTy),
770  Src);
771  } else {
772  Src = Builder.CreateFPExt(Src, CGF.CGM.FloatTy, "conv");
773  }
774  SrcType = CGF.getContext().FloatTy;
775  SrcTy = CGF.FloatTy;
776  }
777  }
778 
779  // Ignore conversions like int -> uint.
780  if (SrcTy == DstTy)
781  return Src;
782 
783  // Handle pointer conversions next: pointers can only be converted to/from
784  // other pointers and integers. Check for pointer types in terms of LLVM, as
785  // some native types (like Obj-C id) may map to a pointer type.
786  if (isa<llvm::PointerType>(DstTy)) {
787  // The source value may be an integer, or a pointer.
788  if (isa<llvm::PointerType>(SrcTy))
789  return Builder.CreateBitCast(Src, DstTy, "conv");
790 
791  assert(SrcType->isIntegerType() && "Not ptr->ptr or int->ptr conversion?");
792  // First, convert to the correct width so that we control the kind of
793  // extension.
794  llvm::Type *MiddleTy = CGF.IntPtrTy;
795  bool InputSigned = SrcType->isSignedIntegerOrEnumerationType();
796  llvm::Value* IntResult =
797  Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
798  // Then, cast to pointer.
799  return Builder.CreateIntToPtr(IntResult, DstTy, "conv");
800  }
801 
802  if (isa<llvm::PointerType>(SrcTy)) {
803  // Must be an ptr to int cast.
804  assert(isa<llvm::IntegerType>(DstTy) && "not ptr->int?");
805  return Builder.CreatePtrToInt(Src, DstTy, "conv");
806  }
807 
808  // A scalar can be splatted to an extended vector of the same element type
809  if (DstType->isExtVectorType() && !SrcType->isVectorType()) {
810  // Cast the scalar to element type
811  QualType EltTy = DstType->getAs<ExtVectorType>()->getElementType();
812  llvm::Value *Elt = EmitScalarConversion(Src, SrcType, EltTy);
813 
814  // Splat the element across to all elements
815  unsigned NumElements = cast<llvm::VectorType>(DstTy)->getNumElements();
816  return Builder.CreateVectorSplat(NumElements, Elt, "splat");
817  }
818 
819  // Allow bitcast from vector to integer/fp of the same size.
820  if (isa<llvm::VectorType>(SrcTy) ||
821  isa<llvm::VectorType>(DstTy))
822  return Builder.CreateBitCast(Src, DstTy, "conv");
823 
824  // Finally, we have the arithmetic types: real int/float.
825  Value *Res = nullptr;
826  llvm::Type *ResTy = DstTy;
827 
828  // An overflowing conversion has undefined behavior if either the source type
829  // or the destination type is a floating-point type.
830  if (CGF.SanOpts.has(SanitizerKind::FloatCastOverflow) &&
831  (OrigSrcType->isFloatingType() || DstType->isFloatingType()))
832  EmitFloatConversionCheck(OrigSrc, OrigSrcType, Src, SrcType, DstType,
833  DstTy);
834 
835  // Cast to half through float if half isn't a native type.
836  if (DstType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
837  // Make sure we cast in a single step if from another FP type.
838  if (SrcTy->isFloatingPointTy()) {
839  // Use the intrinsic if the half type itself isn't supported
840  // (as opposed to operations on half, available with NativeHalfType).
841  if (!CGF.getContext().getLangOpts().HalfArgsAndReturns)
842  return Builder.CreateCall(
843  CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, SrcTy), Src);
844  // If the half type is supported, just use an fptrunc.
845  return Builder.CreateFPTrunc(Src, DstTy);
846  }
847  DstTy = CGF.FloatTy;
848  }
849 
850  if (isa<llvm::IntegerType>(SrcTy)) {
851  bool InputSigned = SrcType->isSignedIntegerOrEnumerationType();
852  if (isa<llvm::IntegerType>(DstTy))
853  Res = Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
854  else if (InputSigned)
855  Res = Builder.CreateSIToFP(Src, DstTy, "conv");
856  else
857  Res = Builder.CreateUIToFP(Src, DstTy, "conv");
858  } else if (isa<llvm::IntegerType>(DstTy)) {
859  assert(SrcTy->isFloatingPointTy() && "Unknown real conversion");
860  if (DstType->isSignedIntegerOrEnumerationType())
861  Res = Builder.CreateFPToSI(Src, DstTy, "conv");
862  else
863  Res = Builder.CreateFPToUI(Src, DstTy, "conv");
864  } else {
865  assert(SrcTy->isFloatingPointTy() && DstTy->isFloatingPointTy() &&
866  "Unknown real conversion");
867  if (DstTy->getTypeID() < SrcTy->getTypeID())
868  Res = Builder.CreateFPTrunc(Src, DstTy, "conv");
869  else
870  Res = Builder.CreateFPExt(Src, DstTy, "conv");
871  }
872 
873  if (DstTy != ResTy) {
874  if (!CGF.getContext().getLangOpts().HalfArgsAndReturns) {
875  assert(ResTy->isIntegerTy(16) && "Only half FP requires extra conversion");
876  Res = Builder.CreateCall(
877  CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, CGF.CGM.FloatTy),
878  Res);
879  } else {
880  Res = Builder.CreateFPTrunc(Res, ResTy, "conv");
881  }
882  }
883 
884  return Res;
885 }
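// For example, converting an 'int' to '__fp16' when half is not a native type
// first produces a float via 'sitofp' and then narrows it, either with the
// llvm.convert.to.fp16 intrinsic or with an 'fptrunc', depending on whether
// half arguments and returns are supported.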
886 
887 /// EmitComplexToScalarConversion - Emit a conversion from the specified complex
888 /// type to the specified destination type, where the destination type is an
889 /// LLVM scalar type.
890 Value *ScalarExprEmitter::
891 EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src,
892  QualType SrcTy, QualType DstTy) {
893  // Get the source element type.
894  SrcTy = SrcTy->castAs<ComplexType>()->getElementType();
895 
896  // Handle conversions to bool first, they are special: comparisons against 0.
897  if (DstTy->isBooleanType()) {
898  // Complex != 0 -> (Real != 0) | (Imag != 0)
899  Src.first = EmitScalarConversion(Src.first, SrcTy, DstTy);
900  Src.second = EmitScalarConversion(Src.second, SrcTy, DstTy);
901  return Builder.CreateOr(Src.first, Src.second, "tobool");
902  }
903 
904  // C99 6.3.1.7p2: "When a value of complex type is converted to a real type,
905  // the imaginary part of the complex value is discarded and the value of the
906  // real part is converted according to the conversion rules for the
907  // corresponding real type.
908  return EmitScalarConversion(Src.first, SrcTy, DstTy);
909 }
910 
911 Value *ScalarExprEmitter::EmitNullValue(QualType Ty) {
912  return CGF.EmitFromMemory(CGF.CGM.EmitNullConstant(Ty), Ty);
913 }
914 
915 /// \brief Emit a sanitization check for the given "binary" operation (which
916 /// might actually be a unary increment which has been lowered to a binary
917 /// operation). The check passes if all values in \p Checks (which are \c i1),
918 /// are \c true.
919 void ScalarExprEmitter::EmitBinOpCheck(
920  ArrayRef<std::pair<Value *, SanitizerMask>> Checks, const BinOpInfo &Info) {
921  assert(CGF.IsSanitizerScope);
922  StringRef CheckName;
923  SmallVector<llvm::Constant *, 4> StaticData;
924  SmallVector<llvm::Value *, 2> DynamicData;
925 
926  BinaryOperatorKind Opcode = Info.Opcode;
927  if (BinaryOperator::isCompoundAssignmentOp(Opcode))
928  Opcode = BinaryOperator::getOpForCompoundAssignment(Opcode);
929 
930  StaticData.push_back(CGF.EmitCheckSourceLocation(Info.E->getExprLoc()));
931  const UnaryOperator *UO = dyn_cast<UnaryOperator>(Info.E);
932  if (UO && UO->getOpcode() == UO_Minus) {
933  CheckName = "negate_overflow";
934  StaticData.push_back(CGF.EmitCheckTypeDescriptor(UO->getType()));
935  DynamicData.push_back(Info.RHS);
936  } else {
937  if (BinaryOperator::isShiftOp(Opcode)) {
938  // Shift LHS negative or too large, or RHS out of bounds.
939  CheckName = "shift_out_of_bounds";
940  const BinaryOperator *BO = cast<BinaryOperator>(Info.E);
941  StaticData.push_back(
942  CGF.EmitCheckTypeDescriptor(BO->getLHS()->getType()));
943  StaticData.push_back(
944  CGF.EmitCheckTypeDescriptor(BO->getRHS()->getType()));
945  } else if (Opcode == BO_Div || Opcode == BO_Rem) {
946  // Divide or modulo by zero, or signed overflow (eg INT_MAX / -1).
947  CheckName = "divrem_overflow";
948  StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty));
949  } else {
950  // Arithmetic overflow (+, -, *).
951  switch (Opcode) {
952  case BO_Add: CheckName = "add_overflow"; break;
953  case BO_Sub: CheckName = "sub_overflow"; break;
954  case BO_Mul: CheckName = "mul_overflow"; break;
955  default: llvm_unreachable("unexpected opcode for bin op check");
956  }
957  StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty));
958  }
959  DynamicData.push_back(Info.LHS);
960  DynamicData.push_back(Info.RHS);
961  }
962 
963  CGF.EmitCheck(Checks, CheckName, StaticData, DynamicData);
964 }
965 
966 //===----------------------------------------------------------------------===//
967 // Visitor Methods
968 //===----------------------------------------------------------------------===//
969 
970 Value *ScalarExprEmitter::VisitExpr(Expr *E) {
971  CGF.ErrorUnsupported(E, "scalar expression");
972  if (E->getType()->isVoidType())
973  return nullptr;
974  return llvm::UndefValue::get(CGF.ConvertType(E->getType()));
975 }
976 
977 Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
978  // Vector Mask Case
979  if (E->getNumSubExprs() == 2 ||
980  (E->getNumSubExprs() == 3 && E->getExpr(2)->getType()->isVectorType())) {
981  Value *LHS = CGF.EmitScalarExpr(E->getExpr(0));
982  Value *RHS = CGF.EmitScalarExpr(E->getExpr(1));
983  Value *Mask;
984 
985  llvm::VectorType *LTy = cast<llvm::VectorType>(LHS->getType());
986  unsigned LHSElts = LTy->getNumElements();
987 
988  if (E->getNumSubExprs() == 3) {
989  Mask = CGF.EmitScalarExpr(E->getExpr(2));
990 
991  // Shuffle LHS & RHS into one input vector.
992  SmallVector<llvm::Constant*, 32> concat;
993  for (unsigned i = 0; i != LHSElts; ++i) {
994  concat.push_back(Builder.getInt32(2*i));
995  concat.push_back(Builder.getInt32(2*i+1));
996  }
997 
998  Value* CV = llvm::ConstantVector::get(concat);
999  LHS = Builder.CreateShuffleVector(LHS, RHS, CV, "concat");
1000  LHSElts *= 2;
1001  } else {
1002  Mask = RHS;
1003  }
1004 
1005  llvm::VectorType *MTy = cast<llvm::VectorType>(Mask->getType());
1006  llvm::Constant* EltMask;
1007 
1008  EltMask = llvm::ConstantInt::get(MTy->getElementType(),
1009  llvm::NextPowerOf2(LHSElts-1)-1);
1010 
1011  // Mask off the high bits of each shuffle index.
1012  Value *MaskBits = llvm::ConstantVector::getSplat(MTy->getNumElements(),
1013  EltMask);
1014  Mask = Builder.CreateAnd(Mask, MaskBits, "mask");
1015 
1016  // newv = undef
1017  // mask = mask & maskbits
1018  // for each elt
1019  // n = extract mask i
1020  // x = extract val n
1021  // newv = insert newv, x, i
1022  llvm::VectorType *RTy = llvm::VectorType::get(LTy->getElementType(),
1023  MTy->getNumElements());
1024  Value* NewV = llvm::UndefValue::get(RTy);
1025  for (unsigned i = 0, e = MTy->getNumElements(); i != e; ++i) {
1026  Value *IIndx = llvm::ConstantInt::get(CGF.SizeTy, i);
1027  Value *Indx = Builder.CreateExtractElement(Mask, IIndx, "shuf_idx");
1028 
1029  Value *VExt = Builder.CreateExtractElement(LHS, Indx, "shuf_elt");
1030  NewV = Builder.CreateInsertElement(NewV, VExt, IIndx, "shuf_ins");
1031  }
1032  return NewV;
1033  }
1034 
1035  Value* V1 = CGF.EmitScalarExpr(E->getExpr(0));
1036  Value* V2 = CGF.EmitScalarExpr(E->getExpr(1));
1037 
1038  SmallVector<llvm::Constant*, 32> indices;
1039  for (unsigned i = 2; i < E->getNumSubExprs(); ++i) {
1040  llvm::APSInt Idx = E->getShuffleMaskIdx(CGF.getContext(), i-2);
1041  // Check for -1 and output it as undef in the IR.
1042  if (Idx.isSigned() && Idx.isAllOnesValue())
1043  indices.push_back(llvm::UndefValue::get(CGF.Int32Ty));
1044  else
1045  indices.push_back(Builder.getInt32(Idx.getZExtValue()));
1046  }
1047 
1048  Value *SV = llvm::ConstantVector::get(indices);
1049  return Builder.CreateShuffleVector(V1, V2, SV, "shuffle");
1050 }
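// In the variable-mask form above, each mask element is ANDed with
// NextPowerOf2(LHSElts-1)-1; e.g. with a four-element input only the low two
// bits of each index are used, so out-of-range index bits are simply ignored.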
1051 
1052 Value *ScalarExprEmitter::VisitConvertVectorExpr(ConvertVectorExpr *E) {
1053  QualType SrcType = E->getSrcExpr()->getType(),
1054  DstType = E->getType();
1055 
1056  Value *Src = CGF.EmitScalarExpr(E->getSrcExpr());
1057 
1058  SrcType = CGF.getContext().getCanonicalType(SrcType);
1059  DstType = CGF.getContext().getCanonicalType(DstType);
1060  if (SrcType == DstType) return Src;
1061 
1062  assert(SrcType->isVectorType() &&
1063  "ConvertVector source type must be a vector");
1064  assert(DstType->isVectorType() &&
1065  "ConvertVector destination type must be a vector");
1066 
1067  llvm::Type *SrcTy = Src->getType();
1068  llvm::Type *DstTy = ConvertType(DstType);
1069 
1070  // Ignore conversions like int -> uint.
1071  if (SrcTy == DstTy)
1072  return Src;
1073 
1074  QualType SrcEltType = SrcType->getAs<VectorType>()->getElementType(),
1075  DstEltType = DstType->getAs<VectorType>()->getElementType();
1076 
1077  assert(SrcTy->isVectorTy() &&
1078  "ConvertVector source IR type must be a vector");
1079  assert(DstTy->isVectorTy() &&
1080  "ConvertVector destination IR type must be a vector");
1081 
1082  llvm::Type *SrcEltTy = SrcTy->getVectorElementType(),
1083  *DstEltTy = DstTy->getVectorElementType();
1084 
1085  if (DstEltType->isBooleanType()) {
1086  assert((SrcEltTy->isFloatingPointTy() ||
1087  isa<llvm::IntegerType>(SrcEltTy)) && "Unknown boolean conversion");
1088 
1089  llvm::Value *Zero = llvm::Constant::getNullValue(SrcTy);
1090  if (SrcEltTy->isFloatingPointTy()) {
1091  return Builder.CreateFCmpUNE(Src, Zero, "tobool");
1092  } else {
1093  return Builder.CreateICmpNE(Src, Zero, "tobool");
1094  }
1095  }
1096 
1097  // We have the arithmetic types: real int/float.
1098  Value *Res = nullptr;
1099 
1100  if (isa<llvm::IntegerType>(SrcEltTy)) {
1101  bool InputSigned = SrcEltType->isSignedIntegerOrEnumerationType();
1102  if (isa<llvm::IntegerType>(DstEltTy))
1103  Res = Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
1104  else if (InputSigned)
1105  Res = Builder.CreateSIToFP(Src, DstTy, "conv");
1106  else
1107  Res = Builder.CreateUIToFP(Src, DstTy, "conv");
1108  } else if (isa<llvm::IntegerType>(DstEltTy)) {
1109  assert(SrcEltTy->isFloatingPointTy() && "Unknown real conversion");
1110  if (DstEltType->isSignedIntegerOrEnumerationType())
1111  Res = Builder.CreateFPToSI(Src, DstTy, "conv");
1112  else
1113  Res = Builder.CreateFPToUI(Src, DstTy, "conv");
1114  } else {
1115  assert(SrcEltTy->isFloatingPointTy() && DstEltTy->isFloatingPointTy() &&
1116  "Unknown real conversion");
1117  if (DstEltTy->getTypeID() < SrcEltTy->getTypeID())
1118  Res = Builder.CreateFPTrunc(Src, DstTy, "conv");
1119  else
1120  Res = Builder.CreateFPExt(Src, DstTy, "conv");
1121  }
1122 
1123  return Res;
1124 }
1125 
1126 Value *ScalarExprEmitter::VisitMemberExpr(MemberExpr *E) {
1127  llvm::APSInt Value;
1128  if (E->EvaluateAsInt(Value, CGF.getContext(), Expr::SE_AllowSideEffects)) {
1129  if (E->isArrow())
1130  CGF.EmitScalarExpr(E->getBase());
1131  else
1132  EmitLValue(E->getBase());
1133  return Builder.getInt(Value);
1134  }
1135 
1136  return EmitLoadOfLValue(E);
1137 }
1138 
1139 Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
1140  TestAndClearIgnoreResultAssign();
1141 
1142  // Emit subscript expressions in rvalue context's. For most cases, this just
1143  // loads the lvalue formed by the subscript expr. However, we have to be
1144  // careful, because the base of a vector subscript is occasionally an rvalue,
1145  // so we can't get it as an lvalue.
1146  if (!E->getBase()->getType()->isVectorType())
1147  return EmitLoadOfLValue(E);
1148 
1149  // Handle the vector case. The base must be a vector, the index must be an
1150  // integer value.
1151  Value *Base = Visit(E->getBase());
1152  Value *Idx = Visit(E->getIdx());
1153  QualType IdxTy = E->getIdx()->getType();
1154 
1155  if (CGF.SanOpts.has(SanitizerKind::ArrayBounds))
1156  CGF.EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, /*Accessed*/true);
1157 
1158  return Builder.CreateExtractElement(Base, Idx, "vecext");
1159 }
1160 
1161 static llvm::Constant *getMaskElt(llvm::ShuffleVectorInst *SVI, unsigned Idx,
1162  unsigned Off, llvm::Type *I32Ty) {
1163  int MV = SVI->getMaskValue(Idx);
1164  if (MV == -1)
1165  return llvm::UndefValue::get(I32Ty);
1166  return llvm::ConstantInt::get(I32Ty, Off+MV);
1167 }
1168 
1169 static llvm::Constant *getAsInt32(llvm::ConstantInt *C, llvm::Type *I32Ty) {
1170  if (C->getBitWidth() != 32) {
1171  assert(llvm::ConstantInt::isValueValidForType(I32Ty,
1172  C->getZExtValue()) &&
1173  "Index operand too large for shufflevector mask!");
1174  return llvm::ConstantInt::get(I32Ty, C->getZExtValue());
1175  }
1176  return C;
1177 }
1178 
1179 Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
1180  bool Ignore = TestAndClearIgnoreResultAssign();
1181  (void)Ignore;
1182  assert (Ignore == false && "init list ignored");
1183  unsigned NumInitElements = E->getNumInits();
1184 
1185  if (E->hadArrayRangeDesignator())
1186  CGF.ErrorUnsupported(E, "GNU array range designator extension");
1187 
1188  llvm::VectorType *VType =
1189  dyn_cast<llvm::VectorType>(ConvertType(E->getType()));
1190 
1191  if (!VType) {
1192  if (NumInitElements == 0) {
1193  // C++11 value-initialization for the scalar.
1194  return EmitNullValue(E->getType());
1195  }
1196  // We have a scalar in braces. Just use the first element.
1197  return Visit(E->getInit(0));
1198  }
1199 
1200  unsigned ResElts = VType->getNumElements();
1201 
1202  // Loop over initializers collecting the Value for each, and remembering
1203  // whether the source was swizzle (ExtVectorElementExpr). This will allow
1204  // us to fold the shuffle for the swizzle into the shuffle for the vector
1205  // initializer, since LLVM optimizers generally do not want to touch
1206  // shuffles.
1207  unsigned CurIdx = 0;
1208  bool VIsUndefShuffle = false;
1209  llvm::Value *V = llvm::UndefValue::get(VType);
1210  for (unsigned i = 0; i != NumInitElements; ++i) {
1211  Expr *IE = E->getInit(i);
1212  Value *Init = Visit(IE);
1213  SmallVector<llvm::Constant*, 16> Args;
1214 
1215  llvm::VectorType *VVT = dyn_cast<llvm::VectorType>(Init->getType());
1216 
1217  // Handle scalar elements. If the scalar initializer is actually one
1218  // element of a different vector of the same width, use shuffle instead of
1219  // extract+insert.
1220  if (!VVT) {
1221  if (isa<ExtVectorElementExpr>(IE)) {
1222  llvm::ExtractElementInst *EI = cast<llvm::ExtractElementInst>(Init);
1223 
1224  if (EI->getVectorOperandType()->getNumElements() == ResElts) {
1225  llvm::ConstantInt *C = cast<llvm::ConstantInt>(EI->getIndexOperand());
1226  Value *LHS = nullptr, *RHS = nullptr;
1227  if (CurIdx == 0) {
1228  // insert into undef -> shuffle (src, undef)
1229  // shufflemask must use an i32
1230  Args.push_back(getAsInt32(C, CGF.Int32Ty));
1231  Args.resize(ResElts, llvm::UndefValue::get(CGF.Int32Ty));
1232 
1233  LHS = EI->getVectorOperand();
1234  RHS = V;
1235  VIsUndefShuffle = true;
1236  } else if (VIsUndefShuffle) {
1237  // insert into undefshuffle && size match -> shuffle (v, src)
1238  llvm::ShuffleVectorInst *SVV = cast<llvm::ShuffleVectorInst>(V);
1239  for (unsigned j = 0; j != CurIdx; ++j)
1240  Args.push_back(getMaskElt(SVV, j, 0, CGF.Int32Ty));
1241  Args.push_back(Builder.getInt32(ResElts + C->getZExtValue()));
1242  Args.resize(ResElts, llvm::UndefValue::get(CGF.Int32Ty));
1243 
1244  LHS = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);
1245  RHS = EI->getVectorOperand();
1246  VIsUndefShuffle = false;
1247  }
1248  if (!Args.empty()) {
1249  llvm::Constant *Mask = llvm::ConstantVector::get(Args);
1250  V = Builder.CreateShuffleVector(LHS, RHS, Mask);
1251  ++CurIdx;
1252  continue;
1253  }
1254  }
1255  }
1256  V = Builder.CreateInsertElement(V, Init, Builder.getInt32(CurIdx),
1257  "vecinit");
1258  VIsUndefShuffle = false;
1259  ++CurIdx;
1260  continue;
1261  }
1262 
1263  unsigned InitElts = VVT->getNumElements();
1264 
1265  // If the initializer is an ExtVecEltExpr (a swizzle), and the swizzle's
1266  // input is the same width as the vector being constructed, generate an
1267  // optimized shuffle of the swizzle input into the result.
1268  unsigned Offset = (CurIdx == 0) ? 0 : ResElts;
1269  if (isa<ExtVectorElementExpr>(IE)) {
1270  llvm::ShuffleVectorInst *SVI = cast<llvm::ShuffleVectorInst>(Init);
1271  Value *SVOp = SVI->getOperand(0);
1272  llvm::VectorType *OpTy = cast<llvm::VectorType>(SVOp->getType());
1273 
1274  if (OpTy->getNumElements() == ResElts) {
1275  for (unsigned j = 0; j != CurIdx; ++j) {
1276  // If the current vector initializer is a shuffle with undef, merge
1277  // this shuffle directly into it.
1278  if (VIsUndefShuffle) {
1279  Args.push_back(getMaskElt(cast<llvm::ShuffleVectorInst>(V), j, 0,
1280  CGF.Int32Ty));
1281  } else {
1282  Args.push_back(Builder.getInt32(j));
1283  }
1284  }
1285  for (unsigned j = 0, je = InitElts; j != je; ++j)
1286  Args.push_back(getMaskElt(SVI, j, Offset, CGF.Int32Ty));
1287  Args.resize(ResElts, llvm::UndefValue::get(CGF.Int32Ty));
1288 
1289  if (VIsUndefShuffle)
1290  V = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);
1291 
1292  Init = SVOp;
1293  }
1294  }
1295 
1296  // Extend init to result vector length, and then shuffle its contribution
1297  // to the vector initializer into V.
1298  if (Args.empty()) {
1299  for (unsigned j = 0; j != InitElts; ++j)
1300  Args.push_back(Builder.getInt32(j));
1301  Args.resize(ResElts, llvm::UndefValue::get(CGF.Int32Ty));
1302  llvm::Constant *Mask = llvm::ConstantVector::get(Args);
1303  Init = Builder.CreateShuffleVector(Init, llvm::UndefValue::get(VVT),
1304  Mask, "vext");
1305 
1306  Args.clear();
1307  for (unsigned j = 0; j != CurIdx; ++j)
1308  Args.push_back(Builder.getInt32(j));
1309  for (unsigned j = 0; j != InitElts; ++j)
1310  Args.push_back(Builder.getInt32(j+Offset));
1311  Args.resize(ResElts, llvm::UndefValue::get(CGF.Int32Ty));
1312  }
1313 
1314  // If V is undef, make sure it ends up on the RHS of the shuffle to aid
1315  // merging subsequent shuffles into this one.
1316  if (CurIdx == 0)
1317  std::swap(V, Init);
1318  llvm::Constant *Mask = llvm::ConstantVector::get(Args);
1319  V = Builder.CreateShuffleVector(V, Init, Mask, "vecinit");
1320  VIsUndefShuffle = isa<llvm::UndefValue>(Init);
1321  CurIdx += InitElts;
1322  }
1323 
1324  // FIXME: evaluate codegen vs. shuffling against constant null vector.
1325  // Emit remaining default initializers.
1326  llvm::Type *EltTy = VType->getElementType();
1327 
1328  // Emit remaining default initializers
1329  for (/* Do not initialize i*/; CurIdx < ResElts; ++CurIdx) {
1330  Value *Idx = Builder.getInt32(CurIdx);
1331  llvm::Value *Init = llvm::Constant::getNullValue(EltTy);
1332  V = Builder.CreateInsertElement(V, Init, Idx, "vecinit");
1333  }
1334  return V;
1335 }
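// As an example of the swizzle folding above, a vector initializer such as
// '(float4)(v.xy, 1.0f, 2.0f)' (with 'v' a float4) can be emitted as a single
// shufflevector of 'v' plus two insertelements, instead of extracting each
// swizzle component and reinserting it one at a time.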
1336 
1337 static bool ShouldNullCheckClassCastValue(const CastExpr *CE) {
1338  const Expr *E = CE->getSubExpr();
1339 
1340  if (CE->getCastKind() == CK_UncheckedDerivedToBase)
1341  return false;
1342 
1343  if (isa<CXXThisExpr>(E)) {
1344  // We always assume that 'this' is never null.
1345  return false;
1346  }
1347 
1348  if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(CE)) {
1349  // And that glvalue casts are never null.
1350  if (ICE->getValueKind() != VK_RValue)
1351  return false;
1352  }
1353 
1354  return true;
1355 }
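// In other words, casts from 'this' and glvalue casts skip the null check,
// while e.g. 'static_cast<Derived*>(p)' on an arbitrary pointer 'p' keeps it:
// 'p' may be null, and the base/derived address adjustment must not be
// applied to a null pointer.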
1356 
1357 // VisitCastExpr - Emit code for an explicit or implicit cast. Implicit casts
1358 // have to handle a more broad range of conversions than explicit casts, as they
1359 // handle things like function to ptr-to-function decay etc.
1360 Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
1361  Expr *E = CE->getSubExpr();
1362  QualType DestTy = CE->getType();
1363  CastKind Kind = CE->getCastKind();
1364 
1365  if (!DestTy->isVoidType())
1366  TestAndClearIgnoreResultAssign();
1367 
1368  // Since almost all cast kinds apply to scalars, this switch doesn't have
1369  // a default case, so the compiler will warn on a missing case. The cases
1370  // are in the same order as in the CastKind enum.
1371  switch (Kind) {
1372  case CK_Dependent: llvm_unreachable("dependent cast kind in IR gen!");
1373  case CK_BuiltinFnToFnPtr:
1374  llvm_unreachable("builtin functions are handled elsewhere");
1375 
1376  case CK_LValueBitCast:
1377  case CK_ObjCObjectLValueCast: {
1378  Value *V = EmitLValue(E).getAddress();
1379  V = Builder.CreateBitCast(V,
1380  ConvertType(CGF.getContext().getPointerType(DestTy)));
1381  return EmitLoadOfLValue(CGF.MakeNaturalAlignAddrLValue(V, DestTy),
1382  CE->getExprLoc());
1383  }
1384 
1385  case CK_CPointerToObjCPointerCast:
1386  case CK_BlockPointerToObjCPointerCast:
1387  case CK_AnyPointerToBlockPointerCast:
1388  case CK_BitCast: {
1389  Value *Src = Visit(const_cast<Expr*>(E));
1390  llvm::Type *SrcTy = Src->getType();
1391  llvm::Type *DstTy = ConvertType(DestTy);
1392  if (SrcTy->isPtrOrPtrVectorTy() && DstTy->isPtrOrPtrVectorTy() &&
1393  SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace()) {
1394  llvm_unreachable("wrong cast for pointers in different address spaces"
1395  "(must be an address space cast)!");
1396  }
1397 
1398  if (CGF.SanOpts.has(SanitizerKind::CFIUnrelatedCast)) {
1399  if (auto PT = DestTy->getAs<PointerType>())
1400  CGF.EmitVTablePtrCheckForCast(PT->getPointeeType(), Src,
1401  /*MayBeNull=*/true,
1402  CodeGenFunction::CFITCK_UnrelatedCast,
1403  CE->getLocStart());
1404  }
1405 
1406  return Builder.CreateBitCast(Src, DstTy);
1407  }
1408  case CK_AddressSpaceConversion: {
1409  Value *Src = Visit(const_cast<Expr*>(E));
1410  return Builder.CreateAddrSpaceCast(Src, ConvertType(DestTy));
1411  }
1412  case CK_AtomicToNonAtomic:
1413  case CK_NonAtomicToAtomic:
1414  case CK_NoOp:
1415  case CK_UserDefinedConversion:
1416  return Visit(const_cast<Expr*>(E));
1417 
1418  case CK_BaseToDerived: {
1419  const CXXRecordDecl *DerivedClassDecl = DestTy->getPointeeCXXRecordDecl();
1420  assert(DerivedClassDecl && "BaseToDerived arg isn't a C++ object pointer!");
1421 
1422  llvm::Value *V = Visit(E);
1423 
1424  llvm::Value *Derived =
1425  CGF.GetAddressOfDerivedClass(V, DerivedClassDecl,
1426  CE->path_begin(), CE->path_end(),
1427  ShouldNullCheckClassCastValue(CE));
1428 
1429  // C++11 [expr.static.cast]p11: Behavior is undefined if a downcast is
1430  // performed and the object is not of the derived type.
1431  if (CGF.sanitizePerformTypeCheck())
1432  CGF.EmitTypeCheck(CodeGenFunction::TCK_DowncastPointer, CE->getExprLoc(),
1433  Derived, DestTy->getPointeeType());
1434 
1435  if (CGF.SanOpts.has(SanitizerKind::CFIDerivedCast))
1436  CGF.EmitVTablePtrCheckForCast(DestTy->getPointeeType(), Derived,
1437  /*MayBeNull=*/true,
1438  CodeGenFunction::CFITCK_DerivedCast,
1439  CE->getLocStart());
1440 
1441  return Derived;
1442  }
1443  case CK_UncheckedDerivedToBase:
1444  case CK_DerivedToBase: {
1445  const CXXRecordDecl *DerivedClassDecl =
1446  E->getType()->getPointeeCXXRecordDecl();
1447  assert(DerivedClassDecl && "DerivedToBase arg isn't a C++ object pointer!");
1448 
1449  return CGF.GetAddressOfBaseClass(
1450  Visit(E), DerivedClassDecl, CE->path_begin(), CE->path_end(),
1451  ShouldNullCheckClassCastValue(CE), CE->getExprLoc());
1452  }
1453  case CK_Dynamic: {
1454  Value *V = Visit(const_cast<Expr*>(E));
1455  const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(CE);
1456  return CGF.EmitDynamicCast(V, DCE);
1457  }
1458 
1459  case CK_ArrayToPointerDecay: {
1460  assert(E->getType()->isArrayType() &&
1461  "Array to pointer decay must have array source type!");
1462 
1463  Value *V = EmitLValue(E).getAddress(); // Bitfields can't be arrays.
1464 
1465  // Note that VLA pointers are always decayed, so we don't need to do
1466  // anything here.
1467  if (!E->getType()->isVariableArrayType()) {
1468  assert(isa<llvm::PointerType>(V->getType()) && "Expected pointer");
1469  llvm::Type *NewTy = ConvertType(E->getType());
1470  V = CGF.Builder.CreatePointerCast(
1471  V, NewTy->getPointerTo(V->getType()->getPointerAddressSpace()));
1472 
1473  assert(isa<llvm::ArrayType>(V->getType()->getPointerElementType()) &&
1474  "Expected pointer to array");
1475  V = Builder.CreateStructGEP(NewTy, V, 0, "arraydecay");
1476  }
1477 
1478  // Make sure the array decay ends up being the right type. This matters if
1479  // the array type was of an incomplete type.
1480  return CGF.Builder.CreatePointerCast(V, ConvertType(CE->getType()));
1481  }
1482  case CK_FunctionToPointerDecay:
1483  return EmitLValue(E).getAddress();
1484 
1485  case CK_NullToPointer:
1486  if (MustVisitNullValue(E))
1487  (void) Visit(E);
1488 
1489  return llvm::ConstantPointerNull::get(
1490  cast<llvm::PointerType>(ConvertType(DestTy)));
1491 
1492  case CK_NullToMemberPointer: {
1493  if (MustVisitNullValue(E))
1494  (void) Visit(E);
1495 
1496  const MemberPointerType *MPT = CE->getType()->getAs<MemberPointerType>();
1497  return CGF.CGM.getCXXABI().EmitNullMemberPointer(MPT);
1498  }
1499 
1500  case CK_ReinterpretMemberPointer:
1501  case CK_BaseToDerivedMemberPointer:
1502  case CK_DerivedToBaseMemberPointer: {
1503  Value *Src = Visit(E);
1504 
1505  // Note that the AST doesn't distinguish between checked and
1506  // unchecked member pointer conversions, so we always have to
1507  // implement checked conversions here. This is inefficient when
1508  // actual control flow may be required in order to perform the
1509  // check, which it is for data member pointers (but not member
1510  // function pointers on Itanium and ARM).
1511  return CGF.CGM.getCXXABI().EmitMemberPointerConversion(CGF, CE, Src);
1512  }
1513 
1514  case CK_ARCProduceObject:
1515  return CGF.EmitARCRetainScalarExpr(E);
1516  case CK_ARCConsumeObject:
1517  return CGF.EmitObjCConsumeObject(E->getType(), Visit(E));
1518  case CK_ARCReclaimReturnedObject: {
1519  llvm::Value *value = Visit(E);
1520  value = CGF.EmitARCRetainAutoreleasedReturnValue(value);
1521  return CGF.EmitObjCConsumeObject(E->getType(), value);
1522  }
1523  case CK_ARCExtendBlockObject:
1524  return CGF.EmitARCExtendBlockObject(E);
1525 
1526  case CK_CopyAndAutoreleaseBlockObject:
1527  return CGF.EmitBlockCopyAndAutorelease(Visit(E), E->getType());
1528 
1536  case CK_ToUnion:
1537  llvm_unreachable("scalar cast to non-scalar value");
1538 
1539  case CK_LValueToRValue:
1540  assert(CGF.getContext().hasSameUnqualifiedType(E->getType(), DestTy));
1541  assert(E->isGLValue() && "lvalue-to-rvalue applied to r-value!");
1542  return Visit(const_cast<Expr*>(E));
1543 
1544  case CK_IntegralToPointer: {
1545  Value *Src = Visit(const_cast<Expr*>(E));
1546 
1547  // First, convert to the correct width so that we control the kind of
1548  // extension.
1549  llvm::Type *MiddleTy = CGF.IntPtrTy;
1550  bool InputSigned = E->getType()->isSignedIntegerOrEnumerationType();
1551  llvm::Value* IntResult =
1552  Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
1553 
1554  return Builder.CreateIntToPtr(IntResult, ConvertType(DestTy));
1555  }
1556  case CK_PointerToIntegral:
1557  assert(!DestTy->isBooleanType() && "bool should use PointerToBool");
1558  return Builder.CreatePtrToInt(Visit(E), ConvertType(DestTy));
1559 
1560  case CK_ToVoid: {
1561  CGF.EmitIgnoredExpr(E);
1562  return nullptr;
1563  }
1564  case CK_VectorSplat: {
1565  llvm::Type *DstTy = ConvertType(DestTy);
1566  Value *Elt = Visit(const_cast<Expr*>(E));
1567  Elt = EmitScalarConversion(Elt, E->getType(),
1568  DestTy->getAs<VectorType>()->getElementType());
1569 
1570  // Splat the element across to all elements
1571  unsigned NumElements = cast<llvm::VectorType>(DstTy)->getNumElements();
1572  return Builder.CreateVectorSplat(NumElements, Elt, "splat");
1573  }
1574 
1575  case CK_IntegralCast:
1576  case CK_IntegralToFloating:
1577  case CK_FloatingToIntegral:
1578  case CK_FloatingCast:
1579  return EmitScalarConversion(Visit(E), E->getType(), DestTy);
1580  case CK_IntegralToBoolean:
1581  return EmitIntToBoolConversion(Visit(E));
1582  case CK_PointerToBoolean:
1583  return EmitPointerToBoolConversion(Visit(E));
1584  case CK_FloatingToBoolean:
1585  return EmitFloatToBoolConversion(Visit(E));
1586  case CK_MemberPointerToBoolean: {
1587  llvm::Value *MemPtr = Visit(E);
1588  const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>();
1589  return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, MemPtr, MPT);
1590  }
1591 
1592  case CK_FloatingComplexToReal:
1593  case CK_IntegralComplexToReal:
1594  return CGF.EmitComplexExpr(E, false, true).first;
1595 
1596  case CK_FloatingComplexToBoolean:
1597  case CK_IntegralComplexToBoolean: {
1598  CodeGenFunction::ComplexPairTy V = CGF.EmitComplexExpr(E);
1599 
1600  // TODO: kill this function off, inline appropriate case here
1601  return EmitComplexToScalarConversion(V, E->getType(), DestTy);
1602  }
1603 
1604  case CK_ZeroToOCLEvent: {
1605  assert(DestTy->isEventT() && "CK_ZeroToOCLEvent cast on non-event type");
1606  return llvm::Constant::getNullValue(ConvertType(DestTy));
1607  }
1608 
1609  }
1610 
1611  llvm_unreachable("unknown scalar cast");
1612 }
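For orientation, a hedged C sketch (names illustrative, not part of this file) of source constructs that reach a few of the cast kinds handled by the switch above:

    void casts(void) {
      int  n = 42;
      int *p = (int *)(long)n;  // CK_IntegralCast, then CK_IntegralToPointer
      int  a[4];
      int *q = a;               // CK_ArrayToPointerDecay
      _Bool b = q;              // CK_PointerToBoolean
      typedef float float4 __attribute__((ext_vector_type(4)));
      float4 v = 2.0f;          // typically CK_VectorSplat
      (void)p; (void)b; (void)v;
    }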
1613 
1614 Value *ScalarExprEmitter::VisitStmtExpr(const StmtExpr *E) {
1615  CodeGenFunction::StmtExprEvaluation eval(CGF);
1616  llvm::Value *RetAlloca = CGF.EmitCompoundStmt(*E->getSubStmt(),
1617  !E->getType()->isVoidType());
1618  if (!RetAlloca)
1619  return nullptr;
1620  return CGF.EmitLoadOfScalar(CGF.MakeAddrLValue(RetAlloca, E->getType()),
1621  E->getExprLoc());
1622 }
1623 
1624 //===----------------------------------------------------------------------===//
1625 // Unary Operators
1626 //===----------------------------------------------------------------------===//
1627 
1628 static BinOpInfo createBinOpInfoFromIncDec(const UnaryOperator *E,
1629  llvm::Value *InVal, bool IsInc) {
1630  BinOpInfo BinOp;
1631  BinOp.LHS = InVal;
1632  BinOp.RHS = llvm::ConstantInt::get(InVal->getType(), 1, false);
1633  BinOp.Ty = E->getType();
1634  BinOp.Opcode = IsInc ? BO_Add : BO_Sub;
1635  BinOp.FPContractable = false;
1636  BinOp.E = E;
1637  return BinOp;
1638 }
1639 
1640 llvm::Value *ScalarExprEmitter::EmitIncDecConsiderOverflowBehavior(
1641  const UnaryOperator *E, llvm::Value *InVal, bool IsInc) {
1642  llvm::Value *Amount =
1643  llvm::ConstantInt::get(InVal->getType(), IsInc ? 1 : -1, true);
1644  StringRef Name = IsInc ? "inc" : "dec";
1645  switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
1646  case LangOptions::SOB_Defined:
1647  return Builder.CreateAdd(InVal, Amount, Name);
1648  case LangOptions::SOB_Undefined:
1649  if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
1650  return Builder.CreateNSWAdd(InVal, Amount, Name);
1651  // Fall through.
1652  case LangOptions::SOB_Trapping:
1653  return EmitOverflowCheckedBinOp(createBinOpInfoFromIncDec(E, InVal, IsInc));
1654  }
1655  llvm_unreachable("Unknown SignedOverflowBehaviorTy");
1656 }
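As a rough editorial sketch (assuming the usual option mapping), the three signed-overflow modes affect a simple increment as follows:

    void tick(int *i) {
      ++*i;  // default (SOB_Undefined):  add nsw
             // -fwrapv (SOB_Defined):    plain wrapping add
             // -ftrapv (SOB_Trapping) or -fsanitize=signed-integer-overflow:
             //                           llvm.sadd.with.overflow plus a check
    }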
1657 
1658 llvm::Value *
1659 ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
1660  bool isInc, bool isPre) {
1661 
1662  QualType type = E->getSubExpr()->getType();
1663  llvm::PHINode *atomicPHI = nullptr;
1664  llvm::Value *value;
1665  llvm::Value *input;
1666 
1667  int amount = (isInc ? 1 : -1);
1668 
1669  if (const AtomicType *atomicTy = type->getAs<AtomicType>()) {
1670  type = atomicTy->getValueType();
1671  if (isInc && type->isBooleanType()) {
1672  llvm::Value *True = CGF.EmitToMemory(Builder.getTrue(), type);
1673  if (isPre) {
1674  Builder.Insert(new llvm::StoreInst(True,
1675  LV.getAddress(), LV.isVolatileQualified(),
1676  LV.getAlignment().getQuantity(),
1677  llvm::SequentiallyConsistent));
1678  return Builder.getTrue();
1679  }
1680  // For atomic bool increment, we just store true: the pre-increment case
1681  // returned it above; for post-increment, do an atomic swap with true.
1682  return Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
1683  LV.getAddress(), True, llvm::SequentiallyConsistent);
1684  }
1685  // Special case for atomic increment / decrement on integers, emit
1686  // atomicrmw instructions. We skip this if we want to be doing overflow
1687  // checking, and fall into the slow path with the atomic cmpxchg loop.
1688  if (!type->isBooleanType() && type->isIntegerType() &&
1689  !(type->isUnsignedIntegerType() &&
1690  CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) &&
1691  CGF.getLangOpts().getSignedOverflowBehavior() !=
1692  LangOptions::SOB_Trapping) {
1693  llvm::AtomicRMWInst::BinOp aop = isInc ? llvm::AtomicRMWInst::Add :
1694  llvm::AtomicRMWInst::Sub;
1695  llvm::Instruction::BinaryOps op = isInc ? llvm::Instruction::Add :
1696  llvm::Instruction::Sub;
1697  llvm::Value *amt = CGF.EmitToMemory(
1698  llvm::ConstantInt::get(ConvertType(type), 1, true), type);
1699  llvm::Value *old = Builder.CreateAtomicRMW(aop,
1700  LV.getAddress(), amt, llvm::SequentiallyConsistent);
1701  return isPre ? Builder.CreateBinOp(op, old, amt) : old;
1702  }
1703  value = EmitLoadOfLValue(LV, E->getExprLoc());
1704  input = value;
1705  // For every other atomic operation, we need to emit a load-op-cmpxchg loop
1706  llvm::BasicBlock *startBB = Builder.GetInsertBlock();
1707  llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn);
1708  value = CGF.EmitToMemory(value, type);
1709  Builder.CreateBr(opBB);
1710  Builder.SetInsertPoint(opBB);
1711  atomicPHI = Builder.CreatePHI(value->getType(), 2);
1712  atomicPHI->addIncoming(value, startBB);
1713  value = atomicPHI;
1714  } else {
1715  value = EmitLoadOfLValue(LV, E->getExprLoc());
1716  input = value;
1717  }
1718 
1719  // Special case of integer increment that we have to check first: bool++.
1720  // Due to promotion rules, we get:
1721  // bool++ -> bool = bool + 1
1722  // -> bool = (int)bool + 1
1723  // -> bool = ((int)bool + 1 != 0)
1724  // An interesting aspect of this is that increment is always true.
1725  // Decrement does not have this property.
1726  if (isInc && type->isBooleanType()) {
1727  value = Builder.getTrue();
1728 
1729  // Most common case by far: integer increment.
1730  } else if (type->isIntegerType()) {
1731  // Note that signed integer inc/dec with width less than int can't
1732  // overflow because of promotion rules; we're just eliding a few steps here.
1733  bool CanOverflow = value->getType()->getIntegerBitWidth() >=
1734  CGF.IntTy->getIntegerBitWidth();
1735  if (CanOverflow && type->isSignedIntegerOrEnumerationType()) {
1736  value = EmitIncDecConsiderOverflowBehavior(E, value, isInc);
1737  } else if (CanOverflow && type->isUnsignedIntegerType() &&
1738  CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) {
1739  value =
1740  EmitOverflowCheckedBinOp(createBinOpInfoFromIncDec(E, value, isInc));
1741  } else {
1742  llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount, true);
1743  value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
1744  }
1745 
1746  // Next most common: pointer increment.
1747  } else if (const PointerType *ptr = type->getAs<PointerType>()) {
1748  QualType type = ptr->getPointeeType();
1749 
1750  // VLA types don't have constant size.
1751  if (const VariableArrayType *vla
1752  = CGF.getContext().getAsVariableArrayType(type)) {
1753  llvm::Value *numElts = CGF.getVLASize(vla).first;
1754  if (!isInc) numElts = Builder.CreateNSWNeg(numElts, "vla.negsize");
1755  if (CGF.getLangOpts().isSignedOverflowDefined())
1756  value = Builder.CreateGEP(value, numElts, "vla.inc");
1757  else
1758  value = Builder.CreateInBoundsGEP(value, numElts, "vla.inc");
1759 
1760  // Arithmetic on function pointers (!) is just +-1.
1761  } else if (type->isFunctionType()) {
1762  llvm::Value *amt = Builder.getInt32(amount);
1763 
1764  value = CGF.EmitCastToVoidPtr(value);
1765  if (CGF.getLangOpts().isSignedOverflowDefined())
1766  value = Builder.CreateGEP(value, amt, "incdec.funcptr");
1767  else
1768  value = Builder.CreateInBoundsGEP(value, amt, "incdec.funcptr");
1769  value = Builder.CreateBitCast(value, input->getType());
1770 
1771  // For everything else, we can just do a simple increment.
1772  } else {
1773  llvm::Value *amt = Builder.getInt32(amount);
1774  if (CGF.getLangOpts().isSignedOverflowDefined())
1775  value = Builder.CreateGEP(value, amt, "incdec.ptr");
1776  else
1777  value = Builder.CreateInBoundsGEP(value, amt, "incdec.ptr");
1778  }
1779 
1780  // Vector increment/decrement.
1781  } else if (type->isVectorType()) {
1782  if (type->hasIntegerRepresentation()) {
1783  llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount);
1784 
1785  value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
1786  } else {
1787  value = Builder.CreateFAdd(
1788  value,
1789  llvm::ConstantFP::get(value->getType(), amount),
1790  isInc ? "inc" : "dec");
1791  }
1792 
1793  // Floating point.
1794  } else if (type->isRealFloatingType()) {
1795  // Add the inc/dec to the real part.
1796  llvm::Value *amt;
1797 
1798  if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
1799  // Another special case: half FP increment should be done via float
1800  if (!CGF.getContext().getLangOpts().HalfArgsAndReturns) {
1801  value = Builder.CreateCall(
1802  CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16,
1803  CGF.CGM.FloatTy),
1804  input, "incdec.conv");
1805  } else {
1806  value = Builder.CreateFPExt(input, CGF.CGM.FloatTy, "incdec.conv");
1807  }
1808  }
1809 
1810  if (value->getType()->isFloatTy())
1811  amt = llvm::ConstantFP::get(VMContext,
1812  llvm::APFloat(static_cast<float>(amount)));
1813  else if (value->getType()->isDoubleTy())
1814  amt = llvm::ConstantFP::get(VMContext,
1815  llvm::APFloat(static_cast<double>(amount)));
1816  else {
1817  // Remaining types are either Half or LongDouble. Convert from float.
1818  llvm::APFloat F(static_cast<float>(amount));
1819  bool ignored;
1820  // Don't use getFloatTypeSemantics because Half isn't
1821  // necessarily represented using the "half" LLVM type.
1822  F.convert(value->getType()->isHalfTy()
1823  ? CGF.getTarget().getHalfFormat()
1824  : CGF.getTarget().getLongDoubleFormat(),
1825  llvm::APFloat::rmTowardZero, &ignored);
1826  amt = llvm::ConstantFP::get(VMContext, F);
1827  }
1828  value = Builder.CreateFAdd(value, amt, isInc ? "inc" : "dec");
1829 
1830  if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
1831  if (!CGF.getContext().getLangOpts().HalfArgsAndReturns) {
1832  value = Builder.CreateCall(
1833  CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16,
1834  CGF.CGM.FloatTy),
1835  value, "incdec.conv");
1836  } else {
1837  value = Builder.CreateFPTrunc(value, input->getType(), "incdec.conv");
1838  }
1839  }
1840 
1841  // Objective-C pointer types.
1842  } else {
1843  const ObjCObjectPointerType *OPT = type->castAs<ObjCObjectPointerType>();
1844  value = CGF.EmitCastToVoidPtr(value);
1845 
1846  CharUnits size = CGF.getContext().getTypeSizeInChars(OPT->getObjectType());
1847  if (!isInc) size = -size;
1848  llvm::Value *sizeValue =
1849  llvm::ConstantInt::get(CGF.SizeTy, size.getQuantity());
1850 
1851  if (CGF.getLangOpts().isSignedOverflowDefined())
1852  value = Builder.CreateGEP(value, sizeValue, "incdec.objptr");
1853  else
1854  value = Builder.CreateInBoundsGEP(value, sizeValue, "incdec.objptr");
1855  value = Builder.CreateBitCast(value, input->getType());
1856  }
1857 
1858  if (atomicPHI) {
1859  llvm::BasicBlock *opBB = Builder.GetInsertBlock();
1860  llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
1861  auto Pair = CGF.EmitAtomicCompareExchange(
1862  LV, RValue::get(atomicPHI), RValue::get(value), E->getExprLoc());
1863  llvm::Value *old = CGF.EmitToMemory(Pair.first.getScalarVal(), type);
1864  llvm::Value *success = Pair.second;
1865  atomicPHI->addIncoming(old, opBB);
1866  Builder.CreateCondBr(success, contBB, opBB);
1867  Builder.SetInsertPoint(contBB);
1868  return isPre ? value : input;
1869  }
1870 
1871  // Store the updated result through the lvalue.
1872  if (LV.isBitField())
1873  CGF.EmitStoreThroughBitfieldLValue(RValue::get(value), LV, &value);
1874  else
1875  CGF.EmitStoreThroughLValue(RValue::get(value), LV);
1876 
1877  // If this is a postinc, return the value read from memory, otherwise use the
1878  // updated value.
1879  return isPre ? value : input;
1880 }
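A small C11 sketch (names illustrative) of two of the special cases handled above: the bool++ promotion rule and the atomicrmw fast path for plain integer increment:

    #include <stdbool.h>
    void bump(_Atomic int *n, bool *b) {
      (*b)++;   // after promotion, *b becomes true regardless of its old value
      (*n)++;   // lowered to "atomicrmw add" when no overflow check is requested
    }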
1881 
1882 
1883 
1884 Value *ScalarExprEmitter::VisitUnaryMinus(const UnaryOperator *E) {
1885  TestAndClearIgnoreResultAssign();
1886  // Emit unary minus with EmitSub so we handle overflow cases etc.
1887  BinOpInfo BinOp;
1888  BinOp.RHS = Visit(E->getSubExpr());
1889 
1890  if (BinOp.RHS->getType()->isFPOrFPVectorTy())
1891  BinOp.LHS = llvm::ConstantFP::getZeroValueForNegation(BinOp.RHS->getType());
1892  else
1893  BinOp.LHS = llvm::Constant::getNullValue(BinOp.RHS->getType());
1894  BinOp.Ty = E->getType();
1895  BinOp.Opcode = BO_Sub;
1896  BinOp.FPContractable = false;
1897  BinOp.E = E;
1898  return EmitSub(BinOp);
1899 }
1900 
1901 Value *ScalarExprEmitter::VisitUnaryNot(const UnaryOperator *E) {
1902  TestAndClearIgnoreResultAssign();
1903  Value *Op = Visit(E->getSubExpr());
1904  return Builder.CreateNot(Op, "neg");
1905 }
1906 
1907 Value *ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) {
1908  // Perform vector logical not on comparison with zero vector.
1909  if (E->getType()->isExtVectorType()) {
1910  Value *Oper = Visit(E->getSubExpr());
1911  Value *Zero = llvm::Constant::getNullValue(Oper->getType());
1912  Value *Result;
1913  if (Oper->getType()->isFPOrFPVectorTy())
1914  Result = Builder.CreateFCmp(llvm::CmpInst::FCMP_OEQ, Oper, Zero, "cmp");
1915  else
1916  Result = Builder.CreateICmp(llvm::CmpInst::ICMP_EQ, Oper, Zero, "cmp");
1917  return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");
1918  }
1919 
1920  // Compare operand to zero.
1921  Value *BoolVal = CGF.EvaluateExprAsBool(E->getSubExpr());
1922 
1923  // Invert value.
1924  // TODO: Could dynamically modify easy computations here. For example, if
1925  // the operand is an icmp ne, turn into icmp eq.
1926  BoolVal = Builder.CreateNot(BoolVal, "lnot");
1927 
1928  // ZExt result to the expr type.
1929  return Builder.CreateZExt(BoolVal, ConvertType(E->getType()), "lnot.ext");
1930 }
1931 
1932 Value *ScalarExprEmitter::VisitOffsetOfExpr(OffsetOfExpr *E) {
1933  // Try folding the offsetof to a constant.
1934  llvm::APSInt Value;
1935  if (E->EvaluateAsInt(Value, CGF.getContext()))
1936  return Builder.getInt(Value);
1937 
1938  // Loop over the components of the offsetof to compute the value.
1939  unsigned n = E->getNumComponents();
1940  llvm::Type* ResultType = ConvertType(E->getType());
1941  llvm::Value* Result = llvm::Constant::getNullValue(ResultType);
1942  QualType CurrentType = E->getTypeSourceInfo()->getType();
1943  for (unsigned i = 0; i != n; ++i) {
1944  OffsetOfExpr::OffsetOfNode ON = E->getComponent(i);
1945  llvm::Value *Offset = nullptr;
1946  switch (ON.getKind()) {
1947  case OffsetOfExpr::OffsetOfNode::Array: {
1948  // Compute the index
1949  Expr *IdxExpr = E->getIndexExpr(ON.getArrayExprIndex());
1950  llvm::Value* Idx = CGF.EmitScalarExpr(IdxExpr);
1951  bool IdxSigned = IdxExpr->getType()->isSignedIntegerOrEnumerationType();
1952  Idx = Builder.CreateIntCast(Idx, ResultType, IdxSigned, "conv");
1953 
1954  // Save the element type
1955  CurrentType =
1956  CGF.getContext().getAsArrayType(CurrentType)->getElementType();
1957 
1958  // Compute the element size
1959  llvm::Value* ElemSize = llvm::ConstantInt::get(ResultType,
1960  CGF.getContext().getTypeSizeInChars(CurrentType).getQuantity());
1961 
1962  // Multiply out to compute the result
1963  Offset = Builder.CreateMul(Idx, ElemSize);
1964  break;
1965  }
1966 
1967  case OffsetOfExpr::OffsetOfNode::Field: {
1968  FieldDecl *MemberDecl = ON.getField();
1969  RecordDecl *RD = CurrentType->getAs<RecordType>()->getDecl();
1970  const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD);
1971 
1972  // Compute the index of the field in its parent.
1973  unsigned i = 0;
1974  // FIXME: It would be nice if we didn't have to loop here!
1975  for (RecordDecl::field_iterator Field = RD->field_begin(),
1976  FieldEnd = RD->field_end();
1977  Field != FieldEnd; ++Field, ++i) {
1978  if (*Field == MemberDecl)
1979  break;
1980  }
1981  assert(i < RL.getFieldCount() && "offsetof field in wrong type");
1982 
1983  // Compute the offset to the field
1984  int64_t OffsetInt = RL.getFieldOffset(i) /
1985  CGF.getContext().getCharWidth();
1986  Offset = llvm::ConstantInt::get(ResultType, OffsetInt);
1987 
1988  // Save the element type.
1989  CurrentType = MemberDecl->getType();
1990  break;
1991  }
1992 
1993  case OffsetOfExpr::OffsetOfNode::Identifier:
1994  llvm_unreachable("dependent __builtin_offsetof");
1995 
1996  case OffsetOfExpr::OffsetOfNode::Base: {
1997  if (ON.getBase()->isVirtual()) {
1998  CGF.ErrorUnsupported(E, "virtual base in offsetof");
1999  continue;
2000  }
2001 
2002  RecordDecl *RD = CurrentType->getAs<RecordType>()->getDecl();
2003  const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD);
2004 
2005  // Save the element type.
2006  CurrentType = ON.getBase()->getType();
2007 
2008  // Compute the offset to the base.
2009  const RecordType *BaseRT = CurrentType->getAs<RecordType>();
2010  CXXRecordDecl *BaseRD = cast<CXXRecordDecl>(BaseRT->getDecl());
2011  CharUnits OffsetInt = RL.getBaseClassOffset(BaseRD);
2012  Offset = llvm::ConstantInt::get(ResultType, OffsetInt.getQuantity());
2013  break;
2014  }
2015  }
2016  Result = Builder.CreateAdd(Result, Offset);
2017  }
2018  return Result;
2019 }
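For illustration (a sketch, not from the source), a __builtin_offsetof whose array designator is not a constant cannot be folded, so the component loop above computes it at run time:

    #include <stddef.h>
    struct S { int pad; double arr[8]; };
    size_t f(int i) {
      return __builtin_offsetof(struct S, arr[i]);  // offset of arr plus i * sizeof(double)
    }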
2020 
2021 /// VisitUnaryExprOrTypeTraitExpr - Return the size or alignment of the type of
2022 /// the argument of the sizeof expression as an integer.
2023 Value *
2024 ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr(
2025  const UnaryExprOrTypeTraitExpr *E) {
2026  QualType TypeToSize = E->getTypeOfArgument();
2027  if (E->getKind() == UETT_SizeOf) {
2028  if (const VariableArrayType *VAT =
2029  CGF.getContext().getAsVariableArrayType(TypeToSize)) {
2030  if (E->isArgumentType()) {
2031  // sizeof(type) - make sure to emit the VLA size.
2032  CGF.EmitVariablyModifiedType(TypeToSize);
2033  } else {
2034  // C99 6.5.3.4p2: If the argument is an expression of type
2035  // VLA, it is evaluated.
2036  CGF.EmitIgnoredExpr(E->getArgumentExpr());
2037  }
2038 
2039  QualType eltType;
2040  llvm::Value *numElts;
2041  std::tie(numElts, eltType) = CGF.getVLASize(VAT);
2042 
2043  llvm::Value *size = numElts;
2044 
2045  // Scale the number of non-VLA elements by the non-VLA element size.
2046  CharUnits eltSize = CGF.getContext().getTypeSizeInChars(eltType);
2047  if (!eltSize.isOne())
2048  size = CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize), numElts);
2049 
2050  return size;
2051  }
2052  } else if (E->getKind() == UETT_OpenMPRequiredSimdAlign) {
2053  auto Alignment =
2054  CGF.getContext()
2055  .toCharUnitsFromBits(CGF.getContext().getOpenMPDefaultSimdAlign(
2056  E->getTypeOfArgument()->getCanonicalTypeInternal()))
2057  .getQuantity();
2058  return llvm::ConstantInt::get(CGF.SizeTy, Alignment);
2059  }
2060 
2061  // If this isn't sizeof(vla), the result must be constant; use the constant
2062  // folding logic so we don't have to duplicate it here.
2063  return Builder.getInt(E->EvaluateKnownConstInt(CGF.getContext()));
2064 }
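A hedged C99 example of the VLA path: sizeof on a variably modified type is not constant, so the runtime element count is scaled by the element size as above:

    #include <stddef.h>
    size_t g(int n) {
      int vla[n][4];
      return sizeof(vla);   // computed as n * (4 * sizeof(int)) at run time
    }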
2065 
2066 Value *ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *E) {
2067  Expr *Op = E->getSubExpr();
2068  if (Op->getType()->isAnyComplexType()) {
2069  // If it's an l-value, load through the appropriate subobject l-value.
2070  // Note that we have to ask E because Op might be an l-value that
2071  // this won't work for, e.g. an Obj-C property.
2072  if (E->isGLValue())
2073  return CGF.EmitLoadOfLValue(CGF.EmitLValue(E),
2074  E->getExprLoc()).getScalarVal();
2075 
2076  // Otherwise, calculate and project.
2077  return CGF.EmitComplexExpr(Op, false, true).first;
2078  }
2079 
2080  return Visit(Op);
2081 }
2082 
2083 Value *ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *E) {
2084  Expr *Op = E->getSubExpr();
2085  if (Op->getType()->isAnyComplexType()) {
2086  // If it's an l-value, load through the appropriate subobject l-value.
2087  // Note that we have to ask E because Op might be an l-value that
2088  // this won't work for, e.g. an Obj-C property.
2089  if (Op->isGLValue())
2090  return CGF.EmitLoadOfLValue(CGF.EmitLValue(E),
2091  E->getExprLoc()).getScalarVal();
2092 
2093  // Otherwise, calculate and project.
2094  return CGF.EmitComplexExpr(Op, true, false).second;
2095  }
2096 
2097  // __imag on a scalar returns zero. Emit the subexpr to ensure side
2098  // effects are evaluated, but not the actual value.
2099  if (Op->isGLValue())
2100  CGF.EmitLValue(Op);
2101  else
2102  CGF.EmitScalarExpr(Op, true);
2103  return llvm::Constant::getNullValue(ConvertType(E->getType()));
2104 }
2105 
2106 //===----------------------------------------------------------------------===//
2107 // Binary Operators
2108 //===----------------------------------------------------------------------===//
2109 
2110 BinOpInfo ScalarExprEmitter::EmitBinOps(const BinaryOperator *E) {
2111  TestAndClearIgnoreResultAssign();
2112  BinOpInfo Result;
2113  Result.LHS = Visit(E->getLHS());
2114  Result.RHS = Visit(E->getRHS());
2115  Result.Ty = E->getType();
2116  Result.Opcode = E->getOpcode();
2117  Result.FPContractable = E->isFPContractable();
2118  Result.E = E;
2119  return Result;
2120 }
2121 
2122 LValue ScalarExprEmitter::EmitCompoundAssignLValue(
2123  const CompoundAssignOperator *E,
2124  Value *(ScalarExprEmitter::*Func)(const BinOpInfo &),
2125  Value *&Result) {
2126  QualType LHSTy = E->getLHS()->getType();
2127  BinOpInfo OpInfo;
2128 
2129  if (E->getComputationResultType()->isAnyComplexType())
2130  return CGF.EmitScalarCompoundAssignWithComplex(E, Result);
2131 
2132  // Emit the RHS first. __block variables need to have the rhs evaluated
2133  // first, plus this should improve codegen a little.
2134  OpInfo.RHS = Visit(E->getRHS());
2135  OpInfo.Ty = E->getComputationResultType();
2136  OpInfo.Opcode = E->getOpcode();
2137  OpInfo.FPContractable = false;
2138  OpInfo.E = E;
2139  // Load/convert the LHS.
2140  LValue LHSLV = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
2141 
2142  llvm::PHINode *atomicPHI = nullptr;
2143  if (const AtomicType *atomicTy = LHSTy->getAs<AtomicType>()) {
2144  QualType type = atomicTy->getValueType();
2145  if (!type->isBooleanType() && type->isIntegerType() &&
2146  !(type->isUnsignedIntegerType() &&
2147  CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) &&
2148  CGF.getLangOpts().getSignedOverflowBehavior() !=
2149  LangOptions::SOB_Trapping) {
2150  llvm::AtomicRMWInst::BinOp aop = llvm::AtomicRMWInst::BAD_BINOP;
2151  switch (OpInfo.Opcode) {
2152  // We don't have atomicrmw operations for *, %, /, <<, >>
2153  case BO_MulAssign: case BO_DivAssign:
2154  case BO_RemAssign:
2155  case BO_ShlAssign:
2156  case BO_ShrAssign:
2157  break;
2158  case BO_AddAssign:
2159  aop = llvm::AtomicRMWInst::Add;
2160  break;
2161  case BO_SubAssign:
2162  aop = llvm::AtomicRMWInst::Sub;
2163  break;
2164  case BO_AndAssign:
2165  aop = llvm::AtomicRMWInst::And;
2166  break;
2167  case BO_XorAssign:
2168  aop = llvm::AtomicRMWInst::Xor;
2169  break;
2170  case BO_OrAssign:
2171  aop = llvm::AtomicRMWInst::Or;
2172  break;
2173  default:
2174  llvm_unreachable("Invalid compound assignment type");
2175  }
2176  if (aop != llvm::AtomicRMWInst::BAD_BINOP) {
2177  llvm::Value *amt = CGF.EmitToMemory(EmitScalarConversion(OpInfo.RHS,
2178  E->getRHS()->getType(), LHSTy), LHSTy);
2179  Builder.CreateAtomicRMW(aop, LHSLV.getAddress(), amt,
2180  llvm::SequentiallyConsistent);
2181  return LHSLV;
2182  }
2183  }
2184  // FIXME: For floating point types, we should be saving and restoring the
2185  // floating point environment in the loop.
2186  llvm::BasicBlock *startBB = Builder.GetInsertBlock();
2187  llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn);
2188  OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc());
2189  OpInfo.LHS = CGF.EmitToMemory(OpInfo.LHS, type);
2190  Builder.CreateBr(opBB);
2191  Builder.SetInsertPoint(opBB);
2192  atomicPHI = Builder.CreatePHI(OpInfo.LHS->getType(), 2);
2193  atomicPHI->addIncoming(OpInfo.LHS, startBB);
2194  OpInfo.LHS = atomicPHI;
2195  }
2196  else
2197  OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc());
2198 
2199  OpInfo.LHS = EmitScalarConversion(OpInfo.LHS, LHSTy,
2200  E->getComputationLHSType());
2201 
2202  // Expand the binary operator.
2203  Result = (this->*Func)(OpInfo);
2204 
2205  // Convert the result back to the LHS type.
2206  Result = EmitScalarConversion(Result, E->getComputationResultType(), LHSTy);
2207 
2208  if (atomicPHI) {
2209  llvm::BasicBlock *opBB = Builder.GetInsertBlock();
2210  llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
2211  auto Pair = CGF.EmitAtomicCompareExchange(
2212  LHSLV, RValue::get(atomicPHI), RValue::get(Result), E->getExprLoc());
2213  llvm::Value *old = CGF.EmitToMemory(Pair.first.getScalarVal(), LHSTy);
2214  llvm::Value *success = Pair.second;
2215  atomicPHI->addIncoming(old, opBB);
2216  Builder.CreateCondBr(success, contBB, opBB);
2217  Builder.SetInsertPoint(contBB);
2218  return LHSLV;
2219  }
2220 
2221  // Store the result value into the LHS lvalue. Bit-fields are handled
2222  // specially because the result is altered by the store, i.e., [C99 6.5.16p1]
2223  // 'An assignment expression has the value of the left operand after the
2224  // assignment...'.
2225  if (LHSLV.isBitField())
2226  CGF.EmitStoreThroughBitfieldLValue(RValue::get(Result), LHSLV, &Result);
2227  else
2228  CGF.EmitStoreThroughLValue(RValue::get(Result), LHSLV);
2229 
2230  return LHSLV;
2231 }
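An illustrative C11 sketch of which compound assignments on an _Atomic integer take the single-instruction atomicrmw path and which fall back to the compare-exchange loop emitted above:

    void ops(_Atomic int *x) {
      *x += 2;   // atomicrmw add
      *x |= 1;   // atomicrmw or
      *x *= 3;   // no atomicrmw form: load / multiply / cmpxchg retry loop
    }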
2232 
2233 Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E,
2234  Value *(ScalarExprEmitter::*Func)(const BinOpInfo &)) {
2235  bool Ignore = TestAndClearIgnoreResultAssign();
2236  Value *RHS;
2237  LValue LHS = EmitCompoundAssignLValue(E, Func, RHS);
2238 
2239  // If the result is clearly ignored, return now.
2240  if (Ignore)
2241  return nullptr;
2242 
2243  // The result of an assignment in C is the assigned r-value.
2244  if (!CGF.getLangOpts().CPlusPlus)
2245  return RHS;
2246 
2247  // If the lvalue is non-volatile, return the computed value of the assignment.
2248  if (!LHS.isVolatileQualified())
2249  return RHS;
2250 
2251  // Otherwise, reload the value.
2252  return EmitLoadOfLValue(LHS, E->getExprLoc());
2253 }
2254 
2255 void ScalarExprEmitter::EmitUndefinedBehaviorIntegerDivAndRemCheck(
2256  const BinOpInfo &Ops, llvm::Value *Zero, bool isDiv) {
2257  SmallVector<std::pair<llvm::Value *, SanitizerMask>, 2> Checks;
2258 
2259  if (CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero)) {
2260  Checks.push_back(std::make_pair(Builder.CreateICmpNE(Ops.RHS, Zero),
2261  SanitizerKind::IntegerDivideByZero));
2262  }
2263 
2264  if (CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow) &&
2265  Ops.Ty->hasSignedIntegerRepresentation()) {
2266  llvm::IntegerType *Ty = cast<llvm::IntegerType>(Zero->getType());
2267 
2268  llvm::Value *IntMin =
2269  Builder.getInt(llvm::APInt::getSignedMinValue(Ty->getBitWidth()));
2270  llvm::Value *NegOne = llvm::ConstantInt::get(Ty, -1ULL);
2271 
2272  llvm::Value *LHSCmp = Builder.CreateICmpNE(Ops.LHS, IntMin);
2273  llvm::Value *RHSCmp = Builder.CreateICmpNE(Ops.RHS, NegOne);
2274  llvm::Value *NotOverflow = Builder.CreateOr(LHSCmp, RHSCmp, "or");
2275  Checks.push_back(
2276  std::make_pair(NotOverflow, SanitizerKind::SignedIntegerOverflow));
2277  }
2278 
2279  if (Checks.size() > 0)
2280  EmitBinOpCheck(Checks, Ops);
2281 }
2282 
2283 Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
2284  {
2285  CodeGenFunction::SanitizerScope SanScope(&CGF);
2286  if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) ||
2287  CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) &&
2288  Ops.Ty->isIntegerType()) {
2289  llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
2290  EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, true);
2291  } else if (CGF.SanOpts.has(SanitizerKind::FloatDivideByZero) &&
2292  Ops.Ty->isRealFloatingType()) {
2293  llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
2294  llvm::Value *NonZero = Builder.CreateFCmpUNE(Ops.RHS, Zero);
2295  EmitBinOpCheck(std::make_pair(NonZero, SanitizerKind::FloatDivideByZero),
2296  Ops);
2297  }
2298  }
2299 
2300  if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
2301  llvm::Value *Val = Builder.CreateFDiv(Ops.LHS, Ops.RHS, "div");
2302  if (CGF.getLangOpts().OpenCL) {
2303  // OpenCL 1.1 7.4: minimum accuracy of single precision / is 2.5ulp
2304  llvm::Type *ValTy = Val->getType();
2305  if (ValTy->isFloatTy() ||
2306  (isa<llvm::VectorType>(ValTy) &&
2307  cast<llvm::VectorType>(ValTy)->getElementType()->isFloatTy()))
2308  CGF.SetFPAccuracy(Val, 2.5);
2309  }
2310  return Val;
2311  }
2312  else if (Ops.Ty->hasUnsignedIntegerRepresentation())
2313  return Builder.CreateUDiv(Ops.LHS, Ops.RHS, "div");
2314  else
2315  return Builder.CreateSDiv(Ops.LHS, Ops.RHS, "div");
2316 }
2317 
2318 Value *ScalarExprEmitter::EmitRem(const BinOpInfo &Ops) {
2319  // Rem in C can't be a floating point type: C99 6.5.5p2.
2320  if (CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero)) {
2321  CodeGenFunction::SanitizerScope SanScope(&CGF);
2322  llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
2323 
2324  if (Ops.Ty->isIntegerType())
2325  EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, false);
2326  }
2327 
2328  if (Ops.Ty->hasUnsignedIntegerRepresentation())
2329  return Builder.CreateURem(Ops.LHS, Ops.RHS, "rem");
2330  else
2331  return Builder.CreateSRem(Ops.LHS, Ops.RHS, "rem");
2332 }
2333 
2334 Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
2335  unsigned IID;
2336  unsigned OpID = 0;
2337 
2338  bool isSigned = Ops.Ty->isSignedIntegerOrEnumerationType();
2339  switch (Ops.Opcode) {
2340  case BO_Add:
2341  case BO_AddAssign:
2342  OpID = 1;
2343  IID = isSigned ? llvm::Intrinsic::sadd_with_overflow :
2344  llvm::Intrinsic::uadd_with_overflow;
2345  break;
2346  case BO_Sub:
2347  case BO_SubAssign:
2348  OpID = 2;
2349  IID = isSigned ? llvm::Intrinsic::ssub_with_overflow :
2350  llvm::Intrinsic::usub_with_overflow;
2351  break;
2352  case BO_Mul:
2353  case BO_MulAssign:
2354  OpID = 3;
2355  IID = isSigned ? llvm::Intrinsic::smul_with_overflow :
2356  llvm::Intrinsic::umul_with_overflow;
2357  break;
2358  default:
2359  llvm_unreachable("Unsupported operation for overflow detection");
2360  }
2361  OpID <<= 1;
2362  if (isSigned)
2363  OpID |= 1;
2364 
2365  llvm::Type *opTy = CGF.CGM.getTypes().ConvertType(Ops.Ty);
2366 
2367  llvm::Function *intrinsic = CGF.CGM.getIntrinsic(IID, opTy);
2368 
2369  Value *resultAndOverflow = Builder.CreateCall(intrinsic, {Ops.LHS, Ops.RHS});
2370  Value *result = Builder.CreateExtractValue(resultAndOverflow, 0);
2371  Value *overflow = Builder.CreateExtractValue(resultAndOverflow, 1);
2372 
2373  // Handle overflow with llvm.trap if no custom handler has been specified.
2374  const std::string *handlerName =
2375  &CGF.getLangOpts().OverflowHandler;
2376  if (handlerName->empty()) {
2377  // If the signed-integer-overflow sanitizer is enabled, emit a call to its
2378  // runtime. Otherwise, this is a -ftrapv check, so just emit a trap.
2379  if (!isSigned || CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) {
2380  CodeGenFunction::SanitizerScope SanScope(&CGF);
2381  llvm::Value *NotOverflow = Builder.CreateNot(overflow);
2382  SanitizerMask Kind = isSigned ? SanitizerKind::SignedIntegerOverflow
2383  : SanitizerKind::UnsignedIntegerOverflow;
2384  EmitBinOpCheck(std::make_pair(NotOverflow, Kind), Ops);
2385  } else
2386  CGF.EmitTrapCheck(Builder.CreateNot(overflow));
2387  return result;
2388  }
2389 
2390  // Branch in case of overflow.
2391  llvm::BasicBlock *initialBB = Builder.GetInsertBlock();
2392  llvm::Function::iterator insertPt = initialBB;
2393  llvm::BasicBlock *continueBB = CGF.createBasicBlock("nooverflow", CGF.CurFn,
2394  std::next(insertPt));
2395  llvm::BasicBlock *overflowBB = CGF.createBasicBlock("overflow", CGF.CurFn);
2396 
2397  Builder.CreateCondBr(overflow, overflowBB, continueBB);
2398 
2399  // If an overflow handler is set, then we want to call it and then use its
2400  // result, if it returns.
2401  Builder.SetInsertPoint(overflowBB);
2402 
2403  // Get the overflow handler.
2404  llvm::Type *Int8Ty = CGF.Int8Ty;
2405  llvm::Type *argTypes[] = { CGF.Int64Ty, CGF.Int64Ty, Int8Ty, Int8Ty };
2406  llvm::FunctionType *handlerTy =
2407  llvm::FunctionType::get(CGF.Int64Ty, argTypes, true);
2408  llvm::Value *handler = CGF.CGM.CreateRuntimeFunction(handlerTy, *handlerName);
2409 
2410  // Sign extend the args to 64-bit, so that we can use the same handler for
2411  // all types of overflow.
2412  llvm::Value *lhs = Builder.CreateSExt(Ops.LHS, CGF.Int64Ty);
2413  llvm::Value *rhs = Builder.CreateSExt(Ops.RHS, CGF.Int64Ty);
2414 
2415  // Call the handler with the two arguments, the operation, and the size of
2416  // the result.
2417  llvm::Value *handlerArgs[] = {
2418  lhs,
2419  rhs,
2420  Builder.getInt8(OpID),
2421  Builder.getInt8(cast<llvm::IntegerType>(opTy)->getBitWidth())
2422  };
2423  llvm::Value *handlerResult =
2424  CGF.EmitNounwindRuntimeCall(handler, handlerArgs);
2425 
2426  // Truncate the result back to the desired size.
2427  handlerResult = Builder.CreateTrunc(handlerResult, opTy);
2428  Builder.CreateBr(continueBB);
2429 
2430  Builder.SetInsertPoint(continueBB);
2431  llvm::PHINode *phi = Builder.CreatePHI(opTy, 2);
2432  phi->addIncoming(result, initialBB);
2433  phi->addIncoming(handlerResult, overflowBB);
2434 
2435  return phi;
2436 }
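A hedged sketch of the C prototype an -ftrapv-handler function would roughly need to match the call constructed above (the handler name is user-chosen and purely illustrative); the operands are sign-extended to 64 bits and the result is truncated back to the operation's width:

    long long my_overflow_handler(long long lhs, long long rhs,
                                  char opcode, char width, ...);  // name hypothetical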
2437 
2438 /// Emit pointer + index arithmetic.
2439 static Value *emitPointerArithmetic(CodeGenFunction &CGF,
2440  const BinOpInfo &op,
2441  bool isSubtraction) {
2442  // Must have binary (not unary) expr here. Unary pointer
2443  // increment/decrement doesn't use this path.
2444  const BinaryOperator *expr = cast<BinaryOperator>(op.E);
2445 
2446  Value *pointer = op.LHS;
2447  Expr *pointerOperand = expr->getLHS();
2448  Value *index = op.RHS;
2449  Expr *indexOperand = expr->getRHS();
2450 
2451  // In a subtraction, the LHS is always the pointer.
2452  if (!isSubtraction && !pointer->getType()->isPointerTy()) {
2453  std::swap(pointer, index);
2454  std::swap(pointerOperand, indexOperand);
2455  }
2456 
2457  unsigned width = cast<llvm::IntegerType>(index->getType())->getBitWidth();
2458  if (width != CGF.PointerWidthInBits) {
2459  // Zero-extend or sign-extend the pointer value according to
2460  // whether the index is signed or not.
2461  bool isSigned = indexOperand->getType()->isSignedIntegerOrEnumerationType();
2462  index = CGF.Builder.CreateIntCast(index, CGF.PtrDiffTy, isSigned,
2463  "idx.ext");
2464  }
2465 
2466  // If this is subtraction, negate the index.
2467  if (isSubtraction)
2468  index = CGF.Builder.CreateNeg(index, "idx.neg");
2469 
2470  if (CGF.SanOpts.has(SanitizerKind::ArrayBounds))
2471  CGF.EmitBoundsCheck(op.E, pointerOperand, index, indexOperand->getType(),
2472  /*Accessed*/ false);
2473 
2474  const PointerType *pointerType
2475  = pointerOperand->getType()->getAs<PointerType>();
2476  if (!pointerType) {
2477  QualType objectType = pointerOperand->getType()
2478  ->castAs<ObjCObjectPointerType>()
2479  ->getPointeeType();
2480  llvm::Value *objectSize
2481  = CGF.CGM.getSize(CGF.getContext().getTypeSizeInChars(objectType));
2482 
2483  index = CGF.Builder.CreateMul(index, objectSize);
2484 
2485  Value *result = CGF.Builder.CreateBitCast(pointer, CGF.VoidPtrTy);
2486  result = CGF.Builder.CreateGEP(result, index, "add.ptr");
2487  return CGF.Builder.CreateBitCast(result, pointer->getType());
2488  }
2489 
2490  QualType elementType = pointerType->getPointeeType();
2491  if (const VariableArrayType *vla
2492  = CGF.getContext().getAsVariableArrayType(elementType)) {
2493  // The element count here is the total number of non-VLA elements.
2494  llvm::Value *numElements = CGF.getVLASize(vla).first;
2495 
2496  // Effectively, the multiply by the VLA size is part of the GEP.
2497  // GEP indexes are signed, and scaling an index isn't permitted to
2498  // signed-overflow, so we use the same semantics for our explicit
2499  // multiply. We suppress this if overflow is not undefined behavior.
2500  if (CGF.getLangOpts().isSignedOverflowDefined()) {
2501  index = CGF.Builder.CreateMul(index, numElements, "vla.index");
2502  pointer = CGF.Builder.CreateGEP(pointer, index, "add.ptr");
2503  } else {
2504  index = CGF.Builder.CreateNSWMul(index, numElements, "vla.index");
2505  pointer = CGF.Builder.CreateInBoundsGEP(pointer, index, "add.ptr");
2506  }
2507  return pointer;
2508  }
2509 
2510  // Explicitly handle GNU void* and function pointer arithmetic extensions. The
2511  // GNU void* casts amount to no-ops since our void* type is i8*, but this is
2512  // future proof.
2513  if (elementType->isVoidType() || elementType->isFunctionType()) {
2514  Value *result = CGF.Builder.CreateBitCast(pointer, CGF.VoidPtrTy);
2515  result = CGF.Builder.CreateGEP(result, index, "add.ptr");
2516  return CGF.Builder.CreateBitCast(result, pointer->getType());
2517  }
2518 
2519  if (CGF.getLangOpts().isSignedOverflowDefined())
2520  return CGF.Builder.CreateGEP(pointer, index, "add.ptr");
2521 
2522  return CGF.Builder.CreateInBoundsGEP(pointer, index, "add.ptr");
2523 }
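A brief C99 sketch (illustrative) of the VLA branch above: arithmetic on a pointer to a variably modified type multiplies the index by the runtime element count before the GEP:

    void h(int n, int (*p)[n]) {
      p += 3;    // index scaled by n at run time
      (void)p;
    }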
2524 
2525 // Construct an fmuladd intrinsic to represent a fused mul-add of MulOp and
2526 // Addend. Use negMul and negAdd to negate the first operand of the Mul or
2527 // the add operand respectively. This allows fmuladd to represent a*b-c, or
2528 // c-a*b. Patterns in LLVM should catch the negated forms and translate them to
2529 // efficient operations.
2530 static Value* buildFMulAdd(llvm::BinaryOperator *MulOp, Value *Addend,
2531  const CodeGenFunction &CGF, CGBuilderTy &Builder,
2532  bool negMul, bool negAdd) {
2533  assert(!(negMul && negAdd) && "Only one of negMul and negAdd should be set.");
2534 
2535  Value *MulOp0 = MulOp->getOperand(0);
2536  Value *MulOp1 = MulOp->getOperand(1);
2537  if (negMul) {
2538  MulOp0 =
2539  Builder.CreateFSub(
2540  llvm::ConstantFP::getZeroValueForNegation(MulOp0->getType()), MulOp0,
2541  "neg");
2542  } else if (negAdd) {
2543  Addend =
2544  Builder.CreateFSub(
2545  llvm::ConstantFP::getZeroValueForNegation(Addend->getType()), Addend,
2546  "neg");
2547  }
2548 
2549  Value *FMulAdd = Builder.CreateCall(
2550  CGF.CGM.getIntrinsic(llvm::Intrinsic::fmuladd, Addend->getType()),
2551  {MulOp0, MulOp1, Addend});
2552  MulOp->eraseFromParent();
2553 
2554  return FMulAdd;
2555 }
2556 
2557 // Check whether it would be legal to emit an fmuladd intrinsic call to
2558 // represent op and if so, build the fmuladd.
2559 //
2560 // Checks that (a) the operation is fusable, and (b) -ffp-contract=on.
2561 // Does NOT check the type of the operation - it's assumed that this function
2562 // will be called from contexts where it's known that the type is contractable.
2563 static Value* tryEmitFMulAdd(const BinOpInfo &op,
2564  const CodeGenFunction &CGF, CGBuilderTy &Builder,
2565  bool isSub=false) {
2566 
2567  assert((op.Opcode == BO_Add || op.Opcode == BO_AddAssign ||
2568  op.Opcode == BO_Sub || op.Opcode == BO_SubAssign) &&
2569  "Only fadd/fsub can be the root of an fmuladd.");
2570 
2571  // Check whether this op is marked as fusable.
2572  if (!op.FPContractable)
2573  return nullptr;
2574 
2575  // Check whether -ffp-contract=on. (If -ffp-contract=off/fast, fusing is
2576  // either disabled, or handled entirely by the LLVM backend).
2577  if (CGF.CGM.getCodeGenOpts().getFPContractMode() != CodeGenOptions::FPC_On)
2578  return nullptr;
2579 
2580  // We have a potentially fusable op. Look for a mul on one of the operands.
2581  if (llvm::BinaryOperator* LHSBinOp = dyn_cast<llvm::BinaryOperator>(op.LHS)) {
2582  if (LHSBinOp->getOpcode() == llvm::Instruction::FMul) {
2583  assert(LHSBinOp->getNumUses() == 0 &&
2584  "Operations with multiple uses shouldn't be contracted.");
2585  return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, false, isSub);
2586  }
2587  } else if (llvm::BinaryOperator* RHSBinOp =
2588  dyn_cast<llvm::BinaryOperator>(op.RHS)) {
2589  if (RHSBinOp->getOpcode() == llvm::Instruction::FMul) {
2590  assert(RHSBinOp->getNumUses() == 0 &&
2591  "Operations with multiple uses shouldn't be contracted.");
2592  return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub, false);
2593  }
2594  }
2595 
2596  return nullptr;
2597 }
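For reference, a minimal C example of an expression this can contract when -ffp-contract=on is in effect (the multiply must have no other uses):

    double fma_like(double a, double b, double c) {
      return a * b + c;   // may become a single llvm.fmuladd call
    }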
2598 
2599 Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &op) {
2600  if (op.LHS->getType()->isPointerTy() ||
2601  op.RHS->getType()->isPointerTy())
2602  return emitPointerArithmetic(CGF, op, /*subtraction*/ false);
2603 
2604  if (op.Ty->isSignedIntegerOrEnumerationType()) {
2605  switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
2606  case LangOptions::SOB_Defined:
2607  return Builder.CreateAdd(op.LHS, op.RHS, "add");
2608  case LangOptions::SOB_Undefined:
2609  if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
2610  return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
2611  // Fall through.
2612  case LangOptions::SOB_Trapping:
2613  return EmitOverflowCheckedBinOp(op);
2614  }
2615  }
2616 
2617  if (op.Ty->isUnsignedIntegerType() &&
2618  CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow))
2619  return EmitOverflowCheckedBinOp(op);
2620 
2621  if (op.LHS->getType()->isFPOrFPVectorTy()) {
2622  // Try to form an fmuladd.
2623  if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder))
2624  return FMulAdd;
2625 
2626  return Builder.CreateFAdd(op.LHS, op.RHS, "add");
2627  }
2628 
2629  return Builder.CreateAdd(op.LHS, op.RHS, "add");
2630 }
2631 
2632 Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) {
2633  // The LHS is always a pointer if either side is.
2634  if (!op.LHS->getType()->isPointerTy()) {
2635  if (op.Ty->isSignedIntegerOrEnumerationType()) {
2636  switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
2637  case LangOptions::SOB_Defined:
2638  return Builder.CreateSub(op.LHS, op.RHS, "sub");
2639  case LangOptions::SOB_Undefined:
2640  if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
2641  return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
2642  // Fall through.
2643  case LangOptions::SOB_Trapping:
2644  return EmitOverflowCheckedBinOp(op);
2645  }
2646  }
2647 
2648  if (op.Ty->isUnsignedIntegerType() &&
2649  CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow))
2650  return EmitOverflowCheckedBinOp(op);
2651 
2652  if (op.LHS->getType()->isFPOrFPVectorTy()) {
2653  // Try to form an fmuladd.
2654  if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder, true))
2655  return FMulAdd;
2656  return Builder.CreateFSub(op.LHS, op.RHS, "sub");
2657  }
2658 
2659  return Builder.CreateSub(op.LHS, op.RHS, "sub");
2660  }
2661 
2662  // If the RHS is not a pointer, then we have normal pointer
2663  // arithmetic.
2664  if (!op.RHS->getType()->isPointerTy())
2665  return emitPointerArithmetic(CGF, op, /*subtraction*/ true);
2666 
2667  // Otherwise, this is a pointer subtraction.
2668 
2669  // Do the raw subtraction part.
2670  llvm::Value *LHS
2671  = Builder.CreatePtrToInt(op.LHS, CGF.PtrDiffTy, "sub.ptr.lhs.cast");
2672  llvm::Value *RHS
2673  = Builder.CreatePtrToInt(op.RHS, CGF.PtrDiffTy, "sub.ptr.rhs.cast");
2674  Value *diffInChars = Builder.CreateSub(LHS, RHS, "sub.ptr.sub");
2675 
2676  // Okay, figure out the element size.
2677  const BinaryOperator *expr = cast<BinaryOperator>(op.E);
2678  QualType elementType = expr->getLHS()->getType()->getPointeeType();
2679 
2680  llvm::Value *divisor = nullptr;
2681 
2682  // For a variable-length array, this is going to be non-constant.
2683  if (const VariableArrayType *vla
2684  = CGF.getContext().getAsVariableArrayType(elementType)) {
2685  llvm::Value *numElements;
2686  std::tie(numElements, elementType) = CGF.getVLASize(vla);
2687 
2688  divisor = numElements;
2689 
2690  // Scale the number of non-VLA elements by the non-VLA element size.
2691  CharUnits eltSize = CGF.getContext().getTypeSizeInChars(elementType);
2692  if (!eltSize.isOne())
2693  divisor = CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize), divisor);
2694 
2695  // For everything else, we can just compute it, safe in the
2696  // assumption that Sema won't let anything through that we can't
2697  // safely compute the size of.
2698  } else {
2699  CharUnits elementSize;
2700  // Handle GCC extension for pointer arithmetic on void* and
2701  // function pointer types.
2702  if (elementType->isVoidType() || elementType->isFunctionType())
2703  elementSize = CharUnits::One();
2704  else
2705  elementSize = CGF.getContext().getTypeSizeInChars(elementType);
2706 
2707  // Don't even emit the divide for element size of 1.
2708  if (elementSize.isOne())
2709  return diffInChars;
2710 
2711  divisor = CGF.CGM.getSize(elementSize);
2712  }
2713 
2714  // Otherwise, do a full sdiv. This uses the "exact" form of sdiv, since
2715  // pointer difference in C is only defined in the case where both operands
2716  // are pointing to elements of an array.
2717  return Builder.CreateExactSDiv(diffInChars, divisor, "sub.ptr.div");
2718 }
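An illustrative C snippet of ordinary pointer subtraction: the byte difference is divided by the element size using the exact sdiv noted above:

    #include <stddef.h>
    ptrdiff_t count(double *end, double *begin) {
      return end - begin;   // byte difference, exact-sdiv'd by sizeof(double)
    }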
2719 
2720 Value *ScalarExprEmitter::GetWidthMinusOneValue(Value* LHS,Value* RHS) {
2721  llvm::IntegerType *Ty;
2722  if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(LHS->getType()))
2723  Ty = cast<llvm::IntegerType>(VT->getElementType());
2724  else
2725  Ty = cast<llvm::IntegerType>(LHS->getType());
2726  return llvm::ConstantInt::get(RHS->getType(), Ty->getBitWidth() - 1);
2727 }
2728 
2729 Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) {
2730  // LLVM requires the LHS and RHS to be the same type: promote or truncate the
2731  // RHS to the same size as the LHS.
2732  Value *RHS = Ops.RHS;
2733  if (Ops.LHS->getType() != RHS->getType())
2734  RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");
2735 
2736  bool SanitizeBase = CGF.SanOpts.has(SanitizerKind::ShiftBase) &&
2737  Ops.Ty->hasSignedIntegerRepresentation();
2738  bool SanitizeExponent = CGF.SanOpts.has(SanitizerKind::ShiftExponent);
2739  // OpenCL 6.3j: shift values are effectively % word size of LHS.
2740  if (CGF.getLangOpts().OpenCL)
2741  RHS =
2742  Builder.CreateAnd(RHS, GetWidthMinusOneValue(Ops.LHS, RHS), "shl.mask");
2743  else if ((SanitizeBase || SanitizeExponent) &&
2744  isa<llvm::IntegerType>(Ops.LHS->getType())) {
2745  CodeGenFunction::SanitizerScope SanScope(&CGF);
2746  SmallVector<std::pair<Value *, SanitizerMask>, 2> Checks;
2747  llvm::Value *WidthMinusOne = GetWidthMinusOneValue(Ops.LHS, RHS);
2748  llvm::Value *ValidExponent = Builder.CreateICmpULE(RHS, WidthMinusOne);
2749 
2750  if (SanitizeExponent) {
2751  Checks.push_back(
2752  std::make_pair(ValidExponent, SanitizerKind::ShiftExponent));
2753  }
2754 
2755  if (SanitizeBase) {
2756  // Check whether we are shifting any non-zero bits off the top of the
2757  // integer. We only emit this check if the exponent is valid; otherwise
2758  // the instructions below would have undefined behavior themselves.
2759  llvm::BasicBlock *Orig = Builder.GetInsertBlock();
2760  llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");
2761  llvm::BasicBlock *CheckShiftBase = CGF.createBasicBlock("check");
2762  Builder.CreateCondBr(ValidExponent, CheckShiftBase, Cont);
2763  CGF.EmitBlock(CheckShiftBase);
2764  llvm::Value *BitsShiftedOff =
2765  Builder.CreateLShr(Ops.LHS,
2766  Builder.CreateSub(WidthMinusOne, RHS, "shl.zeros",
2767  /*NUW*/true, /*NSW*/true),
2768  "shl.check");
2769  if (CGF.getLangOpts().CPlusPlus) {
2770  // In C99, we are not permitted to shift a 1 bit into the sign bit.
2771  // Under C++11's rules, shifting a 1 bit into the sign bit is
2772  // OK, but shifting a 1 bit out of it is not. (C89 and C++03 don't
2773  // define signed left shifts, so we use the C99 and C++11 rules there).
2774  llvm::Value *One = llvm::ConstantInt::get(BitsShiftedOff->getType(), 1);
2775  BitsShiftedOff = Builder.CreateLShr(BitsShiftedOff, One);
2776  }
2777  llvm::Value *Zero = llvm::ConstantInt::get(BitsShiftedOff->getType(), 0);
2778  llvm::Value *ValidBase = Builder.CreateICmpEQ(BitsShiftedOff, Zero);
2779  CGF.EmitBlock(Cont);
2780  llvm::PHINode *BaseCheck = Builder.CreatePHI(ValidBase->getType(), 2);
2781  BaseCheck->addIncoming(Builder.getTrue(), Orig);
2782  BaseCheck->addIncoming(ValidBase, CheckShiftBase);
2783  Checks.push_back(std::make_pair(BaseCheck, SanitizerKind::ShiftBase));
2784  }
2785 
2786  assert(!Checks.empty());
2787  EmitBinOpCheck(Checks, Ops);
2788  }
2789 
2790  return Builder.CreateShl(Ops.LHS, RHS, "shl");
2791 }
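A hedged sketch of what -fsanitize=shift diagnoses for 32-bit int, following the base/exponent checks above (function name illustrative):

    int shl(int x, int s) { return x << s; }
    // shl(x, 40) : shift-exponent check fails (s >= bit width)
    // shl(1, 31) : shift-base check fails under the C99-style rule, but is
    //              accepted under the C++11-style rule used above
    // shl(3, 31) : shift-base check fails under both rules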
2792 
2793 Value *ScalarExprEmitter::EmitShr(const BinOpInfo &Ops) {
2794  // LLVM requires the LHS and RHS to be the same type: promote or truncate the
2795  // RHS to the same size as the LHS.
2796  Value *RHS = Ops.RHS;
2797  if (Ops.LHS->getType() != RHS->getType())
2798  RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");
2799 
2800  // OpenCL 6.3j: shift values are effectively % word size of LHS.
2801  if (CGF.getLangOpts().OpenCL)
2802  RHS =
2803  Builder.CreateAnd(RHS, GetWidthMinusOneValue(Ops.LHS, RHS), "shr.mask");
2804  else if (CGF.SanOpts.has(SanitizerKind::ShiftExponent) &&
2805  isa<llvm::IntegerType>(Ops.LHS->getType())) {
2806  CodeGenFunction::SanitizerScope SanScope(&CGF);
2807  llvm::Value *Valid =
2808  Builder.CreateICmpULE(RHS, GetWidthMinusOneValue(Ops.LHS, RHS));
2809  EmitBinOpCheck(std::make_pair(Valid, SanitizerKind::ShiftExponent), Ops);
2810  }
2811 
2812  if (Ops.Ty->hasUnsignedIntegerRepresentation())
2813  return Builder.CreateLShr(Ops.LHS, RHS, "shr");
2814  return Builder.CreateAShr(Ops.LHS, RHS, "shr");
2815 }
2816 
2817 enum IntrinsicType { VCMPEQ, VCMPGT };
2818 // Return the corresponding comparison intrinsic for the given vector element type.
2819 static llvm::Intrinsic::ID GetIntrinsic(IntrinsicType IT,
2820  BuiltinType::Kind ElemKind) {
2821  switch (ElemKind) {
2822  default: llvm_unreachable("unexpected element type");
2823  case BuiltinType::Char_U:
2824  case BuiltinType::UChar:
2825  return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
2826  llvm::Intrinsic::ppc_altivec_vcmpgtub_p;
2827  case BuiltinType::Char_S:
2828  case BuiltinType::SChar:
2829  return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
2830  llvm::Intrinsic::ppc_altivec_vcmpgtsb_p;
2831  case BuiltinType::UShort:
2832  return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
2833  llvm::Intrinsic::ppc_altivec_vcmpgtuh_p;
2834  case BuiltinType::Short:
2835  return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
2836  llvm::Intrinsic::ppc_altivec_vcmpgtsh_p;
2837  case BuiltinType::UInt:
2838  case BuiltinType::ULong:
2839  return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
2840  llvm::Intrinsic::ppc_altivec_vcmpgtuw_p;
2841  case BuiltinType::Int:
2842  case BuiltinType::Long:
2843  return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
2844  llvm::Intrinsic::ppc_altivec_vcmpgtsw_p;
2845  case BuiltinType::Float:
2846  return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpeqfp_p :
2847  llvm::Intrinsic::ppc_altivec_vcmpgtfp_p;
2848  }
2849 }
2850 
2851 Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,unsigned UICmpOpc,
2852  unsigned SICmpOpc, unsigned FCmpOpc) {
2853  TestAndClearIgnoreResultAssign();
2854  Value *Result;
2855  QualType LHSTy = E->getLHS()->getType();
2856  QualType RHSTy = E->getRHS()->getType();
2857  if (const MemberPointerType *MPT = LHSTy->getAs<MemberPointerType>()) {
2858  assert(E->getOpcode() == BO_EQ ||
2859  E->getOpcode() == BO_NE);
2860  Value *LHS = CGF.EmitScalarExpr(E->getLHS());
2861  Value *RHS = CGF.EmitScalarExpr(E->getRHS());
2862  Result = CGF.CGM.getCXXABI().EmitMemberPointerComparison(
2863  CGF, LHS, RHS, MPT, E->getOpcode() == BO_NE);
2864  } else if (!LHSTy->isAnyComplexType() && !RHSTy->isAnyComplexType()) {
2865  Value *LHS = Visit(E->getLHS());
2866  Value *RHS = Visit(E->getRHS());
2867 
2868  // For AltiVec, the comparison yields a numeric type, so we use
2869  // intrinsics that compare whole vectors and return 0 or 1 as the result.
2870  if (LHSTy->isVectorType() && !E->getType()->isVectorType()) {
2871  // constants for mapping CR6 register bits to predicate result
2872  enum { CR6_EQ=0, CR6_EQ_REV, CR6_LT, CR6_LT_REV } CR6;
2873 
2874  llvm::Intrinsic::ID ID = llvm::Intrinsic::not_intrinsic;
2875 
2876  // In several cases the order of the vector arguments is reversed.
2877  Value *FirstVecArg = LHS,
2878  *SecondVecArg = RHS;
2879 
2880  QualType ElTy = LHSTy->getAs<VectorType>()->getElementType();
2881  const BuiltinType *BTy = ElTy->getAs<BuiltinType>();
2882  BuiltinType::Kind ElementKind = BTy->getKind();
2883 
2884  switch(E->getOpcode()) {
2885  default: llvm_unreachable("is not a comparison operation");
2886  case BO_EQ:
2887  CR6 = CR6_LT;
2888  ID = GetIntrinsic(VCMPEQ, ElementKind);
2889  break;
2890  case BO_NE:
2891  CR6 = CR6_EQ;
2892  ID = GetIntrinsic(VCMPEQ, ElementKind);
2893  break;
2894  case BO_LT:
2895  CR6 = CR6_LT;
2896  ID = GetIntrinsic(VCMPGT, ElementKind);
2897  std::swap(FirstVecArg, SecondVecArg);
2898  break;
2899  case BO_GT:
2900  CR6 = CR6_LT;
2901  ID = GetIntrinsic(VCMPGT, ElementKind);
2902  break;
2903  case BO_LE:
2904  if (ElementKind == BuiltinType::Float) {
2905  CR6 = CR6_LT;
2906  ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
2907  std::swap(FirstVecArg, SecondVecArg);
2908  }
2909  else {
2910  CR6 = CR6_EQ;
2911  ID = GetIntrinsic(VCMPGT, ElementKind);
2912  }
2913  break;
2914  case BO_GE:
2915  if (ElementKind == BuiltinType::Float) {
2916  CR6 = CR6_LT;
2917  ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
2918  }
2919  else {
2920  CR6 = CR6_EQ;
2921  ID = GetIntrinsic(VCMPGT, ElementKind);
2922  std::swap(FirstVecArg, SecondVecArg);
2923  }
2924  break;
2925  }
2926 
2927  Value *CR6Param = Builder.getInt32(CR6);
2928  llvm::Function *F = CGF.CGM.getIntrinsic(ID);
2929  Result = Builder.CreateCall(F, {CR6Param, FirstVecArg, SecondVecArg});
2930  return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType());
2931  }
2932 
2933  if (LHS->getType()->isFPOrFPVectorTy()) {
2934  Result = Builder.CreateFCmp((llvm::CmpInst::Predicate)FCmpOpc,
2935  LHS, RHS, "cmp");
2936  } else if (LHSTy->hasSignedIntegerRepresentation()) {
2937  Result = Builder.CreateICmp((llvm::ICmpInst::Predicate)SICmpOpc,
2938  LHS, RHS, "cmp");
2939  } else {
2940  // Unsigned integers and pointers.
2941  Result = Builder.CreateICmp((llvm::ICmpInst::Predicate)UICmpOpc,
2942  LHS, RHS, "cmp");
2943  }
2944 
2945  // If this is a vector comparison, sign extend the result to the appropriate
2946  // vector integer type and return it (don't convert to bool).
2947  if (LHSTy->isVectorType())
2948  return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");
2949 
2950  } else {
2951  // Complex Comparison: can only be an equality comparison.
2952  CodeGenFunction::ComplexPairTy LHS, RHS;
2953  QualType CETy;
2954  if (auto *CTy = LHSTy->getAs<ComplexType>()) {
2955  LHS = CGF.EmitComplexExpr(E->getLHS());
2956  CETy = CTy->getElementType();
2957  } else {
2958  LHS.first = Visit(E->getLHS());
2959  LHS.second = llvm::Constant::getNullValue(LHS.first->getType());
2960  CETy = LHSTy;
2961  }
2962  if (auto *CTy = RHSTy->getAs<ComplexType>()) {
2963  RHS = CGF.EmitComplexExpr(E->getRHS());
2964  assert(CGF.getContext().hasSameUnqualifiedType(CETy,
2965  CTy->getElementType()) &&
2966  "The element types must always match.");
2967  (void)CTy;
2968  } else {
2969  RHS.first = Visit(E->getRHS());
2970  RHS.second = llvm::Constant::getNullValue(RHS.first->getType());
2971  assert(CGF.getContext().hasSameUnqualifiedType(CETy, RHSTy) &&
2972  "The element types must always match.");
2973  }
2974 
2975  Value *ResultR, *ResultI;
2976  if (CETy->isRealFloatingType()) {
2977  ResultR = Builder.CreateFCmp((llvm::FCmpInst::Predicate)FCmpOpc,
2978  LHS.first, RHS.first, "cmp.r");
2979  ResultI = Builder.CreateFCmp((llvm::FCmpInst::Predicate)FCmpOpc,
2980  LHS.second, RHS.second, "cmp.i");
2981  } else {
2982  // Complex comparisons can only be equality comparisons. As such, signed
2983  // and unsigned opcodes are the same.
2984  ResultR = Builder.CreateICmp((llvm::ICmpInst::Predicate)UICmpOpc,
2985  LHS.first, RHS.first, "cmp.r");
2986  ResultI = Builder.CreateICmp((llvm::ICmpInst::Predicate)UICmpOpc,
2987  LHS.second, RHS.second, "cmp.i");
2988  }
2989 
2990  if (E->getOpcode() == BO_EQ) {
2991  Result = Builder.CreateAnd(ResultR, ResultI, "and.ri");
2992  } else {
2993  assert(E->getOpcode() == BO_NE &&
2994  "Complex comparison other than == or != ?");
2995  Result = Builder.CreateOr(ResultR, ResultI, "or.ri");
2996  }
2997  }
2998 
2999  return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType());
3000 }
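// Editor's note (illustration only, not part of CGExprScalar.cpp): the vector
// path above sign-extends each per-lane i1 comparison result instead of
// producing a single bool. A minimal sketch of the resulting source-level
// semantics, assuming the GCC/clang vector_size extension; the names below are
// hypothetical.
#include <cassert>
typedef int v4i __attribute__((vector_size(16)));
static void vector_compare_semantics_example() {
  v4i a = {1, 2, 3, 4};
  v4i b = {1, 0, 3, 0};
  v4i eq = (a == b);   // element-wise result: each lane is -1 (true) or 0
  assert(eq[0] == -1 && eq[1] == 0 && eq[2] == -1 && eq[3] == 0);
}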
3001 
3002 Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
3003  bool Ignore = TestAndClearIgnoreResultAssign();
3004 
3005  Value *RHS;
3006  LValue LHS;
3007 
3008  switch (E->getLHS()->getType().getObjCLifetime()) {
3009  case Qualifiers::OCL_Strong:
3010  std::tie(LHS, RHS) = CGF.EmitARCStoreStrong(E, Ignore);
3011  break;
3012 
3013  case Qualifiers::OCL_Autoreleasing:
3014  std::tie(LHS, RHS) = CGF.EmitARCStoreAutoreleasing(E);
3015  break;
3016 
3017  case Qualifiers::OCL_Weak:
3018  RHS = Visit(E->getRHS());
3019  LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
3020  RHS = CGF.EmitARCStoreWeak(LHS.getAddress(), RHS, Ignore);
3021  break;
3022 
3023  // No reason to do any of these differently.
3024  case Qualifiers::OCL_None:
3025  case Qualifiers::OCL_ExplicitNone:
3026  // __block variables need to have the rhs evaluated first, plus
3027  // this should improve codegen just a little.
3028  RHS = Visit(E->getRHS());
3029  LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
3030 
3031  // Store the value into the LHS. Bit-fields are handled specially
3032  // because the result is altered by the store, i.e., [C99 6.5.16p1]
3033  // 'An assignment expression has the value of the left operand after
3034  // the assignment...'.
3035  if (LHS.isBitField())
3036  CGF.EmitStoreThroughBitfieldLValue(RValue::get(RHS), LHS, &RHS);
3037  else
3038  CGF.EmitStoreThroughLValue(RValue::get(RHS), LHS);
3039  }
3040 
3041  // If the result is clearly ignored, return now.
3042  if (Ignore)
3043  return nullptr;
3044 
3045  // The result of an assignment in C is the assigned r-value.
3046  if (!CGF.getLangOpts().CPlusPlus)
3047  return RHS;
3048 
3049  // If the lvalue is non-volatile, return the computed value of the assignment.
3050  if (!LHS.isVolatileQualified())
3051  return RHS;
3052 
3053  // Otherwise, reload the value.
3054  return EmitLoadOfLValue(LHS, E->getExprLoc());
3055 }
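// Editor's note (illustration only, not part of CGExprScalar.cpp): the tail of
// VisitBinAssign implements C's rule that an assignment expression yields the
// stored value (and, in C++, that a volatile result must be re-read). A
// minimal sketch of the source-level behaviour; the function name is
// hypothetical.
#include <cassert>
static void assignment_value_example() {
  int x = 0;
  int y = (x = 7) + 1;   // the assignment expression has the value 7
  assert(x == 7 && y == 8);
}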
3056 
3057 Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
3058  // Perform vector logical and on comparisons with zero vectors.
3059  if (E->getType()->isVectorType()) {
3060  CGF.incrementProfileCounter(E);
3061 
3062  Value *LHS = Visit(E->getLHS());
3063  Value *RHS = Visit(E->getRHS());
3064  Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
3065  if (LHS->getType()->isFPOrFPVectorTy()) {
3066  LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp");
3067  RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp");
3068  } else {
3069  LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
3070  RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
3071  }
3072  Value *And = Builder.CreateAnd(LHS, RHS);
3073  return Builder.CreateSExt(And, ConvertType(E->getType()), "sext");
3074  }
3075 
3076  llvm::Type *ResTy = ConvertType(E->getType());
3077 
3078  // If we have 0 && RHS, see if we can elide RHS; if so, just return 0.
3079  // If we have 1 && X, just emit X without inserting the control flow.
3080  bool LHSCondVal;
3081  if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
3082  if (LHSCondVal) { // If we have 1 && X, just emit X.
3083  CGF.incrementProfileCounter(E);
3084 
3085  Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
3086  // ZExt result to int or bool.
3087  return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "land.ext");
3088  }
3089 
3090  // 0 && RHS: If it is safe, just elide the RHS, and return 0/false.
3091  if (!CGF.ContainsLabel(E->getRHS()))
3092  return llvm::Constant::getNullValue(ResTy);
3093  }
3094 
3095  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("land.end");
3096  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("land.rhs");
3097 
3098  CodeGenFunction::ConditionalEvaluation eval(CGF);
3099 
3100  // Branch on the LHS first. If it is false, go to the failure (cont) block.
3101  CGF.EmitBranchOnBoolExpr(E->getLHS(), RHSBlock, ContBlock,
3102  CGF.getProfileCount(E->getRHS()));
3103 
3104  // Any edges into the ContBlock at this point come from the (indeterminate
3105  // number of) branches out of this first condition. All of these values will
3106  // be false. Start setting up the PHI node in the Cont Block for this.
3107  llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
3108  "", ContBlock);
3109  for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
3110  PI != PE; ++PI)
3111  PN->addIncoming(llvm::ConstantInt::getFalse(VMContext), *PI);
3112 
3113  eval.begin(CGF);
3114  CGF.EmitBlock(RHSBlock);
3115  CGF.incrementProfileCounter(E);
3116  Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
3117  eval.end(CGF);
3118 
3119  // Reacquire the RHS block, as there may be subblocks inserted.
3120  RHSBlock = Builder.GetInsertBlock();
3121 
3122  // Emit an unconditional branch from this block to ContBlock.
3123  {
3124  // There is no need to emit a line number for an unconditional branch.
3125  auto NL = ApplyDebugLocation::CreateEmpty(CGF);
3126  CGF.EmitBlock(ContBlock);
3127  }
3128  // Insert an entry into the phi node for the edge with the value of RHSCond.
3129  PN->addIncoming(RHSCond, RHSBlock);
3130 
3131  // ZExt result to int.
3132  return Builder.CreateZExtOrBitCast(PN, ResTy, "land.ext");
3133 }
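// Editor's note (illustration only, not part of CGExprScalar.cpp): VisitBinLAnd
// either folds a constant LHS or emits the "land.rhs"/"land.end" blocks so the
// RHS is evaluated only when the LHS is true. A minimal sketch of that
// short-circuit behaviour; the names below are hypothetical.
#include <cassert>
static int g_rhs_evaluations = 0;
static bool rhs_with_side_effect() { ++g_rhs_evaluations; return true; }
static void logical_and_short_circuit_example() {
  bool r = false && rhs_with_side_effect();   // RHS is never evaluated
  assert(!r && g_rhs_evaluations == 0);
}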
3134 
3135 Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
3136  // Perform vector logical or on comparisons with zero vectors.
3137  if (E->getType()->isVectorType()) {
3138  CGF.incrementProfileCounter(E);
3139 
3140  Value *LHS = Visit(E->getLHS());
3141  Value *RHS = Visit(E->getRHS());
3142  Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
3143  if (LHS->getType()->isFPOrFPVectorTy()) {
3144  LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp");
3145  RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp");
3146  } else {
3147  LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
3148  RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
3149  }
3150  Value *Or = Builder.CreateOr(LHS, RHS);
3151  return Builder.CreateSExt(Or, ConvertType(E->getType()), "sext");
3152  }
3153 
3154  llvm::Type *ResTy = ConvertType(E->getType());
3155 
3156  // If we have 1 || RHS, see if we can elide RHS; if so, just return 1.
3157  // If we have 0 || X, just emit X without inserting the control flow.
3158  bool LHSCondVal;
3159  if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
3160  if (!LHSCondVal) { // If we have 0 || X, just emit X.
3161  CGF.incrementProfileCounter(E);
3162 
3163  Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
3164  // ZExt result to int or bool.
3165  return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "lor.ext");
3166  }
3167 
3168  // 1 || RHS: If it is safe, just elide the RHS, and return 1/true.
3169  if (!CGF.ContainsLabel(E->getRHS()))
3170  return llvm::ConstantInt::get(ResTy, 1);
3171  }
3172 
3173  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("lor.end");
3174  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("lor.rhs");
3175 
3176  CodeGenFunction::ConditionalEvaluation eval(CGF);
3177 
3178  // Branch on the LHS first. If it is true, go to the success (cont) block.
3179  CGF.EmitBranchOnBoolExpr(E->getLHS(), ContBlock, RHSBlock,
3180  CGF.getCurrentProfileCount() -
3181  CGF.getProfileCount(E->getRHS()));
3182 
3183  // Any edges into the ContBlock at this point come from the (indeterminate
3184  // number of) branches out of this first condition. All of these values will
3185  // be true. Start setting up the PHI node in the Cont Block for this.
3186  llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
3187  "", ContBlock);
3188  for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
3189  PI != PE; ++PI)
3190  PN->addIncoming(llvm::ConstantInt::getTrue(VMContext), *PI);
3191 
3192  eval.begin(CGF);
3193 
3194  // Emit the RHS condition as a bool value.
3195  CGF.EmitBlock(RHSBlock);
3196  CGF.incrementProfileCounter(E);
3197  Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
3198 
3199  eval.end(CGF);
3200 
3201  // Reacquire the RHS block, as there may be subblocks inserted.
3202  RHSBlock = Builder.GetInsertBlock();
3203 
3204  // Emit an unconditional branch from this block to ContBlock. Insert an entry
3205  // into the phi node for the edge with the value of RHSCond.
3206  CGF.EmitBlock(ContBlock);
3207  PN->addIncoming(RHSCond, RHSBlock);
3208 
3209  // ZExt result to int.
3210  return Builder.CreateZExtOrBitCast(PN, ResTy, "lor.ext");
3211 }
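// Editor's note (illustration only, not part of CGExprScalar.cpp): the
// "lor.rhs"/"lor.end" blocks emitted above give '||' its guarding behaviour --
// the RHS runs only when the LHS is false. A minimal sketch; the names below
// are hypothetical.
#include <cassert>
static bool is_null_or_zero(const int *p) {
  return p == nullptr || *p == 0;   // *p is never reached when p is null
}
static void logical_or_guard_example() {
  int v = 0;
  assert(is_null_or_zero(nullptr) && is_null_or_zero(&v));
}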
3212 
3213 Value *ScalarExprEmitter::VisitBinComma(const BinaryOperator *E) {
3214  CGF.EmitIgnoredExpr(E->getLHS());
3215  CGF.EnsureInsertPoint();
3216  return Visit(E->getRHS());
3217 }
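// Editor's note (illustration only, not part of CGExprScalar.cpp):
// VisitBinComma discards the LHS value and returns the RHS, matching the C/C++
// comma operator. A minimal sketch; the names below are hypothetical.
#include <cassert>
static void comma_operator_example() {
  int calls = 0;
  int r = (++calls, 42);   // LHS evaluated for side effects, value is the RHS
  assert(calls == 1 && r == 42);
}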
3218 
3219 //===----------------------------------------------------------------------===//
3220 // Other Operators
3221 //===----------------------------------------------------------------------===//
3222 
3223 /// isCheapEnoughToEvaluateUnconditionally - Return true if the specified
3224 /// expression is cheap enough and side-effect-free enough to evaluate
3225 /// unconditionally instead of conditionally. This is used to convert control
3226 /// flow into selects in some cases.
3227 static bool isCheapEnoughToEvaluateUnconditionally(const Expr *E,
3228                                                    CodeGenFunction &CGF) {
3229  // Anything that is an integer or floating point constant is fine.
3230  return E->IgnoreParens()->isEvaluatable(CGF.getContext());
3231 
3232  // Even non-volatile automatic variables can't be evaluated unconditionally.
3233  // Referencing a thread_local may cause non-trivial initialization work to
3234  // occur. If we're inside a lambda and one of the variables is from the scope
3235  // outside the lambda, that function may have returned already. Reading its
3236  // locals is a bad idea. Also, these reads may introduce races that didn't
3237  // exist in the source-level program.
3238 }
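// Editor's note (illustration only, not part of CGExprScalar.cpp): the
// restriction above matters because turning "cond ? a : b" into a select
// evaluates both arms. That is only sound when neither arm can trap or have
// side effects, as in the constant case below. A minimal sketch; the names are
// hypothetical.
static int pick_constant(bool cond) {
  return cond ? 4 : 5;        // both arms are constants: safe to emit a select
}
static int pick_guarded(const int *p) {
  return p ? *p : 0;          // *p may trap, so the frontend keeps control flow
}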
3239 
3240 
3241 Value *ScalarExprEmitter::
3242 VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
3243  TestAndClearIgnoreResultAssign();
3244 
3245  // Bind the common expression if necessary.
3246  CodeGenFunction::OpaqueValueMapping binding(CGF, E);
3247 
3248  Expr *condExpr = E->getCond();
3249  Expr *lhsExpr = E->getTrueExpr();
3250  Expr *rhsExpr = E->getFalseExpr();
3251 
3252  // If the condition constant folds and can be elided, try to avoid emitting
3253  // the condition and the dead arm.
3254  bool CondExprBool;
3255  if (CGF.ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
3256  Expr *live = lhsExpr, *dead = rhsExpr;
3257  if (!CondExprBool) std::swap(live, dead);
3258 
3259  // If the dead side doesn't have labels we need, just emit the Live part.
3260  if (!CGF.ContainsLabel(dead)) {
3261  if (CondExprBool)
3262  CGF.incrementProfileCounter(E);
3263  Value *Result = Visit(live);
3264 
3265  // If the live part is a throw expression, it acts like it has a void
3266  // type, so evaluating it returns a null Value*. However, a conditional
3267  // with non-void type must return a non-null Value*.
3268  if (!Result && !E->getType()->isVoidType())
3269  Result = llvm::UndefValue::get(CGF.ConvertType(E->getType()));
3270 
3271  return Result;
3272  }
3273  }
3274 
3275  // OpenCL: If the condition is a vector, we can treat this condition like
3276  // the select function.
3277  if (CGF.getLangOpts().OpenCL
3278  && condExpr->getType()->isVectorType()) {
3279  CGF.incrementProfileCounter(E);
3280 
3281  llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
3282  llvm::Value *LHS = Visit(lhsExpr);
3283  llvm::Value *RHS = Visit(rhsExpr);
3284 
3285  llvm::Type *condType = ConvertType(condExpr->getType());
3286  llvm::VectorType *vecTy = cast<llvm::VectorType>(condType);
3287 
3288  unsigned numElem = vecTy->getNumElements();
3289  llvm::Type *elemType = vecTy->getElementType();
3290 
3291  llvm::Value *zeroVec = llvm::Constant::getNullValue(vecTy);
3292  llvm::Value *TestMSB = Builder.CreateICmpSLT(CondV, zeroVec);
3293  llvm::Value *tmp = Builder.CreateSExt(TestMSB,
3294  llvm::VectorType::get(elemType,
3295  numElem),
3296  "sext");
3297  llvm::Value *tmp2 = Builder.CreateNot(tmp);
3298 
3299  // Cast float to int to perform ANDs if necessary.
3300  llvm::Value *RHSTmp = RHS;
3301  llvm::Value *LHSTmp = LHS;
3302  bool wasCast = false;
3303  llvm::VectorType *rhsVTy = cast<llvm::VectorType>(RHS->getType());
3304  if (rhsVTy->getElementType()->isFloatingPointTy()) {
3305  RHSTmp = Builder.CreateBitCast(RHS, tmp2->getType());
3306  LHSTmp = Builder.CreateBitCast(LHS, tmp->getType());
3307  wasCast = true;
3308  }
3309 
3310  llvm::Value *tmp3 = Builder.CreateAnd(RHSTmp, tmp2);
3311  llvm::Value *tmp4 = Builder.CreateAnd(LHSTmp, tmp);
3312  llvm::Value *tmp5 = Builder.CreateOr(tmp3, tmp4, "cond");
3313  if (wasCast)
3314  tmp5 = Builder.CreateBitCast(tmp5, RHS->getType());
3315 
3316  return tmp5;
3317  }
3318 
3319  // If this is a really simple expression (like x ? 4 : 5), emit this as a
3320  // select instead of as control flow. We can only do this if it is cheap and
3321  // safe to evaluate the LHS and RHS unconditionally.
3322  if (isCheapEnoughToEvaluateUnconditionally(lhsExpr, CGF) &&
3323  isCheapEnoughToEvaluateUnconditionally(rhsExpr, CGF)) {
3324  CGF.incrementProfileCounter(E);
3325 
3326  llvm::Value *CondV = CGF.EvaluateExprAsBool(condExpr);
3327  llvm::Value *LHS = Visit(lhsExpr);
3328  llvm::Value *RHS = Visit(rhsExpr);
3329  if (!LHS) {
3330  // If the conditional has void type, make sure we return a null Value*.
3331  assert(!RHS && "LHS and RHS types must match");
3332  return nullptr;
3333  }
3334  return Builder.CreateSelect(CondV, LHS, RHS, "cond");
3335  }
3336 
3337  llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
3338  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
3339  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");
3340 
3341  CodeGenFunction::ConditionalEvaluation eval(CGF);
3342  CGF.EmitBranchOnBoolExpr(condExpr, LHSBlock, RHSBlock,
3343  CGF.getProfileCount(lhsExpr));
3344 
3345  CGF.EmitBlock(LHSBlock);
3346  CGF.incrementProfileCounter(E);
3347  eval.begin(CGF);
3348  Value *LHS = Visit(lhsExpr);
3349  eval.end(CGF);
3350 
3351  LHSBlock = Builder.GetInsertBlock();
3352  Builder.CreateBr(ContBlock);
3353 
3354  CGF.EmitBlock(RHSBlock);
3355  eval.begin(CGF);
3356  Value *RHS = Visit(rhsExpr);
3357  eval.end(CGF);
3358 
3359  RHSBlock = Builder.GetInsertBlock();
3360  CGF.EmitBlock(ContBlock);
3361 
3362  // If the LHS or RHS is a throw expression, it will be legitimately null.
3363  if (!LHS)
3364  return RHS;
3365  if (!RHS)
3366  return LHS;
3367 
3368  // Create a PHI node for the real part.
3369  llvm::PHINode *PN = Builder.CreatePHI(LHS->getType(), 2, "cond");
3370  PN->addIncoming(LHS, LHSBlock);
3371  PN->addIncoming(RHS, RHSBlock);
3372  return PN;
3373 }
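// Editor's note (illustration only, not part of CGExprScalar.cpp): one of the
// special cases above -- a throw expression in one arm has void type, so the
// conditional's value comes entirely from the other arm. A minimal C++ sketch;
// the names below are hypothetical.
#include <cassert>
#include <stdexcept>
static int checked_divide(int num, int den) {
  // The false arm is a throw expression; only the true arm produces a value.
  return den != 0 ? num / den : throw std::invalid_argument("zero divisor");
}
static void conditional_with_throw_example() {
  assert(checked_divide(10, 2) == 5);
  bool threw = false;
  try {
    (void)checked_divide(1, 0);
  } catch (const std::invalid_argument &) {
    threw = true;
  }
  assert(threw);
}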
3374 
3375 Value *ScalarExprEmitter::VisitChooseExpr(ChooseExpr *E) {
3376  return Visit(E->getChosenSubExpr());
3377 }
3378 
3379 Value *ScalarExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
3380  QualType Ty = VE->getType();
3381 
3382  if (Ty->isVariablyModifiedType())
3383  CGF.EmitVariablyModifiedType(Ty);
3384 
3385  llvm::Value *ArgValue = CGF.EmitVAListRef(VE->getSubExpr());
3386  llvm::Value *ArgPtr = CGF.EmitVAArg(ArgValue, VE->getType());
3387  llvm::Type *ArgTy = ConvertType(VE->getType());
3388 
3389  // If EmitVAArg fails, we fall back to the LLVM instruction.
3390  if (!ArgPtr)
3391  return Builder.CreateVAArg(ArgValue, ArgTy);
3392 
3393  // FIXME Volatility.
3394  llvm::Value *Val = Builder.CreateLoad(ArgPtr);
3395 
3396  // If EmitVAArg promoted the type, we must truncate it.
3397  if (ArgTy != Val->getType()) {
3398  if (ArgTy->isPointerTy() && !Val->getType()->isPointerTy())
3399  Val = Builder.CreateIntToPtr(Val, ArgTy);
3400  else
3401  Val = Builder.CreateTrunc(Val, ArgTy);
3402  }
3403 
3404  return Val;
3405 }
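// Editor's note (illustration only, not part of CGExprScalar.cpp):
// VisitVAArgExpr handles __builtin_va_arg, which ordinary varargs code reaches
// through <cstdarg>. A minimal sketch; the names below are hypothetical.
#include <cassert>
#include <cstdarg>
static int sum_ints(int count, ...) {
  va_list ap;
  va_start(ap, count);
  int total = 0;
  for (int i = 0; i < count; ++i)
    total += va_arg(ap, int);   // expands to __builtin_va_arg in clang's <stdarg.h>
  va_end(ap);
  return total;
}
static void va_arg_example() { assert(sum_ints(3, 1, 2, 3) == 6); }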
3406 
3407 Value *ScalarExprEmitter::VisitBlockExpr(const BlockExpr *block) {
3408  return CGF.EmitBlockLiteral(block);
3409 }
3410 
3411 Value *ScalarExprEmitter::VisitAsTypeExpr(AsTypeExpr *E) {
3412  Value *Src = CGF.EmitScalarExpr(E->getSrcExpr());
3413  llvm::Type *DstTy = ConvertType(E->getType());
3414 
3415  // Going from vec4->vec3 or vec3->vec4 is a special case and requires
3416  // a shuffle vector instead of a bitcast.
3417  llvm::Type *SrcTy = Src->getType();
3418  if (isa<llvm::VectorType>(DstTy) && isa<llvm::VectorType>(SrcTy)) {
3419  unsigned numElementsDst = cast<llvm::VectorType>(DstTy)->getNumElements();
3420  unsigned numElementsSrc = cast<llvm::VectorType>(SrcTy)->getNumElements();
3421  if ((numElementsDst == 3 && numElementsSrc == 4)
3422  || (numElementsDst == 4 && numElementsSrc == 3)) {
3423 
3424 
3425  // In the case of going from int4->float3, a bitcast is needed before
3426  // doing a shuffle.
3427  llvm::Type *srcElemTy =
3428  cast<llvm::VectorType>(SrcTy)->getElementType();
3429  llvm::Type *dstElemTy =
3430  cast<llvm::VectorType>(DstTy)->getElementType();
3431 
3432  if ((srcElemTy->isIntegerTy() && dstElemTy->isFloatTy())
3433  || (srcElemTy->isFloatTy() && dstElemTy->isIntegerTy())) {
3434  // Create a float type of the same size as the source or destination.
3435  llvm::VectorType *newSrcTy = llvm::VectorType::get(dstElemTy,
3436  numElementsSrc);
3437 
3438  Src = Builder.CreateBitCast(Src, newSrcTy, "astypeCast");
3439  }
3440 
3441  llvm::Value *UnV = llvm::UndefValue::get(Src->getType());
3442 
3443  SmallVector<llvm::Constant*, 3> Args;
3444  Args.push_back(Builder.getInt32(0));
3445  Args.push_back(Builder.getInt32(1));
3446  Args.push_back(Builder.getInt32(2));
3447 
3448  if (numElementsDst == 4)
3449  Args.push_back(llvm::UndefValue::get(CGF.Int32Ty));
3450 
3451  llvm::Constant *Mask = llvm::ConstantVector::get(Args);
3452 
3453  return Builder.CreateShuffleVector(Src, UnV, Mask, "astype");
3454  }
3455  }
3456 
3457  return Builder.CreateBitCast(Src, DstTy, "astype");
3458 }
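// Editor's note (illustration only, not part of CGExprScalar.cpp): the OpenCL
// as_typeN() operators handled above reinterpret bits; same-size cases lower to
// a plain bitcast, while vec3<->vec4 additionally needs the shuffle emitted
// above. A portable C++ sketch of the same-size reinterpretation, assuming only
// <cstring>; the function name is hypothetical.
#include <cstdint>
#include <cstring>
static std::uint32_t bits_of(float f) {
  std::uint32_t u;
  std::memcpy(&u, &f, sizeof u);   // the scalar analogue of as_uint(f)
  return u;
}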
3459 
3460 Value *ScalarExprEmitter::VisitAtomicExpr(AtomicExpr *E) {
3461  return CGF.EmitAtomicExpr(E).getScalarVal();
3462 }
3463 
3464 //===----------------------------------------------------------------------===//
3465 // Entry Point into this File
3466 //===----------------------------------------------------------------------===//
3467 
3468 /// EmitScalarExpr - Emit the computation of the specified expression of scalar
3469 /// type, ignoring the result.
3470 Value *CodeGenFunction::EmitScalarExpr(const Expr *E, bool IgnoreResultAssign) {
3471  assert(E && hasScalarEvaluationKind(E->getType()) &&
3472  "Invalid scalar expression to emit");
3473 
3474  return ScalarExprEmitter(*this, IgnoreResultAssign)
3475  .Visit(const_cast<Expr *>(E));
3476 }
3477 
3478 /// EmitScalarConversion - Emit a conversion from the specified type to the
3479 /// specified destination type, both of which are LLVM scalar types.
3480 Value *CodeGenFunction::EmitScalarConversion(Value *Src, QualType SrcTy,
3481  QualType DstTy) {
3482  assert(hasScalarEvaluationKind(SrcTy) && hasScalarEvaluationKind(DstTy) &&
3483  "Invalid scalar expression to emit");
3484  return ScalarExprEmitter(*this).EmitScalarConversion(Src, SrcTy, DstTy);
3485 }
3486 
3487 /// EmitComplexToScalarConversion - Emit a conversion from the specified complex
3488 /// type to the specified destination type, where the destination type is an
3489 /// LLVM scalar type.
3490 Value *CodeGenFunction::EmitComplexToScalarConversion(ComplexPairTy Src,
3491  QualType SrcTy,
3492  QualType DstTy) {
3493  assert(SrcTy->isAnyComplexType() && hasScalarEvaluationKind(DstTy) &&
3494  "Invalid complex -> scalar conversion");
3495  return ScalarExprEmitter(*this).EmitComplexToScalarConversion(Src, SrcTy,
3496  DstTy);
3497 }
3498 
3499 
3500 llvm::Value *CodeGenFunction::
3501 EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
3502  bool isInc, bool isPre) {
3503  return ScalarExprEmitter(*this).EmitScalarPrePostIncDec(E, LV, isInc, isPre);
3504 }
3505 
3506 LValue CodeGenFunction::EmitObjCIsaExpr(const ObjCIsaExpr *E) {
3507  llvm::Value *V;
3508  // object->isa or (*object).isa
3509  // Generate code as for: *(Class*)object
3510  // build Class* type
3511  llvm::Type *ClassPtrTy = ConvertType(E->getType());
3512 
3513  Expr *BaseExpr = E->getBase();
3514  if (BaseExpr->isRValue()) {
3515  V = CreateMemTemp(E->getType(), "resval");
3516  llvm::Value *Src = EmitScalarExpr(BaseExpr);
3517  Builder.CreateStore(Src, V);
3518  V = ScalarExprEmitter(*this).EmitLoadOfLValue(
3519  MakeNaturalAlignAddrLValue(V, E->getType()), E->getExprLoc());
3520  } else {
3521  if (E->isArrow())
3522  V = ScalarExprEmitter(*this).EmitLoadOfLValue(BaseExpr);
3523  else
3524  V = EmitLValue(BaseExpr).getAddress();
3525  }
3526 
3527  // build Class* type
3528  ClassPtrTy = ClassPtrTy->getPointerTo();
3529  V = Builder.CreateBitCast(V, ClassPtrTy);
3530  return MakeNaturalAlignAddrLValue(V, E->getType());
3531 }
3532 
3533 
3534 LValue CodeGenFunction::EmitCompoundAssignmentLValue(
3535  const CompoundAssignOperator *E) {
3536  ScalarExprEmitter Scalar(*this);
3537  Value *Result = nullptr;
3538  switch (E->getOpcode()) {
3539 #define COMPOUND_OP(Op) \
3540  case BO_##Op##Assign: \
3541  return Scalar.EmitCompoundAssignLValue(E, &ScalarExprEmitter::Emit##Op, \
3542  Result)
3543  COMPOUND_OP(Mul);
3544  COMPOUND_OP(Div);
3545  COMPOUND_OP(Rem);
3546  COMPOUND_OP(Add);
3547  COMPOUND_OP(Sub);
3548  COMPOUND_OP(Shl);
3549  COMPOUND_OP(Shr);
3550  COMPOUND_OP(And);
3551  COMPOUND_OP(Xor);
3552  COMPOUND_OP(Or);
3553 #undef COMPOUND_OP
3554 
3555  case BO_PtrMemD:
3556  case BO_PtrMemI:
3557  case BO_Mul:
3558  case BO_Div:
3559  case BO_Rem:
3560  case BO_Add:
3561  case BO_Sub:
3562  case BO_Shl:
3563  case BO_Shr:
3564  case BO_LT:
3565  case BO_GT:
3566  case BO_LE:
3567  case BO_GE:
3568  case BO_EQ:
3569  case BO_NE:
3570  case BO_And:
3571  case BO_Xor:
3572  case BO_Or:
3573  case BO_LAnd:
3574  case BO_LOr:
3575  case BO_Assign:
3576  case BO_Comma:
3577  llvm_unreachable("Not valid compound assignment operators");
3578  }
3579 
3580  llvm_unreachable("Unhandled compound assignment operator");
3581 }
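// Editor's note (illustration only, not part of CGExprScalar.cpp): the
// COMPOUND_OP table above routes "x op= y" through EmitCompoundAssignLValue,
// which roughly performs the operation in the promoted computation type and
// then converts back to the LHS type. A minimal sketch of that source-level
// behaviour; the function name is hypothetical.
#include <cassert>
static void compound_assignment_example() {
  short s = 1;
  s += 70000;   // computed as int, then converted back to short
  assert(s == static_cast<short>(1 + 70000));
}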