//===--- CGCall.cpp - Encapsulate calling convention details --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliancy.
//
//===----------------------------------------------------------------------===//

#include "CGCall.h"
#include "ABIInfo.h"
#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace clang;
using namespace CodeGen;

/***/

static unsigned ClangCallConvToLLVMCallConv(CallingConv CC) {
  switch (CC) {
  default: return llvm::CallingConv::C;
  case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
  case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
  case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
  case CC_X86_64Win64: return llvm::CallingConv::X86_64_Win64;
  case CC_X86_64SysV: return llvm::CallingConv::X86_64_SysV;
  case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
  case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
  case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI;
  // TODO: Add support for __pascal to LLVM.
  case CC_X86Pascal: return llvm::CallingConv::C;
  // TODO: Add support for __vectorcall to LLVM.
  case CC_X86VectorCall: return llvm::CallingConv::X86_VectorCall;
  case CC_SpirFunction: return llvm::CallingConv::SPIR_FUNC;
  case CC_SpirKernel: return llvm::CallingConv::SPIR_KERNEL;
  }
}
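
// Added illustration (editor's note, not part of the original source): for a
// hypothetical declaration such as
//   void __attribute__((fastcall)) f(int x);
// the AST carries CC_X86FastCall, which this switch maps to
// llvm::CallingConv::X86_FastCall, printed in IR roughly as
//   declare x86_fastcallcc void @f(i32 inreg)
// Whether an argument actually lands in a register is decided later by the
// target's ABIInfo, not here.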

/// Derives the 'this' type for codegen purposes, i.e. ignoring method
/// qualification.
/// FIXME: address space qualification?
static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD) {
  QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
  return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
}

/// Returns the canonical formal type of the given C++ method.
static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
  return MD->getType()->getCanonicalTypeUnqualified()
           .getAs<FunctionProtoType>();
}

/// Returns the "extra-canonicalized" return type, which discards
/// qualifiers on the return type. Codegen doesn't care about them,
/// and it makes ABI code a little easier to be able to assume that
/// all parameter and return types are top-level unqualified.
static CanQualType GetReturnType(QualType RetTy) {
  return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
}

/// Arrange the argument and result information for a value of the given
/// unprototyped freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
  // When translating an unprototyped function type, always use a
  // variadic type.
  return arrangeLLVMFunctionInfo(FTNP->getReturnType().getUnqualifiedType(),
                                 /*instanceMethod=*/false,
                                 /*chainCall=*/false, None,
                                 FTNP->getExtInfo(), RequiredArgs(0));
}

/// Adds the formal parameters in FPT to the given prefix. If any parameter in
/// FPT has pass_object_size attrs, then we'll add parameters for those, too.
static void appendParameterTypes(const CodeGenTypes &CGT,
                                 SmallVectorImpl<CanQualType> &prefix,
                                 const CanQual<FunctionProtoType> &FPT,
                                 const FunctionDecl *FD) {
  // Fast path: without a declaration, there can be no pass_object_size attrs.
  if (FD == nullptr) {
    prefix.append(FPT->param_type_begin(), FPT->param_type_end());
    return;
  }

  // In the vast majority of cases, we'll have precisely FPT->getNumParams()
  // parameters; the only thing that can change this is the presence of
  // pass_object_size. So, we preallocate for the common case.
  prefix.reserve(prefix.size() + FPT->getNumParams());

  assert(FD->getNumParams() == FPT->getNumParams());
  for (unsigned I = 0, E = FPT->getNumParams(); I != E; ++I) {
    prefix.push_back(FPT->getParamType(I));
    if (FD->getParamDecl(I)->hasAttr<PassObjectSizeAttr>())
      prefix.push_back(CGT.getContext().getSizeType());
  }
}
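
// Added illustration (editor's note, not part of the original source): for a
// hypothetical declaration
//   void f(void *p __attribute__((pass_object_size(0))));
// appendParameterTypes produces two ABI-level parameters, roughly
//   void f(void *p, size_t p_object_size);
// and CodeGen later materializes the extra argument at each call site from
// __builtin_object_size(p, 0).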

/// Arrange the LLVM function layout for a value of the given function
/// type, on top of any implicit parameters already stored.
static const CGFunctionInfo &
arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod,
                        SmallVectorImpl<CanQualType> &prefix,
                        CanQual<FunctionProtoType> FTP,
                        const FunctionDecl *FD) {
  RequiredArgs required = RequiredArgs::forPrototypePlus(FTP, prefix.size());
  // FIXME: Kill copy.
  appendParameterTypes(CGT, prefix, FTP, FD);
  CanQualType resultType = FTP->getReturnType().getUnqualifiedType();
  return CGT.arrangeLLVMFunctionInfo(resultType, instanceMethod,
                                     /*chainCall=*/false, prefix,
                                     FTP->getExtInfo(), required);
}

/// Arrange the argument and result information for a value of the
/// given freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP,
                                      const FunctionDecl *FD) {
  SmallVector<CanQualType, 16> argTypes;
  return ::arrangeLLVMFunctionInfo(*this, /*instanceMethod=*/false, argTypes,
                                   FTP, FD);
}

static CallingConv getCallingConventionForDecl(const Decl *D, bool IsWindows) {
  // Set the appropriate calling convention for the Function.
  if (D->hasAttr<StdCallAttr>())
    return CC_X86StdCall;

  if (D->hasAttr<FastCallAttr>())
    return CC_X86FastCall;

  if (D->hasAttr<ThisCallAttr>())
    return CC_X86ThisCall;

  if (D->hasAttr<VectorCallAttr>())
    return CC_X86VectorCall;

  if (D->hasAttr<PascalAttr>())
    return CC_X86Pascal;

  if (PcsAttr *PCS = D->getAttr<PcsAttr>())
    return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);

  if (D->hasAttr<IntelOclBiccAttr>())
    return CC_IntelOclBicc;

  if (D->hasAttr<MSABIAttr>())
    return IsWindows ? CC_C : CC_X86_64Win64;

  if (D->hasAttr<SysVABIAttr>())
    return IsWindows ? CC_X86_64SysV : CC_C;

  return CC_C;
}

/// Arrange the argument and result information for a call to an
/// unknown C++ non-static member function of the given abstract type.
/// (Zero value of RD means we don't have any meaningful "this" argument type,
/// so fall back to a generic pointer type).
/// The member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
                                   const FunctionProtoType *FTP,
                                   const CXXMethodDecl *MD) {
  SmallVector<CanQualType, 16> argTypes;

  // Add the 'this' pointer.
  if (RD)
    argTypes.push_back(GetThisType(Context, RD));
  else
    argTypes.push_back(Context.VoidPtrTy);

  return ::arrangeLLVMFunctionInfo(
      *this, true, argTypes,
      FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>(), MD);
}

/// Arrange the argument and result information for a declaration or
/// definition of the given C++ non-static member function. The
/// member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
  assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
  assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");

  CanQual<FunctionProtoType> prototype = GetFormalType(MD);

  if (MD->isInstance()) {
    // The abstract case is perfectly fine.
    const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(MD);
    return arrangeCXXMethodType(ThisType, prototype.getTypePtr(), MD);
  }

  return arrangeFreeFunctionType(prototype, MD);
}

const CGFunctionInfo &
CodeGenTypes::arrangeCXXStructorDeclaration(const CXXMethodDecl *MD,
                                            StructorType Type) {

  SmallVector<CanQualType, 16> argTypes;
  argTypes.push_back(GetThisType(Context, MD->getParent()));

  GlobalDecl GD;
  if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
    GD = GlobalDecl(CD, toCXXCtorType(Type));
  } else {
    auto *DD = dyn_cast<CXXDestructorDecl>(MD);
    GD = GlobalDecl(DD, toCXXDtorType(Type));
  }

  CanQual<FunctionProtoType> FTP = GetFormalType(MD);

  // Add the formal parameters.
  appendParameterTypes(*this, argTypes, FTP, MD);

  TheCXXABI.buildStructorSignature(MD, Type, argTypes);

  RequiredArgs required =
      (MD->isVariadic() ? RequiredArgs(argTypes.size()) : RequiredArgs::All);

  FunctionType::ExtInfo extInfo = FTP->getExtInfo();
  CanQualType resultType = TheCXXABI.HasThisReturn(GD)
                               ? argTypes.front()
                               : TheCXXABI.hasMostDerivedReturn(GD)
                                     ? CGM.getContext().VoidPtrTy
                                     : Context.VoidTy;
  return arrangeLLVMFunctionInfo(resultType, /*instanceMethod=*/true,
                                 /*chainCall=*/false, argTypes, extInfo,
                                 required);
}

/// Arrange a call to a C++ method, passing the given arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args,
                                        const CXXConstructorDecl *D,
                                        CXXCtorType CtorKind,
                                        unsigned ExtraArgs) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> ArgTypes;
  for (const auto &Arg : args)
    ArgTypes.push_back(Context.getCanonicalParamType(Arg.Ty));

  CanQual<FunctionProtoType> FPT = GetFormalType(D);
  RequiredArgs Required = RequiredArgs::forPrototypePlus(FPT, 1 + ExtraArgs);
  GlobalDecl GD(D, CtorKind);
  CanQualType ResultType = TheCXXABI.HasThisReturn(GD)
                               ? ArgTypes.front()
                               : TheCXXABI.hasMostDerivedReturn(GD)
                                     ? CGM.getContext().VoidPtrTy
                                     : Context.VoidTy;

  FunctionType::ExtInfo Info = FPT->getExtInfo();
  return arrangeLLVMFunctionInfo(ResultType, /*instanceMethod=*/true,
                                 /*chainCall=*/false, ArgTypes, Info,
                                 Required);
}

/// Arrange the argument and result information for the declaration or
/// definition of the given function.
const CGFunctionInfo &
CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
    if (MD->isInstance())
      return arrangeCXXMethodDeclaration(MD);

  CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();

  assert(isa<FunctionType>(FTy));

  // When declaring a function without a prototype, always use a
  // non-variadic type.
  if (isa<FunctionNoProtoType>(FTy)) {
    CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>();
    return arrangeLLVMFunctionInfo(
        noProto->getReturnType(), /*instanceMethod=*/false,
        /*chainCall=*/false, None, noProto->getExtInfo(), RequiredArgs::All);
  }

  assert(isa<FunctionProtoType>(FTy));
  return arrangeFreeFunctionType(FTy.getAs<FunctionProtoType>(), FD);
}

/// Arrange the argument and result information for the declaration or
/// definition of an Objective-C method.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
  // It happens that this is the same as a call with no optional
  // arguments, except also using the formal 'self' type.
  return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
}

/// Arrange the argument and result information for the function type
/// through which to perform a send to the given Objective-C method,
/// using the given receiver type. The receiver type is not always
/// the 'self' type of the method or even an Objective-C pointer type.
/// This is *not* the right method for actually performing such a
/// message send, due to the possibility of optional arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
                                              QualType receiverType) {
  SmallVector<CanQualType, 16> argTys;
  argTys.push_back(Context.getCanonicalParamType(receiverType));
  argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
  // FIXME: Kill copy?
  for (const auto *I : MD->params()) {
    argTys.push_back(Context.getCanonicalParamType(I->getType()));
  }

  FunctionType::ExtInfo einfo;
  bool IsWindows = getContext().getTargetInfo().getTriple().isOSWindows();
  einfo = einfo.withCallingConv(getCallingConventionForDecl(MD, IsWindows));

  if (getContext().getLangOpts().ObjCAutoRefCount &&
      MD->hasAttr<NSReturnsRetainedAttr>())
    einfo = einfo.withProducesResult(true);

  RequiredArgs required =
      (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);

  return arrangeLLVMFunctionInfo(
      GetReturnType(MD->getReturnType()), /*instanceMethod=*/false,
      /*chainCall=*/false, argTys, einfo, required);
}

const CGFunctionInfo &
CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
  // FIXME: Do we need to handle ObjCMethodDecl?
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
    return arrangeCXXStructorDeclaration(CD, getFromCtorType(GD.getCtorType()));

  if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
    return arrangeCXXStructorDeclaration(DD, getFromDtorType(GD.getDtorType()));

  return arrangeFunctionDeclaration(FD);
}

/// Arrange a thunk that takes 'this' as the first parameter followed by
/// varargs. Return a void pointer, regardless of the actual return type.
/// The body of the thunk will end in a musttail call to a function of the
/// correct type, and the caller will bitcast the function to the correct
/// prototype.
const CGFunctionInfo &
CodeGenTypes::arrangeMSMemberPointerThunk(const CXXMethodDecl *MD) {
  assert(MD->isVirtual() && "only virtual memptrs have thunks");
  CanQual<FunctionProtoType> FTP = GetFormalType(MD);
  CanQualType ArgTys[] = { GetThisType(Context, MD->getParent()) };
  return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/false,
                                 /*chainCall=*/false, ArgTys,
                                 FTP->getExtInfo(), RequiredArgs(1));
}

const CGFunctionInfo &
CodeGenTypes::arrangeMSCtorClosure(const CXXConstructorDecl *CD,
                                   CXXCtorType CT) {
  assert(CT == Ctor_CopyingClosure || CT == Ctor_DefaultClosure);

  CanQual<FunctionProtoType> FTP = GetFormalType(CD);
  SmallVector<CanQualType, 1> ArgTys;
  const CXXRecordDecl *RD = CD->getParent();
  ArgTys.push_back(GetThisType(Context, RD));
  if (CT == Ctor_CopyingClosure)
    ArgTys.push_back(*FTP->param_type_begin());
  if (RD->getNumVBases() > 0)
    ArgTys.push_back(Context.IntTy);
  CallingConv CC = Context.getDefaultCallingConvention(
      /*IsVariadic=*/false, /*IsCXXMethod=*/true);
  return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/true,
                                 /*chainCall=*/false, ArgTys,
                                 FunctionType::ExtInfo(CC), RequiredArgs::All);
}

/// Arrange a call as unto a free function, except possibly with an
/// additional number of formal parameters considered required.
static const CGFunctionInfo &
arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
                            CodeGenModule &CGM,
                            const CallArgList &args,
                            const FunctionType *fnType,
                            unsigned numExtraRequiredArgs,
                            bool chainCall) {
  assert(args.size() >= numExtraRequiredArgs);

  // In most cases, there are no optional arguments.
  RequiredArgs required = RequiredArgs::All;

  // If we have a variadic prototype, the required arguments are the
  // extra prefix plus the arguments in the prototype.
  if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
    if (proto->isVariadic())
      required = RequiredArgs(proto->getNumParams() + numExtraRequiredArgs);

  // If we don't have a prototype at all, but we're supposed to
  // explicitly use the variadic convention for unprototyped calls,
  // treat all of the arguments as required but preserve the nominal
  // possibility of variadics.
  } else if (CGM.getTargetCodeGenInfo()
                 .isNoProtoCallVariadic(args,
                                        cast<FunctionNoProtoType>(fnType))) {
    required = RequiredArgs(args.size());
  }

  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (const auto &arg : args)
    argTypes.push_back(CGT.getContext().getCanonicalParamType(arg.Ty));
  return CGT.arrangeLLVMFunctionInfo(GetReturnType(fnType->getReturnType()),
                                     /*instanceMethod=*/false, chainCall,
                                     argTypes, fnType->getExtInfo(), required);
}
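
// Added illustration (editor's note, not part of the original source): for
// the variadic prototype
//   int printf(const char *fmt, ...);
// a call printf("%d", 1) reaches here with args.size() == 2 and
// numExtraRequiredArgs == 0, so required becomes RequiredArgs(1): only the
// prototyped 'fmt' parameter is required, and the trailing argument is
// passed under the target's variadic rules.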

/// Figure out the rules for calling a function with the given formal
/// type using the given arguments. The arguments are necessary
/// because the function might be unprototyped, in which case it's
/// target-dependent in crazy ways.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
                                      const FunctionType *fnType,
                                      bool chainCall) {
  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType,
                                     chainCall ? 1 : 0, chainCall);
}

/// A block function call is essentially a free-function call with an
/// extra implicit argument.
const CGFunctionInfo &
CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args,
                                       const FunctionType *fnType) {
  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 1,
                                     /*chainCall=*/false);
}

const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionCall(QualType resultType,
                                      const CallArgList &args,
                                      FunctionType::ExtInfo info,
                                      RequiredArgs required) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (const auto &Arg : args)
    argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
  return arrangeLLVMFunctionInfo(
      GetReturnType(resultType), /*instanceMethod=*/false,
      /*chainCall=*/false, argTypes, info, required);
}

/// Arrange a call to a C++ method, passing the given arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
                                   const FunctionProtoType *FPT,
                                   RequiredArgs required) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (const auto &Arg : args)
    argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));

  FunctionType::ExtInfo info = FPT->getExtInfo();
  return arrangeLLVMFunctionInfo(
      GetReturnType(FPT->getReturnType()), /*instanceMethod=*/true,
      /*chainCall=*/false, argTypes, info, required);
}

const CGFunctionInfo &CodeGenTypes::arrangeFreeFunctionDeclaration(
    QualType resultType, const FunctionArgList &args,
    const FunctionType::ExtInfo &info, bool isVariadic) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (auto Arg : args)
    argTypes.push_back(Context.getCanonicalParamType(Arg->getType()));

  RequiredArgs required =
      (isVariadic ? RequiredArgs(args.size()) : RequiredArgs::All);
  return arrangeLLVMFunctionInfo(
      GetReturnType(resultType), /*instanceMethod=*/false,
      /*chainCall=*/false, argTypes, info, required);
}

const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
  return arrangeLLVMFunctionInfo(
      getContext().VoidTy, /*instanceMethod=*/false, /*chainCall=*/false,
      None, FunctionType::ExtInfo(), RequiredArgs::All);
}

/// Arrange the argument and result information for an abstract value
/// of a given function type. This is the method which all of the
/// above functions ultimately defer to.
const CGFunctionInfo &
CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
                                      bool instanceMethod,
                                      bool chainCall,
                                      ArrayRef<CanQualType> argTypes,
                                      FunctionType::ExtInfo info,
                                      RequiredArgs required) {
  assert(std::all_of(argTypes.begin(), argTypes.end(),
                     std::mem_fun_ref(&CanQualType::isCanonicalAsParam)));

  unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());

  // Lookup or create unique function info.
  llvm::FoldingSetNodeID ID;
  CGFunctionInfo::Profile(ID, instanceMethod, chainCall, info, required,
                          resultType, argTypes);

  void *insertPos = nullptr;
  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
  if (FI)
    return *FI;

  // Construct the function info. We co-allocate the ArgInfos.
  FI = CGFunctionInfo::create(CC, instanceMethod, chainCall, info,
                              resultType, argTypes, required);
  FunctionInfos.InsertNode(FI, insertPos);

  bool inserted = FunctionsBeingProcessed.insert(FI).second;
  (void)inserted;
  assert(inserted && "Recursively being processed?");

  // Compute ABI information.
  getABIInfo().computeInfo(*FI);

  // Loop over all of the computed argument and return value info. If any of
  // them are direct or extend without a specified coerce type, specify the
  // default now.
  ABIArgInfo &retInfo = FI->getReturnInfo();
  if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == nullptr)
    retInfo.setCoerceToType(ConvertType(FI->getReturnType()));

  for (auto &I : FI->arguments())
    if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr)
      I.info.setCoerceToType(ConvertType(I.type));

  bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
  assert(erased && "Not in set?");

  return *FI;
}

CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
                                       bool instanceMethod,
                                       bool chainCall,
                                       const FunctionType::ExtInfo &info,
                                       CanQualType resultType,
                                       ArrayRef<CanQualType> argTypes,
                                       RequiredArgs required) {
  void *buffer = operator new(sizeof(CGFunctionInfo) +
                              sizeof(ArgInfo) * (argTypes.size() + 1));
  CGFunctionInfo *FI = new(buffer) CGFunctionInfo();
  FI->CallingConvention = llvmCC;
  FI->EffectiveCallingConvention = llvmCC;
  FI->ASTCallingConvention = info.getCC();
  FI->InstanceMethod = instanceMethod;
  FI->ChainCall = chainCall;
  FI->NoReturn = info.getNoReturn();
  FI->ReturnsRetained = info.getProducesResult();
  FI->Required = required;
  FI->HasRegParm = info.getHasRegParm();
  FI->RegParm = info.getRegParm();
  FI->ArgStruct = nullptr;
  FI->ArgStructAlign = 0;
  FI->NumArgs = argTypes.size();
  FI->getArgsBuffer()[0].type = resultType;
  for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
    FI->getArgsBuffer()[i + 1].type = argTypes[i];
  return FI;
}

/***/

namespace {
// ABIArgInfo::Expand implementation.

// Specifies the way QualType passed as ABIArgInfo::Expand is expanded.
struct TypeExpansion {
  enum TypeExpansionKind {
    // Elements of constant arrays are expanded recursively.
    TEK_ConstantArray,
    // Record fields are expanded recursively (but if record is a union, only
    // the field with the largest size is expanded).
    TEK_Record,
    // For complex types, real and imaginary parts are expanded recursively.
    TEK_Complex,
    // All other types are not expandable.
    TEK_None
  };

  const TypeExpansionKind Kind;

  TypeExpansion(TypeExpansionKind K) : Kind(K) {}
  virtual ~TypeExpansion() {}
};

struct ConstantArrayExpansion : TypeExpansion {
  QualType EltTy;
  uint64_t NumElts;

  ConstantArrayExpansion(QualType EltTy, uint64_t NumElts)
      : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_ConstantArray;
  }
};

struct RecordExpansion : TypeExpansion {
  SmallVector<const CXXBaseSpecifier *, 1> Bases;

  SmallVector<const FieldDecl *, 1> Fields;

  RecordExpansion(SmallVector<const CXXBaseSpecifier *, 1> &&Bases,
                  SmallVector<const FieldDecl *, 1> &&Fields)
      : TypeExpansion(TEK_Record), Bases(std::move(Bases)),
        Fields(std::move(Fields)) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_Record;
  }
};

struct ComplexExpansion : TypeExpansion {
  QualType EltTy;

  ComplexExpansion(QualType EltTy) : TypeExpansion(TEK_Complex), EltTy(EltTy) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_Complex;
  }
};

struct NoExpansion : TypeExpansion {
  NoExpansion() : TypeExpansion(TEK_None) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_None;
  }
};
} // namespace

static std::unique_ptr<TypeExpansion>
getTypeExpansion(QualType Ty, const ASTContext &Context) {
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
    return llvm::make_unique<ConstantArrayExpansion>(
        AT->getElementType(), AT->getSize().getZExtValue());
  }
  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    SmallVector<const CXXBaseSpecifier *, 1> Bases;
    SmallVector<const FieldDecl *, 1> Fields;
    const RecordDecl *RD = RT->getDecl();
    assert(!RD->hasFlexibleArrayMember() &&
           "Cannot expand structure with flexible array.");
    if (RD->isUnion()) {
      // Unions can be here only in degenerate cases - all the fields are the
      // same after flattening. Thus we have to use the "largest" field.
      const FieldDecl *LargestFD = nullptr;
      CharUnits UnionSize = CharUnits::Zero();

      for (const auto *FD : RD->fields()) {
        // Skip zero length bitfields.
        if (FD->isBitField() && FD->getBitWidthValue(Context) == 0)
          continue;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        CharUnits FieldSize = Context.getTypeSizeInChars(FD->getType());
        if (UnionSize < FieldSize) {
          UnionSize = FieldSize;
          LargestFD = FD;
        }
      }
      if (LargestFD)
        Fields.push_back(LargestFD);
    } else {
      if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
        assert(!CXXRD->isDynamicClass() &&
               "cannot expand vtable pointers in dynamic classes");
        for (const CXXBaseSpecifier &BS : CXXRD->bases())
          Bases.push_back(&BS);
      }

      for (const auto *FD : RD->fields()) {
        // Skip zero length bitfields.
        if (FD->isBitField() && FD->getBitWidthValue(Context) == 0)
          continue;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        Fields.push_back(FD);
      }
    }
    return llvm::make_unique<RecordExpansion>(std::move(Bases),
                                              std::move(Fields));
  }
  if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
    return llvm::make_unique<ComplexExpansion>(CT->getElementType());
  }
  return llvm::make_unique<NoExpansion>();
}
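
// Added illustration (editor's note, not part of the original source): for a
// hypothetical
//   struct S { int a[2]; _Complex float c; };
// getTypeExpansion yields TEK_Record, whose fields expand recursively: 'a'
// is TEK_ConstantArray (2 x int) and 'c' is TEK_Complex (real and imaginary
// float parts), so getExpansionSize (defined just below) returns 4 and an
// expanded argument of type S is passed as four scalars: i32, i32, float,
// float.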

static int getExpansionSize(QualType Ty, const ASTContext &Context) {
  auto Exp = getTypeExpansion(Ty, Context);
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    return CAExp->NumElts * getExpansionSize(CAExp->EltTy, Context);
  }
  if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    int Res = 0;
    for (auto BS : RExp->Bases)
      Res += getExpansionSize(BS->getType(), Context);
    for (auto FD : RExp->Fields)
      Res += getExpansionSize(FD->getType(), Context);
    return Res;
  }
  if (isa<ComplexExpansion>(Exp.get()))
    return 2;
  assert(isa<NoExpansion>(Exp.get()));
  return 1;
}

void
CodeGenTypes::getExpandedTypes(QualType Ty,
                               SmallVectorImpl<llvm::Type *>::iterator &TI) {
  auto Exp = getTypeExpansion(Ty, Context);
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    for (int i = 0, n = CAExp->NumElts; i < n; i++) {
      getExpandedTypes(CAExp->EltTy, TI);
    }
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    for (auto BS : RExp->Bases)
      getExpandedTypes(BS->getType(), TI);
    for (auto FD : RExp->Fields)
      getExpandedTypes(FD->getType(), TI);
  } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
    llvm::Type *EltTy = ConvertType(CExp->EltTy);
    *TI++ = EltTy;
    *TI++ = EltTy;
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    *TI++ = ConvertType(Ty);
  }
}

static void forConstantArrayExpansion(CodeGenFunction &CGF,
                                      ConstantArrayExpansion *CAE,
                                      Address BaseAddr,
                                      llvm::function_ref<void(Address)> Fn) {
  CharUnits EltSize = CGF.getContext().getTypeSizeInChars(CAE->EltTy);
  CharUnits EltAlign =
      BaseAddr.getAlignment().alignmentOfArrayElement(EltSize);

  for (int i = 0, n = CAE->NumElts; i < n; i++) {
    llvm::Value *EltAddr =
        CGF.Builder.CreateConstGEP2_32(nullptr, BaseAddr.getPointer(), 0, i);
    Fn(Address(EltAddr, EltAlign));
  }
}

void CodeGenFunction::ExpandTypeFromArgs(
    QualType Ty, LValue LV, SmallVectorImpl<llvm::Argument *>::iterator &AI) {
  assert(LV.isSimple() &&
         "Unexpected non-simple lvalue during struct expansion.");

  auto Exp = getTypeExpansion(Ty, getContext());
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    forConstantArrayExpansion(*this, CAExp, LV.getAddress(),
                              [&](Address EltAddr) {
      LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
      ExpandTypeFromArgs(CAExp->EltTy, LV, AI);
    });
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    Address This = LV.getAddress();
    for (const CXXBaseSpecifier *BS : RExp->Bases) {
      // Perform a single step derived-to-base conversion.
      Address Base =
          GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
                                /*NullCheckValue=*/false, SourceLocation());
      LValue SubLV = MakeAddrLValue(Base, BS->getType());

      // Recurse onto bases.
      ExpandTypeFromArgs(BS->getType(), SubLV, AI);
    }
    for (auto FD : RExp->Fields) {
      // FIXME: What are the right qualifiers here?
      LValue SubLV = EmitLValueForField(LV, FD);
      ExpandTypeFromArgs(FD->getType(), SubLV, AI);
    }
  } else if (isa<ComplexExpansion>(Exp.get())) {
    auto realValue = *AI++;
    auto imagValue = *AI++;
    EmitStoreOfComplex(ComplexPairTy(realValue, imagValue), LV, /*init*/ true);
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    EmitStoreThroughLValue(RValue::get(*AI++), LV);
  }
}

void CodeGenFunction::ExpandTypeToArgs(
    QualType Ty, RValue RV, llvm::FunctionType *IRFuncTy,
    SmallVectorImpl<llvm::Value *> &IRCallArgs, unsigned &IRCallArgPos) {
  auto Exp = getTypeExpansion(Ty, getContext());
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    forConstantArrayExpansion(*this, CAExp, RV.getAggregateAddress(),
                              [&](Address EltAddr) {
      RValue EltRV =
          convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation());
      ExpandTypeToArgs(CAExp->EltTy, EltRV, IRFuncTy, IRCallArgs, IRCallArgPos);
    });
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    Address This = RV.getAggregateAddress();
    for (const CXXBaseSpecifier *BS : RExp->Bases) {
      // Perform a single step derived-to-base conversion.
      Address Base =
          GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
                                /*NullCheckValue=*/false, SourceLocation());
      RValue BaseRV = RValue::getAggregate(Base);

      // Recurse onto bases.
      ExpandTypeToArgs(BS->getType(), BaseRV, IRFuncTy, IRCallArgs,
                       IRCallArgPos);
    }

    LValue LV = MakeAddrLValue(This, Ty);
    for (auto FD : RExp->Fields) {
      RValue FldRV = EmitRValueForField(LV, FD, SourceLocation());
      ExpandTypeToArgs(FD->getType(), FldRV, IRFuncTy, IRCallArgs,
                       IRCallArgPos);
    }
  } else if (isa<ComplexExpansion>(Exp.get())) {
    ComplexPairTy CV = RV.getComplexVal();
    IRCallArgs[IRCallArgPos++] = CV.first;
    IRCallArgs[IRCallArgPos++] = CV.second;
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    assert(RV.isScalar() &&
           "Unexpected non-scalar rvalue during struct expansion.");

    // Insert a bitcast as needed.
    llvm::Value *V = RV.getScalarVal();
    if (IRCallArgPos < IRFuncTy->getNumParams() &&
        V->getType() != IRFuncTy->getParamType(IRCallArgPos))
      V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRCallArgPos));

    IRCallArgs[IRCallArgPos++] = V;
  }
}

/// Create a temporary allocation for the purposes of coercion.
static Address CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty,
                                           CharUnits MinAlign) {
  // Don't use an alignment that's worse than what LLVM would prefer.
  auto PrefAlign = CGF.CGM.getDataLayout().getPrefTypeAlignment(Ty);
  CharUnits Align = std::max(MinAlign, CharUnits::fromQuantity(PrefAlign));

  return CGF.CreateTempAlloca(Ty, Align);
}

/// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
/// accessing some number of bytes out of, try to gep into the struct to get
/// at its inner goodness. Dive as deep as possible without entering an element
/// with an in-memory size smaller than DstSize.
static Address
EnterStructPointerForCoercedAccess(Address SrcPtr,
                                   llvm::StructType *SrcSTy,
                                   uint64_t DstSize, CodeGenFunction &CGF) {
  // We can't dive into a zero-element struct.
  if (SrcSTy->getNumElements() == 0) return SrcPtr;

  llvm::Type *FirstElt = SrcSTy->getElementType(0);

  // If the first elt is at least as large as what we're looking for, or if the
  // first element is the same size as the whole struct, we can enter it. The
  // comparison must be made on the store size and not the alloca size. Using
  // the alloca size may overstate the size of the load.
  uint64_t FirstEltSize =
      CGF.CGM.getDataLayout().getTypeStoreSize(FirstElt);
  if (FirstEltSize < DstSize &&
      FirstEltSize < CGF.CGM.getDataLayout().getTypeStoreSize(SrcSTy))
    return SrcPtr;

  // GEP into the first element.
  SrcPtr = CGF.Builder.CreateStructGEP(SrcPtr, 0, CharUnits(), "coerce.dive");

  // If the first element is a struct, recurse.
  llvm::Type *SrcTy = SrcPtr.getElementType();
  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
    return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);

  return SrcPtr;
}
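
// Added illustration (editor's note, not part of the original source): given
// the LLVM type
//   %outer = type { { i32, i32 }, float }
// and DstSize == 4, the function enters the first element twice -- the inner
// struct's store size (8) is >= 4, and then the leading i32's store size (4)
// is >= 4 -- so it returns a pointer to that i32 rather than to the whole
// %outer, letting the caller load exactly the bytes it wants.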

/// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
/// are either integers or pointers. This does a truncation of the value if it
/// is too large or a zero extension if it is too small.
///
/// This behaves as if the value were coerced through memory, so on big-endian
/// targets the high bits are preserved in a truncation, while little-endian
/// targets preserve the low bits.
static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
                                             llvm::Type *Ty,
                                             CodeGenFunction &CGF) {
  if (Val->getType() == Ty)
    return Val;

  if (isa<llvm::PointerType>(Val->getType())) {
    // If this is Pointer->Pointer avoid conversion to and from int.
    if (isa<llvm::PointerType>(Ty))
      return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");

    // Convert the pointer to an integer so we can play with its width.
    Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
  }

  llvm::Type *DestIntTy = Ty;
  if (isa<llvm::PointerType>(DestIntTy))
    DestIntTy = CGF.IntPtrTy;

  if (Val->getType() != DestIntTy) {
    const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
    if (DL.isBigEndian()) {
      // Preserve the high bits on big-endian targets.
      // That is what memory coercion does.
      uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType());
      uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy);

      if (SrcSize > DstSize) {
        Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits");
        Val = CGF.Builder.CreateTrunc(Val, DestIntTy, "coerce.val.ii");
      } else {
        Val = CGF.Builder.CreateZExt(Val, DestIntTy, "coerce.val.ii");
        Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits");
      }
    } else {
      // Little-endian targets preserve the low bits. No shifts required.
      Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
    }
  }

  if (isa<llvm::PointerType>(Ty))
    Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
  return Val;
}
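
// Added illustration (editor's note, not part of the original source):
// coercing an i64 value to i32 behaves like a store followed by a narrower
// load. On a little-endian target the low 32 bits survive (a plain trunc);
// on a big-endian target the function first emits
//   %hi = lshr i64 %val, 32
// and truncates that instead, so the high 32 bits survive -- exactly what
// coercion through memory would produce on that target.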

/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
/// a pointer to an object of type \arg Ty, known to be aligned to
/// \arg SrcAlign bytes.
///
/// This safely handles the case when the src type is smaller than the
/// destination type; in this situation the values of bits which are not
/// present in the src are undefined.
static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty,
                                      CodeGenFunction &CGF) {
  llvm::Type *SrcTy = Src.getElementType();

  // If SrcTy and Ty are the same, just do a load.
  if (SrcTy == Ty)
    return CGF.Builder.CreateLoad(Src);

  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);

  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
    Src = EnterStructPointerForCoercedAccess(Src, SrcSTy, DstSize, CGF);
    SrcTy = Src.getType()->getElementType();
  }

  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
      (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
    llvm::Value *Load = CGF.Builder.CreateLoad(Src);
    return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
  }

  // If load is legal, just bitcast the src pointer.
  if (SrcSize >= DstSize) {
    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    Src = CGF.Builder.CreateBitCast(Src, llvm::PointerType::getUnqual(Ty));
    return CGF.Builder.CreateLoad(Src);
  }

  // Otherwise do coercion through memory. This is stupid, but simple.
  Address Tmp = CreateTempAllocaForCoercion(CGF, Ty, Src.getAlignment());
  Address Casted = CGF.Builder.CreateBitCast(Tmp, CGF.Int8PtrTy);
  Address SrcCasted = CGF.Builder.CreateBitCast(Src, CGF.Int8PtrTy);
  CGF.Builder.CreateMemCpy(Casted, SrcCasted,
                           llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize),
                           false);
  return CGF.Builder.CreateLoad(Tmp);
}
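
// Added illustration (editor's note, not part of the original source):
// loading a { i8, i8 } (alloc size 2) as an i32 (alloc size 4) cannot be
// done by simply bitcasting the pointer, since the wide load would read past
// the source object; the code above instead memcpy's the 2 valid bytes into
// a fresh i32 temporary and loads from that, leaving the remaining bits
// undefined as documented.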

// Function to store a first-class aggregate into memory. We prefer to
// store the elements rather than the aggregate to be more friendly to
// fast-isel.
// FIXME: Do we need to recurse here?
static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
                          Address Dest, bool DestIsVolatile) {
  // Prefer scalar stores to first-class aggregate stores.
  if (llvm::StructType *STy =
          dyn_cast<llvm::StructType>(Val->getType())) {
    const llvm::StructLayout *Layout =
        CGF.CGM.getDataLayout().getStructLayout(STy);

    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      auto EltOffset = CharUnits::fromQuantity(Layout->getElementOffset(i));
      Address EltPtr = CGF.Builder.CreateStructGEP(Dest, i, EltOffset);
      llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i);
      CGF.Builder.CreateStore(Elt, EltPtr, DestIsVolatile);
    }
  } else {
    CGF.Builder.CreateStore(Val, Dest, DestIsVolatile);
  }
}

/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
/// where the source and destination may have different types. The
/// destination is known to be aligned to \arg DstAlign bytes.
///
/// This safely handles the case when the src type is larger than the
/// destination type; the upper bits of the src will be lost.
static void CreateCoercedStore(llvm::Value *Src,
                               Address Dst,
                               bool DstIsVolatile,
                               CodeGenFunction &CGF) {
  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = Dst.getType()->getElementType();
  if (SrcTy == DstTy) {
    CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
    return;
  }

  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
    Dst = EnterStructPointerForCoercedAccess(Dst, DstSTy, SrcSize, CGF);
    DstTy = Dst.getType()->getElementType();
  }

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
      (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
    Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
    CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
    return;
  }

  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);

  // If store is legal, just bitcast the src pointer.
  if (SrcSize <= DstSize) {
    Dst = CGF.Builder.CreateBitCast(Dst, llvm::PointerType::getUnqual(SrcTy));
    BuildAggStore(CGF, Src, Dst, DstIsVolatile);
  } else {
    // Otherwise do coercion through memory. This is stupid, but
    // simple.

    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    Address Tmp = CreateTempAllocaForCoercion(CGF, SrcTy, Dst.getAlignment());
    CGF.Builder.CreateStore(Src, Tmp);
    Address Casted = CGF.Builder.CreateBitCast(Tmp, CGF.Int8PtrTy);
    Address DstCasted = CGF.Builder.CreateBitCast(Dst, CGF.Int8PtrTy);
    CGF.Builder.CreateMemCpy(DstCasted, Casted,
                             llvm::ConstantInt::get(CGF.IntPtrTy, DstSize),
                             false);
  }
}

static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr,
                                   const ABIArgInfo &info) {
  if (unsigned offset = info.getDirectOffset()) {
    addr = CGF.Builder.CreateElementBitCast(addr, CGF.Int8Ty);
    addr = CGF.Builder.CreateConstInBoundsByteGEP(addr,
                                           CharUnits::fromQuantity(offset));
    addr = CGF.Builder.CreateElementBitCast(addr, info.getCoerceToType());
  }
  return addr;
}

namespace {

/// Encapsulates information about the way function arguments from
/// CGFunctionInfo should be passed to the actual LLVM IR function.
class ClangToLLVMArgMapping {
  static const unsigned InvalidIndex = ~0U;
  unsigned InallocaArgNo;
  unsigned SRetArgNo;
  unsigned TotalIRArgs;

  /// Arguments of the LLVM IR function corresponding to a single Clang
  /// argument.
  struct IRArgs {
    unsigned PaddingArgIndex;
    // Argument is expanded to IR arguments at positions
    // [FirstArgIndex, FirstArgIndex + NumberOfArgs).
    unsigned FirstArgIndex;
    unsigned NumberOfArgs;

    IRArgs()
        : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex),
          NumberOfArgs(0) {}
  };

  SmallVector<IRArgs, 8> ArgInfo;

public:
  ClangToLLVMArgMapping(const ASTContext &Context, const CGFunctionInfo &FI,
                        bool OnlyRequiredArgs = false)
      : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0),
        ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) {
    construct(Context, FI, OnlyRequiredArgs);
  }

  bool hasInallocaArg() const { return InallocaArgNo != InvalidIndex; }
  unsigned getInallocaArgNo() const {
    assert(hasInallocaArg());
    return InallocaArgNo;
  }

  bool hasSRetArg() const { return SRetArgNo != InvalidIndex; }
  unsigned getSRetArgNo() const {
    assert(hasSRetArg());
    return SRetArgNo;
  }

  unsigned totalIRArgs() const { return TotalIRArgs; }

  bool hasPaddingArg(unsigned ArgNo) const {
    assert(ArgNo < ArgInfo.size());
    return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex;
  }
  unsigned getPaddingArgNo(unsigned ArgNo) const {
    assert(hasPaddingArg(ArgNo));
    return ArgInfo[ArgNo].PaddingArgIndex;
  }

  /// Returns the index of the first IR argument corresponding to ArgNo, and
  /// their quantity.
  std::pair<unsigned, unsigned> getIRArgs(unsigned ArgNo) const {
    assert(ArgNo < ArgInfo.size());
    return std::make_pair(ArgInfo[ArgNo].FirstArgIndex,
                          ArgInfo[ArgNo].NumberOfArgs);
  }

private:
  void construct(const ASTContext &Context, const CGFunctionInfo &FI,
                 bool OnlyRequiredArgs);
};

void ClangToLLVMArgMapping::construct(const ASTContext &Context,
                                      const CGFunctionInfo &FI,
                                      bool OnlyRequiredArgs) {
  unsigned IRArgNo = 0;
  bool SwapThisWithSRet = false;
  const ABIArgInfo &RetAI = FI.getReturnInfo();

  if (RetAI.getKind() == ABIArgInfo::Indirect) {
    SwapThisWithSRet = RetAI.isSRetAfterThis();
    SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++;
  }

  unsigned ArgNo = 0;
  unsigned NumArgs = OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size();
  for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(); ArgNo < NumArgs;
       ++I, ++ArgNo) {
    assert(I != FI.arg_end());
    QualType ArgType = I->type;
    const ABIArgInfo &AI = I->info;
    // Collect data about IR arguments corresponding to Clang argument ArgNo.
    auto &IRArgs = ArgInfo[ArgNo];

    if (AI.getPaddingType())
      IRArgs.PaddingArgIndex = IRArgNo++;

    switch (AI.getKind()) {
    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // FIXME: handle sseregparm someday...
      llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.getCoerceToType());
      if (AI.isDirect() && AI.getCanBeFlattened() && STy) {
        IRArgs.NumberOfArgs = STy->getNumElements();
      } else {
        IRArgs.NumberOfArgs = 1;
      }
      break;
    }
    case ABIArgInfo::Indirect:
      IRArgs.NumberOfArgs = 1;
      break;
    case ABIArgInfo::Ignore:
    case ABIArgInfo::InAlloca:
      // ignore and inalloca don't have matching LLVM parameters.
      IRArgs.NumberOfArgs = 0;
      break;
    case ABIArgInfo::Expand: {
      IRArgs.NumberOfArgs = getExpansionSize(ArgType, Context);
      break;
    }
    }

    if (IRArgs.NumberOfArgs > 0) {
      IRArgs.FirstArgIndex = IRArgNo;
      IRArgNo += IRArgs.NumberOfArgs;
    }

    // Skip over the sret parameter when it comes second. We already handled it
    // above.
    if (IRArgNo == 1 && SwapThisWithSRet)
      IRArgNo++;
  }
  assert(ArgNo == ArgInfo.size());

  if (FI.usesInAlloca())
    InallocaArgNo = IRArgNo++;

  TotalIRArgs = IRArgNo;
}
} // namespace
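
// Added illustration (editor's note, not part of the original source): for a
// hypothetical function returning a large struct indirectly on x86,
//   struct Big f(int x);
// the return is ABIArgInfo::Indirect, so IR argument 0 becomes the sret
// pointer and 'x' maps to IR argument 1, roughly
//   define void @f(%struct.Big* sret %agg.result, i32 %x)
// ClangToLLVMArgMapping records exactly this correspondence: SRetArgNo == 0
// and getIRArgs(0) == {1, 1}.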

/***/

bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
  return FI.getReturnInfo().isIndirect();
}

bool CodeGenModule::ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI) {
  return ReturnTypeUsesSRet(FI) &&
         getTargetCodeGenInfo().doesReturnSlotInterfereWithArgs();
}

bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
  if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
    switch (BT->getKind()) {
    default:
      return false;
    case BuiltinType::Float:
      return getTarget().useObjCFPRetForRealType(TargetInfo::Float);
    case BuiltinType::Double:
      return getTarget().useObjCFPRetForRealType(TargetInfo::Double);
    case BuiltinType::LongDouble:
      return getTarget().useObjCFPRetForRealType(TargetInfo::LongDouble);
    }
  }

  return false;
}

bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) {
  if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
    if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
      if (BT->getKind() == BuiltinType::LongDouble)
        return getTarget().useObjCFP2RetForComplexLongDouble();
    }
  }

  return false;
}

llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
  const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD);
  return GetFunctionType(FI);
}

llvm::FunctionType *
CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {

  bool Inserted = FunctionsBeingProcessed.insert(&FI).second;
  (void)Inserted;
  assert(Inserted && "Recursively being processed?");

  llvm::Type *resultType = nullptr;
  const ABIArgInfo &retAI = FI.getReturnInfo();
  switch (retAI.getKind()) {
  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    resultType = retAI.getCoerceToType();
    break;

  case ABIArgInfo::InAlloca:
    if (retAI.getInAllocaSRet()) {
      // sret things on win32 aren't void, they return the sret pointer.
      QualType ret = FI.getReturnType();
      llvm::Type *ty = ConvertType(ret);
      unsigned addressSpace = Context.getTargetAddressSpace(ret);
      resultType = llvm::PointerType::get(ty, addressSpace);
    } else {
      resultType = llvm::Type::getVoidTy(getLLVMContext());
    }
    break;

  case ABIArgInfo::Indirect:
  case ABIArgInfo::Ignore:
    resultType = llvm::Type::getVoidTy(getLLVMContext());
    break;
  }

  ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI, true);
  SmallVector<llvm::Type*, 8> ArgTypes(IRFunctionArgs.totalIRArgs());

  // Add type for sret argument.
  if (IRFunctionArgs.hasSRetArg()) {
    QualType Ret = FI.getReturnType();
    llvm::Type *Ty = ConvertType(Ret);
    unsigned AddressSpace = Context.getTargetAddressSpace(Ret);
    ArgTypes[IRFunctionArgs.getSRetArgNo()] =
        llvm::PointerType::get(Ty, AddressSpace);
  }

  // Add type for inalloca argument.
  if (IRFunctionArgs.hasInallocaArg()) {
    auto ArgStruct = FI.getArgStruct();
    assert(ArgStruct);
    ArgTypes[IRFunctionArgs.getInallocaArgNo()] = ArgStruct->getPointerTo();
  }

  // Add in all of the required arguments.
  unsigned ArgNo = 0;
  CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
                                     ie = it + FI.getNumRequiredArgs();
  for (; it != ie; ++it, ++ArgNo) {
    const ABIArgInfo &ArgInfo = it->info;

    // Insert a padding type to ensure proper alignment.
    if (IRFunctionArgs.hasPaddingArg(ArgNo))
      ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
          ArgInfo.getPaddingType();

    unsigned FirstIRArg, NumIRArgs;
    std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);

    switch (ArgInfo.getKind()) {
    case ABIArgInfo::Ignore:
    case ABIArgInfo::InAlloca:
      assert(NumIRArgs == 0);
      break;

    case ABIArgInfo::Indirect: {
      assert(NumIRArgs == 1);
      // indirect arguments are always on the stack, which is addr space #0.
      llvm::Type *LTy = ConvertTypeForMem(it->type);
      ArgTypes[FirstIRArg] = LTy->getPointerTo();
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // Fast-isel and the optimizer generally like scalar values better than
      // FCAs, so we flatten them if this is safe to do for this argument.
      llvm::Type *argType = ArgInfo.getCoerceToType();
      llvm::StructType *st = dyn_cast<llvm::StructType>(argType);
      if (st && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
        assert(NumIRArgs == st->getNumElements());
        for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
          ArgTypes[FirstIRArg + i] = st->getElementType(i);
      } else {
        assert(NumIRArgs == 1);
        ArgTypes[FirstIRArg] = argType;
      }
      break;
    }

    case ABIArgInfo::Expand:
      auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
      getExpandedTypes(it->type, ArgTypesIter);
      assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
      break;
    }
  }

  bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
  assert(Erased && "Not in set?");

  return llvm::FunctionType::get(resultType, ArgTypes, FI.isVariadic());
}

llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();

  if (!isFuncTypeConvertible(FPT))
    return llvm::StructType::get(getLLVMContext());

  const CGFunctionInfo *Info;
  if (isa<CXXDestructorDecl>(MD))
    Info =
        &arrangeCXXStructorDeclaration(MD, getFromDtorType(GD.getDtorType()));
  else
    Info = &arrangeCXXMethodDeclaration(MD);
  return GetFunctionType(*Info);
}

static void AddAttributesFromFunctionProtoType(ASTContext &Ctx,
                                               llvm::AttrBuilder &FuncAttrs,
                                               const FunctionProtoType *FPT) {
  if (!FPT)
    return;

  if (!isUnresolvedExceptionSpec(FPT->getExceptionSpecType()) &&
      FPT->isNothrow(Ctx))
    FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
}

void CodeGenModule::ConstructAttributeList(
    StringRef Name, const CGFunctionInfo &FI, CGCalleeInfo CalleeInfo,
    AttributeListType &PAL, unsigned &CallingConv, bool AttrOnCallSite) {
  llvm::AttrBuilder FuncAttrs;
  llvm::AttrBuilder RetAttrs;
  bool HasOptnone = false;

  CallingConv = FI.getEffectiveCallingConvention();

  if (FI.isNoReturn())
    FuncAttrs.addAttribute(llvm::Attribute::NoReturn);

  // If we have information about the function prototype, we can learn
  // attributes from there.
  AddAttributesFromFunctionProtoType(getContext(), FuncAttrs,
                                     CalleeInfo.getCalleeFunctionProtoType());

  const Decl *TargetDecl = CalleeInfo.getCalleeDecl();

  // FIXME: handle sseregparm someday...
  if (TargetDecl) {
    if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
    if (TargetDecl->hasAttr<NoThrowAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
    if (TargetDecl->hasAttr<NoReturnAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
    if (TargetDecl->hasAttr<NoDuplicateAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate);

    if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
      AddAttributesFromFunctionProtoType(
          getContext(), FuncAttrs, Fn->getType()->getAs<FunctionProtoType>());
      // Don't use [[noreturn]] or _Noreturn for a call to a virtual function.
      // These attributes are not inherited by overloads.
      const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn);
      if (Fn->isNoReturn() && !(AttrOnCallSite && MD && MD->isVirtual()))
        FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
    }

    // 'const', 'pure' and 'noalias' attributed functions are also nounwind.
    if (TargetDecl->hasAttr<ConstAttr>()) {
      FuncAttrs.addAttribute(llvm::Attribute::ReadNone);
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
    } else if (TargetDecl->hasAttr<PureAttr>()) {
      FuncAttrs.addAttribute(llvm::Attribute::ReadOnly);
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
    } else if (TargetDecl->hasAttr<NoAliasAttr>()) {
      FuncAttrs.addAttribute(llvm::Attribute::ArgMemOnly);
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
    }
    if (TargetDecl->hasAttr<RestrictAttr>())
      RetAttrs.addAttribute(llvm::Attribute::NoAlias);
    if (TargetDecl->hasAttr<ReturnsNonNullAttr>())
      RetAttrs.addAttribute(llvm::Attribute::NonNull);

    HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>();
  }

  // OptimizeNoneAttr takes precedence over -Os or -Oz. No warning needed.
  if (!HasOptnone) {
    if (CodeGenOpts.OptimizeSize)
      FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
    if (CodeGenOpts.OptimizeSize == 2)
      FuncAttrs.addAttribute(llvm::Attribute::MinSize);
  }

  if (CodeGenOpts.DisableRedZone)
    FuncAttrs.addAttribute(llvm::Attribute::NoRedZone);
  if (CodeGenOpts.NoImplicitFloat)
    FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat);
  if (CodeGenOpts.EnableSegmentedStacks &&
      !(TargetDecl && TargetDecl->hasAttr<NoSplitStackAttr>()))
    FuncAttrs.addAttribute("split-stack");

  if (AttrOnCallSite) {
    // Attributes that should go on the call site only.
    if (!CodeGenOpts.SimplifyLibCalls ||
        CodeGenOpts.isNoBuiltinFunc(Name.data()))
      FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);
    if (!CodeGenOpts.TrapFuncName.empty())
      FuncAttrs.addAttribute("trap-func-name", CodeGenOpts.TrapFuncName);
  } else {
    // Attributes that should go on the function, but not the call site.
    if (!CodeGenOpts.DisableFPElim) {
      FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
    } else if (CodeGenOpts.OmitLeafFramePointer) {
      FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
      FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
    } else {
      FuncAttrs.addAttribute("no-frame-pointer-elim", "true");
      FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
    }

    bool DisableTailCalls =
        CodeGenOpts.DisableTailCalls ||
        (TargetDecl && TargetDecl->hasAttr<DisableTailCallsAttr>());
    FuncAttrs.addAttribute("disable-tail-calls",
                           llvm::toStringRef(DisableTailCalls));

    FuncAttrs.addAttribute("less-precise-fpmad",
                           llvm::toStringRef(CodeGenOpts.LessPreciseFPMAD));
    FuncAttrs.addAttribute("no-infs-fp-math",
                           llvm::toStringRef(CodeGenOpts.NoInfsFPMath));
    FuncAttrs.addAttribute("no-nans-fp-math",
                           llvm::toStringRef(CodeGenOpts.NoNaNsFPMath));
    FuncAttrs.addAttribute("unsafe-fp-math",
                           llvm::toStringRef(CodeGenOpts.UnsafeFPMath));
    FuncAttrs.addAttribute("use-soft-float",
                           llvm::toStringRef(CodeGenOpts.SoftFloat));
    FuncAttrs.addAttribute("stack-protector-buffer-size",
                           llvm::utostr(CodeGenOpts.SSPBufferSize));

    if (CodeGenOpts.StackRealignment)
      FuncAttrs.addAttribute("stackrealign");

    // Add target-cpu and target-features attributes to functions. If
    // we have a decl for the function and it has a target attribute then
    // parse that and add it to the feature set.
    StringRef TargetCPU = getTarget().getTargetOpts().CPU;
    const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl);
    if (FD && FD->hasAttr<TargetAttr>()) {
      llvm::StringMap<bool> FeatureMap;
      getFunctionFeatureMap(FeatureMap, FD);

      // Produce the canonical string for this set of features.
      std::vector<std::string> Features;
      for (llvm::StringMap<bool>::const_iterator it = FeatureMap.begin(),
                                                 ie = FeatureMap.end();
           it != ie; ++it)
        Features.push_back((it->second ? "+" : "-") + it->first().str());

      // Now add the target-cpu and target-features to the function.
      // While we populated the feature map above, we still need to
      // get and parse the target attribute so we can get the cpu for
      // the function.
      const auto *TD = FD->getAttr<TargetAttr>();
      TargetAttr::ParsedTargetAttr ParsedAttr = TD->parse();
      if (ParsedAttr.second != "")
        TargetCPU = ParsedAttr.second;
      if (TargetCPU != "")
        FuncAttrs.addAttribute("target-cpu", TargetCPU);
      if (!Features.empty()) {
        std::sort(Features.begin(), Features.end());
        FuncAttrs.addAttribute(
            "target-features",
            llvm::join(Features.begin(), Features.end(), ","));
      }
    } else {
      // Otherwise just add the existing target cpu and target features to the
      // function.
      std::vector<std::string> &Features = getTarget().getTargetOpts().Features;
      if (TargetCPU != "")
        FuncAttrs.addAttribute("target-cpu", TargetCPU);
      if (!Features.empty()) {
        std::sort(Features.begin(), Features.end());
        FuncAttrs.addAttribute(
            "target-features",
            llvm::join(Features.begin(), Features.end(), ","));
      }
    }
  }
1596 
1597  ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI);
1598 
1599  QualType RetTy = FI.getReturnType();
1600  const ABIArgInfo &RetAI = FI.getReturnInfo();
1601  switch (RetAI.getKind()) {
1602  case ABIArgInfo::Extend:
1603  if (RetTy->hasSignedIntegerRepresentation())
1604  RetAttrs.addAttribute(llvm::Attribute::SExt);
1605  else if (RetTy->hasUnsignedIntegerRepresentation())
1606  RetAttrs.addAttribute(llvm::Attribute::ZExt);
1607  // FALL THROUGH
1608  case ABIArgInfo::Direct:
1609  if (RetAI.getInReg())
1610  RetAttrs.addAttribute(llvm::Attribute::InReg);
1611  break;
1612  case ABIArgInfo::Ignore:
1613  break;
1614 
1615  case ABIArgInfo::InAlloca:
1616  case ABIArgInfo::Indirect: {
1617  // inalloca and sret disable readnone and readonly
1618  FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
1619  .removeAttribute(llvm::Attribute::ReadNone);
1620  break;
1621  }
1622 
1623  case ABIArgInfo::Expand:
1624  llvm_unreachable("Invalid ABI kind for return argument");
1625  }
1626 
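  // A returned reference is always bound to valid storage: mark it
  // dereferenceable when the pointee size is known, otherwise at least
  // nonnull in address space 0.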
1627  if (const auto *RefTy = RetTy->getAs<ReferenceType>()) {
1628  QualType PTy = RefTy->getPointeeType();
1629  if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
1630  RetAttrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy)
1631  .getQuantity());
1632  else if (getContext().getTargetAddressSpace(PTy) == 0)
1633  RetAttrs.addAttribute(llvm::Attribute::NonNull);
1634  }
1635 
1636  // Attach return attributes.
1637  if (RetAttrs.hasAttributes()) {
1638  PAL.push_back(llvm::AttributeSet::get(
1639  getLLVMContext(), llvm::AttributeSet::ReturnIndex, RetAttrs));
1640  }
1641 
1642  // Attach attributes to sret.
1643  if (IRFunctionArgs.hasSRetArg()) {
1644  llvm::AttrBuilder SRETAttrs;
1645  SRETAttrs.addAttribute(llvm::Attribute::StructRet);
1646  if (RetAI.getInReg())
1647  SRETAttrs.addAttribute(llvm::Attribute::InReg);
1648  PAL.push_back(llvm::AttributeSet::get(
1649  getLLVMContext(), IRFunctionArgs.getSRetArgNo() + 1, SRETAttrs));
1650  }
1651 
1652  // Attach attributes to inalloca argument.
1653  if (IRFunctionArgs.hasInallocaArg()) {
1654  llvm::AttrBuilder Attrs;
1655  Attrs.addAttribute(llvm::Attribute::InAlloca);
1656  PAL.push_back(llvm::AttributeSet::get(
1657  getLLVMContext(), IRFunctionArgs.getInallocaArgNo() + 1, Attrs));
1658  }
1659 
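  // Now walk the Clang-level arguments, translating each ABIArgInfo kind
  // into the corresponding IR parameter attributes.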
1660  unsigned ArgNo = 0;
1661  for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(),
1662  E = FI.arg_end();
1663  I != E; ++I, ++ArgNo) {
1664  QualType ParamType = I->type;
1665  const ABIArgInfo &AI = I->info;
1666  llvm::AttrBuilder Attrs;
1667 
1668  // Add attribute for padding argument, if necessary.
1669  if (IRFunctionArgs.hasPaddingArg(ArgNo)) {
1670  if (AI.getPaddingInReg())
1671  PAL.push_back(llvm::AttributeSet::get(
1672  getLLVMContext(), IRFunctionArgs.getPaddingArgNo(ArgNo) + 1,
1673  llvm::Attribute::InReg));
1674  }
1675 
1676  // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
1677  // have the corresponding parameter variable. It doesn't make
1678  // sense to do it here because parameters are so messed up.
1679  switch (AI.getKind()) {
1680  case ABIArgInfo::Extend:
1681  if (ParamType->isSignedIntegerOrEnumerationType())
1682  Attrs.addAttribute(llvm::Attribute::SExt);
1683  else if (ParamType->isUnsignedIntegerOrEnumerationType()) {
1684  if (getTypes().getABIInfo().shouldSignExtUnsignedType(ParamType))
1685  Attrs.addAttribute(llvm::Attribute::SExt);
1686  else
1687  Attrs.addAttribute(llvm::Attribute::ZExt);
1688  }
1689  // FALL THROUGH
1690  case ABIArgInfo::Direct:
1691  if (ArgNo == 0 && FI.isChainCall())
1692  Attrs.addAttribute(llvm::Attribute::Nest);
1693  else if (AI.getInReg())
1694  Attrs.addAttribute(llvm::Attribute::InReg);
1695  break;
1696 
1697  case ABIArgInfo::Indirect: {
1698  if (AI.getInReg())
1699  Attrs.addAttribute(llvm::Attribute::InReg);
1700 
1701  if (AI.getIndirectByVal())
1702  Attrs.addAttribute(llvm::Attribute::ByVal);
1703 
1704  CharUnits Align = AI.getIndirectAlign();
1705 
1706  // In a byval argument, it is important that the required
1707  // alignment of the type is honored, as LLVM might be creating a
1708  // *new* stack object, and needs to know what alignment to give
1709  // it. (Sometimes it can deduce a sensible alignment on its own,
1710  // but not if clang decides it must emit a packed struct, or the
1711  // user specifies increased alignment requirements.)
1712  //
1713  // This is different from indirect *not* byval, where the object
1714  // exists already, and the align attribute is purely
1715  // informative.
1716  assert(!Align.isZero());
1717 
1718  // For now, only add this when we have a byval argument.
1719  // TODO: be less lazy about updating test cases.
1720  if (AI.getIndirectByVal())
1721  Attrs.addAlignmentAttr(Align.getQuantity());
1722 
1723  // byval disables readnone and readonly.
1724  FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
1725  .removeAttribute(llvm::Attribute::ReadNone);
1726  break;
1727  }
1728  case ABIArgInfo::Ignore:
1729  case ABIArgInfo::Expand:
1730  continue;
1731 
1732  case ABIArgInfo::InAlloca:
1733  // inalloca disables readnone and readonly.
1734  FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
1735  .removeAttribute(llvm::Attribute::ReadNone);
1736  continue;
1737  }
1738 
1739  if (const auto *RefTy = ParamType->getAs<ReferenceType>()) {
1740  QualType PTy = RefTy->getPointeeType();
1741  if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
1742  Attrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy)
1743  .getQuantity());
1744  else if (getContext().getTargetAddressSpace(PTy) == 0)
1745  Attrs.addAttribute(llvm::Attribute::NonNull);
1746  }
1747 
1748  if (Attrs.hasAttributes()) {
1749  unsigned FirstIRArg, NumIRArgs;
1750  std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
1751  for (unsigned i = 0; i < NumIRArgs; i++)
1752  PAL.push_back(llvm::AttributeSet::get(getLLVMContext(),
1753  FirstIRArg + i + 1, Attrs));
1754  }
1755  }
1756  assert(ArgNo == FI.arg_size());
1757 
1758  if (FuncAttrs.hasAttributes())
1759  PAL.push_back(llvm::
1760  AttributeSet::get(getLLVMContext(),
1761  llvm::AttributeSet::FunctionIndex,
1762  FuncAttrs));
1763 }
1764 
1765 /// An argument came in as a promoted argument; demote it back to its
1766 /// declared type.
1767 static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
1768  const VarDecl *var,
1769  llvm::Value *value) {
1770  llvm::Type *varType = CGF.ConvertType(var->getType());
1771 
1772  // This can happen with promotions that actually don't change the
1773  // underlying type, like the enum promotions.
1774  if (value->getType() == varType) return value;
1775 
1776  assert((varType->isIntegerTy() || varType->isFloatingPointTy())
1777  && "unexpected promotion type");
1778 
1779  if (isa<llvm::IntegerType>(varType))
1780  return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");
1781 
1782  return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
1783 }
1784 
1785 /// Returns the attribute (either parameter attribute, or function
1786 /// attribute), which declares argument ArgNo to be non-null.
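/// For example, 'void f(int *p) __attribute__((nonnull(1)))' yields the
/// attribute for ArgNo 0 (attribute indices are 1-based in the source).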
1787 static const NonNullAttr *getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD,
1788  QualType ArgType, unsigned ArgNo) {
1789  // FIXME: __attribute__((nonnull)) can also be applied to:
1790  // - references to pointers, where the pointee is known to be
1791  // nonnull (apparently a Clang extension)
1792  // - transparent unions containing pointers
1793  // In the former case, LLVM IR cannot represent the constraint. In
1794  // the latter case, we have no guarantee that the transparent union
1795  // is in fact passed as a pointer.
1796  if (!ArgType->isAnyPointerType() && !ArgType->isBlockPointerType())
1797  return nullptr;
1798  // First, check attribute on parameter itself.
1799  if (PVD) {
1800  if (auto ParmNNAttr = PVD->getAttr<NonNullAttr>())
1801  return ParmNNAttr;
1802  }
1803  // Check function attributes.
1804  if (!FD)
1805  return nullptr;
1806  for (const auto *NNAttr : FD->specific_attrs<NonNullAttr>()) {
1807  if (NNAttr->isNonNull(ArgNo))
1808  return NNAttr;
1809  }
1810  return nullptr;
1811 }
1812 
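/// Emit the function prologue: name the IR arguments, add decl-driven
/// parameter attributes (nonnull, align_value, noalias), and bind each
/// Clang parameter to a direct value or an address.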
1813 void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
1814  llvm::Function *Fn,
1815  const FunctionArgList &Args) {
1816  if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>())
1817  // Naked functions don't have prologues.
1818  return;
1819 
1820  // If this is an implicit-return-zero function, go ahead and
1821  // initialize the return value. TODO: it might be nice to have
1822  // a more general mechanism for this that didn't require synthesized
1823  // return statements.
1824  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) {
1825  if (FD->hasImplicitReturnZero()) {
1826  QualType RetTy = FD->getReturnType().getUnqualifiedType();
1827  llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
1828  llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
1829  Builder.CreateStore(Zero, ReturnValue);
1830  }
1831  }
1832 
1833  // FIXME: We no longer need the types from FunctionArgList; lift up and
1834  // simplify.
1835 
1836  ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), FI);
1837  // Flattened function arguments.
1838  SmallVector<llvm::Argument *, 16> FnArgs;
1839  FnArgs.reserve(IRFunctionArgs.totalIRArgs());
1840  for (auto &Arg : Fn->args()) {
1841  FnArgs.push_back(&Arg);
1842  }
1843  assert(FnArgs.size() == IRFunctionArgs.totalIRArgs());
1844 
1845  // If we're using inalloca, all the memory arguments are GEPs off of the last
1846  // parameter, which is a pointer to the complete memory area.
1847  Address ArgStruct = Address::invalid();
1848  const llvm::StructLayout *ArgStructLayout = nullptr;
1849  if (IRFunctionArgs.hasInallocaArg()) {
1850  ArgStructLayout = CGM.getDataLayout().getStructLayout(FI.getArgStruct());
1851  ArgStruct = Address(FnArgs[IRFunctionArgs.getInallocaArgNo()],
1852  FI.getArgStructAlignment());
1853 
1854  assert(ArgStruct.getType() == FI.getArgStruct()->getPointerTo());
1855  }
1856 
1857  // Name the struct return parameter.
1858  if (IRFunctionArgs.hasSRetArg()) {
1859  auto AI = FnArgs[IRFunctionArgs.getSRetArgNo()];
1860  AI->setName("agg.result");
1861  AI->addAttr(llvm::AttributeSet::get(getLLVMContext(), AI->getArgNo() + 1,
1862  llvm::Attribute::NoAlias));
1863  }
1864 
1865  // Track if we received the parameter as a pointer (indirect, byval, or
1866  // inalloca). If we already have a pointer, EmitParmDecl doesn't need to copy it
1867  // into a local alloca for us.
1868  SmallVector<ParamValue, 16> ArgVals;
1869  ArgVals.reserve(Args.size());
1870 
1871  // Create a pointer value for every parameter declaration. This usually
1872  // entails copying one or more LLVM IR arguments into an alloca. Don't push
1873  // any cleanups or do anything that might unwind. We do that separately, so
1874  // we can push the cleanups in the correct order for the ABI.
1875  assert(FI.arg_size() == Args.size() &&
1876  "Mismatch between function signature & arguments.");
1877  unsigned ArgNo = 0;
1878  CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
1879  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
1880  i != e; ++i, ++info_it, ++ArgNo) {
1881  const VarDecl *Arg = *i;
1882  QualType Ty = info_it->type;
1883  const ABIArgInfo &ArgI = info_it->info;
1884 
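  // K&R-promoted parameters arrive as the promoted type (e.g. 'int' for
  // 'short'); emitArgumentDemotion truncates them back below.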
1885  bool isPromoted =
1886  isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();
1887 
1888  unsigned FirstIRArg, NumIRArgs;
1889  std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
1890 
1891  switch (ArgI.getKind()) {
1892  case ABIArgInfo::InAlloca: {
1893  assert(NumIRArgs == 0);
1894  auto FieldIndex = ArgI.getInAllocaFieldIndex();
1895  CharUnits FieldOffset =
1896  CharUnits::fromQuantity(ArgStructLayout->getElementOffset(FieldIndex));
1897  Address V = Builder.CreateStructGEP(ArgStruct, FieldIndex, FieldOffset,
1898  Arg->getName());
1899  ArgVals.push_back(ParamValue::forIndirect(V));
1900  break;
1901  }
1902 
1903  case ABIArgInfo::Indirect: {
1904  assert(NumIRArgs == 1);
1905  Address ParamAddr = Address(FnArgs[FirstIRArg], ArgI.getIndirectAlign());
1906 
1907  if (!hasScalarEvaluationKind(Ty)) {
1908  // Aggregates and complex variables are accessed by reference. All we
1909  // need to do is realign the value, if requested.
1910  Address V = ParamAddr;
1911  if (ArgI.getIndirectRealign()) {
1912  Address AlignedTemp = CreateMemTemp(Ty, "coerce");
1913 
1914  // Copy from the incoming argument pointer to the temporary with the
1915  // appropriate alignment.
1916  //
1917  // FIXME: We should have a common utility for generating an aggregate
1918  // copy.
1919  CharUnits Size = getContext().getTypeSizeInChars(Ty);
1920  auto SizeVal = llvm::ConstantInt::get(IntPtrTy, Size.getQuantity());
1921  Address Dst = Builder.CreateBitCast(AlignedTemp, Int8PtrTy);
1922  Address Src = Builder.CreateBitCast(ParamAddr, Int8PtrTy);
1923  Builder.CreateMemCpy(Dst, Src, SizeVal, false);
1924  V = AlignedTemp;
1925  }
1926  ArgVals.push_back(ParamValue::forIndirect(V));
1927  } else {
1928  // Load scalar value from indirect argument.
1929  llvm::Value *V =
1930  EmitLoadOfScalar(ParamAddr, false, Ty, Arg->getLocStart());
1931 
1932  if (isPromoted)
1933  V = emitArgumentDemotion(*this, Arg, V);
1934  ArgVals.push_back(ParamValue::forDirect(V));
1935  }
1936  break;
1937  }
1938 
1939  case ABIArgInfo::Extend:
1940  case ABIArgInfo::Direct: {
1941 
1942  // If we have the trivial case, handle it with no muss and fuss.
1943  if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
1944  ArgI.getCoerceToType() == ConvertType(Ty) &&
1945  ArgI.getDirectOffset() == 0) {
1946  assert(NumIRArgs == 1);
1947  auto AI = FnArgs[FirstIRArg];
1948  llvm::Value *V = AI;
1949 
1950  if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) {
1951  if (getNonNullAttr(CurCodeDecl, PVD, PVD->getType(),
1952  PVD->getFunctionScopeIndex()))
1953  AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
1954  AI->getArgNo() + 1,
1955  llvm::Attribute::NonNull));
1956 
1957  QualType OTy = PVD->getOriginalType();
1958  if (const auto *ArrTy =
1959  getContext().getAsConstantArrayType(OTy)) {
1960  // A C99 array parameter declaration with the static keyword also
1961  // indicates dereferenceability, and if the size is constant we can
1962  // use the dereferenceable attribute (which requires the size in
1963  // bytes).
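  // For example, 'void f(int a[static 4])' permits dereferenceable(16)
  // on 'a' when 'int' occupies four bytes.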
1964  if (ArrTy->getSizeModifier() == ArrayType::Static) {
1965  QualType ETy = ArrTy->getElementType();
1966  uint64_t ArrSize = ArrTy->getSize().getZExtValue();
1967  if (!ETy->isIncompleteType() && ETy->isConstantSizeType() &&
1968  ArrSize) {
1969  llvm::AttrBuilder Attrs;
1970  Attrs.addDereferenceableAttr(
1971  getContext().getTypeSizeInChars(ETy).getQuantity()*ArrSize);
1972  AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
1973  AI->getArgNo() + 1, Attrs));
1974  } else if (getContext().getTargetAddressSpace(ETy) == 0) {
1975  AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
1976  AI->getArgNo() + 1,
1977  llvm::Attribute::NonNull));
1978  }
1979  }
1980  } else if (const auto *ArrTy =
1981  getContext().getAsVariableArrayType(OTy)) {
1982  // For C99 VLAs with the static keyword, we don't know the size so
1983  // we can't use the dereferenceable attribute, but in addrspace(0)
1984  // we know that it must be nonnull.
1985  if (ArrTy->getSizeModifier() == VariableArrayType::Static &&
1986  !getContext().getTargetAddressSpace(ArrTy->getElementType()))
1987  AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
1988  AI->getArgNo() + 1,
1989  llvm::Attribute::NonNull));
1990  }
1991 
1992  const auto *AVAttr = PVD->getAttr<AlignValueAttr>();
1993  if (!AVAttr)
1994  if (const auto *TOTy = dyn_cast<TypedefType>(OTy))
1995  AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>();
1996  if (AVAttr) {
1997  llvm::Value *AlignmentValue =
1998  EmitScalarExpr(AVAttr->getAlignment());
1999  llvm::ConstantInt *AlignmentCI =
2000  cast<llvm::ConstantInt>(AlignmentValue);
2001  unsigned Alignment =
2002  std::min((unsigned) AlignmentCI->getZExtValue(),
2003  +llvm::Value::MaximumAlignment);
2004 
2005  llvm::AttrBuilder Attrs;
2006  Attrs.addAlignmentAttr(Alignment);
2007  AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
2008  AI->getArgNo() + 1, Attrs));
2009  }
2010  }
2011 
2012  if (Arg->getType().isRestrictQualified())
2013  AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
2014  AI->getArgNo() + 1,
2015  llvm::Attribute::NoAlias));
2016 
2017  // Ensure the argument is the correct type.
2018  if (V->getType() != ArgI.getCoerceToType())
2019  V = Builder.CreateBitCast(V, ArgI.getCoerceToType());
2020 
2021  if (isPromoted)
2022  V = emitArgumentDemotion(*this, Arg, V);
2023 
2024  if (const CXXMethodDecl *MD =
2025  dyn_cast_or_null<CXXMethodDecl>(CurCodeDecl)) {
2026  if (MD->isVirtual() && Arg == CXXABIThisDecl)
2027  V = CGM.getCXXABI().
2028  adjustThisParameterInVirtualFunctionPrologue(*this, CurGD, V);
2029  }
2030 
2031  // Because of merging of function types from multiple decls it is
2032  // possible for the type of an argument to not match the corresponding
2033  // type in the function type. Since we are codegening the callee
2034  // in here, add a cast to the argument type.
2035  llvm::Type *LTy = ConvertType(Arg->getType());
2036  if (V->getType() != LTy)
2037  V = Builder.CreateBitCast(V, LTy);
2038 
2039  ArgVals.push_back(ParamValue::forDirect(V));
2040  break;
2041  }
2042 
2043  Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg),
2044  Arg->getName());
2045 
2046  // Pointer to store into.
2047  Address Ptr = emitAddressAtOffset(*this, Alloca, ArgI);
2048 
2049  // Fast-isel and the optimizer generally like scalar values better than
2050  // FCAs, so we flatten them if this is safe to do for this argument.
2051  llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
2052  if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy &&
2053  STy->getNumElements() > 1) {
2054  auto SrcLayout = CGM.getDataLayout().getStructLayout(STy);
2055  uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy);
2056  llvm::Type *DstTy = Ptr.getElementType();
2057  uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy);
2058 
2059  Address AddrToStoreInto = Address::invalid();
2060  if (SrcSize <= DstSize) {
2061  AddrToStoreInto =
2062  Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy));
2063  } else {
2064  AddrToStoreInto =
2065  CreateTempAlloca(STy, Alloca.getAlignment(), "coerce");
2066  }
2067 
2068  assert(STy->getNumElements() == NumIRArgs);
2069  for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
2070  auto AI = FnArgs[FirstIRArg + i];
2071  AI->setName(Arg->getName() + ".coerce" + Twine(i));
2072  auto Offset = CharUnits::fromQuantity(SrcLayout->getElementOffset(i));
2073  Address EltPtr =
2074  Builder.CreateStructGEP(AddrToStoreInto, i, Offset);
2075  Builder.CreateStore(AI, EltPtr);
2076  }
2077 
2078  if (SrcSize > DstSize) {
2079  Builder.CreateMemCpy(Ptr, AddrToStoreInto, DstSize);
2080  }
2081 
2082  } else {
2083  // Simple case, just do a coerced store of the argument into the alloca.
2084  assert(NumIRArgs == 1);
2085  auto AI = FnArgs[FirstIRArg];
2086  AI->setName(Arg->getName() + ".coerce");
2087  CreateCoercedStore(AI, Ptr, /*DestIsVolatile=*/false, *this);
2088  }
2089 
2090  // Match to what EmitParmDecl is expecting for this type.
2091  if (CodeGenFunction::hasScalarEvaluationKind(Ty)) {
2092  llvm::Value *V =
2093  EmitLoadOfScalar(Alloca, false, Ty, Arg->getLocStart());
2094  if (isPromoted)
2095  V = emitArgumentDemotion(*this, Arg, V);
2096  ArgVals.push_back(ParamValue::forDirect(V));
2097  } else {
2098  ArgVals.push_back(ParamValue::forIndirect(Alloca));
2099  }
2100  break;
2101  }
2102 
2103  case ABIArgInfo::Expand: {
2104  // If this structure was expanded into multiple arguments then
2105  // we need to create a temporary and reconstruct it from the
2106  // arguments.
2107  Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
2108  LValue LV = MakeAddrLValue(Alloca, Ty);
2109  ArgVals.push_back(ParamValue::forIndirect(Alloca));
2110 
2111  auto FnArgIter = FnArgs.begin() + FirstIRArg;
2112  ExpandTypeFromArgs(Ty, LV, FnArgIter);
2113  assert(FnArgIter == FnArgs.begin() + FirstIRArg + NumIRArgs);
2114  for (unsigned i = 0, e = NumIRArgs; i != e; ++i) {
2115  auto AI = FnArgs[FirstIRArg + i];
2116  AI->setName(Arg->getName() + "." + Twine(i));
2117  }
2118  break;
2119  }
2120 
2121  case ABIArgInfo::Ignore:
2122  assert(NumIRArgs == 0);
2123  // Initialize the local variable appropriately.
2124  if (!hasScalarEvaluationKind(Ty)) {
2125  ArgVals.push_back(ParamValue::forIndirect(CreateMemTemp(Ty)));
2126  } else {
2127  llvm::Value *U = llvm::UndefValue::get(ConvertType(Arg->getType()));
2128  ArgVals.push_back(ParamValue::forDirect(U));
2129  }
2130  break;
2131  }
2132  }
2133 
2134  if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
2135  for (int I = Args.size() - 1; I >= 0; --I)
2136  EmitParmDecl(*Args[I], ArgVals[I], I + 1);
2137  } else {
2138  for (unsigned I = 0, E = Args.size(); I != E; ++I)
2139  EmitParmDecl(*Args[I], ArgVals[I], I + 1);
2140  }
2141 }
2142 
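// Walk up a chain of unused bitcasts, erasing them as we go; stops at the
// first instruction that is not an unused bitcast.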
2143 static void eraseUnusedBitCasts(llvm::Instruction *insn) {
2144  while (insn->use_empty()) {
2145  llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
2146  if (!bitcast) return;
2147 
2148  // This is "safe" because we would have used a ConstantExpr otherwise.
2149  insn = cast<llvm::Instruction>(bitcast->getOperand(0));
2150  bitcast->eraseFromParent();
2151  }
2152 }
2153 
2154 /// Try to emit a fused autorelease of a return result.
2155 static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
2156  llvm::Value *result) {
2157  // The insertion point must immediately follow the cast.
2158  llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
2159  if (BB->empty()) return nullptr;
2160  if (&BB->back() != result) return nullptr;
2161 
2162  llvm::Type *resultType = result->getType();
2163 
2164  // result is in a BasicBlock and is therefore an Instruction.
2165  llvm::Instruction *generator = cast<llvm::Instruction>(result);
2166 
2167  SmallVector<llvm::Instruction*, 4> insnsToKill;
2168 
2169  // Look for:
2170  // %generator = bitcast %type1* %generator2 to %type2*
2171  while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
2172  // We would have emitted this as a constant if the operand weren't
2173  // an Instruction.
2174  generator = cast<llvm::Instruction>(bitcast->getOperand(0));
2175 
2176  // Require the generator to be immediately followed by the cast.
2177  if (generator->getNextNode() != bitcast)
2178  return nullptr;
2179 
2180  insnsToKill.push_back(bitcast);
2181  }
2182 
2183  // Look for:
2184  // %generator = call i8* @objc_retain(i8* %originalResult)
2185  // or
2186  // %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
2187  llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
2188  if (!call) return nullptr;
2189 
2190  bool doRetainAutorelease;
2191 
2192  if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints().objc_retain) {
2193  doRetainAutorelease = true;
2194  } else if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints()
2195  .objc_retainAutoreleasedReturnValue) {
2196  doRetainAutorelease = false;
2197 
2198  // If we emitted an assembly marker for this call (and the
2199  // ObjCEntrypoints field should have been set if so), go looking
2200  // for that call. If we can't find it, we can't do this
2201  // optimization. But it should always be the immediately previous
2202  // instruction, unless we needed bitcasts around the call.
2203  if (CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker) {
2204  llvm::Instruction *prev = call->getPrevNode();
2205  assert(prev);
2206  if (isa<llvm::BitCastInst>(prev)) {
2207  prev = prev->getPrevNode();
2208  assert(prev);
2209  }
2210  assert(isa<llvm::CallInst>(prev));
2211  assert(cast<llvm::CallInst>(prev)->getCalledValue() ==
2212  CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker);
2213  insnsToKill.push_back(prev);
2214  }
2215  } else {
2216  return nullptr;
2217  }
2218 
2219  result = call->getArgOperand(0);
2220  insnsToKill.push_back(call);
2221 
2222  // Keep killing bitcasts, for sanity. Note that we no longer care
2223  // about precise ordering as long as there's exactly one use.
2224  while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
2225  if (!bitcast->hasOneUse()) break;
2226  insnsToKill.push_back(bitcast);
2227  result = bitcast->getOperand(0);
2228  }
2229 
2230  // Delete all the unnecessary instructions, from latest to earliest.
2231  for (SmallVectorImpl<llvm::Instruction*>::iterator
2232  i = insnsToKill.begin(), e = insnsToKill.end(); i != e; ++i)
2233  (*i)->eraseFromParent();
2234 
2235  // Do the fused retain/autorelease if we were asked to.
2236  if (doRetainAutorelease)
2237  result = CGF.EmitARCRetainAutoreleaseReturnValue(result);
2238 
2239  // Cast back to the result type.
2240  return CGF.Builder.CreateBitCast(result, resultType);
2241 }
2242 
2243 /// If this is a +1 of the value of an immutable 'self', remove it.
2244 static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
2245  llvm::Value *result) {
2246  // This is only applicable to a method with an immutable 'self'.
2247  const ObjCMethodDecl *method =
2248  dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl);
2249  if (!method) return nullptr;
2250  const VarDecl *self = method->getSelfDecl();
2251  if (!self->getType().isConstQualified()) return nullptr;
2252 
2253  // Look for a retain call.
2254  llvm::CallInst *retainCall =
2255  dyn_cast<llvm::CallInst>(result->stripPointerCasts());
2256  if (!retainCall ||
2257  retainCall->getCalledValue() != CGF.CGM.getObjCEntrypoints().objc_retain)
2258  return nullptr;
2259 
2260  // Look for an ordinary load of 'self'.
2261  llvm::Value *retainedValue = retainCall->getArgOperand(0);
2262  llvm::LoadInst *load =
2263  dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
2264  if (!load || load->isAtomic() || load->isVolatile() ||
2265  load->getPointerOperand() != CGF.GetAddrOfLocalVar(self).getPointer())
2266  return nullptr;
2267 
2268  // Okay! Burn it all down. This relies for correctness on the
2269  // assumption that the retain is emitted as part of the return and
2270  // that thereafter everything is used "linearly".
2271  llvm::Type *resultType = result->getType();
2272  eraseUnusedBitCasts(cast<llvm::Instruction>(result));
2273  assert(retainCall->use_empty());
2274  retainCall->eraseFromParent();
2275  eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue));
2276 
2277  return CGF.Builder.CreateBitCast(load, resultType);
2278 }
2279 
2280 /// Emit an ARC autorelease of the result of a function.
2281 ///
2282 /// \return the value to actually return from the function
2283 static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
2284  llvm::Value *result) {
2285  // If we're returning 'self', kill the initial retain. This is a
2286  // heuristic attempt to "encourage correctness" in the really unfortunate
2287  // case where we have a return of self during a dealloc and we desperately
2288  // need to avoid the possible autorelease.
2289  if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
2290  return self;
2291 
2292  // At -O0, try to emit a fused retain/autorelease.
2293  if (CGF.shouldUseFusedARCCalls())
2294  if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
2295  return fused;
2296 
2297  return CGF.EmitARCAutoreleaseReturnValue(result);
2298 }
2299 
2300 /// Heuristically search for a dominating store to the return-value slot.
2301 static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
2302  // Check whether a User is a store whose pointer operand is the ReturnValue.
2303  // We are looking for stores to the ReturnValue, not for stores of the
2304  // ReturnValue to some other location.
2305  auto GetStoreIfValid = [&CGF](llvm::User *U) -> llvm::StoreInst * {
2306  auto *SI = dyn_cast<llvm::StoreInst>(U);
2307  if (!SI || SI->getPointerOperand() != CGF.ReturnValue.getPointer())
2308  return nullptr;
2309  // These aren't actually possible for non-coerced returns, and we
2310  // only care about non-coerced returns on this code path.
2311  assert(!SI->isAtomic() && !SI->isVolatile());
2312  return SI;
2313  };
2314  // If there are multiple uses of the return-value slot, just check
2315  // for something immediately preceding the IP. Sometimes this can
2316  // happen with how we generate implicit-returns; it can also happen
2317  // with noreturn cleanups.
2318  if (!CGF.ReturnValue.getPointer()->hasOneUse()) {
2319  llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
2320  if (IP->empty()) return nullptr;
2321  llvm::Instruction *I = &IP->back();
2322 
2323  // Skip lifetime markers
2324  for (llvm::BasicBlock::reverse_iterator II = IP->rbegin(),
2325  IE = IP->rend();
2326  II != IE; ++II) {
2327  if (llvm::IntrinsicInst *Intrinsic =
2328  dyn_cast<llvm::IntrinsicInst>(&*II)) {
2329  if (Intrinsic->getIntrinsicID() == llvm::Intrinsic::lifetime_end) {
2330  const llvm::Value *CastAddr = Intrinsic->getArgOperand(1);
2331  ++II;
2332  if (II == IE)
2333  break;
2334  if (isa<llvm::BitCastInst>(&*II) && (CastAddr == &*II))
2335  continue;
2336  }
2337  }
2338  I = &*II;
2339  break;
2340  }
2341 
2342  return GetStoreIfValid(I);
2343  }
2344 
2345  llvm::StoreInst *store =
2346  GetStoreIfValid(CGF.ReturnValue.getPointer()->user_back());
2347  if (!store) return nullptr;
2348 
2349  // Now do a quick-and-dirty dominance check: just walk up the
2350  // single-predecessors chain from the current insertion point.
2351  llvm::BasicBlock *StoreBB = store->getParent();
2352  llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
2353  while (IP != StoreBB) {
2354  if (!(IP = IP->getSinglePredecessor()))
2355  return nullptr;
2356  }
2357 
2358  // Okay, the store's basic block dominates the insertion point; we
2359  // can do our thing.
2360  return store;
2361 }
2362 
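/// Emit the function epilogue: move the return value (if any) into the IR
/// return slot according to its ABIArgInfo and emit the ret instruction.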
2363 void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
2364  bool EmitRetDbgLoc,
2365  SourceLocation EndLoc) {
2366  if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) {
2367  // Naked functions don't have epilogues.
2368  Builder.CreateUnreachable();
2369  return;
2370  }
2371 
2372  // Functions with no result always return void.
2373  if (!ReturnValue.isValid()) {
2374  Builder.CreateRetVoid();
2375  return;
2376  }
2377 
2378  llvm::DebugLoc RetDbgLoc;
2379  llvm::Value *RV = nullptr;
2380  QualType RetTy = FI.getReturnType();
2381  const ABIArgInfo &RetAI = FI.getReturnInfo();
2382 
2383  switch (RetAI.getKind()) {
2384  case ABIArgInfo::InAlloca:
2385  // Aggregates get evaluated directly into the destination. Sometimes we
2386  // need to return the sret value in a register, though.
2387  assert(hasAggregateEvaluationKind(RetTy));
2388  if (RetAI.getInAllocaSRet()) {
2389  llvm::Function::arg_iterator EI = CurFn->arg_end();
2390  --EI;
2391  llvm::Value *ArgStruct = &*EI;
2392  llvm::Value *SRet = Builder.CreateStructGEP(
2393  nullptr, ArgStruct, RetAI.getInAllocaFieldIndex());
2394  RV = Builder.CreateAlignedLoad(SRet, getPointerAlign(), "sret");
2395  }
2396  break;
2397 
2398  case ABIArgInfo::Indirect: {
2399  auto AI = CurFn->arg_begin();
2400  if (RetAI.isSRetAfterThis())
2401  ++AI;
2402  switch (getEvaluationKind(RetTy)) {
2403  case TEK_Complex: {
2404  ComplexPairTy RT =
2405  EmitLoadOfComplex(MakeAddrLValue(ReturnValue, RetTy), EndLoc);
2406  EmitStoreOfComplex(RT, MakeNaturalAlignAddrLValue(&*AI, RetTy),
2407  /*isInit*/ true);
2408  break;
2409  }
2410  case TEK_Aggregate:
2411  // Do nothing; aggregates get evaluated directly into the destination.
2412  break;
2413  case TEK_Scalar:
2414  EmitStoreOfScalar(Builder.CreateLoad(ReturnValue),
2415  MakeNaturalAlignAddrLValue(&*AI, RetTy),
2416  /*isInit*/ true);
2417  break;
2418  }
2419  break;
2420  }
2421 
2422  case ABIArgInfo::Extend:
2423  case ABIArgInfo::Direct:
2424  if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
2425  RetAI.getDirectOffset() == 0) {
2426  // The internal return value temp always will have pointer-to-return-type
2427  // type, just do a load.
2428 
2429  // If there is a dominating store to ReturnValue, we can elide
2430  // the load, zap the store, and usually zap the alloca.
2431  if (llvm::StoreInst *SI =
2432  findDominatingStoreToReturnValue(*this)) {
2433  // Reuse the debug location from the store unless there is
2434  // cleanup code to be emitted between the store and return
2435  // instruction.
2436  if (EmitRetDbgLoc && !AutoreleaseResult)
2437  RetDbgLoc = SI->getDebugLoc();
2438  // Get the stored value and nuke the now-dead store.
2439  RV = SI->getValueOperand();
2440  SI->eraseFromParent();
2441 
2442  // If that was the only use of the return value, nuke it as well now.
2443  auto returnValueInst = ReturnValue.getPointer();
2444  if (returnValueInst->use_empty()) {
2445  if (auto alloca = dyn_cast<llvm::AllocaInst>(returnValueInst)) {
2446  alloca->eraseFromParent();
2447  ReturnValue = Address::invalid();
2448  }
2449  }
2450 
2451  // Otherwise, we have to do a simple load.
2452  } else {
2453  RV = Builder.CreateLoad(ReturnValue);
2454  }
2455  } else {
2456  // If the value is offset in memory, apply the offset now.
2457  Address V = emitAddressAtOffset(*this, ReturnValue, RetAI);
2458 
2459  RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
2460  }
2461 
2462  // In ARC, end functions that return a retainable type with a call
2463  // to objc_autoreleaseReturnValue.
2464  if (AutoreleaseResult) {
2465  assert(getLangOpts().ObjCAutoRefCount &&
2466  !FI.isReturnsRetained() &&
2467  RetTy->isObjCRetainableType());
2468  RV = emitAutoreleaseOfResult(*this, RV);
2469  }
2470 
2471  break;
2472 
2473  case ABIArgInfo::Ignore:
2474  break;
2475 
2476  case ABIArgInfo::Expand:
2477  llvm_unreachable("Invalid ABI kind for return argument");
2478  }
2479 
2480  llvm::Instruction *Ret;
2481  if (RV) {
2482  if (CurCodeDecl && SanOpts.has(SanitizerKind::ReturnsNonnullAttribute)) {
2483  if (auto RetNNAttr = CurCodeDecl->getAttr<ReturnsNonNullAttr>()) {
2484  SanitizerScope SanScope(this);
2485  llvm::Value *Cond = Builder.CreateICmpNE(
2486  RV, llvm::Constant::getNullValue(RV->getType()));
2487  llvm::Constant *StaticData[] = {
2488  EmitCheckSourceLocation(EndLoc),
2489  EmitCheckSourceLocation(RetNNAttr->getLocation()),
2490  };
2491  EmitCheck(std::make_pair(Cond, SanitizerKind::ReturnsNonnullAttribute),
2492  "nonnull_return", StaticData, None);
2493  }
2494  }
2495  Ret = Builder.CreateRet(RV);
2496  } else {
2497  Ret = Builder.CreateRetVoid();
2498  }
2499 
2500  if (RetDbgLoc)
2501  Ret->setDebugLoc(std::move(RetDbgLoc));
2502 }
2503 
2504 static bool isInAllocaArgument(CGCXXABI &ABI, QualType type) {
2505  const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
2506  return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory;
2507 }
2508 
2509 static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF,
2510  QualType Ty) {
2511  // FIXME: Generate IR in one pass, rather than going back and fixing up these
2512  // placeholders.
2513  llvm::Type *IRTy = CGF.ConvertTypeForMem(Ty);
2514  llvm::Value *Placeholder =
2515  llvm::UndefValue::get(IRTy->getPointerTo()->getPointerTo());
2516  Placeholder = CGF.Builder.CreateDefaultAlignedLoad(Placeholder);
2517 
2518  // FIXME: When we generate this IR in one pass, we shouldn't need
2519  // this win32-specific alignment hack.
2520  CharUnits Align = CharUnits::fromQuantity(4);
2521 
2522  return AggValueSlot::forAddr(Address(Placeholder, Align),
2523  Ty.getQualifiers(),
2524  AggValueSlot::IsNotDestructed,
2525  AggValueSlot::DoesNotNeedGCBarriers,
2526  AggValueSlot::IsNotAliased);
2527 }
2528 
2529 void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
2530  const VarDecl *param,
2531  SourceLocation loc) {
2532  // StartFunction converted the ABI-lowered parameter(s) into a
2533  // local alloca. We need to turn that into an r-value suitable
2534  // for EmitCall.
2535  Address local = GetAddrOfLocalVar(param);
2536 
2537  QualType type = param->getType();
2538 
2539  // For the most part, we just need to load the alloca, except:
2540  // 1) aggregate r-values are actually pointers to temporaries, and
2541  // 2) references to non-scalars are pointers directly to the aggregate.
2542  // I don't know why references to scalars are different here.
2543  if (const ReferenceType *ref = type->getAs<ReferenceType>()) {
2544  if (!hasScalarEvaluationKind(ref->getPointeeType()))
2545  return args.add(RValue::getAggregate(local), type);
2546 
2547  // Locals which are references to scalars are represented
2548  // with allocas holding the pointer.
2549  return args.add(RValue::get(Builder.CreateLoad(local)), type);
2550  }
2551 
2552  assert(!isInAllocaArgument(CGM.getCXXABI(), type) &&
2553  "cannot emit delegate call arguments for inalloca arguments!");
2554 
2555  args.add(convertTempToRValue(local, type, loc), type);
2556 }
2557 
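// Cheap, syntactic provability checks: a constant null pointer is provably
// null, and an alloca is provably non-null; anything else is unknown.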
2558 static bool isProvablyNull(llvm::Value *addr) {
2559  return isa<llvm::ConstantPointerNull>(addr);
2560 }
2561 
2562 static bool isProvablyNonNull(llvm::Value *addr) {
2563  return isa<llvm::AllocaInst>(addr);
2564 }
2565 
2566 /// Emit the actual writing-back of a writeback.
2567 static void emitWriteback(CodeGenFunction &CGF,
2568  const CallArgList::Writeback &writeback) {
2569  const LValue &srcLV = writeback.Source;
2570  Address srcAddr = srcLV.getAddress();
2571  assert(!isProvablyNull(srcAddr.getPointer()) &&
2572  "shouldn't have writeback for provably null argument");
2573 
2574  llvm::BasicBlock *contBB = nullptr;
2575 
2576  // If the argument wasn't provably non-null, we need to null check
2577  // before doing the store.
2578  bool provablyNonNull = isProvablyNonNull(srcAddr.getPointer());
2579  if (!provablyNonNull) {
2580  llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
2581  contBB = CGF.createBasicBlock("icr.done");
2582 
2583  llvm::Value *isNull =
2584  CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
2585  CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
2586  CGF.EmitBlock(writebackBB);
2587  }
2588 
2589  // Load the value to writeback.
2590  llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary);
2591 
2592  // Cast it back, in case we're writing an id to a Foo* or something.
2593  value = CGF.Builder.CreateBitCast(value, srcAddr.getElementType(),
2594  "icr.writeback-cast");
2595 
2596  // Perform the writeback.
2597 
2598  // If we have a "to use" value, it's something we need to emit a use
2599  // of. This has to be carefully threaded in: if it's done after the
2600  // release it's potentially undefined behavior (and the optimizer
2601  // will ignore it), and if it happens before the retain then the
2602  // optimizer could move the release there.
2603  if (writeback.ToUse) {
2604  assert(srcLV.getObjCLifetime() == Qualifiers::OCL_Strong);
2605 
2606  // Retain the new value. No need to block-copy here: the block's
2607  // being passed up the stack.
2608  value = CGF.EmitARCRetainNonBlock(value);
2609 
2610  // Emit the intrinsic use here.
2611  CGF.EmitARCIntrinsicUse(writeback.ToUse);
2612 
2613  // Load the old value (primitively).
2614  llvm::Value *oldValue = CGF.EmitLoadOfScalar(srcLV, SourceLocation());
2615 
2616  // Put the new value in place (primitively).
2617  CGF.EmitStoreOfScalar(value, srcLV, /*init*/ false);
2618 
2619  // Release the old value.
2620  CGF.EmitARCRelease(oldValue, srcLV.isARCPreciseLifetime());
2621 
2622  // Otherwise, we can just do a normal lvalue store.
2623  } else {
2624  CGF.EmitStoreThroughLValue(RValue::get(value), srcLV);
2625  }
2626 
2627  // Jump to the continuation block.
2628  if (!provablyNonNull)
2629  CGF.EmitBlock(contBB);
2630 }
2631 
2632 static void emitWritebacks(CodeGenFunction &CGF,
2633  const CallArgList &args) {
2634  for (const auto &I : args.writebacks())
2635  emitWriteback(CGF, I);
2636 }
2637 
2638 static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF,
2639  const CallArgList &CallArgs) {
2640  ArrayRef<CallArgList::CallArgCleanup> Cleanups =
2642  CallArgs.getCleanupsToDeactivate();
2643  // Iterate in reverse to increase the likelihood of popping the cleanup.
2644  for (const auto &I : llvm::reverse(Cleanups)) {
2645  CGF.DeactivateCleanupBlock(I.Cleanup, I.IsActiveIP);
2646  I.IsActiveIP->eraseFromParent();
2647  }
2648 }
2649 
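// Peephole '&expr' so that emitWritebackArg below can evaluate the operand
// directly as an l-value rather than materializing a pointer.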
2650 static const Expr *maybeGetUnaryAddrOfOperand(const Expr *E) {
2651  if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E->IgnoreParens()))
2652  if (uop->getOpcode() == UO_AddrOf)
2653  return uop->getSubExpr();
2654  return nullptr;
2655 }
2656 
2657 /// Emit an argument that's being passed call-by-writeback. That is,
2658 /// we are passing the address of an __autoreleased temporary; it
2659 /// might be copy-initialized with the current value of the given
2660 /// address, but it will definitely be copied out of after the call.
2661 static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
2662  const ObjCIndirectCopyRestoreExpr *CRE) {
2663  LValue srcLV;
2664 
2665  // Make an optimistic effort to emit the address as an l-value.
2666  // This can fail if the argument expression is more complicated.
2667  if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(CRE->getSubExpr())) {
2668  srcLV = CGF.EmitLValue(lvExpr);
2669 
2670  // Otherwise, just emit it as a scalar.
2671  } else {
2672  Address srcAddr = CGF.EmitPointerWithAlignment(CRE->getSubExpr());
2673 
2674  QualType srcAddrType =
2675  CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
2676  srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType);
2677  }
2678  Address srcAddr = srcLV.getAddress();
2679 
2680  // The dest and src types don't necessarily match in LLVM terms
2681  // because of the crazy ObjC compatibility rules.
2682 
2683  llvm::PointerType *destType =
2684  cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));
2685 
2686  // If the address is a constant null, just pass the appropriate null.
2687  if (isProvablyNull(srcAddr.getPointer())) {
2688  args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
2689  CRE->getType());
2690  return;
2691  }
2692 
2693  // Create the temporary.
2694  Address temp = CGF.CreateTempAlloca(destType->getElementType(),
2695  CGF.getPointerAlign(),
2696  "icr.temp");
2697  // Loading an l-value can introduce a cleanup if the l-value is __weak,
2698  // and that cleanup will be conditional if we can't prove that the l-value
2699  // isn't null, so we need to register a dominating point so that the cleanups
2700  // system will make valid IR.
2701  CodeGenFunction::ConditionalEvaluation condEval(CGF);
2702 
2703  // Zero-initialize it if we're not doing a copy-initialization.
2704  bool shouldCopy = CRE->shouldCopy();
2705  if (!shouldCopy) {
2706  llvm::Value *null =
2707  llvm::ConstantPointerNull::get(
2708  cast<llvm::PointerType>(destType->getElementType()));
2709  CGF.Builder.CreateStore(null, temp);
2710  }
2711 
2712  llvm::BasicBlock *contBB = nullptr;
2713  llvm::BasicBlock *originBB = nullptr;
2714 
2715  // If the address is *not* known to be non-null, we need to switch.
2716  llvm::Value *finalArgument;
2717 
2718  bool provablyNonNull = isProvablyNonNull(srcAddr.getPointer());
2719  if (provablyNonNull) {
2720  finalArgument = temp.getPointer();
2721  } else {
2722  llvm::Value *isNull =
2723  CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
2724 
2725  finalArgument = CGF.Builder.CreateSelect(isNull,
2726  llvm::ConstantPointerNull::get(destType),
2727  temp.getPointer(), "icr.argument");
2728 
2729  // If we need to copy, then the load has to be conditional, which
2730  // means we need control flow.
2731  if (shouldCopy) {
2732  originBB = CGF.Builder.GetInsertBlock();
2733  contBB = CGF.createBasicBlock("icr.cont");
2734  llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy");
2735  CGF.Builder.CreateCondBr(isNull, contBB, copyBB);
2736  CGF.EmitBlock(copyBB);
2737  condEval.begin(CGF);
2738  }
2739  }
2740 
2741  llvm::Value *valueToUse = nullptr;
2742 
2743  // Perform a copy if necessary.
2744  if (shouldCopy) {
2745  RValue srcRV = CGF.EmitLoadOfLValue(srcLV, SourceLocation());
2746  assert(srcRV.isScalar());
2747 
2748  llvm::Value *src = srcRV.getScalarVal();
2749  src = CGF.Builder.CreateBitCast(src, destType->getElementType(),
2750  "icr.cast");
2751 
2752  // Use an ordinary store, not a store-to-lvalue.
2753  CGF.Builder.CreateStore(src, temp);
2754 
2755  // If optimization is enabled, and the value was held in a
2756  // __strong variable, we need to tell the optimizer that this
2757  // value has to stay alive until we're doing the store back.
2758  // This is because the temporary is effectively unretained,
2759  // and so otherwise we can violate the high-level semantics.
2760  if (CGF.CGM.getCodeGenOpts().OptimizationLevel != 0 &&
2761  srcLV.getObjCLifetime() == Qualifiers::OCL_Strong) {
2762  valueToUse = src;
2763  }
2764  }
2765 
2766  // Finish the control flow if we needed it.
2767  if (shouldCopy && !provablyNonNull) {
2768  llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock();
2769  CGF.EmitBlock(contBB);
2770 
2771  // Make a phi for the value to intrinsically use.
2772  if (valueToUse) {
2773  llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2,
2774  "icr.to-use");
2775  phiToUse->addIncoming(valueToUse, copyBB);
2776  phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()),
2777  originBB);
2778  valueToUse = phiToUse;
2779  }
2780 
2781  condEval.end(CGF);
2782  }
2783 
2784  args.addWriteback(srcLV, temp, valueToUse);
2785  args.add(RValue::get(finalArgument), CRE->getType());
2786 }
2787 
2788 void CallArgList::allocateArgumentMemory(CodeGenFunction &CGF) {
2789  assert(!StackBase && !StackCleanup.isValid());
2790 
2791  // Save the stack.
2792  llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stacksave);
2793  StackBase = CGF.Builder.CreateCall(F, {}, "inalloca.save");
2794 }
2795 
2796 void CallArgList::freeArgumentMemory(CodeGenFunction &CGF) const {
2797  if (StackBase) {
2798  // Restore the stack after the call.
2799  llvm::Value *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
2800  CGF.Builder.CreateCall(F, StackBase);
2801  }
2802 }
2803 
2804 void CodeGenFunction::EmitNonNullArgCheck(RValue RV, QualType ArgType,
2805  SourceLocation ArgLoc,
2806  const FunctionDecl *FD,
2807  unsigned ParmNum) {
2808  if (!SanOpts.has(SanitizerKind::NonnullAttribute) || !FD)
2809  return;
2810  auto PVD = ParmNum < FD->getNumParams() ? FD->getParamDecl(ParmNum) : nullptr;
2811  unsigned ArgNo = PVD ? PVD->getFunctionScopeIndex() : ParmNum;
2812  auto NNAttr = getNonNullAttr(FD, PVD, ArgType, ArgNo);
2813  if (!NNAttr)
2814  return;
2815  SanitizerScope SanScope(this);
2816  assert(RV.isScalar());
2817  llvm::Value *V = RV.getScalarVal();
2818  llvm::Value *Cond =
2819  Builder.CreateICmpNE(V, llvm::Constant::getNullValue(V->getType()));
2820  llvm::Constant *StaticData[] = {
2821  EmitCheckSourceLocation(ArgLoc),
2822  EmitCheckSourceLocation(NNAttr->getLocation()),
2823  llvm::ConstantInt::get(Int32Ty, ArgNo + 1),
2824  };
2825  EmitCheck(std::make_pair(Cond, SanitizerKind::NonnullAttribute),
2826  "nonnull_arg", StaticData, None);
2827 }
2828 
2829 void CodeGenFunction::EmitCallArgs(
2830  CallArgList &Args, ArrayRef<QualType> ArgTypes,
2831  llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
2832  const FunctionDecl *CalleeDecl, unsigned ParamsToSkip) {
2833  assert((int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin()));
2834 
2835  auto MaybeEmitImplicitObjectSize = [&](unsigned I, const Expr *Arg) {
2836  if (CalleeDecl == nullptr || I >= CalleeDecl->getNumParams())
2837  return;
2838  auto *PS = CalleeDecl->getParamDecl(I)->getAttr<PassObjectSizeAttr>();
2839  if (PS == nullptr)
2840  return;
2841 
2842  const auto &Context = getContext();
2843  auto SizeTy = Context.getSizeType();
2844  auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy));
2845  llvm::Value *V = evaluateOrEmitBuiltinObjectSize(Arg, PS->getType(), T);
2846  Args.add(RValue::get(V), SizeTy);
2847  };
2848 
2849  // We *have* to evaluate arguments from right to left in the MS C++ ABI,
2850  // because arguments are destroyed left to right in the callee.
2851  if (CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
2852  // Insert a stack save if we're going to need any inalloca args.
2853  bool HasInAllocaArgs = false;
2854  for (ArrayRef<QualType>::iterator I = ArgTypes.begin(), E = ArgTypes.end();
2855  I != E && !HasInAllocaArgs; ++I)
2856  HasInAllocaArgs = isInAllocaArgument(CGM.getCXXABI(), *I);
2857  if (HasInAllocaArgs) {
2858  assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
2859  Args.allocateArgumentMemory(*this);
2860  }
2861 
2862  // Evaluate each argument.
2863  size_t CallArgsStart = Args.size();
2864  for (int I = ArgTypes.size() - 1; I >= 0; --I) {
2865  CallExpr::const_arg_iterator Arg = ArgRange.begin() + I;
2866  EmitCallArg(Args, *Arg, ArgTypes[I]);
2867  EmitNonNullArgCheck(Args.back().RV, ArgTypes[I], (*Arg)->getExprLoc(),
2868  CalleeDecl, ParamsToSkip + I);
2869  MaybeEmitImplicitObjectSize(I, *Arg);
2870  }
2871 
2872  // Un-reverse the arguments we just evaluated so they match up with the LLVM
2873  // IR function.
2874  std::reverse(Args.begin() + CallArgsStart, Args.end());
2875  return;
2876  }
2877 
2878  for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) {
2879  CallExpr::const_arg_iterator Arg = ArgRange.begin() + I;
2880  assert(Arg != ArgRange.end());
2881  EmitCallArg(Args, *Arg, ArgTypes[I]);
2882  EmitNonNullArgCheck(Args.back().RV, ArgTypes[I], (*Arg)->getExprLoc(),
2883  CalleeDecl, ParamsToSkip + I);
2884  MaybeEmitImplicitObjectSize(I, *Arg);
2885  }
2886 }
2887 
2888 namespace {
2889 
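// EH-only cleanup: if we unwind before reaching the call, destroy the
// already-constructed argument ourselves, since the callee never will.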
2890 struct DestroyUnpassedArg final : EHScopeStack::Cleanup {
2891  DestroyUnpassedArg(Address Addr, QualType Ty)
2892  : Addr(Addr), Ty(Ty) {}
2893 
2894  Address Addr;
2895  QualType Ty;
2896 
2897  void Emit(CodeGenFunction &CGF, Flags flags) override {
2898  const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor();
2899  assert(!Dtor->isTrivial());
2900  CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*for vbase*/ false,
2901  /*Delegating=*/false, Addr);
2902  }
2903 };
2904 
2905 struct DisableDebugLocationUpdates {
2906  CodeGenFunction &CGF;
2907  bool disabledDebugInfo;
2908  DisableDebugLocationUpdates(CodeGenFunction &CGF, const Expr *E) : CGF(CGF) {
2909  if ((disabledDebugInfo = isa<CXXDefaultArgExpr>(E) && CGF.getDebugInfo()))
2910  CGF.disableDebugInfo();
2911  }
2912  ~DisableDebugLocationUpdates() {
2913  if (disabledDebugInfo)
2914  CGF.enableDebugInfo();
2915  }
2916 };
2917 
2918 } // end anonymous namespace
2919 
2920 void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
2921  QualType type) {
2922  DisableDebugLocationUpdates Dis(*this, E);
2923  if (const ObjCIndirectCopyRestoreExpr *CRE
2924  = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
2925  assert(getLangOpts().ObjCAutoRefCount);
2926  assert(getContext().hasSameType(E->getType(), type));
2927  return emitWritebackArg(*this, args, CRE);
2928  }
2929 
2930  assert(type->isReferenceType() == E->isGLValue() &&
2931  "reference binding to unmaterialized r-value!");
2932 
2933  if (E->isGLValue()) {
2934  assert(E->getObjectKind() == OK_Ordinary);
2935  return args.add(EmitReferenceBindingToExpr(E), type);
2936  }
2937 
2938  bool HasAggregateEvalKind = hasAggregateEvaluationKind(type);
2939 
2940  // In the Microsoft C++ ABI, aggregate arguments are destructed by the callee.
2941  // However, we still have to push an EH-only cleanup in case we unwind before
2942  // we make it to the call.
2943  if (HasAggregateEvalKind &&
2944  CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
2945  // If we're using inalloca, use the argument memory. Otherwise, use a
2946  // temporary.
2947  AggValueSlot Slot;
2948  if (args.isUsingInAlloca())
2949  Slot = createPlaceholderSlot(*this, type);
2950  else
2951  Slot = CreateAggTemp(type, "agg.tmp");
2952 
2953  const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
2954  bool DestroyedInCallee =
2955  RD && RD->hasNonTrivialDestructor() &&
2956  CGM.getCXXABI().getRecordArgABI(RD) != CGCXXABI::RAA_Default;
2957  if (DestroyedInCallee)
2958  Slot.setExternallyDestructed();
2959 
2960  EmitAggExpr(E, Slot);
2961  RValue RV = Slot.asRValue();
2962  args.add(RV, type);
2963 
2964  if (DestroyedInCallee) {
2965  // Create a no-op GEP between the placeholder and the cleanup so we can
2966  // RAUW it successfully. It also serves as a marker of the first
2967  // instruction where the cleanup is active.
2968  pushFullExprCleanup<DestroyUnpassedArg>(EHCleanup, Slot.getAddress(),
2969  type);
2970  // This unreachable is a temporary marker which will be removed later.
2971  llvm::Instruction *IsActive = Builder.CreateUnreachable();
2972  args.addArgCleanupDeactivation(EHStack.getInnermostEHScope(), IsActive);
2973  }
2974  return;
2975  }
2976 
2977  if (HasAggregateEvalKind && isa<ImplicitCastExpr>(E) &&
2978  cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
2979  LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
2980  assert(L.isSimple());
2981  if (L.getAlignment() >= getContext().getTypeAlignInChars(type)) {
2982  args.add(L.asAggregateRValue(), type, /*NeedsCopy*/true);
2983  } else {
2984  // We can't represent a misaligned lvalue in the CallArgList, so copy
2985  // to an aligned temporary now.
2986  Address tmp = CreateMemTemp(type);
2987  EmitAggregateCopy(tmp, L.getAddress(), type, L.isVolatile());
2988  args.add(RValue::getAggregate(tmp), type);
2989  }
2990  return;
2991  }
2992 
2993  args.add(EmitAnyExprToTemp(E), type);
2994 }
2995 
2996 QualType CodeGenFunction::getVarArgType(const Expr *Arg) {
2997  // System headers on Windows define NULL to 0 instead of 0LL on Win64. MSVC
2998  // implicitly widens null pointer constants that are arguments to varargs
2999  // functions to pointer-sized ints.
3000  if (!getTarget().getTriple().isOSWindows())
3001  return Arg->getType();
3002 
3003  if (Arg->getType()->isIntegerType() &&
3004  getContext().getTypeSize(Arg->getType()) <
3005  getContext().getTargetInfo().getPointerWidth(0) &&
3006  Arg->isNullPointerConstant(getContext(),
3007  Expr::NPC_ValueDependentIsNotNull)) {
3008  return getContext().getIntPtrType();
3009  }
3010 
3011  return Arg->getType();
3012 }
3013 
3014 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
3015 // optimizer it can aggressively ignore unwind edges.
3016 void
3017 CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
3018  if (CGM.getCodeGenOpts().OptimizationLevel != 0 &&
3019  !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
3020  Inst->setMetadata("clang.arc.no_objc_arc_exceptions",
3021  llvm::MDNode::get(getLLVMContext(), None));
3022 }
3023 
3024 /// Emits a call to the given no-arguments nounwind runtime function.
3025 llvm::CallInst *
3026 CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee,
3027  const llvm::Twine &name) {
3028  return EmitNounwindRuntimeCall(callee, None, name);
3029 }
3030 
3031 /// Emits a call to the given nounwind runtime function.
3032 llvm::CallInst *
3033 CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee,
3034  ArrayRef<llvm::Value*> args,
3035  const llvm::Twine &name) {
3036  llvm::CallInst *call = EmitRuntimeCall(callee, args, name);
3037  call->setDoesNotThrow();
3038  return call;
3039 }
3040 
3041 /// Emits a simple call (never an invoke) to the given no-arguments
3042 /// runtime function.
3043 llvm::CallInst *
3044 CodeGenFunction::EmitRuntimeCall(llvm::Value *callee,
3045  const llvm::Twine &name) {
3046  return EmitRuntimeCall(callee, None, name);
3047 }
3048 
3049 /// Emits a simple call (never an invoke) to the given runtime
3050 /// function.
3051 llvm::CallInst *
3052 CodeGenFunction::EmitRuntimeCall(llvm::Value *callee,
3053  ArrayRef<llvm::Value*> args,
3054  const llvm::Twine &name) {
3055  llvm::CallInst *call = Builder.CreateCall(callee, args, name);
3056  call->setCallingConv(getRuntimeCC());
3057  return call;
3058 }
3059 
3060 // Calls which may throw must have operand bundles indicating which funclet
3061 // they are nested within.
3062 static void
3063 getBundlesForFunclet(llvm::Value *Callee,
3064  llvm::Instruction *CurrentFuncletPad,
3065  SmallVectorImpl<llvm::OperandBundleDef> &BundleList) {
3066  // There is no need for a funclet operand bundle if we aren't inside a funclet.
3067  if (!CurrentFuncletPad)
3068  return;
3069 
3070  // Skip intrinsics which cannot throw.
3071  auto *CalleeFn = dyn_cast<llvm::Function>(Callee->stripPointerCasts());
3072  if (CalleeFn && CalleeFn->isIntrinsic() && CalleeFn->doesNotThrow())
3073  return;
3074 
3075  BundleList.emplace_back("funclet", CurrentFuncletPad);
3076 }
3077 
3078 /// Emits a call or invoke to the given noreturn runtime function.
3079 void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke(llvm::Value *callee,
3080  ArrayRef<llvm::Value*> args) {
3081  SmallVector<llvm::OperandBundleDef, 1> BundleList;
3082  getBundlesForFunclet(callee, CurrentFuncletPad, BundleList);
3083 
3084  if (getInvokeDest()) {
3085  llvm::InvokeInst *invoke =
3086  Builder.CreateInvoke(callee,
3087  getUnreachableBlock(),
3088  getInvokeDest(),
3089  args,
3090  BundleList);
3091  invoke->setDoesNotReturn();
3092  invoke->setCallingConv(getRuntimeCC());
3093  } else {
3094  llvm::CallInst *call = Builder.CreateCall(callee, args, BundleList);
3095  call->setDoesNotReturn();
3096  call->setCallingConv(getRuntimeCC());
3097  Builder.CreateUnreachable();
3098  }
3099 }
3100 
3101 /// Emits a call or invoke instruction to the given nullary runtime
3102 /// function.
3103 llvm::CallSite
3104 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee,
3105  const Twine &name) {
3106  return EmitRuntimeCallOrInvoke(callee, None, name);
3107 }
3108 
3109 /// Emits a call or invoke instruction to the given runtime function.
3110 llvm::CallSite
3111 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee,
3112  ArrayRef<llvm::Value*> args,
3113  const Twine &name) {
3114  llvm::CallSite callSite = EmitCallOrInvoke(callee, args, name);
3115  callSite.setCallingConv(getRuntimeCC());
3116  return callSite;
3117 }
3118 
3119 /// Emits a call or invoke instruction to the given function, depending
3120 /// on the current state of the EH stack.
3121 llvm::CallSite
3122 CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
3123  ArrayRef<llvm::Value *> Args,
3124  const Twine &Name) {
3125  llvm::BasicBlock *InvokeDest = getInvokeDest();
3126 
3127  llvm::Instruction *Inst;
3128  if (!InvokeDest)
3129  Inst = Builder.CreateCall(Callee, Args, Name);
3130  else {
3131  llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
3132  Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, Name);
3133  EmitBlock(ContBB);
3134  }
3135 
3136  // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
3137  // optimizer it can aggressively ignore unwind edges.
3138  if (CGM.getLangOpts().ObjCAutoRefCount)
3139  AddObjCARCExceptionMetadata(Inst);
3140 
3141  return llvm::CallSite(Inst);
3142 }
3143 
3144 /// \brief Store a non-aggregate value to an address to initialize it. For
3145 /// initialization, a non-atomic store will be used.
3146 static void EmitInitStoreOfNonAggregate(CodeGenFunction &CGF, RValue Src,
3147  LValue Dst) {
3148  if (Src.isScalar())
3149  CGF.EmitStoreOfScalar(Src.getScalarVal(), Dst, /*init=*/true);
3150  else
3151  CGF.EmitStoreOfComplex(Src.getComplexVal(), Dst, /*init=*/true);
3152 }
3153 
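// Inalloca placeholders are rewritten to real argument-memory GEPs once the
// allocation exists; the queued replacements are applied after the call is
// emitted.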
3154 void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old,
3155  llvm::Value *New) {
3156  DeferredReplacements.push_back(std::make_pair(Old, New));
3157 }
3158 
3159 RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
3160  llvm::Value *Callee,
3161  ReturnValueSlot ReturnValue,
3162  const CallArgList &CallArgs,
3163  CGCalleeInfo CalleeInfo,
3164  llvm::Instruction **callOrInvoke) {
3165  // FIXME: We no longer need the types from CallArgs; lift up and simplify.
3166 
3167  // Handle struct-return functions by passing a pointer to the
3168  // location that we would like to return into.
3169  QualType RetTy = CallInfo.getReturnType();
3170  const ABIArgInfo &RetAI = CallInfo.getReturnInfo();
3171 
3172  llvm::FunctionType *IRFuncTy =
3173  cast<llvm::FunctionType>(
3174  cast<llvm::PointerType>(Callee->getType())->getElementType());
3175 
3176  // If we're using inalloca, insert the allocation after the stack save.
3177  // FIXME: Do this earlier rather than hacking it in here!
3178  Address ArgMemory = Address::invalid();
3179  const llvm::StructLayout *ArgMemoryLayout = nullptr;
3180  if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) {
3181  ArgMemoryLayout = CGM.getDataLayout().getStructLayout(ArgStruct);
3182  llvm::Instruction *IP = CallArgs.getStackBase();
3183  llvm::AllocaInst *AI;
3184  if (IP) {
3185  IP = IP->getNextNode();
3186  AI = new llvm::AllocaInst(ArgStruct, "argmem", IP);
3187  } else {
3188  AI = CreateTempAlloca(ArgStruct, "argmem");
3189  }
3190  auto Align = CallInfo.getArgStructAlignment();
3191  AI->setAlignment(Align.getQuantity());
3192  AI->setUsedWithInAlloca(true);
3193  assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca());
3194  ArgMemory = Address(AI, Align);
3195  }
3196 
3197  // Helper function to drill into the inalloca allocation.
3198  auto createInAllocaStructGEP = [&](unsigned FieldIndex) -> Address {
3199  auto FieldOffset =
3200  CharUnits::fromQuantity(ArgMemoryLayout->getElementOffset(FieldIndex));
3201  return Builder.CreateStructGEP(ArgMemory, FieldIndex, FieldOffset);
3202  };
3203 
3204  ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo);
3205  SmallVector<llvm::Value *, 16> IRCallArgs(IRFunctionArgs.totalIRArgs());
3206 
3207  // If the call returns a temporary with struct return, create a temporary
3208  // alloca to hold the result, unless one is given to us.
3209  Address SRetPtr = Address::invalid();
3210  size_t UnusedReturnSize = 0;
3211  if (RetAI.isIndirect() || RetAI.isInAlloca()) {
3212  if (!ReturnValue.isNull()) {
3213  SRetPtr = ReturnValue.getValue();
3214  } else {
3215  SRetPtr = CreateMemTemp(RetTy);
3216  if (HaveInsertPoint() && ReturnValue.isUnused()) {
3217  uint64_t size =
3218  CGM.getDataLayout().getTypeAllocSize(ConvertTypeForMem(RetTy));
3219  if (EmitLifetimeStart(size, SRetPtr.getPointer()))
3220  UnusedReturnSize = size;
3221  }
3222  }
3223  if (IRFunctionArgs.hasSRetArg()) {
3224  IRCallArgs[IRFunctionArgs.getSRetArgNo()] = SRetPtr.getPointer();
3225  } else {
3226  Address Addr = createInAllocaStructGEP(RetAI.getInAllocaFieldIndex());
3227  Builder.CreateStore(SRetPtr.getPointer(), Addr);
3228  }
3229  }
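  // Editorial example (not part of the original source): for a function
  // returning a large aggregate, e.g. 'struct Big { int a[8]; }; Big make();',
  // the return is indirect on common targets, so the code above allocates a
  // Big-sized temporary in the caller and passes its address as the hidden
  // sret argument (or stores it into the inalloca argument struct).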
3230 
3231  assert(CallInfo.arg_size() == CallArgs.size() &&
3232  "Mismatch between function signature & arguments.");
3233  unsigned ArgNo = 0;
3234  CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
3235  for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
3236  I != E; ++I, ++info_it, ++ArgNo) {
3237  const ABIArgInfo &ArgInfo = info_it->info;
3238  RValue RV = I->RV;
3239 
3240  // Insert a padding argument to ensure proper alignment.
3241  if (IRFunctionArgs.hasPaddingArg(ArgNo))
3242  IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
3243  llvm::UndefValue::get(ArgInfo.getPaddingType());
3244 
3245  unsigned FirstIRArg, NumIRArgs;
3246  std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
3247 
3248  switch (ArgInfo.getKind()) {
3249  case ABIArgInfo::InAlloca: {
3250  assert(NumIRArgs == 0);
3251  assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
3252  if (RV.isAggregate()) {
3253  // Replace the placeholder with the appropriate argument slot GEP.
3254  llvm::Instruction *Placeholder =
3255  cast<llvm::Instruction>(RV.getAggregatePointer());
3256  CGBuilderTy::InsertPoint IP = Builder.saveIP();
3257  Builder.SetInsertPoint(Placeholder);
3258  Address Addr = createInAllocaStructGEP(ArgInfo.getInAllocaFieldIndex());
3259  Builder.restoreIP(IP);
3260  deferPlaceholderReplacement(Placeholder, Addr.getPointer());
3261  } else {
3262  // Store the RValue into the argument struct.
3263  Address Addr = createInAllocaStructGEP(ArgInfo.getInAllocaFieldIndex());
3264  unsigned AS = Addr.getType()->getPointerAddressSpace();
3265  llvm::Type *MemType = ConvertTypeForMem(I->Ty)->getPointerTo(AS);
3266  // There are some cases where a trivial bitcast is unavoidable. The
3267  // definition of a type later in a translation unit may change its type
3268  // from {}* to (%struct.foo*)*.
3269  if (Addr.getType() != MemType)
3270  Addr = Builder.CreateBitCast(Addr, MemType);
3271  LValue argLV = MakeAddrLValue(Addr, I->Ty);
3272  EmitInitStoreOfNonAggregate(*this, RV, argLV);
3273  }
3274  break;
3275  }
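  // Editorial example (not part of the original source): the inalloca path
  // is used on i686-windows-msvc when an argument must live in the argument
  // memory itself, e.g. an object with a non-trivial copy constructor passed
  // by value:
  //   struct S { S(const S &); int x; };
  //   void f(S);   // 'f(s)' constructs S directly in the argument struct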
3276 
3277  case ABIArgInfo::Indirect: {
3278  assert(NumIRArgs == 1);
3279  if (RV.isScalar() || RV.isComplex()) {
3280  // Make a temporary alloca to pass the argument.
3281  Address Addr = CreateMemTemp(I->Ty, ArgInfo.getIndirectAlign());
3282  IRCallArgs[FirstIRArg] = Addr.getPointer();
3283 
3284  LValue argLV = MakeAddrLValue(Addr, I->Ty);
3285  EmitInitStoreOfNonAggregate(*this, RV, argLV);
3286  } else {
3287  // We want to avoid creating an unnecessary temporary+copy here;
3288  // however, we need one in three cases:
3289  // 1. If the argument is not byval, and we are required to copy the
3290  // source. (This case doesn't occur on any common architecture.)
3291  // 2. If the argument is byval, RV is not sufficiently aligned, and
3292  // we cannot force it to be sufficiently aligned.
3293  // 3. If the argument is byval, but RV is located in an address space
3294  // different than that of the argument (0).
3295  Address Addr = RV.getAggregateAddress();
3296  CharUnits Align = ArgInfo.getIndirectAlign();
3297  const llvm::DataLayout *TD = &CGM.getDataLayout();
3298  const unsigned RVAddrSpace = Addr.getType()->getAddressSpace();
3299  const unsigned ArgAddrSpace =
3300  (FirstIRArg < IRFuncTy->getNumParams()
3301  ? IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace()
3302  : 0);
3303  if ((!ArgInfo.getIndirectByVal() && I->NeedsCopy) ||
3304  (ArgInfo.getIndirectByVal() && Addr.getAlignment() < Align &&
3305  llvm::getOrEnforceKnownAlignment(Addr.getPointer(),
3306  Align.getQuantity(), *TD)
3307  < Align.getQuantity()) ||
3308  (ArgInfo.getIndirectByVal() && (RVAddrSpace != ArgAddrSpace))) {
3309  // Create an aligned temporary, and copy to it.
3310  Address AI = CreateMemTemp(I->Ty, ArgInfo.getIndirectAlign());
3311  IRCallArgs[FirstIRArg] = AI.getPointer();
3312  EmitAggregateCopy(AI, Addr, I->Ty, RV.isVolatileQualified());
3313  } else {
3314  // Skip the extra memcpy call.
3315  IRCallArgs[FirstIRArg] = Addr.getPointer();
3316  }
3317  }
3318  break;
3319  }
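  // Editorial example (not part of the original source): a large aggregate
  // passed by value, e.g. 'struct Big { int a[32]; }; void g(Big);', is
  // typically indirect with byval on x86-64 SysV; the fast path above reuses
  // the existing copy of the aggregate unless the alignment or address-space
  // checks force the aligned temporary.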
3320 
3321  case ABIArgInfo::Ignore:
3322  assert(NumIRArgs == 0);
3323  break;
3324 
3325  case ABIArgInfo::Extend:
3326  case ABIArgInfo::Direct: {
3327  if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
3328  ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
3329  ArgInfo.getDirectOffset() == 0) {
3330  assert(NumIRArgs == 1);
3331  llvm::Value *V;
3332  if (RV.isScalar())
3333  V = RV.getScalarVal();
3334  else
3335  V = Builder.CreateLoad(RV.getAggregateAddress());
3336 
3337  // We might have to widen integers, but we should never truncate.
3338  if (ArgInfo.getCoerceToType() != V->getType() &&
3339  V->getType()->isIntegerTy())
3340  V = Builder.CreateZExt(V, ArgInfo.getCoerceToType());
3341 
3342  // If the argument doesn't match, perform a bitcast to coerce it. This
3343  // can happen due to trivial type mismatches.
3344  if (FirstIRArg < IRFuncTy->getNumParams() &&
3345  V->getType() != IRFuncTy->getParamType(FirstIRArg))
3346  V = Builder.CreateBitCast(V, IRFuncTy->getParamType(FirstIRArg));
3347  IRCallArgs[FirstIRArg] = V;
3348  break;
3349  }
3350 
3351  // FIXME: Avoid the conversion through memory if possible.
3352  Address Src = Address::invalid();
3353  if (RV.isScalar() || RV.isComplex()) {
3354  Src = CreateMemTemp(I->Ty, "coerce");
3355  LValue SrcLV = MakeAddrLValue(Src, I->Ty);
3356  EmitInitStoreOfNonAggregate(*this, RV, SrcLV);
3357  } else {
3358  Src = RV.getAggregateAddress();
3359  }
3360 
3361  // If the value is offset in memory, apply the offset now.
3362  Src = emitAddressAtOffset(*this, Src, ArgInfo);
3363 
3364  // Fast-isel and the optimizer generally like scalar values better than
3365  // FCAs, so we flatten them if this is safe to do for this argument.
3366  llvm::StructType *STy =
3367  dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType());
3368  if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
3369  llvm::Type *SrcTy = Src.getType()->getElementType();
3370  uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
3371  uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy);
3372 
3373  // If the source type is smaller than the destination type of the
3374  // coerce-to logic, copy the source value into a temp alloca the size
3375  // of the destination type to allow loading all of it. The bits past
3376  // the source value are left undef.
3377  if (SrcSize < DstSize) {
3378  Address TempAlloca
3379  = CreateTempAlloca(STy, Src.getAlignment(),
3380  Src.getName() + ".coerce");
3381  Builder.CreateMemCpy(TempAlloca, Src, SrcSize);
3382  Src = TempAlloca;
3383  } else {
3384  Src = Builder.CreateBitCast(Src, llvm::PointerType::getUnqual(STy));
3385  }
3386 
3387  auto SrcLayout = CGM.getDataLayout().getStructLayout(STy);
3388  assert(NumIRArgs == STy->getNumElements());
3389  for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
3390  auto Offset = CharUnits::fromQuantity(SrcLayout->getElementOffset(i));
3391  Address EltPtr = Builder.CreateStructGEP(Src, i, Offset);
3392  llvm::Value *LI = Builder.CreateLoad(EltPtr);
3393  IRCallArgs[FirstIRArg + i] = LI;
3394  }
3395  } else {
3396  // In the simple case, just pass the coerced loaded value.
3397  assert(NumIRArgs == 1);
3398  IRCallArgs[FirstIRArg] =
3399  CreateCoercedLoad(Src, ArgInfo.getCoerceToType(), *this);
3400  }
3401 
3402  break;
3403  }
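  // Editorial example (not part of the original source): on x86-64 SysV a
  // small aggregate such as 'struct P { double x, y; };' is coerced to the
  // struct type { double, double } and, since it can be flattened, is passed
  // as two separate double arguments loaded element by element above.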
3404 
3405  case ABIArgInfo::Expand:
3406  unsigned IRArgPos = FirstIRArg;
3407  ExpandTypeToArgs(I->Ty, RV, IRFuncTy, IRCallArgs, IRArgPos);
3408  assert(IRArgPos == FirstIRArg + NumIRArgs);
3409  break;
3410  }
3411  }
3412 
3413  if (ArgMemory.isValid()) {
3414  llvm::Value *Arg = ArgMemory.getPointer();
3415  if (CallInfo.isVariadic()) {
3416  // When passing non-POD arguments by value to variadic functions, we will
3417  // end up with a variadic prototype and an inalloca call site. In such
3418  // cases, we can't do any parameter mismatch checks. Give up and bitcast
3419  // the callee.
3420  unsigned CalleeAS =
3421  cast<llvm::PointerType>(Callee->getType())->getAddressSpace();
3422  Callee = Builder.CreateBitCast(
3423  Callee, getTypes().GetFunctionType(CallInfo)->getPointerTo(CalleeAS));
3424  } else {
3425  llvm::Type *LastParamTy =
3426  IRFuncTy->getParamType(IRFuncTy->getNumParams() - 1);
3427  if (Arg->getType() != LastParamTy) {
3428 #ifndef NDEBUG
3429  // Assert that these structs have equivalent element types.
3430  llvm::StructType *FullTy = CallInfo.getArgStruct();
3431  llvm::StructType *DeclaredTy = cast<llvm::StructType>(
3432  cast<llvm::PointerType>(LastParamTy)->getElementType());
3433  assert(DeclaredTy->getNumElements() == FullTy->getNumElements());
3434  for (llvm::StructType::element_iterator DI = DeclaredTy->element_begin(),
3435  DE = DeclaredTy->element_end(),
3436  FI = FullTy->element_begin();
3437  DI != DE; ++DI, ++FI)
3438  assert(*DI == *FI);
3439 #endif
3440  Arg = Builder.CreateBitCast(Arg, LastParamTy);
3441  }
3442  }
3443  assert(IRFunctionArgs.hasInallocaArg());
3444  IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg;
3445  }
3446 
3447  if (!CallArgs.getCleanupsToDeactivate().empty())
3448  deactivateArgCleanupsBeforeCall(*this, CallArgs);
3449 
3450  // If the callee is a bitcast of a function to a varargs pointer to function
3451  // type, check to see if we can remove the bitcast. This handles some cases
3452  // with unprototyped functions.
3453  if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Callee))
3454  if (llvm::Function *CalleeF = dyn_cast<llvm::Function>(CE->getOperand(0))) {
3455  llvm::PointerType *CurPT=cast<llvm::PointerType>(Callee->getType());
3456  llvm::FunctionType *CurFT =
3457  cast<llvm::FunctionType>(CurPT->getElementType());
3458  llvm::FunctionType *ActualFT = CalleeF->getFunctionType();
3459 
3460  if (CE->getOpcode() == llvm::Instruction::BitCast &&
3461  ActualFT->getReturnType() == CurFT->getReturnType() &&
3462  ActualFT->getNumParams() == CurFT->getNumParams() &&
3463  ActualFT->getNumParams() == IRCallArgs.size() &&
3464  (CurFT->isVarArg() || !ActualFT->isVarArg())) {
3465  bool ArgsMatch = true;
3466  for (unsigned i = 0, e = ActualFT->getNumParams(); i != e; ++i)
3467  if (ActualFT->getParamType(i) != CurFT->getParamType(i)) {
3468  ArgsMatch = false;
3469  break;
3470  }
3471 
3472  // Strip the cast if we can get away with it. This is a nice cleanup,
3473  // but also allows us to inline the function at -O0 if it is marked
3474  // always_inline.
3475  if (ArgsMatch)
3476  Callee = CalleeF;
3477  }
3478  }
3479 
3480  assert(IRCallArgs.size() == IRFuncTy->getNumParams() || IRFuncTy->isVarArg());
3481  for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
3482  // The inalloca argument can have a different type.
3483  if (IRFunctionArgs.hasInallocaArg() &&
3484  i == IRFunctionArgs.getInallocaArgNo())
3485  continue;
3486  if (i < IRFuncTy->getNumParams())
3487  assert(IRCallArgs[i]->getType() == IRFuncTy->getParamType(i));
3488  }
3489 
3490  unsigned CallingConv;
3491  CodeGen::AttributeListType AttributeList;
3492  CGM.ConstructAttributeList(Callee->getName(), CallInfo, CalleeInfo,
3493  AttributeList, CallingConv,
3494  /*AttrOnCallSite=*/true);
3495  llvm::AttributeSet Attrs = llvm::AttributeSet::get(getLLVMContext(),
3496  AttributeList);
3497 
3498  bool CannotThrow;
3499  if (currentFunctionUsesSEHTry()) {
3500  // SEH cares about asynchronous exceptions, so everything can "throw."
3501  CannotThrow = false;
3502  } else if (isCleanupPadScope() &&
3503  EHPersonality::get(*this).isMSVCXXPersonality()) {
3504  // The MSVC++ personality will implicitly terminate the program if an
3505  // exception is thrown. An unwind edge cannot be reached.
3506  CannotThrow = true;
3507  } else {
3508  // Otherwise, nounwind call sites will never throw.
3509  CannotThrow = Attrs.hasAttribute(llvm::AttributeSet::FunctionIndex,
3510  llvm::Attribute::NoUnwind);
3511  }
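  // Editorial example (not part of the original source): a callee declared
  // 'void f() noexcept;' receives the nounwind attribute, so CannotThrow is
  // true here and the call is emitted as a plain call with no landing-pad
  // edge.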
3512  llvm::BasicBlock *InvokeDest = CannotThrow ? nullptr : getInvokeDest();
3513 
3514  SmallVector<llvm::OperandBundleDef, 1> BundleList;
3515  getBundlesForFunclet(Callee, CurrentFuncletPad, BundleList);
3516 
3517  llvm::CallSite CS;
3518  if (!InvokeDest) {
3519  CS = Builder.CreateCall(Callee, IRCallArgs, BundleList);
3520  } else {
3521  llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
3522  CS = Builder.CreateInvoke(Callee, Cont, InvokeDest, IRCallArgs,
3523  BundleList);
3524  EmitBlock(Cont);
3525  }
3526  if (callOrInvoke)
3527  *callOrInvoke = CS.getInstruction();
3528 
3529  if (CurCodeDecl && CurCodeDecl->hasAttr<FlattenAttr>() &&
3530  !CS.hasFnAttr(llvm::Attribute::NoInline))
3531  Attrs =
3532  Attrs.addAttribute(getLLVMContext(), llvm::AttributeSet::FunctionIndex,
3533  llvm::Attribute::AlwaysInline);
3534 
3535  // Disable inlining inside SEH __try blocks.
3536  if (isSEHTryScope())
3537  Attrs =
3538  Attrs.addAttribute(getLLVMContext(), llvm::AttributeSet::FunctionIndex,
3539  llvm::Attribute::NoInline);
3540 
3541  CS.setAttributes(Attrs);
3542  CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
3543 
3544  // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
3545  // optimizer it can aggressively ignore unwind edges.
3546  if (CGM.getLangOpts().ObjCAutoRefCount)
3547  AddObjCARCExceptionMetadata(CS.getInstruction());
3548 
3549  // If the call doesn't return, finish the basic block and clear the
3550  // insertion point; this allows the rest of IRgen to discard
3551  // unreachable code.
3552  if (CS.doesNotReturn()) {
3553  if (UnusedReturnSize)
3554  EmitLifetimeEnd(llvm::ConstantInt::get(Int64Ty, UnusedReturnSize),
3555  SRetPtr.getPointer());
3556 
3557  Builder.CreateUnreachable();
3558  Builder.ClearInsertionPoint();
3559 
3560  // FIXME: For now, emit a dummy basic block because expression emitters
3561  // generally are not ready to handle emitting expressions at unreachable
3562  // points.
3563  EnsureInsertPoint();
3564 
3565  // Return a reasonable RValue.
3566  return GetUndefRValue(RetTy);
3567  }
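  // Editorial example (not part of the original source): after emitting
  // 'abort();' the call site above is noreturn, so the block was terminated
  // with unreachable and the dummy block exists only so that later
  // (unreachable) expression emission has somewhere to go.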
3568 
3569  llvm::Instruction *CI = CS.getInstruction();
3570  if (Builder.isNamePreserving() && !CI->getType()->isVoidTy())
3571  CI->setName("call");
3572 
3573  // Emit any writebacks immediately. Arguably this should happen
3574  // after any return-value munging.
3575  if (CallArgs.hasWritebacks())
3576  emitWritebacks(*this, CallArgs);
3577 
3578  // The stack cleanup for inalloca arguments has to run out of the normal
3579  // lexical order, so deactivate it and run it manually here.
3580  CallArgs.freeArgumentMemory(*this);
3581 
3582  if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(CI)) {
3583  const Decl *TargetDecl = CalleeInfo.getCalleeDecl();
3584  if (TargetDecl && TargetDecl->hasAttr<NotTailCalledAttr>())
3585  Call->setTailCallKind(llvm::CallInst::TCK_NoTail);
3586  }
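  // Editorial example (not part of the original source): for a function
  // declared 'int g() __attribute__((not_tail_called));', every call to g is
  // marked notail here, forbidding tail-call optimization of that call site.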
3587 
3588  RValue Ret = [&] {
3589  switch (RetAI.getKind()) {
3590  case ABIArgInfo::InAlloca:
3591  case ABIArgInfo::Indirect: {
3592  RValue ret = convertTempToRValue(SRetPtr, RetTy, SourceLocation());
3593  if (UnusedReturnSize)
3594  EmitLifetimeEnd(llvm::ConstantInt::get(Int64Ty, UnusedReturnSize),
3595  SRetPtr.getPointer());
3596  return ret;
3597  }
3598 
3599  case ABIArgInfo::Ignore:
3600  // The call result is being ignored; still construct the appropriate
3601  // return value for our caller.
3602  return GetUndefRValue(RetTy);
3603 
3604  case ABIArgInfo::Extend:
3605  case ABIArgInfo::Direct: {
3606  llvm::Type *RetIRTy = ConvertType(RetTy);
3607  if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
3608  switch (getEvaluationKind(RetTy)) {
3609  case TEK_Complex: {
3610  llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
3611  llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
3612  return RValue::getComplex(std::make_pair(Real, Imag));
3613  }
3614  case TEK_Aggregate: {
3615  Address DestPtr = ReturnValue.getValue();
3616  bool DestIsVolatile = ReturnValue.isVolatile();
3617 
3618  if (!DestPtr.isValid()) {
3619  DestPtr = CreateMemTemp(RetTy, "agg.tmp");
3620  DestIsVolatile = false;
3621  }
3622  BuildAggStore(*this, CI, DestPtr, DestIsVolatile);
3623  return RValue::getAggregate(DestPtr);
3624  }
3625  case TEK_Scalar: {
3626  // If the return value doesn't match, perform a bitcast to coerce it.
3627  // This can happen due to trivial type mismatches.
3628  llvm::Value *V = CI;
3629  if (V->getType() != RetIRTy)
3630  V = Builder.CreateBitCast(V, RetIRTy);
3631  return RValue::get(V);
3632  }
3633  }
3634  llvm_unreachable("bad evaluation kind");
3635  }
3636 
3637  Address DestPtr = ReturnValue.getValue();
3638  bool DestIsVolatile = ReturnValue.isVolatile();
3639 
3640  if (!DestPtr.isValid()) {
3641  DestPtr = CreateMemTemp(RetTy, "coerce");
3642  DestIsVolatile = false;
3643  }
3644 
3645  // If the value is offset in memory, apply the offset now.
3646  Address StorePtr = emitAddressAtOffset(*this, DestPtr, RetAI);
3647  CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);
3648 
3649  return convertTempToRValue(DestPtr, RetTy, SourceLocation());
3650  }
3651 
3652  case ABIArgInfo::Expand:
3653  llvm_unreachable("Invalid ABI kind for return argument");
3654  }
3655 
3656  llvm_unreachable("Unhandled ABIArgInfo::Kind");
3657  } ();
3658 
3659  const Decl *TargetDecl = CalleeInfo.getCalleeDecl();
3660 
3661  if (Ret.isScalar() && TargetDecl) {
3662  if (const auto *AA = TargetDecl->getAttr<AssumeAlignedAttr>()) {
3663  llvm::Value *OffsetValue = nullptr;
3664  if (const auto *Offset = AA->getOffset())
3665  OffsetValue = EmitScalarExpr(Offset);
3666 
3667  llvm::Value *Alignment = EmitScalarExpr(AA->getAlignment());
3668  llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(Alignment);
3669  EmitAlignmentAssumption(Ret.getScalarVal(), AlignmentCI->getZExtValue(),
3670  OffsetValue);
3671  }
3672  }
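  // Editorial example (not part of the original source): for
  //   void *alloc(unsigned) __attribute__((assume_aligned(64)));
  // the scalar return value gets a 64-byte alignment assumption here, letting
  // the optimizer rely on the annotated alignment of the returned pointer.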
3673 
3674  return Ret;
3675 }
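// --- Editorial example (not part of CGCall.cpp) ----------------------------
// A hedged, simplified sketch of how call emission elsewhere in CodeGen
// typically drives EmitCall above: evaluate the arguments, arrange the
// ABI-lowered signature, then hand everything to EmitCall. 'FD', 'FPT', 'E'
// and 'ReturnValue' stand in for values a real caller would already have.
//
//   CallArgList Args;
//   EmitCallArgs(Args, FPT, E->arguments(), FD);
//   const CGFunctionInfo &FnInfo = CGM.getTypes().arrangeFreeFunctionCall(
//       Args, FPT, /*ChainCall=*/false);
//   llvm::Value *Callee = CGM.GetAddrOfFunction(FD);
//   return EmitCall(FnInfo, Callee, ReturnValue, Args, FD);
// ----------------------------------------------------------------------------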
3676 
3677 /* VarArg handling */
3678 
3679 Address CodeGenFunction::EmitVAArg(VAArgExpr *VE, Address &VAListAddr) {
3680  VAListAddr = VE->isMicrosoftABI()
3681  ? EmitMSVAListRef(VE->getSubExpr())
3682  : EmitVAListRef(VE->getSubExpr());
3683  QualType Ty = VE->getType();
3684  if (VE->isMicrosoftABI())
3685  return CGM.getTypes().getABIInfo().EmitMSVAArg(*this, VAListAddr, Ty);
3686  return CGM.getTypes().getABIInfo().EmitVAArg(*this, VAListAddr, Ty);
3687 }
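// --- Editorial example (not part of CGCall.cpp) ----------------------------
// A source-level illustration of what reaches EmitVAArg above: each
// 'va_arg(ap, T)' in the source becomes a VAArgExpr, and the target-specific
// increment-and-load on the va_list is delegated to the ABIInfo.
//
//   #include <cstdarg>
//   int sum(int n, ...) {
//     va_list ap;
//     va_start(ap, n);
//     int s = 0;
//     for (int i = 0; i < n; ++i)
//       s += va_arg(ap, int);   // -> CodeGenFunction::EmitVAArg
//     va_end(ap);
//     return s;
//   }
// ----------------------------------------------------------------------------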
static CanQual< Type > CreateUnsafe(QualType Other)
Builds a canonical type from a QualType.
ObjCIndirectCopyRestoreExpr - Represents the passing of a function argument by indirect copy-restore ...
Definition: ExprObjC.h:1464
Ignore - Ignore the argument (treat as void).
ReturnValueSlot - Contains the address where the return value of a function can be stored...
Definition: CGCall.h:151
bool useObjCFP2RetForComplexLongDouble() const
Check whether _Complex long double should use the "fp2ret" flavor of Objective-C message passing on t...
ExprObjectKind getObjectKind() const
getObjectKind - The object kind that this expression produces.
Definition: Expr.h:407
void getExpandedTypes(QualType Ty, SmallVectorImpl< llvm::Type * >::iterator &TI)
getExpandedTypes - Expand the type
Definition: CGCall.cpp:738
FunctionDecl - An instance of this class is created to represent a function declaration or definition...
Definition: Decl.h:1483
CK_LValueToRValue - A conversion which causes the extraction of an r-value from the operand gl-value...
void EmitCallArgs(CallArgList &Args, const T *CallArgTypeInfo, llvm::iterator_range< CallExpr::const_arg_iterator > ArgRange, const FunctionDecl *CalleeDecl=nullptr, unsigned ParamsToSkip=0)
EmitCallArgs - Emit call arguments for a function.
StringRef getName() const
getName - Get the name of identifier for this declaration as a StringRef.
Definition: Decl.h:169
ObjCEntrypoints & getObjCEntrypoints() const
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition: Type.h:2147
CanQualType VoidPtrTy
Definition: ASTContext.h:895
A (possibly-)qualified type.
Definition: Type.h:575
bool useObjCFPRetForRealType(RealType T) const
Check whether the given real type should use the "fpret" flavor of Objective-C message passing on thi...
bool isCanonicalAsParam() const
Determines if this canonical type is furthermore canonical as a parameter.
bool ReturnTypeUsesSRet(const CGFunctionInfo &FI)
Return true iff the given type uses 'sret' when used as a return type.
Definition: CGCall.cpp:1250
llvm::Type * ConvertTypeForMem(QualType T)
CanQualType getReturnType() const
ExtInfo withCallingConv(CallingConv cc) const
Definition: Type.h:2954
static CanQual< FunctionProtoType > GetFormalType(const CXXMethodDecl *MD)
Returns the canonical formal type of the given C++ method.
Definition: CGCall.cpp:71
CXXCtorType getCtorType() const
Definition: GlobalDecl.h:62
unsigned getInAllocaFieldIndex() const
static void emitWriteback(CodeGenFunction &CGF, const CallArgList::Writeback &writeback)
Emit the actual writing-back of a writeback.
Definition: CGCall.cpp:2567
static Address CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty, CharUnits MinAlign)
Create a temporary allocation for the purposes of coercion.
Definition: CGCall.cpp:865
static llvm::Value * emitAutoreleaseOfResult(CodeGenFunction &CGF, llvm::Value *result)
Emit an ARC autorelease of the result of a function.
Definition: CGCall.cpp:2283
static const CGFunctionInfo & arrangeFreeFunctionLikeCall(CodeGenTypes &CGT, CodeGenModule &CGM, const CallArgList &args, const FunctionType *fnType, unsigned numExtraRequiredArgs, bool chainCall)
Arrange a call as unto a free function, except possibly with an additional number of formal parameter...
Definition: CGCall.cpp:403
CanQual< T > getUnqualifiedType() const
Retrieve the unqualified form of this type.
FunctionType - C99 6.7.5.3 - Function Declarators.
Definition: Type.h:2847
const TargetInfo & getTarget() const
const CGFunctionInfo & arrangeFreeFunctionType(CanQual< FunctionProtoType > Ty, const FunctionDecl *FD)
Arrange the argument and result information for a value of the given freestanding function type...
Definition: CGCall.cpp:140
CanQualType getSizeType() const
Return the unique type for "size_t" (C99 7.17), defined in <stddef.h>.
std::pair< llvm::Value *, llvm::Value * > getComplexVal() const
getComplexVal - Return the real/imag components of this complex value.
Definition: CGValue.h:65
void EmitNonNullArgCheck(RValue RV, QualType ArgType, SourceLocation ArgLoc, const FunctionDecl *FD, unsigned ParmNum)
Create a check for a function parameter that may potentially be declared as non-null.
Definition: CGCall.cpp:2804
llvm::AllocaInst * CreateTempAlloca(llvm::Type *Ty, const Twine &Name="tmp")
CreateTempAlloca - This creates a alloca and inserts it into the entry block.
Definition: CGExpr.cpp:66
Extend - Valid only for integer argument types.
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition: CharUnits.h:171
Address EmitVAArg(VAArgExpr *VE, Address &VAListAddr)
Generate code to get an argument from the passed in pointer and update it accordingly.
Definition: CGCall.cpp:3679
static bool isProvablyNull(llvm::Value *addr)
Definition: CGCall.cpp:2558
llvm::LoadInst * CreateDefaultAlignedLoad(llvm::Value *Addr, const llvm::Twine &Name="")
Definition: CGBuilder.h:135
Decl - This represents one declaration (or definition), e.g.
Definition: DeclBase.h:77
Address getAddress() const
Definition: CGValue.h:331
const CGFunctionInfo & arrangeCXXMethodType(const CXXRecordDecl *RD, const FunctionProtoType *FTP, const CXXMethodDecl *MD)
Arrange the argument and result information for a call to an unknown C++ non-static member function o...
Definition: CGCall.cpp:186
const Decl * CurCodeDecl
CurCodeDecl - This is the inner-most code context, which includes blocks.
Direct - Pass the argument directly using the normal converted LLVM type, or by coercing to another s...
CGCXXABI & getCXXABI() const
Definition: CodeGenTypes.h:175
bool hasNonTrivialDestructor() const
Determine whether this class has a non-trivial destructor (C++ [class.dtor]p3)
Definition: DeclCXX.h:1269
bool hasFlexibleArrayMember() const
Definition: Decl.h:3218
ASTContext & getContext() const
Definition: CodeGenTypes.h:172
const llvm::DataLayout & getDataLayout() const
The base class of the type hierarchy.
Definition: Type.h:1249
void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false)
EmitStoreThroughLValue - Store the specified rvalue into the specified lvalue, where both are guarant...
Definition: CGExpr.cpp:1568
RValue asAggregateRValue() const
Definition: CGValue.h:435
static const NonNullAttr * getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD, QualType ArgType, unsigned ArgNo)
Returns the attribute (either parameter attribute, or function attribute), which declares argument Ar...
Definition: CGCall.cpp:1787
static CGFunctionInfo * create(unsigned llvmCC, bool instanceMethod, bool chainCall, const FunctionType::ExtInfo &extInfo, CanQualType resultType, ArrayRef< CanQualType > argTypes, RequiredArgs required)
Definition: CGCall.cpp:565
static int getExpansionSize(QualType Ty, const ASTContext &Context)
Definition: CGCall.cpp:718
bool isFuncTypeConvertible(const FunctionType *FT)
isFuncTypeConvertible - Utility to check whether a function type can be converted to an LLVM type (i...
bool isBlockPointerType() const
Definition: Type.h:5311
llvm::Value * EmitARCRetainNonBlock(llvm::Value *value)
Retain the given object, with normal retain semantics.
Definition: CGObjC.cpp:1948
virtual void computeInfo(CodeGen::CGFunctionInfo &FI) const =0
const CGFunctionInfo & arrangeFreeFunctionDeclaration(QualType ResTy, const FunctionArgList &Args, const FunctionType::ExtInfo &Info, bool isVariadic)
Definition: CGCall.cpp:490
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
Represents a C++ constructor within a class.
Definition: DeclCXX.h:2134
bool hasUnsignedIntegerRepresentation() const
Determine whether this type has an unsigned integer representation of some sort, e.g., it is an unsigned integer type or a vector.
Definition: Type.cpp:1770
static llvm::Value * emitArgumentDemotion(CodeGenFunction &CGF, const VarDecl *var, llvm::Value *value)
An argument came in as a promoted argument; demote it back to its declared type.
Definition: CGCall.cpp:1767
void freeArgumentMemory(CodeGenFunction &CGF) const
Definition: CGCall.cpp:2796
Default closure variant of a ctor.
Definition: ABI.h:30
Address GetAddrOfLocalVar(const VarDecl *VD)
GetAddrOfLocalVar - Return the address of a local variable.
VarDecl - An instance of this class is created to represent a variable declaration or definition...
Definition: Decl.h:699
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Definition: Address.h:52
RAII object to set/unset CodeGenFunction::IsSanitizerScope.
bool areArgsDestroyedLeftToRightInCallee() const
Are arguments to a call destroyed left to right in the callee? This is a fundamental language change...
Definition: TargetCXXABI.h:217
static llvm::Value * CreateCoercedLoad(Address Src, llvm::Type *Ty, CodeGenFunction &CGF)
CreateCoercedLoad - Create a load from.
Definition: CGCall.cpp:969
void setCoerceToType(llvm::Type *T)
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
Definition: ASTContext.h:1793
ObjCMethodDecl - Represents an instance or class method declaration.
Definition: DeclObjC.h:113
static unsigned ClangCallConvToLLVMCallConv(CallingConv CC)
Definition: CGCall.cpp:42
static void EmitInitStoreOfNonAggregate(CodeGenFunction &CGF, RValue Src, LValue Dst)
Store a non-aggregate value to an address to initialize it.
Definition: CGCall.cpp:3146
Address CreateConstInBoundsByteGEP(Address Addr, CharUnits Offset, const llvm::Twine &Name="")
Given a pointer to i8, adjust it by a given constant offset.
Definition: CGBuilder.h:247
const CGFunctionInfo & arrangeCXXStructorDeclaration(const CXXMethodDecl *MD, StructorType Type)
Definition: CGCall.cpp:223
llvm::Type * ConvertTypeForMem(QualType T)
ConvertTypeForMem - Convert type T into a llvm::Type.
ParmVarDecl - Represents a parameter to a function.
Definition: Decl.h:1299
bool isObjCRetainableType() const
Definition: Type.cpp:3704
NullPointerConstantKind isNullPointerConstant(ASTContext &Ctx, NullPointerConstantValueDependence NPC) const
isNullPointerConstant - C99 6.3.2.3p3 - Test if this reduces down to a Null pointer constant...
Definition: Expr.cpp:3257
static bool isProvablyNonNull(llvm::Value *addr)
Definition: CGCall.cpp:2562
virtual bool hasMostDerivedReturn(GlobalDecl GD) const
Definition: CGCXXABI.h:107
RecordDecl - Represents a struct/union/class.
Definition: Decl.h:3166
const_arg_iterator arg_end() const
An object to manage conditionally-evaluated expressions.
ExtInfo withProducesResult(bool producesResult) const
Definition: Type.h:2941
static void emitWritebacks(CodeGenFunction &CGF, const CallArgList &args)
Definition: CGCall.cpp:2632
void EmitFunctionEpilog(const CGFunctionInfo &FI, bool EmitRetDbgLoc, SourceLocation EndLoc)
EmitFunctionEpilog - Emit the target specific LLVM code to return the given temporary.
Definition: CGCall.cpp:2363
class LLVM_ALIGNAS(8) DependentTemplateSpecializationType const IdentifierInfo * Name
Represents a template specialization type whose template cannot be resolved, e.g. ...
Definition: Type.h:4381
bool hasAttr() const
Definition: DeclBase.h:498
Indirect - Pass the argument indirectly via a hidden pointer with the specified alignment (0 indicate...
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
llvm::Type * ConvertType(QualType T)
ConvertType - Convert type T into a llvm::Type.
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition: ASTContext.h:91
llvm::Constant * objc_retainAutoreleasedReturnValue
id objc_retainAutoreleasedReturnValue(id);
bool isReferenceType() const
Definition: Type.h:5314
llvm::Value * EmitARCRetainAutoreleaseReturnValue(llvm::Value *value)
Do a fused retain/autorelease of the given object.
Definition: CGObjC.cpp:2162
FieldDecl - An instance of this class is created by Sema::ActOnField to represent a member of a struc...
Definition: Decl.h:2209
bool isAnyPointerType() const
Definition: Type.h:5308
StructorType getFromDtorType(CXXDtorType T)
Definition: CodeGenTypes.h:104
llvm::CallInst * EmitRuntimeCall(llvm::Value *callee, const Twine &name="")
bool getInAllocaSRet() const
Return true if this field of an inalloca struct should be returned to implement a struct return calli...
Denotes a cleanup that should run when a scope is exited using exceptional control flow (a throw stat...
Definition: EHScopeStack.h:81
llvm::Value * EmitARCAutoreleaseReturnValue(llvm::Value *value)
Autorelease the given object.
Definition: CGObjC.cpp:2152
param_range params()
Definition: DeclObjC.h:354
static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args, const ObjCIndirectCopyRestoreExpr *CRE)
Emit an argument that's being passed call-by-writeback.
Definition: CGCall.cpp:2661
const CGFunctionInfo & arrangeLLVMFunctionInfo(CanQualType returnType, bool instanceMethod, bool chainCall, ArrayRef< CanQualType > argTypes, FunctionType::ExtInfo info, RequiredArgs args)
"Arrange" the LLVM information for a call or type with the given signature.
Definition: CGCall.cpp:515
static CharUnits Zero()
Zero - Construct a CharUnits quantity of zero.
Definition: CharUnits.h:53
llvm::CallSite EmitCallOrInvoke(llvm::Value *Callee, ArrayRef< llvm::Value * > Args, const Twine &Name="")
Emits a call or invoke instruction to the given function, depending on the current state of the EH st...
Definition: CGCall.cpp:3122
static const EHPersonality & get(CodeGenModule &CGM, const FunctionDecl *FD)
llvm::StructType * getArgStruct() const
Get the struct type used to represent all the arguments in memory.
llvm::Type * getCoerceToType() const
RValue EmitCall(const CGFunctionInfo &FnInfo, llvm::Value *Callee, ReturnValueSlot ReturnValue, const CallArgList &Args, CGCalleeInfo CalleeInfo=CGCalleeInfo(), llvm::Instruction **callOrInvoke=nullptr)
EmitCall - Generate a call of the given function, expecting the given result type, and using the given argument list which specifies both the LLVM arguments and the types they were derived from.
Definition: CGCall.cpp:3159
unsigned getRegParm() const
Definition: Type.h:2916
i32 captured_struct **param SharedsTy A type which contains references the shared variables *param Shareds Context with the list of shared variables from the p *TaskFunction *param IfCond Not a nullptr if if clause was nullptr *otherwise *param PrivateVars List of references to private variables for the task *directive *param PrivateCopies List of private copies for each private variable in *p PrivateVars *param FirstprivateVars List of references to private variables for the *task directive *param FirstprivateCopies List of private copies for each private variable *in p FirstprivateVars *param FirstprivateInits List of references to auto generated variables *used for initialization of a single array element Used if firstprivate *variable is of array type *param Dependences List of dependences for the task construct
const Decl * getDecl() const
Definition: GlobalDecl.h:60
bool shouldCopy() const
shouldCopy - True if we should do the 'copy' part of the copy-restore.
Definition: ExprObjC.h:1494
void addArgCleanupDeactivation(EHScopeStack::stable_iterator Cleanup, llvm::Instruction *IsActiveIP)
Definition: CGCall.h:106
T * getAttr() const
Definition: DeclBase.h:495
unsigned getEffectiveCallingConvention() const
getEffectiveCallingConvention - Return the actual calling convention to use, which may depend on the ...
void EmitNoreturnRuntimeCallOrInvoke(llvm::Value *callee, ArrayRef< llvm::Value * > args)
Emits a call or invoke to the given noreturn runtime function.
Definition: CGCall.cpp:3079
CharUnits getArgStructAlignment() const
const FunctionProtoType * getCalleeFunctionProtoType()
static bool hasScalarEvaluationKind(QualType T)
CharUnits getAlignment() const
Definition: CGValue.h:316
static llvm::Value * tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF, llvm::Value *result)
Try to emit a fused autorelease of a return result.
Definition: CGCall.cpp:2155
const TargetInfo & getTargetInfo() const
Definition: ASTContext.h:580
Address CreateElementBitCast(Address Addr, llvm::Type *Ty, const llvm::Twine &Name="")
Cast the element type of the given address to a different type, preserving information like the align...
Definition: CGBuilder.h:176
CharUnits - This is an opaque type for sizes expressed in character units.
Definition: CharUnits.h:38
uint32_t Offset
Definition: CacheTokens.cpp:44
Qualifiers::ObjCLifetime getObjCLifetime() const
Definition: CGValue.h:260
CanProxy< U > getAs() const
Retrieve a canonical type pointer with a different static type, upcasting or downcasting as needed...
QualType getReturnType() const
Definition: Type.h:2977
virtual CodeGen::Address EmitMSVAArg(CodeGen::CodeGenFunction &CGF, CodeGen::Address VAListAddr, QualType Ty) const
Emit the target dependent code to load a value of.
const CXXRecordDecl * getParent() const
Returns the parent of this method declaration, which is the class in which this method is defined...
Definition: DeclCXX.h:1801
field_range fields() const
Definition: Decl.h:3295
const Expr * getSubExpr() const
Definition: Expr.h:3651
QualType getIntPtrType() const
Return a type compatible with "intptr_t" (C99 7.18.1.4), as defined by the target.
virtual CodeGen::Address EmitVAArg(CodeGen::CodeGenFunction &CGF, CodeGen::Address VAListAddr, QualType Ty) const =0
EmitVAArg - Emit the target dependent code to load a value of.
llvm::CallInst * CreateMemCpy(Address Dest, Address Src, llvm::Value *Size, bool IsVolatile=false)
Definition: CGBuilder.h:272
ABIArgInfo - Helper class to encapsulate information about how a specific C type should be passed to ...
const CGFunctionInfo & arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD, QualType receiverType)
Arrange the argument and result information for the function type through which to perform a send to ...
Definition: CGCall.cpp:325
bool isVariadic() const
Whether this function is variadic.
Definition: Decl.cpp:2454
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
bool isIncompleteType(NamedDecl **Def=nullptr) const
Types are partitioned into 3 broad categories (C99 6.2.5p1): object types, function types...
Definition: Type.cpp:1886
const CGFunctionInfo & arrangeCXXMethodDeclaration(const CXXMethodDecl *MD)
Arrange the argument and result information for a declaration or definition of the given C++ non-stat...
Definition: CGCall.cpp:207
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
An ordinary object is located at an address in memory.
Definition: Specifiers.h:118
bool isValid() const
Definition: Address.h:36
detail::InMemoryDirectory::const_iterator I
QualType getCanonicalTypeInternal() const
Definition: Type.h:1973
QualType getType() const
Definition: Decl.h:530
void EmitCallArg(CallArgList &args, const Expr *E, QualType ArgType)
EmitCallArg - Emit a single call argument.
Definition: CGCall.cpp:2920
const CGFunctionInfo & arrangeGlobalDeclaration(GlobalDecl GD)
Definition: CGCall.cpp:352
static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD)
Derives the 'this' type for codegen purposes, i.e.
Definition: CGCall.cpp:65
Represents a K&R-style 'int foo()' function, which has no information available about its arguments...
Definition: Type.h:3007
unsigned getNumRequiredArgs() const
llvm::CallSite EmitRuntimeCallOrInvoke(llvm::Value *callee, ArrayRef< llvm::Value * > args, const Twine &name="")
Emits a call or invoke instruction to the given runtime function.
Definition: CGCall.cpp:3111
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource AlignSource=AlignmentSource::Type)
bool isUnion() const
Definition: Decl.h:2856
ExtInfo getExtInfo() const
Definition: Type.h:2986
CanQualType getCanonicalTypeUnqualified() const
std::pair< llvm::Value *, llvm::Value * > ComplexPairTy
Represents a prototype with parameter type info, e.g.
Definition: Type.h:3041
ExceptionSpecificationType getExceptionSpecType() const
Get the kind of exception specification on this function.
Definition: Type.h:3193
llvm::CallInst * EmitNounwindRuntimeCall(llvm::Value *callee, const Twine &name="")
const TargetCodeGenInfo & getTargetCodeGenInfo()
writeback_const_range writebacks() const
Definition: CGCall.h:102
const TargetInfo & getTarget() const
RValue - This trivial value class is used to represent the result of an expression that is evaluated...
Definition: CGValue.h:38
ArrayRef< CallArgCleanup > getCleanupsToDeactivate() const
Definition: CGCall.h:114
static void appendParameterTypes(const CodeGenTypes &CGT, SmallVectorImpl< CanQualType > &prefix, const CanQual< FunctionProtoType > &FPT, const FunctionDecl *FD)
Adds the formal paramaters in FPT to the given prefix.
Definition: CGCall.cpp:98
ASTContext * Context
void addWriteback(LValue srcLV, Address temporary, llvm::Value *toUse)
Definition: CGCall.h:91
void EmitDelegateCallArg(CallArgList &args, const VarDecl *param, SourceLocation loc)
EmitDelegateCallArg - We are performing a delegate call; that is, the current function is delegating ...
Definition: CGCall.cpp:2529
Represents a call to the builtin function __builtin_va_arg.
Definition: Expr.h:3633
ID
Defines the set of possible language-specific address spaces.
Definition: AddressSpaces.h:27
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee...
Definition: Type.cpp:415
Address Temporary
The temporary alloca.
Definition: CGCall.h:67
llvm::Value * ToUse
A value to "use" after the writeback, or null.
Definition: CGCall.h:70
bool isSignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is signed or an enumeration types whose underlying ty...
Definition: Type.cpp:1716
llvm::Value * getPointer() const
Definition: Address.h:38
static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF, QualType Ty)
Definition: CGCall.cpp:2509
Expr - This represents one expression.
Definition: Expr.h:104
CXXDtorType getDtorType() const
Definition: GlobalDecl.h:67
static Address invalid()
Definition: Address.h:35
bool isInstance() const
Definition: DeclCXX.h:1728
bool isAggregate() const
Definition: CGValue.h:53
CGCXXABI & getCXXABI() const
static bool isInAllocaArgument(CGCXXABI &ABI, QualType type)
Definition: CGCall.cpp:2504
static CanQualType GetReturnType(QualType RetTy)
Returns the "extra-canonicalized" return type, which discards qualifiers on the return type...
Definition: CGCall.cpp:80
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource AlignSource=AlignmentSource::Type, llvm::MDNode *TBAAInfo=nullptr, bool isInit=false, QualType TBAABaseTy=QualType(), uint64_t TBAAOffset=0, bool isNontemporal=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
Definition: CGExpr.cpp:1348
const CGFunctionInfo & arrangeNullaryFunction()
getNullaryFunctionInfo - Get the function info for a void() function with standard CC...
Definition: CGCall.cpp:505
bool isVirtual() const
Definition: DeclCXX.h:1745
bool usesInAlloca() const
Return true if this function uses inalloca arguments.
Represents a C++ destructor within a class.
Definition: DeclCXX.h:2345
llvm::Constant * objc_retain
id objc_retain(id);
CharUnits getIndirectAlign() const
RValue asRValue() const
Definition: CGValue.h:578
bool isMSVCXXPersonality() const
Definition: CGCleanup.h:637
const ParmVarDecl * getParamDecl(unsigned i) const
Definition: Decl.h:1927
bool getNoReturn() const
Definition: Type.h:2913
ASTContext & getContext() const
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
Definition: CharUnits.h:63
ImplicitParamDecl * getSelfDecl() const
Definition: DeclObjC.h:411
void add(RValue rvalue, QualType type, bool needscopy=false)
Definition: CGCall.h:81
static void eraseUnusedBitCasts(llvm::Instruction *insn)
Definition: CGCall.cpp:2143
A class for recording the number of arguments that a function signature requires. ...
bool ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI)
Return true iff the given type uses an argument slot when 'sret' is used as a return type...
Definition: CGCall.cpp:1254
static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr, const ABIArgInfo &info)
Definition: CGCall.cpp:1098
Address EmitPointerWithAlignment(const Expr *Addr, AlignmentSource *Source=nullptr)
EmitPointerWithAlignment - Given an expression with a pointer type, emit the value and compute our be...
Definition: CGExpr.cpp:795
static llvm::StoreInst * findDominatingStoreToReturnValue(CodeGenFunction &CGF)
Heuristically search for a dominating store to the return-value slot.
Definition: CGCall.cpp:2301
virtual bool HasThisReturn(GlobalDecl GD) const
Returns true if the given constructor or destructor is one of the kinds that the ABI says returns 'th...
Definition: CGCXXABI.h:105
void Profile(llvm::FoldingSetNodeID &ID)
UnaryOperator - This represents the unary-expression's (except sizeof and alignof), the postinc/postdec operators from postfix-expression, and various extensions.
Definition: Expr.h:1654
llvm::Function * getIntrinsic(unsigned IID, ArrayRef< llvm::Type * > Tys=None)
class LLVM_ALIGNAS(8) TemplateSpecializationType unsigned NumArgs
Represents a type template specialization; the template must be a class template, a type alias templa...
Definition: Type.h:3988
bool isGLValue() const
Definition: Expr.h:249
llvm::Type * getPaddingType() const
ARCPreciseLifetime_t isARCPreciseLifetime() const
Definition: CGValue.h:279
static AggValueSlot forAddr(Address addr, Qualifiers quals, IsDestructed_t isDestructed, NeedsGCBarriers_t needsGC, IsAliased_t isAliased, IsZeroed_t isZeroed=IsNotZeroed)
forAddr - Make a slot for an aggregate value.
Definition: CGValue.h:502
bool isUnsignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is unsigned or an enumeration types whose underlying ...
Definition: Type.cpp:1756
static llvm::Value * CoerceIntOrPtrToIntOrPtr(llvm::Value *Val, llvm::Type *Ty, CodeGenFunction &CGF)
CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both are either integers or p...
Definition: CGCall.cpp:915
CallingConv
CallingConv - Specifies the calling convention that a function uses.
Definition: Specifiers.h:228
bool hasWritebacks() const
Definition: CGCall.h:97
GlobalDecl - represents a global declaration.
Definition: GlobalDecl.h:28
static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val, Address Dest, bool DestIsVolatile)
Definition: CGCall.cpp:1020
bool isVolatile() const
Definition: CGValue.h:295
std::string CPU
If given, the name of the target CPU to generate code for.
Definition: TargetOptions.h:31
The l-value was considered opaque, so the alignment was determined from a type.
bool isNothrow(const ASTContext &Ctx, bool ResultIfDependent=false) const
Determine whether this function type has a non-throwing exception specification.
Definition: Type.cpp:2783
static void CreateCoercedStore(llvm::Value *Src, Address Dst, bool DstIsVolatile, CodeGenFunction &CGF)
CreateCoercedStore - Create a store to.
Definition: CGCall.cpp:1045
Enumerates target-specific builtins in their own namespaces within namespace clang.
Address CreateBitCast(Address Addr, llvm::Type *Ty, const llvm::Twine &Name="")
Definition: CGBuilder.h:168
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
Assigning into this object requires the old value to be released and the new value to be retained...
Definition: Type.h:144
Kind
bool ReturnTypeUsesFPRet(QualType ResultType)
Return true iff the given type uses 'fpret' when used as a return type.
Definition: CGCall.cpp:1259
Address getValue() const
Definition: CGCall.h:171
bool isSimple() const
Definition: CGValue.h:246
ConstExprIterator const_arg_iterator
Definition: Expr.h:2220
static const Expr * maybeGetUnaryAddrOfOperand(const Expr *E)
Definition: CGCall.cpp:2650
ASTContext & getContext() const
Encodes a location in the source.
void EmitARCIntrinsicUse(ArrayRef< llvm::Value * > values)
Given a number of pointers, inform the optimizer that they're being intrinsically used up until this ...
Definition: CGObjC.cpp:1791
unsigned getNumParams() const
getNumParams - Return the number of parameters this function must have based on its FunctionType...
Definition: Decl.cpp:2743
void EmitARCRelease(llvm::Value *value, ARCPreciseLifetime_t precise)
Release the given object.
Definition: CGObjC.cpp:2036
llvm::Value * EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty, SourceLocation Loc, AlignmentSource AlignSource=AlignmentSource::Type, llvm::MDNode *TBAAInfo=nullptr, QualType TBAABaseTy=QualType(), uint64_t TBAAOffset=0, bool isNontemporal=false)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
Definition: CGExpr.cpp:1236
virtual bool isNoProtoCallVariadic(const CodeGen::CallArgList &args, const FunctionNoProtoType *fnType) const
Determine whether a call to an unprototyped functions under the given calling convention should use t...
bool isConstantSizeType() const
Return true if this is not a variable sized type, according to the rules of C99 6.7.5p3.
Definition: Type.cpp:1876
void DeactivateCleanupBlock(EHScopeStack::stable_iterator Cleanup, llvm::Instruction *DominatingIP)
DeactivateCleanupBlock - Deactivates the given cleanup block.
Definition: CGCleanup.cpp:1176
An aggregate value slot.
Definition: CGValue.h:441
bool isVariadic() const
Definition: DeclObjC.h:421
const CGFunctionInfo & arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD)
Arrange the argument and result information for the declaration or definition of an Objective-C metho...
Definition: CGCall.cpp:312
Represents a static or instance method of a struct/union/class.
Definition: DeclCXX.h:1701
bool isReturnsRetained() const
In ARC, whether this function retains its return value.
CanQualType getCanonicalParamType(QualType T) const
Return the canonical parameter type corresponding to the specific potentially non-canonical one...
void allocateArgumentMemory(CodeGenFunction &CGF)
Definition: CGCall.cpp:2788
const ConstantArrayType * getAsConstantArrayType(QualType T) const
Definition: ASTContext.h:2094
std::vector< std::string > Features
The list of target specific features to enable or disable – this should be a list of strings starting...
Definition: TargetOptions.h:47
static bool classof(const EHScope *Scope)
Definition: CGCleanup.h:419
Specifies that a value-dependent expression should be considered to never be a null pointer constant...
Definition: Expr.h:688
CharUnits alignmentOfArrayElement(CharUnits elementSize) const
Given that this is the alignment of the first element of an array, return the minimum alignment of an...
Definition: CharUnits.h:190
CanQualType VoidTy
Definition: ASTContext.h:881
llvm::InlineAsm * retainAutoreleasedReturnValueMarker
A void(void) inline asm to use to mark that the return value of a call will be immediately retain...
const CodeGenOptions & getCodeGenOpts() const
An aligned address.
Definition: Address.h:25
const LangOptions & getLangOpts() const
llvm::LLVMContext & getLLVMContext()
Definition: CodeGenTypes.h:176
QualType getReturnType() const
Definition: DeclObjC.h:330
const T * castAs() const
Member-template castAs<specific type>.
Definition: Type.h:5706
Complete object dtor.
Definition: ABI.h:36
InAlloca - Pass the argument directly using the LLVM inalloca attribute.
bool ReturnTypeUsesFP2Ret(QualType ResultType)
Return true iff the given type uses 'fp2ret' when used as a return type.
Definition: CGCall.cpp:1276
static void AddAttributesFromFunctionProtoType(ASTContext &Ctx, llvm::AttrBuilder &FuncAttrs, const FunctionProtoType *FPT)
Definition: CGCall.cpp:1423
llvm::StringRef getName() const
Return the IR name of the pointer value.
Definition: Address.h:62
const CGFunctionInfo & arrangeCXXMethodCall(const CallArgList &args, const FunctionProtoType *type, RequiredArgs required)
Arrange a call to a C++ method, passing the given arguments.
Definition: CGCall.cpp:476
CXXCtorType
C++ constructor types.
Definition: ABI.h:25
const CGFunctionInfo & arrangeBlockFunctionCall(const CallArgList &args, const FunctionType *type)
A block function call is essentially a free-function call with an extra implicit argument.
Definition: CGCall.cpp:454
void setExternallyDestructed(bool destructed=true)
Definition: CGValue.h:536
QualType getObjCSelType() const
Retrieve the type that corresponds to the predefined Objective-C 'SEL' type.
Definition: ASTContext.h:1603
static Address EnterStructPointerForCoercedAccess(Address SrcPtr, llvm::StructType *SrcSTy, uint64_t DstSize, CodeGenFunction &CGF)
EnterStructPointerForCoercedAccess - Given a struct pointer that we are accessing some number of byte...
Definition: CGCall.cpp:879
FunctionArgList - Type for representing both the decl and type of parameters to a function...
Definition: CGCall.h:146
QualType getType() const
Definition: Expr.h:125
CGFunctionInfo - Class to encapsulate the information about a function definition.
CharUnits getAlignment() const
Return the alignment of this pointer.
Definition: Address.h:67
This class organizes the cross-function state that is used while generating LLVM code.
bool canHaveCoerceToType() const
static const Type * getElementType(const Expr *BaseExpr)
void EmitCXXDestructorCall(const CXXDestructorDecl *D, CXXDtorType Type, bool ForVirtualBase, bool Delegating, Address This)
Definition: CGClass.cpp:2269
llvm::Value * getAggregatePointer() const
Definition: CGValue.h:75
bool isScalar() const
Definition: CGValue.h:51
static RValue getComplex(llvm::Value *V1, llvm::Value *V2)
Definition: CGValue.h:92
bool isZero() const
isZero - Test whether the quantity equals zero.
Definition: CharUnits.h:116
unsigned getDirectOffset() const
CXXDestructorDecl * getDestructor() const
Returns the destructor decl for this class.
Definition: DeclCXX.cpp:1308
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
Definition: ASTMatchers.h:1723
llvm::Value * getScalarVal() const
getScalarVal() - Return the Value* of this scalar value.
Definition: CGValue.h:58
CodeGenFunction::ComplexPairTy ComplexPairTy
Address CreateStructGEP(Address Addr, unsigned Index, CharUnits Offset, const llvm::Twine &Name="")
Definition: CGBuilder.h:191
CXXDtorType toCXXDtorType(StructorType T)
Definition: CodeGenTypes.h:92
virtual void buildStructorSignature(const CXXMethodDecl *MD, StructorType T, SmallVectorImpl< CanQualType > &ArgTys)=0
Build the signature of the given constructor or destructor variant by adding any required parameters...
LValue Source
The original argument.
Definition: CGCall.h:64
const CGFunctionInfo & arrangeFunctionDeclaration(const FunctionDecl *FD)
Arrange the argument and result information for the declaration or definition of the given function...
Definition: CGCall.cpp:287
bool getProducesResult() const
Definition: Type.h:2914
static void forConstantArrayExpansion(CodeGenFunction &CGF, ConstantArrayExpansion *CAE, Address BaseAddr, llvm::function_ref< void(Address)> Fn)
Definition: CGCall.cpp:760
static const CGFunctionInfo & arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod, SmallVectorImpl< CanQualType > &prefix, CanQual< FunctionProtoType > FTP, const FunctionDecl *FD)
Arrange the LLVM function layout for a value of the given function type, on top of any implicit param...
Definition: CGCall.cpp:124
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
Definition: CGBuilder.h:78
QualType getPointerType(QualType T) const
Return the uniqued reference to the type for a pointer to the specified type.
CallingConv getCC() const
Definition: Type.h:2922
SourceLocation getLocStart() const LLVM_READONLY
Definition: Decl.h:624
const llvm::Triple & getTriple() const
Returns the target triple of the primary target.
detail::InMemoryDirectory::const_iterator E
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
Definition: CGBuilder.h:121
static void getBundlesForFunclet(llvm::Value *Callee, llvm::Instruction *CurrentFuncletPad, SmallVectorImpl< llvm::OperandBundleDef > &BundleList)
Definition: CGCall.cpp:3063
const VariableArrayType * getAsVariableArrayType(QualType T) const
Definition: ASTContext.h:2097
static RequiredArgs forPrototypePlus(const FunctionProtoType *prototype, unsigned additional)
Compute the arguments required by the given formal prototype, given that there may be some additional...
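A minimal sketch for a method call, where one implicit argument ('this') precedes the formal parameters (FPT is an assumed const FunctionProtoType *):
    RequiredArgs Required =
        RequiredArgs::forPrototypePlus(FPT, /*additional=*/1);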
llvm::PointerType * getType() const
Return the type of the pointer value.
Definition: Address.h:44
A helper class that allows the use of isa/cast/dyncast to detect TagType objects of structs/unions/classes.
Definition: Type.h:3544
Complex values, per C99 6.2.5p11.
Definition: Type.h:2087
StructorType getFromCtorType(CXXCtorType T)
Definition: CodeGenTypes.h:77
const T * getAs() const
Member-template getAs<specific type>.
Definition: Type.h:5675
TargetOptions & getTargetOpts() const
Retrieve the target options.
Pass it using the normal C aggregate rules for the ABI, potentially introducing extra copies and passing some or all of it in registers.
Definition: CGCXXABI.h:117
void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit)
EmitStoreOfComplex - Store a complex number into the specified l-value.
static llvm::Value * tryRemoveRetainOfSelf(CodeGenFunction &CGF, llvm::Value *result)
If this is a +1 of the value of an immutable 'self', remove it.
Definition: CGCall.cpp:2244
Implements C++ ABI-specific code generation functions.
Definition: CGCXXABI.h:43
This class organizes the cross-module state that is used while lowering AST types to LLVM types...
Definition: CodeGenTypes.h:120
Expand - Only valid for aggregate argument types.
Address getAddress() const
Definition: CGValue.h:562
const CGFunctionInfo & arrangeMSMemberPointerThunk(const CXXMethodDecl *MD)
Arrange a thunk that takes 'this' as the first parameter followed by varargs.
Definition: CGCall.cpp:371
Base for LValueReferenceType and RValueReferenceType.
Definition: Type.h:2287
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or because it is the injected-class-name type of a class template or class template partial specialization.
Definition: Type.cpp:1522
bool isTrivial() const
Whether this function is "trivial" in some specialized C++ senses.
Definition: Decl.h:1759
static std::unique_ptr< TypeExpansion > getTypeExpansion(QualType Ty, const ASTContext &Context)
Definition: CGCall.cpp:661
bool isRestrictQualified() const
Determine whether this type is restrict-qualified.
Definition: Type.h:5153
bool isComplex() const
Definition: CGValue.h:52
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
Definition: CGStmt.cpp:367
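A minimal sketch, assuming a CodeGenFunction &CGF; the block name is hypothetical:
    llvm::BasicBlock *ContBB = CGF.createBasicBlock("cont");
    // ... emit a branch to ContBB from the current block ...
    CGF.EmitBlock(ContBB); // subsequent code is emitted into ContBB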
QualType getTagDeclType(const TagDecl *Decl) const
Return the unique reference to the type for the specified TagDecl (struct/union/class/enum) decl...
Represents a base class of a C++ class.
Definition: DeclCXX.h:157
llvm::MDNode * getNoObjCARCExceptionsMetadata()
Pass it on the stack using its defined layout.
Definition: CGCXXABI.h:122
uint64_t getPointerWidth(unsigned AddrSpace) const
Return the width of pointers on this target, for the specified address space.
llvm::Type * GetFunctionTypeForVTable(GlobalDecl GD)
GetFunctionTypeForVTable - Get the LLVM function type for use in a vtable, given a CXXMethodDecl...
Definition: CGCall.cpp:1407
Defines the C++ Decl subclasses, other than those for templates (found in DeclTemplate.h) and friends (in DeclFriend.h).
Represents a C++ struct/union/class.
Definition: DeclCXX.h:285
TargetCXXABI getCXXABI() const
Get the C++ ABI currently in use.
virtual const CXXRecordDecl * getThisArgumentTypeForMethod(const CXXMethodDecl *MD)
Get the type of the implicit "this" parameter used by a method.
Definition: CGCXXABI.h:303
bool isUsingInAlloca() const
Returns whether we're using an inalloca struct to pass arguments in memory.
Definition: CGCall.h:124
llvm::iterator_range< specific_attr_iterator< T > > specific_attrs() const
Definition: DeclBase.h:482
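A minimal iteration sketch over one attribute kind, assuming a const Decl *D; AnnotateAttr is just one example instantiation of T:
    for (const auto *A : D->specific_attrs<AnnotateAttr>()) {
      // each A is an AnnotateAttr attached to D
    }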
llvm::Type * ConvertType(QualType T)
void EmitFunctionProlog(const CGFunctionInfo &FI, llvm::Function *Fn, const FunctionArgList &Args)
EmitFunctionProlog - Emit the target specific LLVM code to load the arguments for the given function...
Definition: CGCall.cpp:1813
CallingConv getDefaultCallingConvention(bool isVariadic, bool IsCXXMethod) const
Retrieves the default calling convention for the current target.
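A minimal sketch, assuming an ASTContext &Context is in scope:
    CallingConv CC =
        Context.getDefaultCallingConvention(/*isVariadic=*/false,
                                            /*IsCXXMethod=*/true);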
virtual RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const =0
Returns how an argument of the given record type should be passed.
LValue EmitLValue(const Expr *E)
EmitLValue - Emit code to compute a designator that specifies the location of the expression...
Definition: CGExpr.cpp:944
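EmitLValue pairs naturally with EmitLoadOfLValue above; a minimal sketch assuming a CodeGenFunction &CGF and an Expr *E:
    LValue LV = CGF.EmitLValue(E);
    RValue RV = CGF.EmitLoadOfLValue(LV, E->getExprLoc());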
Address ReturnValue
ReturnValue - The temporary alloca to hold the return value.
llvm::Instruction * getStackBase() const
Definition: CGCall.h:119
This class is used for builtin types like 'int'.
Definition: Type.h:2011
Address getAggregateAddress() const
getAggregateAddress() - Return the Address of the aggregate.
Definition: CGValue.h:70
const TargetInfo & getTarget() const
Definition: CodeGenTypes.h:174
const T * getTypePtr() const
Retrieve the underlying type pointer, which refers to a canonical type.
Definition: CanonicalType.h:70
RValue EmitLoadOfLValue(LValue V, SourceLocation Loc)
EmitLoadOfLValue - Given an expression that represents a value lvalue, this method emits the address ...
Definition: CGExpr.cpp:1415
Copying closure variant of a ctor.
Definition: ABI.h:29
Defines the clang::TargetInfo interface.
const_arg_iterator arg_begin() const
static CallingConv getCallingConventionForDecl(const Decl *D, bool IsWindows)
Definition: CGCall.cpp:147
bool getHasRegParm() const
Definition: Type.h:2915
const CGFunctionInfo & arrangeMSCtorClosure(const CXXConstructorDecl *CD, CXXCtorType CT)
Definition: CGCall.cpp:381
CanQualType IntTy
Definition: ASTContext.h:889
bool isMicrosoftABI() const
Returns whether this is really a Win64 ABI va_arg expression.
Definition: Expr.h:3656
unsigned getTargetAddressSpace(QualType T) const
Definition: ASTContext.h:2180
static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF, const CallArgList &CallArgs)
Definition: CGCall.cpp:2638
const ABIInfo & getABIInfo() const
Definition: CodeGenTypes.h:173
static RValue get(llvm::Value *V)
Definition: CGValue.h:85
bool isVolatileQualified() const
Definition: CGValue.h:55
const CGFunctionInfo & arrangeFreeFunctionCall(const CallArgList &Args, const FunctionType *Ty, bool ChainCall)
Figure out the rules for calling a function with the given formal type using the given arguments...
Definition: CGCall.cpp:444
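A minimal sketch, assuming a CodeGenModule &CGM, a populated CallArgList Args, and a const FunctionType *FnType (all hypothetical names):
    const CGFunctionInfo &FI =
        CGM.getTypes().arrangeFreeFunctionCall(Args, FnType,
                                               /*ChainCall=*/false);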
CXXCtorType toCXXCtorType(StructorType T)
Definition: CodeGenTypes.h:65
bool getIndirectRealign() const
static RValue getAggregate(Address addr, bool isVolatile=false)
Definition: CGValue.h:106
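Together with RValue::get above, a minimal sketch of the two common wrappers, assuming an llvm::Value *V and an Address Addr:
    RValue Scalar = RValue::get(V);           // wrap a single llvm::Value*
    RValue Agg = RValue::getAggregate(Addr);  // wrap an aggregate's address
    llvm::Value *Raw = Scalar.getScalarVal(); // unwrap the scalar again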
bool isUnresolvedExceptionSpec(ExceptionSpecificationType ESpecType)
LValue - This represents an lvalue reference.
Definition: CGValue.h:152
unsigned getNumVBases() const
Retrieves the number of virtual base classes of this class.
Definition: DeclCXX.h:728
Information for lazily generating a cleanup.
Definition: EHScopeStack.h:144
bool hasSignedIntegerRepresentation() const
Determine whether this type has a signed integer representation of some sort, e.g., it is a signed integer type or a vector.
Definition: Type.cpp:1730
void ConstructAttributeList(StringRef Name, const CGFunctionInfo &Info, CGCalleeInfo CalleeInfo, AttributeListType &PAL, unsigned &CallingConv, bool AttrOnCallSite)
Get the LLVM attributes and calling convention to use for a particular function type.
Definition: CGCall.cpp:1434
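A hedged sketch of the typical calling pattern, assuming CGM, FI, and an llvm::Function *Fn are in scope, and assuming AttributeListType names CodeGenModule's vector-of-attribute-sets typedef:
    CodeGenModule::AttributeListType AttrList;
    unsigned CallingConv;
    CGM.ConstructAttributeList(Fn->getName(), FI, CGCalleeInfo(), AttrList,
                               CallingConv, /*AttrOnCallSite=*/false);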
CallArgList - Type for representing both the value and type of arguments in a call.
Definition: CGCall.h:56
Represents the canonical version of C arrays with a specified constant size.
Definition: Type.h:2480
CGCalleeInfo - Class to encapsulate the information about a callee to be used during the generation o...
A class which abstracts out some details necessary for making a call.
Definition: Type.h:2872
bool isIntegerType() const
isIntegerType() does not include complex integers (a GCC extension).
Definition: Type.h:5568
Expr * IgnoreParens() LLVM_READONLY
IgnoreParens - Ignore parentheses.
Definition: Expr.cpp:2433
AttributeList - Represents a syntactic attribute.
Definition: AttributeList.h:72
Qualifiers getQualifiers() const
Retrieve the set of qualifiers applied to this type.
Definition: Type.h:5116
const CGFunctionInfo & arrangeCXXConstructorCall(const CallArgList &Args, const CXXConstructorDecl *D, CXXCtorType CtorKind, unsigned ExtraArgs)
Arrange a call to a C++ method, passing the given arguments.
Definition: CGCall.cpp:260
llvm::FunctionType * GetFunctionType(const CGFunctionInfo &Info)
GetFunctionType - Get the LLVM function type for the given function info.
Definition: CGCall.cpp:1293
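Tying this together with arrangeFunctionDeclaration above, a minimal sketch for lowering a function declaration's type, assuming a CodeGenModule &CGM and a const FunctionDecl *FD:
    const CGFunctionInfo &FI = CGM.getTypes().arrangeFunctionDeclaration(FD);
    llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);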