clang  3.7.0
CGCall.cpp
1 //===--- CGCall.cpp - Encapsulate calling convention details --------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // These classes wrap the information about a call or function
11 // definition used to handle ABI compliancy.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "CGCall.h"
16 #include "ABIInfo.h"
17 #include "CGCXXABI.h"
18 #include "CodeGenFunction.h"
19 #include "CodeGenModule.h"
20 #include "TargetInfo.h"
21 #include "clang/AST/Decl.h"
22 #include "clang/AST/DeclCXX.h"
23 #include "clang/AST/DeclObjC.h"
24 #include "clang/Basic/TargetInfo.h"
27 #include "llvm/ADT/StringExtras.h"
28 #include "llvm/IR/Attributes.h"
29 #include "llvm/IR/CallSite.h"
30 #include "llvm/IR/DataLayout.h"
31 #include "llvm/IR/InlineAsm.h"
32 #include "llvm/IR/Intrinsics.h"
33 #include "llvm/IR/IntrinsicInst.h"
34 #include "llvm/Transforms/Utils/Local.h"
35 using namespace clang;
36 using namespace CodeGen;
37 
38 /***/
39 
40 static unsigned ClangCallConvToLLVMCallConv(CallingConv CC) {
41  switch (CC) {
42  default: return llvm::CallingConv::C;
43  case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
44  case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
45  case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
46  case CC_X86_64Win64: return llvm::CallingConv::X86_64_Win64;
47  case CC_X86_64SysV: return llvm::CallingConv::X86_64_SysV;
48  case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
49  case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
50  case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI;
51  // TODO: Add support for __pascal to LLVM.
52  case CC_X86Pascal: return llvm::CallingConv::C;
53  // TODO: Add support for __vectorcall to LLVM.
54  case CC_X86VectorCall: return llvm::CallingConv::X86_VectorCall;
55  case CC_SpirFunction: return llvm::CallingConv::SPIR_FUNC;
56  case CC_SpirKernel: return llvm::CallingConv::SPIR_KERNEL;
57  }
58 }
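// [Illustrative sketch added for exposition; not part of the original file.]
// How this mapping surfaces in IR, assuming an x86 target:
//
//   void callee(int) __attribute__((fastcall));
//
// is parsed with CC_X86FastCall, which the switch above turns into
// llvm::CallingConv::X86_FastCall, so the declaration and its call sites are
// emitted with the "x86_fastcallcc" convention in the textual IR.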
59 
60 /// Derives the 'this' type for codegen purposes, i.e. ignoring method
61 /// qualification.
62 /// FIXME: address space qualification?
63 static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD) {
64  QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
65  return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
66 }
67 
68 /// Returns the canonical formal type of the given C++ method.
69 static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
70  return MD->getType()->getCanonicalTypeUnqualified()
71            .getAs<FunctionProtoType>();
72 }
73 
74 /// Returns the "extra-canonicalized" return type, which discards
75 /// qualifiers on the return type. Codegen doesn't care about them,
76 /// and it makes ABI code a little easier to be able to assume that
77 /// all parameter and return types are top-level unqualified.
78 static CanQualType GetReturnType(QualType RetTy) {
79  return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
80 }
81 
82 /// Arrange the argument and result information for a value of the given
83 /// unprototyped freestanding function type.
84 const CGFunctionInfo &
85 CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
86  // When translating an unprototyped function type, always use a
87  // variadic type.
88  return arrangeLLVMFunctionInfo(FTNP->getReturnType().getUnqualifiedType(),
89  /*instanceMethod=*/false,
90  /*chainCall=*/false, None,
91  FTNP->getExtInfo(), RequiredArgs(0));
92 }
93 
94 /// Arrange the LLVM function layout for a value of the given function
95 /// type, on top of any implicit parameters already stored.
96 static const CGFunctionInfo &
97 arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod,
98                         SmallVectorImpl<CanQualType> &prefix,
99                         CanQual<FunctionProtoType> FTP) {
100  RequiredArgs required = RequiredArgs::forPrototypePlus(FTP, prefix.size());
101  // FIXME: Kill copy.
102  prefix.append(FTP->param_type_begin(), FTP->param_type_end());
103  CanQualType resultType = FTP->getReturnType().getUnqualifiedType();
104  return CGT.arrangeLLVMFunctionInfo(resultType, instanceMethod,
105  /*chainCall=*/false, prefix,
106  FTP->getExtInfo(), required);
107 }
108 
109 /// Arrange the argument and result information for a value of the
110 /// given freestanding function type.
111 const CGFunctionInfo &
112 CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP) {
113  SmallVector<CanQualType, 16> argTypes;
114  return ::arrangeLLVMFunctionInfo(*this, /*instanceMethod=*/false, argTypes,
115  FTP);
116 }
117 
118 static CallingConv getCallingConventionForDecl(const Decl *D, bool IsWindows) {
119  // Set the appropriate calling convention for the Function.
120  if (D->hasAttr<StdCallAttr>())
121  return CC_X86StdCall;
122 
123  if (D->hasAttr<FastCallAttr>())
124  return CC_X86FastCall;
125 
126  if (D->hasAttr<ThisCallAttr>())
127  return CC_X86ThisCall;
128 
129  if (D->hasAttr<VectorCallAttr>())
130  return CC_X86VectorCall;
131 
132  if (D->hasAttr<PascalAttr>())
133  return CC_X86Pascal;
134 
135  if (PcsAttr *PCS = D->getAttr<PcsAttr>())
136  return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);
137 
138  if (D->hasAttr<IntelOclBiccAttr>())
139  return CC_IntelOclBicc;
140 
141  if (D->hasAttr<MSABIAttr>())
142  return IsWindows ? CC_C : CC_X86_64Win64;
143 
144  if (D->hasAttr<SysVABIAttr>())
145  return IsWindows ? CC_X86_64SysV : CC_C;
146 
147  return CC_C;
148 }
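// [Illustrative sketch added for exposition; not part of the original file.]
// The ms_abi/sysv_abi cases above are relative to the host OS: the attribute
// only changes the convention when it differs from the platform default.
// For example, assuming an x86-64 Linux target:
//
//   void f(void) __attribute__((ms_abi));   // -> CC_X86_64Win64
//   void g(void) __attribute__((sysv_abi)); // -> CC_C (already the default)
//
// On a Windows target the results flip: f() stays CC_C and g() becomes
// CC_X86_64SysV.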
149 
150 /// Arrange the argument and result information for a call to an
151 /// unknown C++ non-static member function of the given abstract type.
152 /// (Zero value of RD means we don't have any meaningful "this" argument type,
153 /// so fall back to a generic pointer type).
154 /// The member function must be an ordinary function, i.e. not a
155 /// constructor or destructor.
156 const CGFunctionInfo &
157 CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
158  const FunctionProtoType *FTP) {
159  SmallVector<CanQualType, 16> argTypes;
160 
161  // Add the 'this' pointer.
162  if (RD)
163  argTypes.push_back(GetThisType(Context, RD));
164  else
165  argTypes.push_back(Context.VoidPtrTy);
166 
167  return ::arrangeLLVMFunctionInfo(
168  *this, true, argTypes,
169      FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
170 }
171 
172 /// Arrange the argument and result information for a declaration or
173 /// definition of the given C++ non-static member function. The
174 /// member function must be an ordinary function, i.e. not a
175 /// constructor or destructor.
176 const CGFunctionInfo &
177 CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
178  assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
179  assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");
180 
181  CanQual<FunctionProtoType> prototype = GetFormalType(MD);
182 
183  if (MD->isInstance()) {
184  // The abstract case is perfectly fine.
185  const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(MD);
186  return arrangeCXXMethodType(ThisType, prototype.getTypePtr());
187  }
188 
189  return arrangeFreeFunctionType(prototype);
190 }
191 
192 const CGFunctionInfo &
193 CodeGenTypes::arrangeCXXStructorDeclaration(const CXXMethodDecl *MD,
194  StructorType Type) {
195 
196  SmallVector<CanQualType, 16> argTypes;
197  argTypes.push_back(GetThisType(Context, MD->getParent()));
198 
199  GlobalDecl GD;
200  if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
201  GD = GlobalDecl(CD, toCXXCtorType(Type));
202  } else {
203  auto *DD = dyn_cast<CXXDestructorDecl>(MD);
204  GD = GlobalDecl(DD, toCXXDtorType(Type));
205  }
206 
207  CanQual<FunctionProtoType> FTP = GetFormalType(MD);
208 
209  // Add the formal parameters.
210  argTypes.append(FTP->param_type_begin(), FTP->param_type_end());
211 
212  TheCXXABI.buildStructorSignature(MD, Type, argTypes);
213 
214  RequiredArgs required =
215  (MD->isVariadic() ? RequiredArgs(argTypes.size()) : RequiredArgs::All);
216 
217  FunctionType::ExtInfo extInfo = FTP->getExtInfo();
218  CanQualType resultType = TheCXXABI.HasThisReturn(GD)
219  ? argTypes.front()
220  : TheCXXABI.hasMostDerivedReturn(GD)
221  ? CGM.getContext().VoidPtrTy
222  : Context.VoidTy;
223  return arrangeLLVMFunctionInfo(resultType, /*instanceMethod=*/true,
224  /*chainCall=*/false, argTypes, extInfo,
225  required);
226 }
227 
228 /// Arrange a call to a C++ method, passing the given arguments.
229 const CGFunctionInfo &
230 CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args,
231  const CXXConstructorDecl *D,
232  CXXCtorType CtorKind,
233  unsigned ExtraArgs) {
234  // FIXME: Kill copy.
235  SmallVector<CanQualType, 16> ArgTypes;
236  for (const auto &Arg : args)
237  ArgTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
238 
239  CanQual<FunctionProtoType> FPT = GetFormalType(D);
240  RequiredArgs Required = RequiredArgs::forPrototypePlus(FPT, 1 + ExtraArgs);
241  GlobalDecl GD(D, CtorKind);
242  CanQualType ResultType = TheCXXABI.HasThisReturn(GD)
243  ? ArgTypes.front()
244  : TheCXXABI.hasMostDerivedReturn(GD)
245  ? CGM.getContext().VoidPtrTy
246  : Context.VoidTy;
247 
248  FunctionType::ExtInfo Info = FPT->getExtInfo();
249  return arrangeLLVMFunctionInfo(ResultType, /*instanceMethod=*/true,
250  /*chainCall=*/false, ArgTypes, Info,
251  Required);
252 }
253 
254 /// Arrange the argument and result information for the declaration or
255 /// definition of the given function.
256 const CGFunctionInfo &
257 CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
258  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
259  if (MD->isInstance())
260  return arrangeCXXMethodDeclaration(MD);
261 
262  CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();
263 
264  assert(isa<FunctionType>(FTy));
265 
266  // When declaring a function without a prototype, always use a
267  // non-variadic type.
268  if (isa<FunctionNoProtoType>(FTy)) {
269  CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>();
270  return arrangeLLVMFunctionInfo(
271  noProto->getReturnType(), /*instanceMethod=*/false,
272  /*chainCall=*/false, None, noProto->getExtInfo(), RequiredArgs::All);
273  }
274 
275  assert(isa<FunctionProtoType>(FTy));
276  return arrangeFreeFunctionType(FTy.getAs<FunctionProtoType>());
277 }
278 
279 /// Arrange the argument and result information for the declaration or
280 /// definition of an Objective-C method.
281 const CGFunctionInfo &
282 CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
283  // It happens that this is the same as a call with no optional
284  // arguments, except also using the formal 'self' type.
285  return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
286 }
287 
288 /// Arrange the argument and result information for the function type
289 /// through which to perform a send to the given Objective-C method,
290 /// using the given receiver type. The receiver type is not always
291 /// the 'self' type of the method or even an Objective-C pointer type.
292 /// This is *not* the right method for actually performing such a
293 /// message send, due to the possibility of optional arguments.
294 const CGFunctionInfo &
295 CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
296  QualType receiverType) {
297  SmallVector<CanQualType, 16> argTys;
298  argTys.push_back(Context.getCanonicalParamType(receiverType));
299  argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
300  // FIXME: Kill copy?
301  for (const auto *I : MD->params()) {
302  argTys.push_back(Context.getCanonicalParamType(I->getType()));
303  }
304 
305  FunctionType::ExtInfo einfo;
306  bool IsWindows = getContext().getTargetInfo().getTriple().isOSWindows();
307  einfo = einfo.withCallingConv(getCallingConventionForDecl(MD, IsWindows));
308 
309  if (getContext().getLangOpts().ObjCAutoRefCount &&
310  MD->hasAttr<NSReturnsRetainedAttr>())
311  einfo = einfo.withProducesResult(true);
312 
313  RequiredArgs required =
314  (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);
315 
316  return arrangeLLVMFunctionInfo(
317  GetReturnType(MD->getReturnType()), /*instanceMethod=*/false,
318  /*chainCall=*/false, argTys, einfo, required);
319 }
320 
321 const CGFunctionInfo &
322 CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
323  // FIXME: Do we need to handle ObjCMethodDecl?
324  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
325 
326  if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
327  return arrangeCXXStructorDeclaration(CD, getFromCtorType(GD.getCtorType()));
328 
329  if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
330  return arrangeCXXStructorDeclaration(DD, getFromDtorType(GD.getDtorType()));
331 
332  return arrangeFunctionDeclaration(FD);
333 }
334 
335 /// Arrange a thunk that takes 'this' as the first parameter followed by
336 /// varargs. Return a void pointer, regardless of the actual return type.
337 /// The body of the thunk will end in a musttail call to a function of the
338 /// correct type, and the caller will bitcast the function to the correct
339 /// prototype.
340 const CGFunctionInfo &
341 CodeGenTypes::arrangeMSMemberPointerThunk(const CXXMethodDecl *MD) {
342  assert(MD->isVirtual() && "only virtual memptrs have thunks");
343  CanQual<FunctionProtoType> FTP = GetFormalType(MD);
344  CanQualType ArgTys[] = { GetThisType(Context, MD->getParent()) };
345  return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/false,
346  /*chainCall=*/false, ArgTys,
347  FTP->getExtInfo(), RequiredArgs(1));
348 }
349 
350 const CGFunctionInfo &
351 CodeGenTypes::arrangeMSCtorClosure(const CXXConstructorDecl *CD,
352  CXXCtorType CT) {
353  assert(CT == Ctor_CopyingClosure || CT == Ctor_DefaultClosure);
354 
355  CanQual<FunctionProtoType> FTP = GetFormalType(CD);
356  SmallVector<CanQualType, 2> ArgTys;
357  const CXXRecordDecl *RD = CD->getParent();
358  ArgTys.push_back(GetThisType(Context, RD));
359  if (CT == Ctor_CopyingClosure)
360  ArgTys.push_back(*FTP->param_type_begin());
361  if (RD->getNumVBases() > 0)
362  ArgTys.push_back(Context.IntTy);
363  CallingConv CC = Context.getDefaultCallingConvention(
364  /*IsVariadic=*/false, /*IsCXXMethod=*/true);
365  return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/true,
366  /*chainCall=*/false, ArgTys,
367  FunctionType::ExtInfo(CC), RequiredArgs::All);
368 }
369 
370 /// Arrange a call as unto a free function, except possibly with an
371 /// additional number of formal parameters considered required.
372 static const CGFunctionInfo &
373 arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
374  CodeGenModule &CGM,
375  const CallArgList &args,
376  const FunctionType *fnType,
377  unsigned numExtraRequiredArgs,
378  bool chainCall) {
379  assert(args.size() >= numExtraRequiredArgs);
380 
381  // In most cases, there are no optional arguments.
382  RequiredArgs required = RequiredArgs::All;
383 
384  // If we have a variadic prototype, the required arguments are the
385  // extra prefix plus the arguments in the prototype.
386  if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
387  if (proto->isVariadic())
388  required = RequiredArgs(proto->getNumParams() + numExtraRequiredArgs);
389 
390  // If we don't have a prototype at all, but we're supposed to
391  // explicitly use the variadic convention for unprototyped calls,
392  // treat all of the arguments as required but preserve the nominal
393  // possibility of variadics.
394  } else if (CGM.getTargetCodeGenInfo()
395  .isNoProtoCallVariadic(args,
396  cast<FunctionNoProtoType>(fnType))) {
397  required = RequiredArgs(args.size());
398  }
399 
400  // FIXME: Kill copy.
401  SmallVector<CanQualType, 16> argTypes;
402  for (const auto &arg : args)
403  argTypes.push_back(CGT.getContext().getCanonicalParamType(arg.Ty));
404  return CGT.arrangeLLVMFunctionInfo(GetReturnType(fnType->getReturnType()),
405  /*instanceMethod=*/false, chainCall,
406  argTypes, fnType->getExtInfo(), required);
407 }
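// [Illustrative sketch added for exposition; not part of the original file.]
// Example of the RequiredArgs computation above for a variadic callee:
//
//   int printf(const char *, ...);
//   printf("%d %d\n", 1, 2);   // args.size() == 3, numExtraRequiredArgs == 0
//
// The prototype has one parameter, so required becomes RequiredArgs(1): only
// the format string is formally required and the two trailing arguments are
// passed under the variadic rules of the ABI.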
408 
409 /// Figure out the rules for calling a function with the given formal
410 /// type using the given arguments. The arguments are necessary
411 /// because the function might be unprototyped, in which case it's
412 /// target-dependent in crazy ways.
413 const CGFunctionInfo &
414 CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
415  const FunctionType *fnType,
416  bool chainCall) {
417  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType,
418  chainCall ? 1 : 0, chainCall);
419 }
420 
421 /// A block function call is essentially a free-function call with an
422 /// extra implicit argument.
423 const CGFunctionInfo &
424 CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args,
425  const FunctionType *fnType) {
426  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 1,
427  /*chainCall=*/false);
428 }
429 
430 const CGFunctionInfo &
431 CodeGenTypes::arrangeFreeFunctionCall(QualType resultType,
432  const CallArgList &args,
433  FunctionType::ExtInfo info,
434  RequiredArgs required) {
435  // FIXME: Kill copy.
436  SmallVector<CanQualType, 16> argTypes;
437  for (const auto &Arg : args)
438  argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
439  return arrangeLLVMFunctionInfo(
440  GetReturnType(resultType), /*instanceMethod=*/false,
441  /*chainCall=*/false, argTypes, info, required);
442 }
443 
444 /// Arrange a call to a C++ method, passing the given arguments.
445 const CGFunctionInfo &
446 CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
447  const FunctionProtoType *FPT,
448  RequiredArgs required) {
449  // FIXME: Kill copy.
450  SmallVector<CanQualType, 16> argTypes;
451  for (const auto &Arg : args)
452  argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
453 
454  FunctionType::ExtInfo info = FPT->getExtInfo();
455  return arrangeLLVMFunctionInfo(
456  GetReturnType(FPT->getReturnType()), /*instanceMethod=*/true,
457  /*chainCall=*/false, argTypes, info, required);
458 }
459 
460 const CGFunctionInfo &CodeGenTypes::arrangeFreeFunctionDeclaration(
461  QualType resultType, const FunctionArgList &args,
462  const FunctionType::ExtInfo &info, bool isVariadic) {
463  // FIXME: Kill copy.
464  SmallVector<CanQualType, 16> argTypes;
465  for (auto Arg : args)
466  argTypes.push_back(Context.getCanonicalParamType(Arg->getType()));
467 
468  RequiredArgs required =
469  (isVariadic ? RequiredArgs(args.size()) : RequiredArgs::All);
470  return arrangeLLVMFunctionInfo(
471  GetReturnType(resultType), /*instanceMethod=*/false,
472  /*chainCall=*/false, argTypes, info, required);
473 }
474 
475 const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
476  return arrangeLLVMFunctionInfo(
477  getContext().VoidTy, /*instanceMethod=*/false, /*chainCall=*/false,
478  None, FunctionType::ExtInfo(), RequiredArgs::All);
479 }
480 
481 /// Arrange the argument and result information for an abstract value
482 /// of a given function type. This is the method which all of the
483 /// above functions ultimately defer to.
484 const CGFunctionInfo &
485 CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
486  bool instanceMethod,
487  bool chainCall,
488  ArrayRef<CanQualType> argTypes,
489  FunctionType::ExtInfo info,
490  RequiredArgs required) {
491  assert(std::all_of(argTypes.begin(), argTypes.end(),
492  std::mem_fun_ref(&CanQualType::isCanonicalAsParam)));
493 
494  unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());
495 
496  // Lookup or create unique function info.
497  llvm::FoldingSetNodeID ID;
498  CGFunctionInfo::Profile(ID, instanceMethod, chainCall, info, required,
499  resultType, argTypes);
500 
501  void *insertPos = nullptr;
502  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
503  if (FI)
504  return *FI;
505 
506  // Construct the function info. We co-allocate the ArgInfos.
507  FI = CGFunctionInfo::create(CC, instanceMethod, chainCall, info,
508  resultType, argTypes, required);
509  FunctionInfos.InsertNode(FI, insertPos);
510 
511  bool inserted = FunctionsBeingProcessed.insert(FI).second;
512  (void)inserted;
513  assert(inserted && "Recursively being processed?");
514 
515  // Compute ABI information.
516  getABIInfo().computeInfo(*FI);
517 
518  // Loop over all of the computed argument and return value info. If any of
519  // them are direct or extend without a specified coerce type, specify the
520  // default now.
521  ABIArgInfo &retInfo = FI->getReturnInfo();
522  if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == nullptr)
523  retInfo.setCoerceToType(ConvertType(FI->getReturnType()));
524 
525  for (auto &I : FI->arguments())
526  if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr)
527  I.info.setCoerceToType(ConvertType(I.type));
528 
529  bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
530  assert(erased && "Not in set?");
531 
532  return *FI;
533 }
534 
535 CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
536  bool instanceMethod,
537  bool chainCall,
538  const FunctionType::ExtInfo &info,
539  CanQualType resultType,
540  ArrayRef<CanQualType> argTypes,
541  RequiredArgs required) {
542  void *buffer = operator new(sizeof(CGFunctionInfo) +
543  sizeof(ArgInfo) * (argTypes.size() + 1));
544  CGFunctionInfo *FI = new(buffer) CGFunctionInfo();
545  FI->CallingConvention = llvmCC;
546  FI->EffectiveCallingConvention = llvmCC;
547  FI->ASTCallingConvention = info.getCC();
548  FI->InstanceMethod = instanceMethod;
549  FI->ChainCall = chainCall;
550  FI->NoReturn = info.getNoReturn();
551  FI->ReturnsRetained = info.getProducesResult();
552  FI->Required = required;
553  FI->HasRegParm = info.getHasRegParm();
554  FI->RegParm = info.getRegParm();
555  FI->ArgStruct = nullptr;
556  FI->NumArgs = argTypes.size();
557  FI->getArgsBuffer()[0].type = resultType;
558  for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
559  FI->getArgsBuffer()[i + 1].type = argTypes[i];
560  return FI;
561 }
562 
563 /***/
564 
565 namespace {
566 // ABIArgInfo::Expand implementation.
567 
568 // Specifies the way QualType passed as ABIArgInfo::Expand is expanded.
569 struct TypeExpansion {
570  enum TypeExpansionKind {
571  // Elements of constant arrays are expanded recursively.
572  TEK_ConstantArray,
573  // Record fields are expanded recursively (but if record is a union, only
574  // the field with the largest size is expanded).
575  TEK_Record,
576  // For complex types, real and imaginary parts are expanded recursively.
577  TEK_Complex,
578  // All other types are not expandable.
579  TEK_None
580  };
581 
582  const TypeExpansionKind Kind;
583 
584  TypeExpansion(TypeExpansionKind K) : Kind(K) {}
585  virtual ~TypeExpansion() {}
586 };
587 
588 struct ConstantArrayExpansion : TypeExpansion {
589  QualType EltTy;
590  uint64_t NumElts;
591 
592  ConstantArrayExpansion(QualType EltTy, uint64_t NumElts)
593  : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {}
594  static bool classof(const TypeExpansion *TE) {
595  return TE->Kind == TEK_ConstantArray;
596  }
597 };
598 
599 struct RecordExpansion : TypeExpansion {
600  SmallVector<const CXXBaseSpecifier *, 1> Bases;
601 
602  SmallVector<const FieldDecl *, 1> Fields;
603 
604  RecordExpansion(SmallVector<const CXXBaseSpecifier *, 1> &&Bases,
605                  SmallVector<const FieldDecl *, 1> &&Fields)
606  : TypeExpansion(TEK_Record), Bases(Bases), Fields(Fields) {}
607  static bool classof(const TypeExpansion *TE) {
608  return TE->Kind == TEK_Record;
609  }
610 };
611 
612 struct ComplexExpansion : TypeExpansion {
613  QualType EltTy;
614 
615  ComplexExpansion(QualType EltTy) : TypeExpansion(TEK_Complex), EltTy(EltTy) {}
616  static bool classof(const TypeExpansion *TE) {
617  return TE->Kind == TEK_Complex;
618  }
619 };
620 
621 struct NoExpansion : TypeExpansion {
622  NoExpansion() : TypeExpansion(TEK_None) {}
623  static bool classof(const TypeExpansion *TE) {
624  return TE->Kind == TEK_None;
625  }
626 };
627 } // namespace
628 
629 static std::unique_ptr<TypeExpansion>
630 getTypeExpansion(QualType Ty, const ASTContext &Context) {
631  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
632  return llvm::make_unique<ConstantArrayExpansion>(
633  AT->getElementType(), AT->getSize().getZExtValue());
634  }
635  if (const RecordType *RT = Ty->getAs<RecordType>()) {
636  SmallVector<const CXXBaseSpecifier *, 1> Bases;
637  SmallVector<const FieldDecl *, 1> Fields;
638  const RecordDecl *RD = RT->getDecl();
639  assert(!RD->hasFlexibleArrayMember() &&
640  "Cannot expand structure with flexible array.");
641  if (RD->isUnion()) {
642  // Unions can be here only in degenerate cases - all the fields are the same
643  // after flattening. Thus we have to use the "largest" field.
644  const FieldDecl *LargestFD = nullptr;
645  CharUnits UnionSize = CharUnits::Zero();
646 
647  for (const auto *FD : RD->fields()) {
648  // Skip zero length bitfields.
649  if (FD->isBitField() && FD->getBitWidthValue(Context) == 0)
650  continue;
651  assert(!FD->isBitField() &&
652  "Cannot expand structure with bit-field members.");
653  CharUnits FieldSize = Context.getTypeSizeInChars(FD->getType());
654  if (UnionSize < FieldSize) {
655  UnionSize = FieldSize;
656  LargestFD = FD;
657  }
658  }
659  if (LargestFD)
660  Fields.push_back(LargestFD);
661  } else {
662  if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
663  assert(!CXXRD->isDynamicClass() &&
664  "cannot expand vtable pointers in dynamic classes");
665  for (const CXXBaseSpecifier &BS : CXXRD->bases())
666  Bases.push_back(&BS);
667  }
668 
669  for (const auto *FD : RD->fields()) {
670  // Skip zero length bitfields.
671  if (FD->isBitField() && FD->getBitWidthValue(Context) == 0)
672  continue;
673  assert(!FD->isBitField() &&
674  "Cannot expand structure with bit-field members.");
675  Fields.push_back(FD);
676  }
677  }
678  return llvm::make_unique<RecordExpansion>(std::move(Bases),
679  std::move(Fields));
680  }
681  if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
682  return llvm::make_unique<ComplexExpansion>(CT->getElementType());
683  }
684  return llvm::make_unique<NoExpansion>();
685 }
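// [Illustrative sketch added for exposition; not part of the original file.]
// Example of how a type classified as ABIArgInfo::Expand is decomposed:
//
//   struct Pt  { int X; float Y; };
//   struct Box { Pt Min, Max; };
//
// getTypeExpansion(Box) yields a RecordExpansion over {Min, Max}; each Pt
// expands in turn to its scalar fields, so getExpansionSize(Box) == 4 and
// getExpandedTypes() emits { i32, float, i32, float } for the IR parameters.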
686 
687 static int getExpansionSize(QualType Ty, const ASTContext &Context) {
688  auto Exp = getTypeExpansion(Ty, Context);
689  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
690  return CAExp->NumElts * getExpansionSize(CAExp->EltTy, Context);
691  }
692  if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
693  int Res = 0;
694  for (auto BS : RExp->Bases)
695  Res += getExpansionSize(BS->getType(), Context);
696  for (auto FD : RExp->Fields)
697  Res += getExpansionSize(FD->getType(), Context);
698  return Res;
699  }
700  if (isa<ComplexExpansion>(Exp.get()))
701  return 2;
702  assert(isa<NoExpansion>(Exp.get()));
703  return 1;
704 }
705 
706 void
707 CodeGenTypes::getExpandedTypes(QualType Ty,
708                                SmallVectorImpl<llvm::Type *>::iterator &TI) {
709  auto Exp = getTypeExpansion(Ty, Context);
710  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
711  for (int i = 0, n = CAExp->NumElts; i < n; i++) {
712  getExpandedTypes(CAExp->EltTy, TI);
713  }
714  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
715  for (auto BS : RExp->Bases)
716  getExpandedTypes(BS->getType(), TI);
717  for (auto FD : RExp->Fields)
718  getExpandedTypes(FD->getType(), TI);
719  } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
720  llvm::Type *EltTy = ConvertType(CExp->EltTy);
721  *TI++ = EltTy;
722  *TI++ = EltTy;
723  } else {
724  assert(isa<NoExpansion>(Exp.get()));
725  *TI++ = ConvertType(Ty);
726  }
727 }
728 
729 void CodeGenFunction::ExpandTypeFromArgs(
730     QualType Ty, LValue LV, SmallVectorImpl<llvm::Argument *>::iterator &AI) {
731  assert(LV.isSimple() &&
732  "Unexpected non-simple lvalue during struct expansion.");
733 
734  auto Exp = getTypeExpansion(Ty, getContext());
735  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
736  for (int i = 0, n = CAExp->NumElts; i < n; i++) {
737  llvm::Value *EltAddr =
738  Builder.CreateConstGEP2_32(nullptr, LV.getAddress(), 0, i);
739  LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
740  ExpandTypeFromArgs(CAExp->EltTy, LV, AI);
741  }
742  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
743  llvm::Value *This = LV.getAddress();
744  for (const CXXBaseSpecifier *BS : RExp->Bases) {
745  // Perform a single step derived-to-base conversion.
746  llvm::Value *Base =
747  GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
748  /*NullCheckValue=*/false, SourceLocation());
749  LValue SubLV = MakeAddrLValue(Base, BS->getType());
750 
751  // Recurse onto bases.
752  ExpandTypeFromArgs(BS->getType(), SubLV, AI);
753  }
754  for (auto FD : RExp->Fields) {
755  // FIXME: What are the right qualifiers here?
756  LValue SubLV = EmitLValueForField(LV, FD);
757  ExpandTypeFromArgs(FD->getType(), SubLV, AI);
758  }
759  } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
760  llvm::Value *RealAddr =
761  Builder.CreateStructGEP(nullptr, LV.getAddress(), 0, "real");
762  EmitStoreThroughLValue(RValue::get(*AI++),
763  MakeAddrLValue(RealAddr, CExp->EltTy));
764  llvm::Value *ImagAddr =
765  Builder.CreateStructGEP(nullptr, LV.getAddress(), 1, "imag");
766  EmitStoreThroughLValue(RValue::get(*AI++),
767  MakeAddrLValue(ImagAddr, CExp->EltTy));
768  } else {
769  assert(isa<NoExpansion>(Exp.get()));
770  EmitStoreThroughLValue(RValue::get(*AI++), LV);
771  }
772 }
773 
774 void CodeGenFunction::ExpandTypeToArgs(
775  QualType Ty, RValue RV, llvm::FunctionType *IRFuncTy,
776  SmallVectorImpl<llvm::Value *> &IRCallArgs, unsigned &IRCallArgPos) {
777  auto Exp = getTypeExpansion(Ty, getContext());
778  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
779  llvm::Value *Addr = RV.getAggregateAddr();
780  for (int i = 0, n = CAExp->NumElts; i < n; i++) {
781  llvm::Value *EltAddr = Builder.CreateConstGEP2_32(nullptr, Addr, 0, i);
782  RValue EltRV =
783  convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation());
784  ExpandTypeToArgs(CAExp->EltTy, EltRV, IRFuncTy, IRCallArgs, IRCallArgPos);
785  }
786  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
787  llvm::Value *This = RV.getAggregateAddr();
788  for (const CXXBaseSpecifier *BS : RExp->Bases) {
789  // Perform a single step derived-to-base conversion.
790  llvm::Value *Base =
791  GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
792  /*NullCheckValue=*/false, SourceLocation());
793  RValue BaseRV = RValue::getAggregate(Base);
794 
795  // Recurse onto bases.
796  ExpandTypeToArgs(BS->getType(), BaseRV, IRFuncTy, IRCallArgs,
797  IRCallArgPos);
798  }
799 
800  LValue LV = MakeAddrLValue(This, Ty);
801  for (auto FD : RExp->Fields) {
802  RValue FldRV = EmitRValueForField(LV, FD, SourceLocation());
803  ExpandTypeToArgs(FD->getType(), FldRV, IRFuncTy, IRCallArgs,
804  IRCallArgPos);
805  }
806  } else if (isa<ComplexExpansion>(Exp.get())) {
807  ComplexPairTy CV = RV.getComplexVal();
808  IRCallArgs[IRCallArgPos++] = CV.first;
809  IRCallArgs[IRCallArgPos++] = CV.second;
810  } else {
811  assert(isa<NoExpansion>(Exp.get()));
812  assert(RV.isScalar() &&
813  "Unexpected non-scalar rvalue during struct expansion.");
814 
815  // Insert a bitcast as needed.
816  llvm::Value *V = RV.getScalarVal();
817  if (IRCallArgPos < IRFuncTy->getNumParams() &&
818  V->getType() != IRFuncTy->getParamType(IRCallArgPos))
819  V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRCallArgPos));
820 
821  IRCallArgs[IRCallArgPos++] = V;
822  }
823 }
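// [Illustrative sketch added for exposition; not part of the original file.]
// For a _Complex double argument classified as Expand, the RValue carries a
// (real, imag) pair, so the ComplexExpansion case above appends two IR
// arguments (both double) for the single Clang-level argument - matching the
// value 2 that getExpansionSize() reports for complex types.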
824 
825 /// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
826 /// accessing some number of bytes out of it, try to gep into the struct to get
827 /// at its inner goodness. Dive as deep as possible without entering an element
828 /// with an in-memory size smaller than DstSize.
829 static llvm::Value *
830 EnterStructPointerForCoercedAccess(llvm::Value *SrcPtr,
831  llvm::StructType *SrcSTy,
832  uint64_t DstSize, CodeGenFunction &CGF) {
833  // We can't dive into a zero-element struct.
834  if (SrcSTy->getNumElements() == 0) return SrcPtr;
835 
836  llvm::Type *FirstElt = SrcSTy->getElementType(0);
837 
838  // If the first elt is at least as large as what we're looking for, or if the
839  // first element is the same size as the whole struct, we can enter it. The
840  // comparison must be made on the store size and not the alloca size. Using
841  // the alloca size may overstate the size of the load.
842  uint64_t FirstEltSize =
843  CGF.CGM.getDataLayout().getTypeStoreSize(FirstElt);
844  if (FirstEltSize < DstSize &&
845  FirstEltSize < CGF.CGM.getDataLayout().getTypeStoreSize(SrcSTy))
846  return SrcPtr;
847 
848  // GEP into the first element.
849  SrcPtr = CGF.Builder.CreateConstGEP2_32(SrcSTy, SrcPtr, 0, 0, "coerce.dive");
850 
851  // If the first element is a struct, recurse.
852  llvm::Type *SrcTy =
853  cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
854  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
855  return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
856 
857  return SrcPtr;
858 }
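// [Illustrative sketch added for exposition; not part of the original file.]
// Example of the "dive", assuming DstSize == 8 and these IR types:
//
//   %inner = type { i64 }
//   %outer = type { %inner, i32 }
//
// Starting from an %outer*, the first element (%inner, store size 8) is not
// smaller than DstSize, so we GEP to it ("coerce.dive") and recurse; inside,
// the i64 element is again large enough, so the returned pointer is an i64*
// suitable for a direct coerced load.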
859 
860 /// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
861 /// are either integers or pointers. This does a truncation of the value if it
862 /// is too large or a zero extension if it is too small.
863 ///
864 /// This behaves as if the value were coerced through memory, so on big-endian
865 /// targets the high bits are preserved in a truncation, while little-endian
866 /// targets preserve the low bits.
867 static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
868  llvm::Type *Ty,
869  CodeGenFunction &CGF) {
870  if (Val->getType() == Ty)
871  return Val;
872 
873  if (isa<llvm::PointerType>(Val->getType())) {
874  // If this is Pointer->Pointer avoid conversion to and from int.
875  if (isa<llvm::PointerType>(Ty))
876  return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");
877 
878  // Convert the pointer to an integer so we can play with its width.
879  Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
880  }
881 
882  llvm::Type *DestIntTy = Ty;
883  if (isa<llvm::PointerType>(DestIntTy))
884  DestIntTy = CGF.IntPtrTy;
885 
886  if (Val->getType() != DestIntTy) {
887  const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
888  if (DL.isBigEndian()) {
889  // Preserve the high bits on big-endian targets.
890  // That is what memory coercion does.
891  uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType());
892  uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy);
893 
894  if (SrcSize > DstSize) {
895  Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits");
896  Val = CGF.Builder.CreateTrunc(Val, DestIntTy, "coerce.val.ii");
897  } else {
898  Val = CGF.Builder.CreateZExt(Val, DestIntTy, "coerce.val.ii");
899  Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits");
900  }
901  } else {
902  // Little-endian targets preserve the low bits. No shifts required.
903  Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
904  }
905  }
906 
907  if (isa<llvm::PointerType>(Ty))
908  Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
909  return Val;
910 }
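// [Illustrative sketch added for exposition; not part of the original file.]
// Worked example of the endian handling above, coercing an i64 holding
// 0x1122334455667788 to i32:
//
//   little-endian: plain trunc              -> 0x55667788 (low bits kept)
//   big-endian:    lshr by 32, then trunc   -> 0x11223344 (high bits kept)
//
// which matches what storing the i64 and reloading the first 4 bytes as an
// i32 would produce on each target.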
911 
912 
913 
914 /// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
915 /// a pointer to an object of type \arg Ty, known to be aligned to
916 /// \arg SrcAlign bytes.
917 ///
918 /// This safely handles the case when the src type is smaller than the
919 /// destination type; in this situation the values of bits which are not
920 /// present in the src are undefined.
921 static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
922  llvm::Type *Ty, CharUnits SrcAlign,
923  CodeGenFunction &CGF) {
924  llvm::Type *SrcTy =
925  cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
926 
927  // If SrcTy and Ty are the same, just do a load.
928  if (SrcTy == Ty)
929  return CGF.Builder.CreateAlignedLoad(SrcPtr, SrcAlign.getQuantity());
930 
931  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);
932 
933  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
934  SrcPtr = EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
935  SrcTy = cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
936  }
937 
938  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
939 
940  // If the source and destination are integer or pointer types, just do an
941  // extension or truncation to the desired type.
942  if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
943  (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
944  llvm::LoadInst *Load =
945  CGF.Builder.CreateAlignedLoad(SrcPtr, SrcAlign.getQuantity());
946  return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
947  }
948 
949  // If load is legal, just bitcast the src pointer.
950  if (SrcSize >= DstSize) {
951  // Generally SrcSize is never greater than DstSize, since this means we are
952  // losing bits. However, this can happen in cases where the structure has
953  // additional padding, for example due to a user specified alignment.
954  //
955 // FIXME: Assert that we aren't truncating non-padding bits when we have access
956  // to that information.
957  llvm::Value *Casted =
958  CGF.Builder.CreateBitCast(SrcPtr, llvm::PointerType::getUnqual(Ty));
959  return CGF.Builder.CreateAlignedLoad(Casted, SrcAlign.getQuantity());
960  }
961 
962  // Otherwise do coercion through memory. This is stupid, but
963  // simple.
964  llvm::AllocaInst *Tmp = CGF.CreateTempAlloca(Ty);
965  Tmp->setAlignment(SrcAlign.getQuantity());
966  llvm::Type *I8PtrTy = CGF.Builder.getInt8PtrTy();
967  llvm::Value *Casted = CGF.Builder.CreateBitCast(Tmp, I8PtrTy);
968  llvm::Value *SrcCasted = CGF.Builder.CreateBitCast(SrcPtr, I8PtrTy);
969  CGF.Builder.CreateMemCpy(Casted, SrcCasted,
970  llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize),
971  SrcAlign.getQuantity(), false);
972  return CGF.Builder.CreateAlignedLoad(Tmp, SrcAlign.getQuantity());
973 }
974 
975 // Function to store a first-class aggregate into memory. We prefer to
976 // store the elements rather than the aggregate to be more friendly to
977 // fast-isel.
978 // FIXME: Do we need to recurse here?
979 static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
980  llvm::Value *DestPtr, bool DestIsVolatile,
981  CharUnits DestAlign) {
982  // Prefer scalar stores to first-class aggregate stores.
983  if (llvm::StructType *STy =
984  dyn_cast<llvm::StructType>(Val->getType())) {
985  const llvm::StructLayout *Layout =
986  CGF.CGM.getDataLayout().getStructLayout(STy);
987 
988  for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
989  llvm::Value *EltPtr = CGF.Builder.CreateConstGEP2_32(STy, DestPtr, 0, i);
990  llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i);
991  uint64_t EltOffset = Layout->getElementOffset(i);
992  CharUnits EltAlign =
993  DestAlign.alignmentAtOffset(CharUnits::fromQuantity(EltOffset));
994  CGF.Builder.CreateAlignedStore(Elt, EltPtr, EltAlign.getQuantity(),
995  DestIsVolatile);
996  }
997  } else {
998  CGF.Builder.CreateAlignedStore(Val, DestPtr, DestAlign.getQuantity(),
999  DestIsVolatile);
1000  }
1001 }
1002 
1003 /// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
1004 /// where the source and destination may have different types. The
1005 /// destination is known to be aligned to \arg DstAlign bytes.
1006 ///
1007 /// This safely handles the case when the src type is larger than the
1008 /// destination type; the upper bits of the src will be lost.
1009 static void CreateCoercedStore(llvm::Value *Src,
1010  llvm::Value *DstPtr,
1011  bool DstIsVolatile,
1012  CharUnits DstAlign,
1013  CodeGenFunction &CGF) {
1014  llvm::Type *SrcTy = Src->getType();
1015  llvm::Type *DstTy =
1016  cast<llvm::PointerType>(DstPtr->getType())->getElementType();
1017  if (SrcTy == DstTy) {
1018  CGF.Builder.CreateAlignedStore(Src, DstPtr, DstAlign.getQuantity(),
1019  DstIsVolatile);
1020  return;
1021  }
1022 
1023  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
1024 
1025  if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
1026  DstPtr = EnterStructPointerForCoercedAccess(DstPtr, DstSTy, SrcSize, CGF);
1027  DstTy = cast<llvm::PointerType>(DstPtr->getType())->getElementType();
1028  }
1029 
1030  // If the source and destination are integer or pointer types, just do an
1031  // extension or truncation to the desired type.
1032  if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
1033  (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
1034  Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
1035  CGF.Builder.CreateAlignedStore(Src, DstPtr, DstAlign.getQuantity(),
1036  DstIsVolatile);
1037  return;
1038  }
1039 
1040  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);
1041 
1042  // If store is legal, just bitcast the src pointer.
1043  if (SrcSize <= DstSize) {
1044  llvm::Value *Casted =
1045  CGF.Builder.CreateBitCast(DstPtr, llvm::PointerType::getUnqual(SrcTy));
1046  BuildAggStore(CGF, Src, Casted, DstIsVolatile, DstAlign);
1047  } else {
1048  // Otherwise do coercion through memory. This is stupid, but
1049  // simple.
1050 
1051  // Generally SrcSize is never greater than DstSize, since this means we are
1052  // losing bits. However, this can happen in cases where the structure has
1053  // additional padding, for example due to a user specified alignment.
1054  //
1055  // FIXME: Assert that we aren't truncating non-padding bits when we have access
1056  // to that information.
1057  llvm::AllocaInst *Tmp = CGF.CreateTempAlloca(SrcTy);
1058  Tmp->setAlignment(DstAlign.getQuantity());
1059  CGF.Builder.CreateAlignedStore(Src, Tmp, DstAlign.getQuantity());
1060  llvm::Type *I8PtrTy = CGF.Builder.getInt8PtrTy();
1061  llvm::Value *Casted = CGF.Builder.CreateBitCast(Tmp, I8PtrTy);
1062  llvm::Value *DstCasted = CGF.Builder.CreateBitCast(DstPtr, I8PtrTy);
1063  CGF.Builder.CreateMemCpy(DstCasted, Casted,
1064  llvm::ConstantInt::get(CGF.IntPtrTy, DstSize),
1065  DstAlign.getQuantity(), false);
1066  }
1067 }
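// [Illustrative sketch added for exposition; not part of the original file.]
// Sketch of the two store paths above: for a coerced value of type
// { i64, i64 } (SrcSize == 16) written into a 12-byte destination, SrcSize
// exceeds DstSize, so the value is spilled to a temporary alloca and memcpy'd
// with length DstSize, dropping the trailing padding bytes. If the destination
// had been at least 16 bytes, the pointer would simply be bitcast and the
// aggregate stored field-by-field through BuildAggStore.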
1068 
1069 namespace {
1070 
1071 /// Encapsulates information about the way function arguments from
1072 /// CGFunctionInfo should be passed to actual LLVM IR function.
1073 class ClangToLLVMArgMapping {
1074  static const unsigned InvalidIndex = ~0U;
1075  unsigned InallocaArgNo;
1076  unsigned SRetArgNo;
1077  unsigned TotalIRArgs;
1078 
1079  /// Arguments of LLVM IR function corresponding to single Clang argument.
1080  struct IRArgs {
1081  unsigned PaddingArgIndex;
1082  // Argument is expanded to IR arguments at positions
1083  // [FirstArgIndex, FirstArgIndex + NumberOfArgs).
1084  unsigned FirstArgIndex;
1085  unsigned NumberOfArgs;
1086 
1087  IRArgs()
1088  : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex),
1089  NumberOfArgs(0) {}
1090  };
1091 
1092  SmallVector<IRArgs, 8> ArgInfo;
1093 
1094 public:
1095  ClangToLLVMArgMapping(const ASTContext &Context, const CGFunctionInfo &FI,
1096  bool OnlyRequiredArgs = false)
1097  : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0),
1098  ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) {
1099  construct(Context, FI, OnlyRequiredArgs);
1100  }
1101 
1102  bool hasInallocaArg() const { return InallocaArgNo != InvalidIndex; }
1103  unsigned getInallocaArgNo() const {
1104  assert(hasInallocaArg());
1105  return InallocaArgNo;
1106  }
1107 
1108  bool hasSRetArg() const { return SRetArgNo != InvalidIndex; }
1109  unsigned getSRetArgNo() const {
1110  assert(hasSRetArg());
1111  return SRetArgNo;
1112  }
1113 
1114  unsigned totalIRArgs() const { return TotalIRArgs; }
1115 
1116  bool hasPaddingArg(unsigned ArgNo) const {
1117  assert(ArgNo < ArgInfo.size());
1118  return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex;
1119  }
1120  unsigned getPaddingArgNo(unsigned ArgNo) const {
1121  assert(hasPaddingArg(ArgNo));
1122  return ArgInfo[ArgNo].PaddingArgIndex;
1123  }
1124 
1125  /// Returns index of first IR argument corresponding to ArgNo, and their
1126  /// quantity.
1127  std::pair<unsigned, unsigned> getIRArgs(unsigned ArgNo) const {
1128  assert(ArgNo < ArgInfo.size());
1129  return std::make_pair(ArgInfo[ArgNo].FirstArgIndex,
1130  ArgInfo[ArgNo].NumberOfArgs);
1131  }
1132 
1133 private:
1134  void construct(const ASTContext &Context, const CGFunctionInfo &FI,
1135  bool OnlyRequiredArgs);
1136 };
1137 
1138 void ClangToLLVMArgMapping::construct(const ASTContext &Context,
1139  const CGFunctionInfo &FI,
1140  bool OnlyRequiredArgs) {
1141  unsigned IRArgNo = 0;
1142  bool SwapThisWithSRet = false;
1143  const ABIArgInfo &RetAI = FI.getReturnInfo();
1144 
1145  if (RetAI.getKind() == ABIArgInfo::Indirect) {
1146  SwapThisWithSRet = RetAI.isSRetAfterThis();
1147  SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++;
1148  }
1149 
1150  unsigned ArgNo = 0;
1151  unsigned NumArgs = OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size();
1152  for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(); ArgNo < NumArgs;
1153  ++I, ++ArgNo) {
1154  assert(I != FI.arg_end());
1155  QualType ArgType = I->type;
1156  const ABIArgInfo &AI = I->info;
1157  // Collect data about IR arguments corresponding to Clang argument ArgNo.
1158  auto &IRArgs = ArgInfo[ArgNo];
1159 
1160  if (AI.getPaddingType())
1161  IRArgs.PaddingArgIndex = IRArgNo++;
1162 
1163  switch (AI.getKind()) {
1164  case ABIArgInfo::Extend:
1165  case ABIArgInfo::Direct: {
1166  // FIXME: handle sseregparm someday...
1167  llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.getCoerceToType());
1168  if (AI.isDirect() && AI.getCanBeFlattened() && STy) {
1169  IRArgs.NumberOfArgs = STy->getNumElements();
1170  } else {
1171  IRArgs.NumberOfArgs = 1;
1172  }
1173  break;
1174  }
1175  case ABIArgInfo::Indirect:
1176  IRArgs.NumberOfArgs = 1;
1177  break;
1178  case ABIArgInfo::Ignore:
1179  case ABIArgInfo::InAlloca:
1180  // ignore and inalloca don't have matching LLVM parameters.
1181  IRArgs.NumberOfArgs = 0;
1182  break;
1183  case ABIArgInfo::Expand: {
1184  IRArgs.NumberOfArgs = getExpansionSize(ArgType, Context);
1185  break;
1186  }
1187  }
1188 
1189  if (IRArgs.NumberOfArgs > 0) {
1190  IRArgs.FirstArgIndex = IRArgNo;
1191  IRArgNo += IRArgs.NumberOfArgs;
1192  }
1193 
1194  // Skip over the sret parameter when it comes second. We already handled it
1195  // above.
1196  if (IRArgNo == 1 && SwapThisWithSRet)
1197  IRArgNo++;
1198  }
1199  assert(ArgNo == ArgInfo.size());
1200 
1201  if (FI.usesInAlloca())
1202  InallocaArgNo = IRArgNo++;
1203 
1204  TotalIRArgs = IRArgNo;
1205 }
1206 } // namespace
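// [Illustrative sketch added for exposition; not part of the original file.]
// Example of the Clang-argument -> IR-argument mapping, assuming a target
// where the return value is passed indirectly (sret) and the first parameter
// is classified as Expand:
//
//   struct Big  { char Buf[64]; };
//   struct Pair { int A, B; };
//   Big f(Pair P, int N);
//
//   IR arg 0:    sret pointer for Big      (getSRetArgNo() == 0)
//   IR args 1-2: P expanded to (i32, i32)  (getIRArgs(0) == {1, 2})
//   IR arg 3:    N                         (getIRArgs(1) == {3, 1})
//
// totalIRArgs() == 4; padding and inalloca arguments, when present, occupy
// additional slots in the same numbering.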
1207 
1208 /***/
1209 
1210 bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
1211  return FI.getReturnInfo().isIndirect();
1212 }
1213 
1214 bool CodeGenModule::ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI) {
1215  return ReturnTypeUsesSRet(FI) &&
1216  getTargetCodeGenInfo().doesReturnSlotInterfereWithArgs();
1217 }
1218 
1219 bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
1220  if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
1221  switch (BT->getKind()) {
1222  default:
1223  return false;
1224  case BuiltinType::Float:
1225  return getTarget().useObjCFPRetForRealType(TargetInfo::Float);
1226  case BuiltinType::Double:
1227  return getTarget().useObjCFPRetForRealType(TargetInfo::Double);
1228  case BuiltinType::LongDouble:
1229  return getTarget().useObjCFPRetForRealType(TargetInfo::LongDouble);
1230  }
1231  }
1232 
1233  return false;
1234 }
1235 
1236 bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) {
1237  if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
1238  if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
1239  if (BT->getKind() == BuiltinType::LongDouble)
1240  return getTarget().useObjCFP2RetForComplexLongDouble();
1241  }
1242  }
1243 
1244  return false;
1245 }
1246 
1247 llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
1248  const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD);
1249  return GetFunctionType(FI);
1250 }
1251 
1252 llvm::FunctionType *
1253 CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {
1254 
1255  bool Inserted = FunctionsBeingProcessed.insert(&FI).second;
1256  (void)Inserted;
1257  assert(Inserted && "Recursively being processed?");
1258 
1259  llvm::Type *resultType = nullptr;
1260  const ABIArgInfo &retAI = FI.getReturnInfo();
1261  switch (retAI.getKind()) {
1262  case ABIArgInfo::Expand:
1263  llvm_unreachable("Invalid ABI kind for return argument");
1264 
1265  case ABIArgInfo::Extend:
1266  case ABIArgInfo::Direct:
1267  resultType = retAI.getCoerceToType();
1268  break;
1269 
1270  case ABIArgInfo::InAlloca:
1271  if (retAI.getInAllocaSRet()) {
1272  // sret things on win32 aren't void, they return the sret pointer.
1273  QualType ret = FI.getReturnType();
1274  llvm::Type *ty = ConvertType(ret);
1275  unsigned addressSpace = Context.getTargetAddressSpace(ret);
1276  resultType = llvm::PointerType::get(ty, addressSpace);
1277  } else {
1278  resultType = llvm::Type::getVoidTy(getLLVMContext());
1279  }
1280  break;
1281 
1282  case ABIArgInfo::Indirect: {
1283  assert(!retAI.getIndirectAlign() && "Align unused on indirect return.");
1284  resultType = llvm::Type::getVoidTy(getLLVMContext());
1285  break;
1286  }
1287 
1288  case ABIArgInfo::Ignore:
1289  resultType = llvm::Type::getVoidTy(getLLVMContext());
1290  break;
1291  }
1292 
1293  ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI, true);
1294  SmallVector<llvm::Type*, 8> ArgTypes(IRFunctionArgs.totalIRArgs());
1295 
1296  // Add type for sret argument.
1297  if (IRFunctionArgs.hasSRetArg()) {
1298  QualType Ret = FI.getReturnType();
1299  llvm::Type *Ty = ConvertType(Ret);
1300  unsigned AddressSpace = Context.getTargetAddressSpace(Ret);
1301  ArgTypes[IRFunctionArgs.getSRetArgNo()] =
1302  llvm::PointerType::get(Ty, AddressSpace);
1303  }
1304 
1305  // Add type for inalloca argument.
1306  if (IRFunctionArgs.hasInallocaArg()) {
1307  auto ArgStruct = FI.getArgStruct();
1308  assert(ArgStruct);
1309  ArgTypes[IRFunctionArgs.getInallocaArgNo()] = ArgStruct->getPointerTo();
1310  }
1311 
1312  // Add in all of the required arguments.
1313  unsigned ArgNo = 0;
1314  CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
1315  ie = it + FI.getNumRequiredArgs();
1316  for (; it != ie; ++it, ++ArgNo) {
1317  const ABIArgInfo &ArgInfo = it->info;
1318 
1319  // Insert a padding type to ensure proper alignment.
1320  if (IRFunctionArgs.hasPaddingArg(ArgNo))
1321  ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
1322  ArgInfo.getPaddingType();
1323 
1324  unsigned FirstIRArg, NumIRArgs;
1325  std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
1326 
1327  switch (ArgInfo.getKind()) {
1328  case ABIArgInfo::Ignore:
1329  case ABIArgInfo::InAlloca:
1330  assert(NumIRArgs == 0);
1331  break;
1332 
1333  case ABIArgInfo::Indirect: {
1334  assert(NumIRArgs == 1);
1335  // indirect arguments are always on the stack, which is addr space #0.
1336  llvm::Type *LTy = ConvertTypeForMem(it->type);
1337  ArgTypes[FirstIRArg] = LTy->getPointerTo();
1338  break;
1339  }
1340 
1341  case ABIArgInfo::Extend:
1342  case ABIArgInfo::Direct: {
1343  // Fast-isel and the optimizer generally like scalar values better than
1344  // FCAs, so we flatten them if this is safe to do for this argument.
1345  llvm::Type *argType = ArgInfo.getCoerceToType();
1346  llvm::StructType *st = dyn_cast<llvm::StructType>(argType);
1347  if (st && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
1348  assert(NumIRArgs == st->getNumElements());
1349  for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
1350  ArgTypes[FirstIRArg + i] = st->getElementType(i);
1351  } else {
1352  assert(NumIRArgs == 1);
1353  ArgTypes[FirstIRArg] = argType;
1354  }
1355  break;
1356  }
1357 
1358  case ABIArgInfo::Expand:
1359  auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
1360  getExpandedTypes(it->type, ArgTypesIter);
1361  assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
1362  break;
1363  }
1364  }
1365 
1366  bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
1367  assert(Erased && "Not in set?");
1368 
1369  return llvm::FunctionType::get(resultType, ArgTypes, FI.isVariadic());
1370 }
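// [Illustrative sketch added for exposition; not part of the original file.]
// Continuing the sketch above: for "Big f(Pair P, int N)" with an indirect
// (sret) return and P expanded, GetFunctionType() produces
//
//   void (%struct.Big*, i32, i32, i32)
//
// i.e. the return slot becomes a leading pointer parameter and resultType is
// void; the sret and other parameter attributes are attached later by
// ConstructAttributeList(), not here.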
1371 
1372 llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
1373  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
1374  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
1375 
1376  if (!isFuncTypeConvertible(FPT))
1377  return llvm::StructType::get(getLLVMContext());
1378 
1379  const CGFunctionInfo *Info;
1380  if (isa<CXXDestructorDecl>(MD))
1381  Info =
1382      &arrangeCXXStructorDeclaration(MD, getFromDtorType(GD.getDtorType()));
1383  else
1384  Info = &arrangeCXXMethodDeclaration(MD);
1385  return GetFunctionType(*Info);
1386 }
1387 
1388 void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
1389  const Decl *TargetDecl,
1390  AttributeListType &PAL,
1391  unsigned &CallingConv,
1392  bool AttrOnCallSite) {
1393  llvm::AttrBuilder FuncAttrs;
1394  llvm::AttrBuilder RetAttrs;
1395  bool HasOptnone = false;
1396 
1397  CallingConv = FI.getEffectiveCallingConvention();
1398 
1399  if (FI.isNoReturn())
1400  FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1401 
1402  // FIXME: handle sseregparm someday...
1403  if (TargetDecl) {
1404  if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
1405  FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
1406  if (TargetDecl->hasAttr<NoThrowAttr>())
1407  FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1408  if (TargetDecl->hasAttr<NoReturnAttr>())
1409  FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1410  if (TargetDecl->hasAttr<NoDuplicateAttr>())
1411  FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate);
1412 
1413  if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
1414  const FunctionProtoType *FPT = Fn->getType()->getAs<FunctionProtoType>();
1415  if (FPT && FPT->isNothrow(getContext()))
1416  FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1417  // Don't use [[noreturn]] or _Noreturn for a call to a virtual function.
1418  // These attributes are not inherited by overloads.
1419  const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn);
1420  if (Fn->isNoReturn() && !(AttrOnCallSite && MD && MD->isVirtual()))
1421  FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1422  }
1423 
1424  // 'const' and 'pure' attribute functions are also nounwind.
1425  if (TargetDecl->hasAttr<ConstAttr>()) {
1426  FuncAttrs.addAttribute(llvm::Attribute::ReadNone);
1427  FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1428  } else if (TargetDecl->hasAttr<PureAttr>()) {
1429  FuncAttrs.addAttribute(llvm::Attribute::ReadOnly);
1430  FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1431  }
1432  if (TargetDecl->hasAttr<RestrictAttr>())
1433  RetAttrs.addAttribute(llvm::Attribute::NoAlias);
1434  if (TargetDecl->hasAttr<ReturnsNonNullAttr>())
1435  RetAttrs.addAttribute(llvm::Attribute::NonNull);
1436 
1437  HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>();
1438  }
1439 
1440  // OptimizeNoneAttr takes precedence over -Os or -Oz. No warning needed.
1441  if (!HasOptnone) {
1442  if (CodeGenOpts.OptimizeSize)
1443  FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
1444  if (CodeGenOpts.OptimizeSize == 2)
1445  FuncAttrs.addAttribute(llvm::Attribute::MinSize);
1446  }
1447 
1448  if (CodeGenOpts.DisableRedZone)
1449  FuncAttrs.addAttribute(llvm::Attribute::NoRedZone);
1450  if (CodeGenOpts.NoImplicitFloat)
1451  FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat);
1452  if (CodeGenOpts.EnableSegmentedStacks &&
1453  !(TargetDecl && TargetDecl->hasAttr<NoSplitStackAttr>()))
1454  FuncAttrs.addAttribute("split-stack");
1455 
1456  if (AttrOnCallSite) {
1457  // Attributes that should go on the call site only.
1458  if (!CodeGenOpts.SimplifyLibCalls)
1459  FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);
1460  if (!CodeGenOpts.TrapFuncName.empty())
1461  FuncAttrs.addAttribute("trap-func-name", CodeGenOpts.TrapFuncName);
1462  } else {
1463  // Attributes that should go on the function, but not the call site.
1464  if (!CodeGenOpts.DisableFPElim) {
1465  FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
1466  } else if (CodeGenOpts.OmitLeafFramePointer) {
1467  FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
1468  FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
1469  } else {
1470  FuncAttrs.addAttribute("no-frame-pointer-elim", "true");
1471  FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
1472  }
1473 
1474  FuncAttrs.addAttribute("disable-tail-calls",
1475  llvm::toStringRef(CodeGenOpts.DisableTailCalls));
1476  FuncAttrs.addAttribute("less-precise-fpmad",
1477  llvm::toStringRef(CodeGenOpts.LessPreciseFPMAD));
1478  FuncAttrs.addAttribute("no-infs-fp-math",
1479  llvm::toStringRef(CodeGenOpts.NoInfsFPMath));
1480  FuncAttrs.addAttribute("no-nans-fp-math",
1481  llvm::toStringRef(CodeGenOpts.NoNaNsFPMath));
1482  FuncAttrs.addAttribute("unsafe-fp-math",
1483  llvm::toStringRef(CodeGenOpts.UnsafeFPMath));
1484  FuncAttrs.addAttribute("use-soft-float",
1485  llvm::toStringRef(CodeGenOpts.SoftFloat));
1486  FuncAttrs.addAttribute("stack-protector-buffer-size",
1487  llvm::utostr(CodeGenOpts.SSPBufferSize));
1488 
1489  if (!CodeGenOpts.StackRealignment)
1490  FuncAttrs.addAttribute("no-realign-stack");
1491 
1492  // Add target-cpu and target-features attributes to functions. If
1493  // we have a decl for the function and it has a target attribute then
1494  // parse that and add it to the feature set.
1495  StringRef TargetCPU = getTarget().getTargetOpts().CPU;
1496 
1497  // TODO: Features gets us the features on the command line including
1498  // feature dependencies. For canonicalization purposes we might want to
1499  // avoid putting features in the target-features set if we know it'll be
1500  // one of the default features in the backend, e.g. corei7-avx and +avx or
1501  // figure out non-explicit dependencies.
1502  // Canonicalize the existing features in a new feature map.
1503  // TODO: Migrate the existing backends to keep the map around rather than
1504  // the vector.
1505  llvm::StringMap<bool> FeatureMap;
1506  for (auto F : getTarget().getTargetOpts().Features) {
1507  const char *Name = F.c_str();
1508  bool Enabled = Name[0] == '+';
1509  getTarget().setFeatureEnabled(FeatureMap, Name + 1, Enabled);
1510  }
1511 
1512  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl);
1513  if (FD) {
1514  if (const auto *TD = FD->getAttr<TargetAttr>()) {
1515  StringRef FeaturesStr = TD->getFeatures();
1516  SmallVector<StringRef, 1> AttrFeatures;
1517  FeaturesStr.split(AttrFeatures, ",");
1518 
1519  // Grab the various features and prepend a "+" to turn on the feature to
1520  // the backend and add them to our existing set of features.
1521  for (auto &Feature : AttrFeatures) {
1522  // Go ahead and trim whitespace rather than either erroring or
1523  // accepting it weirdly.
1524  Feature = Feature.trim();
1525 
1526  // While we're here iterating check for a different target cpu.
1527  if (Feature.startswith("arch="))
1528  TargetCPU = Feature.split("=").second.trim();
1529  else if (Feature.startswith("tune="))
1530  // We don't support cpu tuning this way currently.
1531  ;
1532  else if (Feature.startswith("fpmath="))
1533  // TODO: Support the fpmath option this way. It will require checking
1534  // overall feature validity for the function with the rest of the
1535  // attributes on the function.
1536  ;
1537  else if (Feature.startswith("mno-"))
1538  getTarget().setFeatureEnabled(FeatureMap, Feature.split("-").second,
1539  false);
1540  else
1541  getTarget().setFeatureEnabled(FeatureMap, Feature, true);
1542  }
1543  }
1544  }
1545 
1546  // Produce the canonical string for this set of features.
1547  std::vector<std::string> Features;
1548  for (llvm::StringMap<bool>::const_iterator it = FeatureMap.begin(),
1549  ie = FeatureMap.end();
1550  it != ie; ++it)
1551  Features.push_back((it->second ? "+" : "-") + it->first().str());
1552 
1553  // Now add the target-cpu and target-features to the function.
1554  if (TargetCPU != "")
1555  FuncAttrs.addAttribute("target-cpu", TargetCPU);
1556  if (!Features.empty()) {
1557  std::sort(Features.begin(), Features.end());
1558  FuncAttrs.addAttribute("target-features",
1559  llvm::join(Features.begin(), Features.end(), ","));
1560  }
1561  }
1562 
1563  ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI);
1564 
1565  QualType RetTy = FI.getReturnType();
1566  const ABIArgInfo &RetAI = FI.getReturnInfo();
1567  switch (RetAI.getKind()) {
1568  case ABIArgInfo::Extend:
1569  if (RetTy->hasSignedIntegerRepresentation())
1570  RetAttrs.addAttribute(llvm::Attribute::SExt);
1571  else if (RetTy->hasUnsignedIntegerRepresentation())
1572  RetAttrs.addAttribute(llvm::Attribute::ZExt);
1573  // FALL THROUGH
1574  case ABIArgInfo::Direct:
1575  if (RetAI.getInReg())
1576  RetAttrs.addAttribute(llvm::Attribute::InReg);
1577  break;
1578  case ABIArgInfo::Ignore:
1579  break;
1580 
1581  case ABIArgInfo::InAlloca:
1582  case ABIArgInfo::Indirect: {
1583  // inalloca and sret disable readnone and readonly
1584  FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
1585  .removeAttribute(llvm::Attribute::ReadNone);
1586  break;
1587  }
1588 
1589  case ABIArgInfo::Expand:
1590  llvm_unreachable("Invalid ABI kind for return argument");
1591  }
1592 
1593  if (const auto *RefTy = RetTy->getAs<ReferenceType>()) {
1594  QualType PTy = RefTy->getPointeeType();
1595  if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
1596  RetAttrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy)
1597  .getQuantity());
1598  else if (getContext().getTargetAddressSpace(PTy) == 0)
1599  RetAttrs.addAttribute(llvm::Attribute::NonNull);
1600  }
1601 
1602  // Attach return attributes.
1603  if (RetAttrs.hasAttributes()) {
1604  PAL.push_back(llvm::AttributeSet::get(
1605  getLLVMContext(), llvm::AttributeSet::ReturnIndex, RetAttrs));
1606  }
1607 
1608  // Attach attributes to sret.
1609  if (IRFunctionArgs.hasSRetArg()) {
1610  llvm::AttrBuilder SRETAttrs;
1611  SRETAttrs.addAttribute(llvm::Attribute::StructRet);
1612  if (RetAI.getInReg())
1613  SRETAttrs.addAttribute(llvm::Attribute::InReg);
1614  PAL.push_back(llvm::AttributeSet::get(
1615  getLLVMContext(), IRFunctionArgs.getSRetArgNo() + 1, SRETAttrs));
1616  }
1617 
1618  // Attach attributes to inalloca argument.
1619  if (IRFunctionArgs.hasInallocaArg()) {
1620  llvm::AttrBuilder Attrs;
1621  Attrs.addAttribute(llvm::Attribute::InAlloca);
1622  PAL.push_back(llvm::AttributeSet::get(
1623  getLLVMContext(), IRFunctionArgs.getInallocaArgNo() + 1, Attrs));
1624  }
1625 
1626  unsigned ArgNo = 0;
1627  for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(),
1628  E = FI.arg_end();
1629  I != E; ++I, ++ArgNo) {
1630  QualType ParamType = I->type;
1631  const ABIArgInfo &AI = I->info;
1632  llvm::AttrBuilder Attrs;
1633 
1634  // Add attribute for padding argument, if necessary.
1635  if (IRFunctionArgs.hasPaddingArg(ArgNo)) {
1636  if (AI.getPaddingInReg())
1637  PAL.push_back(llvm::AttributeSet::get(
1638  getLLVMContext(), IRFunctionArgs.getPaddingArgNo(ArgNo) + 1,
1639  llvm::Attribute::InReg));
1640  }
1641 
1642  // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
1643  // have the corresponding parameter variable. It doesn't make
1644  // sense to do it here because parameters are so messed up.
1645  switch (AI.getKind()) {
1646  case ABIArgInfo::Extend:
1647  if (ParamType->isSignedIntegerOrEnumerationType())
1648  Attrs.addAttribute(llvm::Attribute::SExt);
1649  else if (ParamType->isUnsignedIntegerOrEnumerationType()) {
1650  if (getTypes().getABIInfo().shouldSignExtUnsignedType(ParamType))
1651  Attrs.addAttribute(llvm::Attribute::SExt);
1652  else
1653  Attrs.addAttribute(llvm::Attribute::ZExt);
1654  }
1655  // FALL THROUGH
1656  case ABIArgInfo::Direct:
1657  if (ArgNo == 0 && FI.isChainCall())
1658  Attrs.addAttribute(llvm::Attribute::Nest);
1659  else if (AI.getInReg())
1660  Attrs.addAttribute(llvm::Attribute::InReg);
1661  break;
1662 
1663  case ABIArgInfo::Indirect:
1664  if (AI.getInReg())
1665  Attrs.addAttribute(llvm::Attribute::InReg);
1666 
1667  if (AI.getIndirectByVal())
1668  Attrs.addAttribute(llvm::Attribute::ByVal);
1669 
1670  Attrs.addAlignmentAttr(AI.getIndirectAlign());
1671 
1672  // byval disables readnone and readonly.
1673  FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
1674  .removeAttribute(llvm::Attribute::ReadNone);
1675  break;
1676 
1677  case ABIArgInfo::Ignore:
1678  case ABIArgInfo::Expand:
1679  continue;
1680 
1681  case ABIArgInfo::InAlloca:
1682  // inalloca disables readnone and readonly.
1683  FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
1684  .removeAttribute(llvm::Attribute::ReadNone);
1685  continue;
1686  }
1687 
1688  if (const auto *RefTy = ParamType->getAs<ReferenceType>()) {
1689  QualType PTy = RefTy->getPointeeType();
1690  if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
1691  Attrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy)
1692  .getQuantity());
1693  else if (getContext().getTargetAddressSpace(PTy) == 0)
1694  Attrs.addAttribute(llvm::Attribute::NonNull);
1695  }
1696 
1697  if (Attrs.hasAttributes()) {
1698  unsigned FirstIRArg, NumIRArgs;
1699  std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
1700  for (unsigned i = 0; i < NumIRArgs; i++)
1701  PAL.push_back(llvm::AttributeSet::get(getLLVMContext(),
1702  FirstIRArg + i + 1, Attrs));
1703  }
1704  }
1705  assert(ArgNo == FI.arg_size());
1706 
1707  if (FuncAttrs.hasAttributes())
1708  PAL.push_back(llvm::
1709  AttributeSet::get(getLLVMContext(),
1710  llvm::AttributeSet::FunctionIndex,
1711  FuncAttrs));
1712 }
1713 
1714 /// An argument came in as a promoted argument; demote it back to its
1715 /// declared type.
1716 static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
1717  const VarDecl *var,
1718  llvm::Value *value) {
1719  llvm::Type *varType = CGF.ConvertType(var->getType());
1720 
1721  // This can happen with promotions that actually don't change the
1722  // underlying type, like the enum promotions.
1723  if (value->getType() == varType) return value;
1724 
1725  assert((varType->isIntegerTy() || varType->isFloatingPointTy())
1726  && "unexpected promotion type");
1727 
1728  if (isa<llvm::IntegerType>(varType))
1729  return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");
1730 
1731  return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
1732 }
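// Illustrative example, not part of CGCall.cpp (hypothetical declaration): a
// K&R-style definition whose char parameter is promoted to int at the call
// site; emitArgumentDemotion truncates the incoming int back to the declared
// type in the prologue.
//
//   int is_dot(c)
//     char c;
//   { return c == '.'; }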
1733 
1734 /// Returns the attribute (either parameter attribute, or function
1735 /// attribute), which declares argument ArgNo to be non-null.
1736 static const NonNullAttr *getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD,
1737  QualType ArgType, unsigned ArgNo) {
1738  // FIXME: __attribute__((nonnull)) can also be applied to:
1739  // - references to pointers, where the pointee is known to be
1740  // nonnull (apparently a Clang extension)
1741  // - transparent unions containing pointers
1742  // In the former case, LLVM IR cannot represent the constraint. In
1743  // the latter case, we have no guarantee that the transparent union
1744  // is in fact passed as a pointer.
1745  if (!ArgType->isAnyPointerType() && !ArgType->isBlockPointerType())
1746  return nullptr;
1747  // First, check attribute on parameter itself.
1748  if (PVD) {
1749  if (auto ParmNNAttr = PVD->getAttr<NonNullAttr>())
1750  return ParmNNAttr;
1751  }
1752  // Check function attributes.
1753  if (!FD)
1754  return nullptr;
1755  for (const auto *NNAttr : FD->specific_attrs<NonNullAttr>()) {
1756  if (NNAttr->isNonNull(ArgNo))
1757  return NNAttr;
1758  }
1759  return nullptr;
1760 }
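// Illustrative example, not part of CGCall.cpp (hypothetical declarations):
// the two spellings getNonNullAttr looks for.
//
//   void copy_buf(void *dst __attribute__((nonnull)), const void *src);
//   __attribute__((nonnull(1, 2)))
//   void copy_all(void *dst, const void *src);
//
// The first attaches NonNullAttr to the parameter itself; the second is a
// function-level attribute whose index set is queried via isNonNull(ArgNo).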
1761 
1762 void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
1763  llvm::Function *Fn,
1764  const FunctionArgList &Args) {
1765  if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>())
1766  // Naked functions don't have prologues.
1767  return;
1768 
1769  // If this is an implicit-return-zero function, go ahead and
1770  // initialize the return value. TODO: it might be nice to have
1771  // a more general mechanism for this that didn't require synthesized
1772  // return statements.
1773  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) {
1774  if (FD->hasImplicitReturnZero()) {
1775  QualType RetTy = FD->getReturnType().getUnqualifiedType();
1776  llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
1777  llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
1778  Builder.CreateStore(Zero, ReturnValue);
1779  }
1780  }
1781 
1782  // FIXME: We no longer need the types from FunctionArgList; lift up and
1783  // simplify.
1784 
1785  ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), FI);
1786  // Flattened function arguments.
1787  SmallVector<llvm::Argument *, 16> FnArgs;
1788  FnArgs.reserve(IRFunctionArgs.totalIRArgs());
1789  for (auto &Arg : Fn->args()) {
1790  FnArgs.push_back(&Arg);
1791  }
1792  assert(FnArgs.size() == IRFunctionArgs.totalIRArgs());
1793 
1794  // If we're using inalloca, all the memory arguments are GEPs off of the last
1795  // parameter, which is a pointer to the complete memory area.
1796  llvm::Value *ArgStruct = nullptr;
1797  if (IRFunctionArgs.hasInallocaArg()) {
1798  ArgStruct = FnArgs[IRFunctionArgs.getInallocaArgNo()];
1799  assert(ArgStruct->getType() == FI.getArgStruct()->getPointerTo());
1800  }
1801 
1802  // Name the struct return parameter.
1803  if (IRFunctionArgs.hasSRetArg()) {
1804  auto AI = FnArgs[IRFunctionArgs.getSRetArgNo()];
1805  AI->setName("agg.result");
1806  AI->addAttr(llvm::AttributeSet::get(getLLVMContext(), AI->getArgNo() + 1,
1807  llvm::Attribute::NoAlias));
1808  }
1809 
1810  // Track if we received the parameter as a pointer (indirect, byval, or
1811  // inalloca). If we already have a pointer, EmitParmDecl doesn't need to copy it
1812  // into a local alloca for us.
1813  enum ValOrPointer { HaveValue = 0, HavePointer = 1 };
1814  typedef llvm::PointerIntPair<llvm::Value *, 1> ValueAndIsPtr;
1815  SmallVector<ValueAndIsPtr, 16> ArgVals;
1816  ArgVals.reserve(Args.size());
1817 
1818  // Create a pointer value for every parameter declaration. This usually
1819  // entails copying one or more LLVM IR arguments into an alloca. Don't push
1820  // any cleanups or do anything that might unwind. We do that separately, so
1821  // we can push the cleanups in the correct order for the ABI.
1822  assert(FI.arg_size() == Args.size() &&
1823  "Mismatch between function signature & arguments.");
1824  unsigned ArgNo = 0;
1825  CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
1826  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
1827  i != e; ++i, ++info_it, ++ArgNo) {
1828  const VarDecl *Arg = *i;
1829  QualType Ty = info_it->type;
1830  const ABIArgInfo &ArgI = info_it->info;
1831 
1832  bool isPromoted =
1833  isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();
1834 
1835  unsigned FirstIRArg, NumIRArgs;
1836  std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
1837 
1838  switch (ArgI.getKind()) {
1839  case ABIArgInfo::InAlloca: {
1840  assert(NumIRArgs == 0);
1841  llvm::Value *V =
1842  Builder.CreateStructGEP(FI.getArgStruct(), ArgStruct,
1843  ArgI.getInAllocaFieldIndex(), Arg->getName());
1844  ArgVals.push_back(ValueAndIsPtr(V, HavePointer));
1845  break;
1846  }
1847 
1848  case ABIArgInfo::Indirect: {
1849  assert(NumIRArgs == 1);
1850  llvm::Value *V = FnArgs[FirstIRArg];
1851 
1852  if (!hasScalarEvaluationKind(Ty)) {
1853  // Aggregates and complex variables are accessed by reference. All we
1854  // need to do is realign the value, if requested
1855  if (ArgI.getIndirectRealign()) {
1856  llvm::Value *AlignedTemp = CreateMemTemp(Ty, "coerce");
1857 
1858  // Copy from the incoming argument pointer to the temporary with the
1859  // appropriate alignment.
1860  //
1861  // FIXME: We should have a common utility for generating an aggregate
1862  // copy.
1863  llvm::Type *I8PtrTy = Builder.getInt8PtrTy();
1864  CharUnits Size = getContext().getTypeSizeInChars(Ty);
1865  llvm::Value *Dst = Builder.CreateBitCast(AlignedTemp, I8PtrTy);
1866  llvm::Value *Src = Builder.CreateBitCast(V, I8PtrTy);
1867  Builder.CreateMemCpy(Dst,
1868  Src,
1869  llvm::ConstantInt::get(IntPtrTy,
1870  Size.getQuantity()),
1871  ArgI.getIndirectAlign(),
1872  false);
1873  V = AlignedTemp;
1874  }
1875  ArgVals.push_back(ValueAndIsPtr(V, HavePointer));
1876  } else {
1877  // Load scalar value from indirect argument.
1878  V = EmitLoadOfScalar(V, false, ArgI.getIndirectAlign(), Ty,
1879  Arg->getLocStart());
1880 
1881  if (isPromoted)
1882  V = emitArgumentDemotion(*this, Arg, V);
1883  ArgVals.push_back(ValueAndIsPtr(V, HaveValue));
1884  }
1885  break;
1886  }
1887 
1888  case ABIArgInfo::Extend:
1889  case ABIArgInfo::Direct: {
1890 
1891  // If we have the trivial case, handle it with no muss and fuss.
1892  if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
1893  ArgI.getCoerceToType() == ConvertType(Ty) &&
1894  ArgI.getDirectOffset() == 0) {
1895  assert(NumIRArgs == 1);
1896  auto AI = FnArgs[FirstIRArg];
1897  llvm::Value *V = AI;
1898 
1899  if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) {
1900  if (getNonNullAttr(CurCodeDecl, PVD, PVD->getType(),
1901  PVD->getFunctionScopeIndex()))
1902  AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
1903  AI->getArgNo() + 1,
1904  llvm::Attribute::NonNull));
1905 
1906  QualType OTy = PVD->getOriginalType();
1907  if (const auto *ArrTy =
1908  getContext().getAsConstantArrayType(OTy)) {
1909  // A C99 array parameter declaration with the static keyword also
1910  // indicates dereferenceability, and if the size is constant we can
1911  // use the dereferenceable attribute (which requires the size in
1912  // bytes).
1913  if (ArrTy->getSizeModifier() == ArrayType::Static) {
1914  QualType ETy = ArrTy->getElementType();
1915  uint64_t ArrSize = ArrTy->getSize().getZExtValue();
1916  if (!ETy->isIncompleteType() && ETy->isConstantSizeType() &&
1917  ArrSize) {
1918  llvm::AttrBuilder Attrs;
1919  Attrs.addDereferenceableAttr(
1920  getContext().getTypeSizeInChars(ETy).getQuantity()*ArrSize);
1921  AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
1922  AI->getArgNo() + 1, Attrs));
1923  } else if (getContext().getTargetAddressSpace(ETy) == 0) {
1924  AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
1925  AI->getArgNo() + 1,
1926  llvm::Attribute::NonNull));
1927  }
1928  }
1929  } else if (const auto *ArrTy =
1930  getContext().getAsVariableArrayType(OTy)) {
1931  // For C99 VLAs with the static keyword, we don't know the size so
1932  // we can't use the dereferenceable attribute, but in addrspace(0)
1933  // we know that it must be nonnull.
1934  if (ArrTy->getSizeModifier() == VariableArrayType::Static &&
1935  !getContext().getTargetAddressSpace(ArrTy->getElementType()))
1936  AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
1937  AI->getArgNo() + 1,
1938  llvm::Attribute::NonNull));
1939  }
1940 
1941  const auto *AVAttr = PVD->getAttr<AlignValueAttr>();
1942  if (!AVAttr)
1943  if (const auto *TOTy = dyn_cast<TypedefType>(OTy))
1944  AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>();
1945  if (AVAttr) {
1946  llvm::Value *AlignmentValue =
1947  EmitScalarExpr(AVAttr->getAlignment());
1948  llvm::ConstantInt *AlignmentCI =
1949  cast<llvm::ConstantInt>(AlignmentValue);
1950  unsigned Alignment =
1951  std::min((unsigned) AlignmentCI->getZExtValue(),
1952  +llvm::Value::MaximumAlignment);
1953 
1954  llvm::AttrBuilder Attrs;
1955  Attrs.addAlignmentAttr(Alignment);
1956  AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
1957  AI->getArgNo() + 1, Attrs));
1958  }
1959  }
1960 
1961  if (Arg->getType().isRestrictQualified())
1962  AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
1963  AI->getArgNo() + 1,
1964  llvm::Attribute::NoAlias));
1965 
1966  // Ensure the argument is the correct type.
1967  if (V->getType() != ArgI.getCoerceToType())
1968  V = Builder.CreateBitCast(V, ArgI.getCoerceToType());
1969 
1970  if (isPromoted)
1971  V = emitArgumentDemotion(*this, Arg, V);
1972 
1973  if (const CXXMethodDecl *MD =
1974  dyn_cast_or_null<CXXMethodDecl>(CurCodeDecl)) {
1975  if (MD->isVirtual() && Arg == CXXABIThisDecl)
1976  V = CGM.getCXXABI().
1977  adjustThisParameterInVirtualFunctionPrologue(*this, CurGD, V);
1978  }
1979 
1980  // Because of merging of function types from multiple decls it is
1981  // possible for the type of an argument to not match the corresponding
1982  // type in the function type. Since we are codegening the callee
1983  // in here, add a cast to the argument type.
1984  llvm::Type *LTy = ConvertType(Arg->getType());
1985  if (V->getType() != LTy)
1986  V = Builder.CreateBitCast(V, LTy);
1987 
1988  ArgVals.push_back(ValueAndIsPtr(V, HaveValue));
1989  break;
1990  }
1991 
1992  llvm::AllocaInst *Alloca = CreateMemTemp(Ty, Arg->getName());
1993 
1994  // The alignment we need to use is the max of the requested alignment for
1995  // the argument plus the alignment required by our access code below.
1996  unsigned AlignmentToUse =
1997  CGM.getDataLayout().getABITypeAlignment(ArgI.getCoerceToType());
1998  AlignmentToUse = std::max(AlignmentToUse,
1999  (unsigned)getContext().getDeclAlign(Arg).getQuantity());
2000 
2001  Alloca->setAlignment(AlignmentToUse);
2002  llvm::Value *V = Alloca;
2003  llvm::Value *Ptr = V; // Pointer to store into.
2004  CharUnits PtrAlign = CharUnits::fromQuantity(AlignmentToUse);
2005 
2006  // If the value is offset in memory, apply the offset now.
2007  if (unsigned Offs = ArgI.getDirectOffset()) {
2008  Ptr = Builder.CreateBitCast(Ptr, Builder.getInt8PtrTy());
2009  Ptr = Builder.CreateConstGEP1_32(Builder.getInt8Ty(), Ptr, Offs);
2010  Ptr = Builder.CreateBitCast(Ptr,
2011  llvm::PointerType::getUnqual(ArgI.getCoerceToType()));
2012  PtrAlign = PtrAlign.alignmentAtOffset(CharUnits::fromQuantity(Offs));
2013  }
2014 
2015  // Fast-isel and the optimizer generally like scalar values better than
2016  // FCAs, so we flatten them if this is safe to do for this argument.
2017  llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
2018  if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy &&
2019  STy->getNumElements() > 1) {
2020  uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy);
2021  llvm::Type *DstTy =
2022  cast<llvm::PointerType>(Ptr->getType())->getElementType();
2023  uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy);
2024 
2025  if (SrcSize <= DstSize) {
2026  Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy));
2027 
2028  assert(STy->getNumElements() == NumIRArgs);
2029  for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
2030  auto AI = FnArgs[FirstIRArg + i];
2031  AI->setName(Arg->getName() + ".coerce" + Twine(i));
2032  llvm::Value *EltPtr = Builder.CreateConstGEP2_32(STy, Ptr, 0, i);
2033  Builder.CreateStore(AI, EltPtr);
2034  }
2035  } else {
2036  llvm::AllocaInst *TempAlloca =
2037  CreateTempAlloca(ArgI.getCoerceToType(), "coerce");
2038  TempAlloca->setAlignment(AlignmentToUse);
2039  llvm::Value *TempV = TempAlloca;
2040 
2041  assert(STy->getNumElements() == NumIRArgs);
2042  for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
2043  auto AI = FnArgs[FirstIRArg + i];
2044  AI->setName(Arg->getName() + ".coerce" + Twine(i));
2045  llvm::Value *EltPtr =
2046  Builder.CreateConstGEP2_32(ArgI.getCoerceToType(), TempV, 0, i);
2047  Builder.CreateStore(AI, EltPtr);
2048  }
2049 
2050  Builder.CreateMemCpy(Ptr, TempV, DstSize, AlignmentToUse);
2051  }
2052  } else {
2053  // Simple case, just do a coerced store of the argument into the alloca.
2054  assert(NumIRArgs == 1);
2055  auto AI = FnArgs[FirstIRArg];
2056  AI->setName(Arg->getName() + ".coerce");
2057  CreateCoercedStore(AI, Ptr, /*DestIsVolatile=*/false, PtrAlign, *this);
2058  }
2059 
2060 
2061  // Match to what EmitParmDecl is expecting for this type.
2062  if (CodeGenFunction::hasScalarEvaluationKind(Ty)) {
2063  V = EmitLoadOfScalar(V, false, AlignmentToUse, Ty, Arg->getLocStart());
2064  if (isPromoted)
2065  V = emitArgumentDemotion(*this, Arg, V);
2066  ArgVals.push_back(ValueAndIsPtr(V, HaveValue));
2067  } else {
2068  ArgVals.push_back(ValueAndIsPtr(V, HavePointer));
2069  }
2070  break;
2071  }
2072 
2073  case ABIArgInfo::Expand: {
2074  // If this structure was expanded into multiple arguments then
2075  // we need to create a temporary and reconstruct it from the
2076  // arguments.
2077  llvm::AllocaInst *Alloca = CreateMemTemp(Ty);
2078  CharUnits Align = getContext().getDeclAlign(Arg);
2079  Alloca->setAlignment(Align.getQuantity());
2080  LValue LV = MakeAddrLValue(Alloca, Ty, Align);
2081  ArgVals.push_back(ValueAndIsPtr(Alloca, HavePointer));
2082 
2083  auto FnArgIter = FnArgs.begin() + FirstIRArg;
2084  ExpandTypeFromArgs(Ty, LV, FnArgIter);
2085  assert(FnArgIter == FnArgs.begin() + FirstIRArg + NumIRArgs);
2086  for (unsigned i = 0, e = NumIRArgs; i != e; ++i) {
2087  auto AI = FnArgs[FirstIRArg + i];
2088  AI->setName(Arg->getName() + "." + Twine(i));
2089  }
2090  break;
2091  }
2092 
2093  case ABIArgInfo::Ignore:
2094  assert(NumIRArgs == 0);
2095  // Initialize the local variable appropriately.
2096  if (!hasScalarEvaluationKind(Ty)) {
2097  ArgVals.push_back(ValueAndIsPtr(CreateMemTemp(Ty), HavePointer));
2098  } else {
2099  llvm::Value *U = llvm::UndefValue::get(ConvertType(Arg->getType()));
2100  ArgVals.push_back(ValueAndIsPtr(U, HaveValue));
2101  }
2102  break;
2103  }
2104  }
2105 
2106  if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
2107  for (int I = Args.size() - 1; I >= 0; --I)
2108  EmitParmDecl(*Args[I], ArgVals[I].getPointer(), ArgVals[I].getInt(),
2109  I + 1);
2110  } else {
2111  for (unsigned I = 0, E = Args.size(); I != E; ++I)
2112  EmitParmDecl(*Args[I], ArgVals[I].getPointer(), ArgVals[I].getInt(),
2113  I + 1);
2114  }
2115 }
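// Illustrative example, not part of CGCall.cpp (hypothetical declaration): a
// C99 'static'-sized array parameter. For a 4-byte int, the constant-array
// handling in the prologue above marks the IR argument dereferenceable(40).
//
//   void sum10(int v[static 10]);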
2116 
2117 static void eraseUnusedBitCasts(llvm::Instruction *insn) {
2118  while (insn->use_empty()) {
2119  llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
2120  if (!bitcast) return;
2121 
2122  // This is "safe" because we would have used a ConstantExpr otherwise.
2123  insn = cast<llvm::Instruction>(bitcast->getOperand(0));
2124  bitcast->eraseFromParent();
2125  }
2126 }
2127 
2128 /// Try to emit a fused autorelease of a return result.
2129 static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
2130  llvm::Value *result) {
2131  // The insertion point must immediately follow the result.
2132  llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
2133  if (BB->empty()) return nullptr;
2134  if (&BB->back() != result) return nullptr;
2135 
2136  llvm::Type *resultType = result->getType();
2137 
2138  // result is in a BasicBlock and is therefore an Instruction.
2139  llvm::Instruction *generator = cast<llvm::Instruction>(result);
2140 
2141  SmallVector<llvm::Instruction*,4> insnsToKill;
2142 
2143  // Look for:
2144  // %generator = bitcast %type1* %generator2 to %type2*
2145  while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
2146  // We would have emitted this as a constant if the operand weren't
2147  // an Instruction.
2148  generator = cast<llvm::Instruction>(bitcast->getOperand(0));
2149 
2150  // Require the generator to be immediately followed by the cast.
2151  if (generator->getNextNode() != bitcast)
2152  return nullptr;
2153 
2154  insnsToKill.push_back(bitcast);
2155  }
2156 
2157  // Look for:
2158  // %generator = call i8* @objc_retain(i8* %originalResult)
2159  // or
2160  // %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
2161  llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
2162  if (!call) return nullptr;
2163 
2164  bool doRetainAutorelease;
2165 
2166  if (call->getCalledValue() == CGF.CGM.getARCEntrypoints().objc_retain) {
2167  doRetainAutorelease = true;
2168  } else if (call->getCalledValue() == CGF.CGM.getARCEntrypoints()
2169  .objc_retainAutoreleasedReturnValue) {
2170  doRetainAutorelease = false;
2171 
2172  // If we emitted an assembly marker for this call (and the
2173  // ARCEntrypoints field should have been set if so), go looking
2174  // for that call. If we can't find it, we can't do this
2175  // optimization. But it should always be the immediately previous
2176  // instruction, unless we needed bitcasts around the call.
2177  if (CGF.CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker) {
2178  llvm::Instruction *prev = call->getPrevNode();
2179  assert(prev);
2180  if (isa<llvm::BitCastInst>(prev)) {
2181  prev = prev->getPrevNode();
2182  assert(prev);
2183  }
2184  assert(isa<llvm::CallInst>(prev));
2185  assert(cast<llvm::CallInst>(prev)->getCalledValue() ==
2186  CGF.CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker);
2187  insnsToKill.push_back(prev);
2188  }
2189  } else {
2190  return nullptr;
2191  }
2192 
2193  result = call->getArgOperand(0);
2194  insnsToKill.push_back(call);
2195 
2196  // Keep killing bitcasts, for sanity. Note that we no longer care
2197  // about precise ordering as long as there's exactly one use.
2198  while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
2199  if (!bitcast->hasOneUse()) break;
2200  insnsToKill.push_back(bitcast);
2201  result = bitcast->getOperand(0);
2202  }
2203 
2204  // Delete all the unnecessary instructions, from latest to earliest.
2205  for (SmallVectorImpl<llvm::Instruction*>::iterator
2206  i = insnsToKill.begin(), e = insnsToKill.end(); i != e; ++i)
2207  (*i)->eraseFromParent();
2208 
2209  // Do the fused retain/autorelease if we were asked to.
2210  if (doRetainAutorelease)
2211  result = CGF.EmitARCRetainAutoreleaseReturnValue(result);
2212 
2213  // Cast back to the result type.
2214  return CGF.Builder.CreateBitCast(result, resultType);
2215 }
2216 
2217 /// If this is a +1 of the value of an immutable 'self', remove it.
2218 static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
2219  llvm::Value *result) {
2220  // This is only applicable to a method with an immutable 'self'.
2221  const ObjCMethodDecl *method =
2222  dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl);
2223  if (!method) return nullptr;
2224  const VarDecl *self = method->getSelfDecl();
2225  if (!self->getType().isConstQualified()) return nullptr;
2226 
2227  // Look for a retain call.
2228  llvm::CallInst *retainCall =
2229  dyn_cast<llvm::CallInst>(result->stripPointerCasts());
2230  if (!retainCall ||
2231  retainCall->getCalledValue() != CGF.CGM.getARCEntrypoints().objc_retain)
2232  return nullptr;
2233 
2234  // Look for an ordinary load of 'self'.
2235  llvm::Value *retainedValue = retainCall->getArgOperand(0);
2236  llvm::LoadInst *load =
2237  dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
2238  if (!load || load->isAtomic() || load->isVolatile() ||
2239  load->getPointerOperand() != CGF.GetAddrOfLocalVar(self))
2240  return nullptr;
2241 
2242  // Okay! Burn it all down. This relies for correctness on the
2243  // assumption that the retain is emitted as part of the return and
2244  // that thereafter everything is used "linearly".
2245  llvm::Type *resultType = result->getType();
2246  eraseUnusedBitCasts(cast<llvm::Instruction>(result));
2247  assert(retainCall->use_empty());
2248  retainCall->eraseFromParent();
2249  eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue));
2250 
2251  return CGF.Builder.CreateBitCast(load, resultType);
2252 }
2253 
2254 /// Emit an ARC autorelease of the result of a function.
2255 ///
2256 /// \return the value to actually return from the function
2257 static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
2258  llvm::Value *result) {
2259  // If we're returning 'self', kill the initial retain. This is a
2260  // heuristic attempt to "encourage correctness" in the really unfortunate
2261  // case where we have a return of self during a dealloc and we desperately
2262  // need to avoid the possible autorelease.
2263  if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
2264  return self;
2265 
2266  // At -O0, try to emit a fused retain/autorelease.
2267  if (CGF.shouldUseFusedARCCalls())
2268  if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
2269  return fused;
2270 
2271  return CGF.EmitARCAutoreleaseReturnValue(result);
2272 }
2273 
2274 /// Heuristically search for a dominating store to the return-value slot.
2275 static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
2276  // If there are multiple uses of the return-value slot, just check
2277  // for something immediately preceding the IP. Sometimes this can
2278  // happen with how we generate implicit-returns; it can also happen
2279  // with noreturn cleanups.
2280  if (!CGF.ReturnValue->hasOneUse()) {
2281  llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
2282  if (IP->empty()) return nullptr;
2283  llvm::Instruction *I = &IP->back();
2284 
2285  // Skip lifetime markers
2286  for (llvm::BasicBlock::reverse_iterator II = IP->rbegin(),
2287  IE = IP->rend();
2288  II != IE; ++II) {
2289  if (llvm::IntrinsicInst *Intrinsic =
2290  dyn_cast<llvm::IntrinsicInst>(&*II)) {
2291  if (Intrinsic->getIntrinsicID() == llvm::Intrinsic::lifetime_end) {
2292  const llvm::Value *CastAddr = Intrinsic->getArgOperand(1);
2293  ++II;
2294  if (II == IE)
2295  break;
2296  if (isa<llvm::BitCastInst>(&*II) && (CastAddr == &*II))
2297  continue;
2298  }
2299  }
2300  I = &*II;
2301  break;
2302  }
2303 
2304  llvm::StoreInst *store = dyn_cast<llvm::StoreInst>(I);
2305  if (!store) return nullptr;
2306  if (store->getPointerOperand() != CGF.ReturnValue) return nullptr;
2307  assert(!store->isAtomic() && !store->isVolatile()); // see below
2308  return store;
2309  }
2310 
2311  llvm::StoreInst *store =
2312  dyn_cast<llvm::StoreInst>(CGF.ReturnValue->user_back());
2313  if (!store) return nullptr;
2314 
2315  // These aren't actually possible for non-coerced returns, and we
2316  // only care about non-coerced returns on this code path.
2317  assert(!store->isAtomic() && !store->isVolatile());
2318 
2319  // Now do a first-and-dirty dominance check: just walk up the
2320  // single-predecessors chain from the current insertion point.
2321  llvm::BasicBlock *StoreBB = store->getParent();
2322  llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
2323  while (IP != StoreBB) {
2324  if (!(IP = IP->getSinglePredecessor()))
2325  return nullptr;
2326  }
2327 
2328  // Okay, the store's basic block dominates the insertion point; we
2329  // can do our thing.
2330  return store;
2331 }
2332 
2333 void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
2334  bool EmitRetDbgLoc,
2335  SourceLocation EndLoc) {
2336  if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) {
2337  // Naked functions don't have epilogues.
2338  Builder.CreateUnreachable();
2339  return;
2340  }
2341 
2342  // Functions with no result always return void.
2343  if (!ReturnValue) {
2344  Builder.CreateRetVoid();
2345  return;
2346  }
2347 
2348  llvm::DebugLoc RetDbgLoc;
2349  llvm::Value *RV = nullptr;
2350  QualType RetTy = FI.getReturnType();
2351  const ABIArgInfo &RetAI = FI.getReturnInfo();
2352 
2353  switch (RetAI.getKind()) {
2354  case ABIArgInfo::InAlloca:
2355  // Aggregates get evaluated directly into the destination. Sometimes we
2356  // need to return the sret value in a register, though.
2357  assert(hasAggregateEvaluationKind(RetTy));
2358  if (RetAI.getInAllocaSRet()) {
2359  llvm::Function::arg_iterator EI = CurFn->arg_end();
2360  --EI;
2361  llvm::Value *ArgStruct = EI;
2362  llvm::Value *SRet = Builder.CreateStructGEP(
2363  nullptr, ArgStruct, RetAI.getInAllocaFieldIndex());
2364  RV = Builder.CreateLoad(SRet, "sret");
2365  }
2366  break;
2367 
2368  case ABIArgInfo::Indirect: {
2369  auto AI = CurFn->arg_begin();
2370  if (RetAI.isSRetAfterThis())
2371  ++AI;
2372  switch (getEvaluationKind(RetTy)) {
2373  case TEK_Complex: {
2374  ComplexPairTy RT =
2375  EmitLoadOfComplex(MakeNaturalAlignAddrLValue(ReturnValue, RetTy),
2376  EndLoc);
2377  EmitStoreOfComplex(RT, MakeNaturalAlignAddrLValue(AI, RetTy),
2378  /*isInit*/ true);
2379  break;
2380  }
2381  case TEK_Aggregate:
2382  // Do nothing; aggregates get evaluated directly into the destination.
2383  break;
2384  case TEK_Scalar:
2385  EmitStoreOfScalar(Builder.CreateLoad(ReturnValue),
2386  MakeNaturalAlignAddrLValue(AI, RetTy),
2387  /*isInit*/ true);
2388  break;
2389  }
2390  break;
2391  }
2392 
2393  case ABIArgInfo::Extend:
2394  case ABIArgInfo::Direct:
2395  if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
2396  RetAI.getDirectOffset() == 0) {
2397  // The internal return value temp always will have pointer-to-return-type
2398  // type, just do a load.
2399 
2400  // If there is a dominating store to ReturnValue, we can elide
2401  // the load, zap the store, and usually zap the alloca.
2402  if (llvm::StoreInst *SI =
2403  findDominatingStoreToReturnValue(*this)) {
2404  // Reuse the debug location from the store unless there is
2405  // cleanup code to be emitted between the store and return
2406  // instruction.
2407  if (EmitRetDbgLoc && !AutoreleaseResult)
2408  RetDbgLoc = SI->getDebugLoc();
2409  // Get the stored value and nuke the now-dead store.
2410  RV = SI->getValueOperand();
2411  SI->eraseFromParent();
2412 
2413  // If that was the only use of the return value, nuke it as well now.
2414  if (ReturnValue->use_empty() && isa<llvm::AllocaInst>(ReturnValue)) {
2415  cast<llvm::AllocaInst>(ReturnValue)->eraseFromParent();
2416  ReturnValue = nullptr;
2417  }
2418 
2419  // Otherwise, we have to do a simple load.
2420  } else {
2421  RV = Builder.CreateLoad(ReturnValue);
2422  }
2423  } else {
2424  llvm::Value *V = ReturnValue;
2425  CharUnits Align = getContext().getTypeAlignInChars(RetTy);
2426  // If the value is offset in memory, apply the offset now.
2427  if (unsigned Offs = RetAI.getDirectOffset()) {
2428  V = Builder.CreateBitCast(V, Builder.getInt8PtrTy());
2429  V = Builder.CreateConstGEP1_32(Builder.getInt8Ty(), V, Offs);
2430  V = Builder.CreateBitCast(V,
2431  llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
2432  Align = Align.alignmentAtOffset(CharUnits::fromQuantity(Offs));
2433  }
2434 
2435  RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), Align, *this);
2436  }
2437 
2438  // In ARC, end functions that return a retainable type with a call
2439  // to objc_autoreleaseReturnValue.
2440  if (AutoreleaseResult) {
2441  assert(getLangOpts().ObjCAutoRefCount &&
2442  !FI.isReturnsRetained() &&
2443  RetTy->isObjCRetainableType());
2444  RV = emitAutoreleaseOfResult(*this, RV);
2445  }
2446 
2447  break;
2448 
2449  case ABIArgInfo::Ignore:
2450  break;
2451 
2452  case ABIArgInfo::Expand:
2453  llvm_unreachable("Invalid ABI kind for return argument");
2454  }
2455 
2456  llvm::Instruction *Ret;
2457  if (RV) {
2458  if (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute)) {
2459  if (auto RetNNAttr = CurGD.getDecl()->getAttr<ReturnsNonNullAttr>()) {
2460  SanitizerScope SanScope(this);
2461  llvm::Value *Cond = Builder.CreateICmpNE(
2462  RV, llvm::Constant::getNullValue(RV->getType()));
2463  llvm::Constant *StaticData[] = {
2464  EmitCheckSourceLocation(EndLoc),
2465  EmitCheckSourceLocation(RetNNAttr->getLocation()),
2466  };
2467  EmitCheck(std::make_pair(Cond, SanitizerKind::ReturnsNonnullAttribute),
2468  "nonnull_return", StaticData, None);
2469  }
2470  }
2471  Ret = Builder.CreateRet(RV);
2472  } else {
2473  Ret = Builder.CreateRetVoid();
2474  }
2475 
2476  if (RetDbgLoc)
2477  Ret->setDebugLoc(std::move(RetDbgLoc));
2478 }
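// Illustrative example, not part of CGCall.cpp (hypothetical declaration):
// with -fsanitize=returns-nonnull-attribute, the epilogue above emits an
// ICmpNE-based check of the return value of a function such as
//
//   __attribute__((returns_nonnull)) char *dup_or_die(const char *s);
//
// before the final ret instruction.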
2479 
2480 static bool isInAllocaArgument(CGCXXABI &ABI, QualType type) {
2481  const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
2482  return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory;
2483 }
2484 
2485 static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF, QualType Ty) {
2486  // FIXME: Generate IR in one pass, rather than going back and fixing up these
2487  // placeholders.
2488  llvm::Type *IRTy = CGF.ConvertTypeForMem(Ty);
2489  llvm::Value *Placeholder =
2490  llvm::UndefValue::get(IRTy->getPointerTo()->getPointerTo());
2491  Placeholder = CGF.Builder.CreateLoad(Placeholder);
2492  return AggValueSlot::forAddr(Placeholder, CharUnits::Zero(),
2493  Ty.getQualifiers(),
2494  AggValueSlot::IsNotDestructed,
2495  AggValueSlot::DoesNotNeedGCBarriers,
2496  AggValueSlot::IsNotAliased);
2497 }
2498 
2499 void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
2500  const VarDecl *param,
2501  SourceLocation loc) {
2502  // StartFunction converted the ABI-lowered parameter(s) into a
2503  // local alloca. We need to turn that into an r-value suitable
2504  // for EmitCall.
2505  llvm::Value *local = GetAddrOfLocalVar(param);
2506 
2507  QualType type = param->getType();
2508 
2509  // For the most part, we just need to load the alloca, except:
2510  // 1) aggregate r-values are actually pointers to temporaries, and
2511  // 2) references to non-scalars are pointers directly to the aggregate.
2512  // I don't know why references to scalars are different here.
2513  if (const ReferenceType *ref = type->getAs<ReferenceType>()) {
2514  if (!hasScalarEvaluationKind(ref->getPointeeType()))
2515  return args.add(RValue::getAggregate(local), type);
2516 
2517  // Locals which are references to scalars are represented
2518  // with allocas holding the pointer.
2519  return args.add(RValue::get(Builder.CreateLoad(local)), type);
2520  }
2521 
2522  assert(!isInAllocaArgument(CGM.getCXXABI(), type) &&
2523  "cannot emit delegate call arguments for inalloca arguments!");
2524 
2525  args.add(convertTempToRValue(local, type, loc), type);
2526 }
2527 
2528 static bool isProvablyNull(llvm::Value *addr) {
2529  return isa<llvm::ConstantPointerNull>(addr);
2530 }
2531 
2532 static bool isProvablyNonNull(llvm::Value *addr) {
2533  return isa<llvm::AllocaInst>(addr);
2534 }
2535 
2536 /// Emit the actual writing-back of a writeback.
2537 static void emitWriteback(CodeGenFunction &CGF,
2538  const CallArgList::Writeback &writeback) {
2539  const LValue &srcLV = writeback.Source;
2540  llvm::Value *srcAddr = srcLV.getAddress();
2541  assert(!isProvablyNull(srcAddr) &&
2542  "shouldn't have writeback for provably null argument");
2543 
2544  llvm::BasicBlock *contBB = nullptr;
2545 
2546  // If the argument wasn't provably non-null, we need to null check
2547  // before doing the store.
2548  bool provablyNonNull = isProvablyNonNull(srcAddr);
2549  if (!provablyNonNull) {
2550  llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
2551  contBB = CGF.createBasicBlock("icr.done");
2552 
2553  llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull");
2554  CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
2555  CGF.EmitBlock(writebackBB);
2556  }
2557 
2558  // Load the value to writeback.
2559  llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary);
2560 
2561  // Cast it back, in case we're writing an id to a Foo* or something.
2562  value = CGF.Builder.CreateBitCast(value,
2563  cast<llvm::PointerType>(srcAddr->getType())->getElementType(),
2564  "icr.writeback-cast");
2565 
2566  // Perform the writeback.
2567 
2568  // If we have a "to use" value, it's something we need to emit a use
2569  // of. This has to be carefully threaded in: if it's done after the
2570  // release it's potentially undefined behavior (and the optimizer
2571  // will ignore it), and if it happens before the retain then the
2572  // optimizer could move the release there.
2573  if (writeback.ToUse) {
2574  assert(srcLV.getObjCLifetime() == Qualifiers::OCL_Strong);
2575 
2576  // Retain the new value. No need to block-copy here: the block's
2577  // being passed up the stack.
2578  value = CGF.EmitARCRetainNonBlock(value);
2579 
2580  // Emit the intrinsic use here.
2581  CGF.EmitARCIntrinsicUse(writeback.ToUse);
2582 
2583  // Load the old value (primitively).
2584  llvm::Value *oldValue = CGF.EmitLoadOfScalar(srcLV, SourceLocation());
2585 
2586  // Put the new value in place (primitively).
2587  CGF.EmitStoreOfScalar(value, srcLV, /*init*/ false);
2588 
2589  // Release the old value.
2590  CGF.EmitARCRelease(oldValue, srcLV.isARCPreciseLifetime());
2591 
2592  // Otherwise, we can just do a normal lvalue store.
2593  } else {
2594  CGF.EmitStoreThroughLValue(RValue::get(value), srcLV);
2595  }
2596 
2597  // Jump to the continuation block.
2598  if (!provablyNonNull)
2599  CGF.EmitBlock(contBB);
2600 }
2601 
2602 static void emitWritebacks(CodeGenFunction &CGF,
2603  const CallArgList &args) {
2604  for (const auto &I : args.writebacks())
2605  emitWriteback(CGF, I);
2606 }
2607 
2608 static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF,
2609  const CallArgList &CallArgs) {
2610  assert(CGF.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee());
2611  ArrayRef<CallArgList::CallArgCleanup> Cleanups =
2612  CallArgs.getCleanupsToDeactivate();
2613  // Iterate in reverse to increase the likelihood of popping the cleanup.
2614  for (ArrayRef<CallArgList::CallArgCleanup>::reverse_iterator
2615  I = Cleanups.rbegin(), E = Cleanups.rend(); I != E; ++I) {
2616  CGF.DeactivateCleanupBlock(I->Cleanup, I->IsActiveIP);
2617  I->IsActiveIP->eraseFromParent();
2618  }
2619 }
2620 
2621 static const Expr *maybeGetUnaryAddrOfOperand(const Expr *E) {
2622  if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E->IgnoreParens()))
2623  if (uop->getOpcode() == UO_AddrOf)
2624  return uop->getSubExpr();
2625  return nullptr;
2626 }
2627 
2628 /// Emit an argument that's being passed call-by-writeback. That is,
2629 /// we are passing the address of
2630 static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
2631  const ObjCIndirectCopyRestoreExpr *CRE) {
2632  LValue srcLV;
2633 
2634  // Make an optimistic effort to emit the address as an l-value.
2635  // This can fail if the argument expression is more complicated.
2636  if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(CRE->getSubExpr())) {
2637  srcLV = CGF.EmitLValue(lvExpr);
2638 
2639  // Otherwise, just emit it as a scalar.
2640  } else {
2641  llvm::Value *srcAddr = CGF.EmitScalarExpr(CRE->getSubExpr());
2642 
2643  QualType srcAddrType =
2644  CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
2645  srcLV = CGF.MakeNaturalAlignAddrLValue(srcAddr, srcAddrType);
2646  }
2647  llvm::Value *srcAddr = srcLV.getAddress();
2648 
2649  // The dest and src types don't necessarily match in LLVM terms
2650  // because of the crazy ObjC compatibility rules.
2651 
2652  llvm::PointerType *destType =
2653  cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));
2654 
2655  // If the address is a constant null, just pass the appropriate null.
2656  if (isProvablyNull(srcAddr)) {
2657  args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
2658  CRE->getType());
2659  return;
2660  }
2661 
2662  // Create the temporary.
2663  llvm::Value *temp = CGF.CreateTempAlloca(destType->getElementType(),
2664  "icr.temp");
2665  // Loading an l-value can introduce a cleanup if the l-value is __weak,
2666  // and that cleanup will be conditional if we can't prove that the l-value
2667  // isn't null, so we need to register a dominating point so that the cleanups
2668  // system will make valid IR.
2669  CodeGenFunction::ConditionalEvaluation condEval(CGF);
2670 
2671  // Zero-initialize it if we're not doing a copy-initialization.
2672  bool shouldCopy = CRE->shouldCopy();
2673  if (!shouldCopy) {
2674  llvm::Value *null =
2675  llvm::ConstantPointerNull::get(
2676  cast<llvm::PointerType>(destType->getElementType()));
2677  CGF.Builder.CreateStore(null, temp);
2678  }
2679 
2680  llvm::BasicBlock *contBB = nullptr;
2681  llvm::BasicBlock *originBB = nullptr;
2682 
2683  // If the address is *not* known to be non-null, we need to switch.
2684  llvm::Value *finalArgument;
2685 
2686  bool provablyNonNull = isProvablyNonNull(srcAddr);
2687  if (provablyNonNull) {
2688  finalArgument = temp;
2689  } else {
2690  llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull");
2691 
2692  finalArgument = CGF.Builder.CreateSelect(isNull,
2693  llvm::ConstantPointerNull::get(destType),
2694  temp, "icr.argument");
2695 
2696  // If we need to copy, then the load has to be conditional, which
2697  // means we need control flow.
2698  if (shouldCopy) {
2699  originBB = CGF.Builder.GetInsertBlock();
2700  contBB = CGF.createBasicBlock("icr.cont");
2701  llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy");
2702  CGF.Builder.CreateCondBr(isNull, contBB, copyBB);
2703  CGF.EmitBlock(copyBB);
2704  condEval.begin(CGF);
2705  }
2706  }
2707 
2708  llvm::Value *valueToUse = nullptr;
2709 
2710  // Perform a copy if necessary.
2711  if (shouldCopy) {
2712  RValue srcRV = CGF.EmitLoadOfLValue(srcLV, SourceLocation());
2713  assert(srcRV.isScalar());
2714 
2715  llvm::Value *src = srcRV.getScalarVal();
2716  src = CGF.Builder.CreateBitCast(src, destType->getElementType(),
2717  "icr.cast");
2718 
2719  // Use an ordinary store, not a store-to-lvalue.
2720  CGF.Builder.CreateStore(src, temp);
2721 
2722  // If optimization is enabled, and the value was held in a
2723  // __strong variable, we need to tell the optimizer that this
2724  // value has to stay alive until we're doing the store back.
2725  // This is because the temporary is effectively unretained,
2726  // and so otherwise we can violate the high-level semantics.
2727  if (CGF.CGM.getCodeGenOpts().OptimizationLevel != 0 &&
2728  srcLV.getObjCLifetime() == Qualifiers::OCL_Strong) {
2729  valueToUse = src;
2730  }
2731  }
2732 
2733  // Finish the control flow if we needed it.
2734  if (shouldCopy && !provablyNonNull) {
2735  llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock();
2736  CGF.EmitBlock(contBB);
2737 
2738  // Make a phi for the value to intrinsically use.
2739  if (valueToUse) {
2740  llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2,
2741  "icr.to-use");
2742  phiToUse->addIncoming(valueToUse, copyBB);
2743  phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()),
2744  originBB);
2745  valueToUse = phiToUse;
2746  }
2747 
2748  condEval.end(CGF);
2749  }
2750 
2751  args.addWriteback(srcLV, temp, valueToUse);
2752  args.add(RValue::get(finalArgument), CRE->getType());
2753 }
2754 
2755 void CallArgList::allocateArgumentMemory(CodeGenFunction &CGF) {
2756  assert(!StackBase && !StackCleanup.isValid());
2757 
2758  // Save the stack.
2759  llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stacksave);
2760  StackBase = CGF.Builder.CreateCall(F, {}, "inalloca.save");
2761 
2762  // Control gets really tied up in landing pads, so we have to spill the
2763  // stacksave to an alloca to avoid violating SSA form.
2764  // TODO: This is dead if we never emit the cleanup. We should create the
2765  // alloca and store lazily on the first cleanup emission.
2766  StackBaseMem = CGF.CreateTempAlloca(CGF.Int8PtrTy, "inalloca.spmem");
2767  CGF.Builder.CreateStore(StackBase, StackBaseMem);
2768  CGF.pushStackRestore(EHCleanup, StackBaseMem);
2769  StackCleanup = CGF.EHStack.getInnermostEHScope();
2770  assert(StackCleanup.isValid());
2771 }
2772 
2773 void CallArgList::freeArgumentMemory(CodeGenFunction &CGF) const {
2774  if (StackBase) {
2775  CGF.DeactivateCleanupBlock(StackCleanup, StackBase);
2776  llvm::Value *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
2777  // We could load StackBase from StackBaseMem, but in the non-exceptional
2778  // case we can skip it.
2779  CGF.Builder.CreateCall(F, StackBase);
2780  }
2781 }
2782 
2783 void CodeGenFunction::EmitNonNullArgCheck(RValue RV, QualType ArgType,
2784  SourceLocation ArgLoc,
2785  const FunctionDecl *FD,
2786  unsigned ParmNum) {
2787  if (!SanOpts.has(SanitizerKind::NonnullAttribute) || !FD)
2788  return;
2789  auto PVD = ParmNum < FD->getNumParams() ? FD->getParamDecl(ParmNum) : nullptr;
2790  unsigned ArgNo = PVD ? PVD->getFunctionScopeIndex() : ParmNum;
2791  auto NNAttr = getNonNullAttr(FD, PVD, ArgType, ArgNo);
2792  if (!NNAttr)
2793  return;
2794  SanitizerScope SanScope(this);
2795  assert(RV.isScalar());
2796  llvm::Value *V = RV.getScalarVal();
2797  llvm::Value *Cond =
2798  Builder.CreateICmpNE(V, llvm::Constant::getNullValue(V->getType()));
2799  llvm::Constant *StaticData[] = {
2800  EmitCheckSourceLocation(ArgLoc),
2801  EmitCheckSourceLocation(NNAttr->getLocation()),
2802  llvm::ConstantInt::get(Int32Ty, ArgNo + 1),
2803  };
2804  EmitCheck(std::make_pair(Cond, SanitizerKind::NonnullAttribute),
2805  "nonnull_arg", StaticData, None);
2806 }
2807 
2808 void CodeGenFunction::EmitCallArgs(CallArgList &Args,
2809  ArrayRef<QualType> ArgTypes,
2810  CallExpr::const_arg_iterator ArgBeg,
2811  CallExpr::const_arg_iterator ArgEnd,
2812  const FunctionDecl *CalleeDecl,
2813  unsigned ParamsToSkip) {
2814  // We *have* to evaluate arguments from right to left in the MS C++ ABI,
2815  // because arguments are destroyed left to right in the callee.
2816  if (CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
2817  // Insert a stack save if we're going to need any inalloca args.
2818  bool HasInAllocaArgs = false;
2819  for (ArrayRef<QualType>::iterator I = ArgTypes.begin(), E = ArgTypes.end();
2820  I != E && !HasInAllocaArgs; ++I)
2821  HasInAllocaArgs = isInAllocaArgument(CGM.getCXXABI(), *I);
2822  if (HasInAllocaArgs) {
2823  assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
2824  Args.allocateArgumentMemory(*this);
2825  }
2826 
2827  // Evaluate each argument.
2828  size_t CallArgsStart = Args.size();
2829  for (int I = ArgTypes.size() - 1; I >= 0; --I) {
2830  CallExpr::const_arg_iterator Arg = ArgBeg + I;
2831  EmitCallArg(Args, *Arg, ArgTypes[I]);
2832  EmitNonNullArgCheck(Args.back().RV, ArgTypes[I], Arg->getExprLoc(),
2833  CalleeDecl, ParamsToSkip + I);
2834  }
2835 
2836  // Un-reverse the arguments we just evaluated so they match up with the LLVM
2837  // IR function.
2838  std::reverse(Args.begin() + CallArgsStart, Args.end());
2839  return;
2840  }
2841 
2842  for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) {
2843  CallExpr::const_arg_iterator Arg = ArgBeg + I;
2844  assert(Arg != ArgEnd);
2845  EmitCallArg(Args, *Arg, ArgTypes[I]);
2846  EmitNonNullArgCheck(Args.back().RV, ArgTypes[I], Arg->getExprLoc(),
2847  CalleeDecl, ParamsToSkip + I);
2848  }
2849 }
2850 
2851 namespace {
2852 
2853 struct DestroyUnpassedArg : EHScopeStack::Cleanup {
2854  DestroyUnpassedArg(llvm::Value *Addr, QualType Ty)
2855  : Addr(Addr), Ty(Ty) {}
2856 
2857  llvm::Value *Addr;
2858  QualType Ty;
2859 
2860  void Emit(CodeGenFunction &CGF, Flags flags) override {
2861  const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor();
2862  assert(!Dtor->isTrivial());
2863  CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*for vbase*/ false,
2864  /*Delegating=*/false, Addr);
2865  }
2866 };
2867 
2868 }
2869 
2870 struct DisableDebugLocationUpdates {
2871  CodeGenFunction &CGF;
2872  bool disabledDebugInfo;
2873  DisableDebugLocationUpdates(CodeGenFunction &CGF, const Expr *E) : CGF(CGF) {
2874  if ((disabledDebugInfo = isa<CXXDefaultArgExpr>(E) && CGF.getDebugInfo()))
2875  CGF.disableDebugInfo();
2876  }
2877  ~DisableDebugLocationUpdates() {
2878  if (disabledDebugInfo)
2879  CGF.enableDebugInfo();
2880  }
2881 };
2882 
2883 void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
2884  QualType type) {
2885  DisableDebugLocationUpdates Dis(*this, E);
2886  if (const ObjCIndirectCopyRestoreExpr *CRE
2887  = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
2888  assert(getLangOpts().ObjCAutoRefCount);
2889  assert(getContext().hasSameType(E->getType(), type));
2890  return emitWritebackArg(*this, args, CRE);
2891  }
2892 
2893  assert(type->isReferenceType() == E->isGLValue() &&
2894  "reference binding to unmaterialized r-value!");
2895 
2896  if (E->isGLValue()) {
2897  assert(E->getObjectKind() == OK_Ordinary);
2898  return args.add(EmitReferenceBindingToExpr(E), type);
2899  }
2900 
2901  bool HasAggregateEvalKind = hasAggregateEvaluationKind(type);
2902 
2903  // In the Microsoft C++ ABI, aggregate arguments are destructed by the callee.
2904  // However, we still have to push an EH-only cleanup in case we unwind before
2905  // we make it to the call.
2906  if (HasAggregateEvalKind &&
2907  CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
2908  // If we're using inalloca, use the argument memory. Otherwise, use a
2909  // temporary.
2910  AggValueSlot Slot;
2911  if (args.isUsingInAlloca())
2912  Slot = createPlaceholderSlot(*this, type);
2913  else
2914  Slot = CreateAggTemp(type, "agg.tmp");
2915 
2916  const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
2917  bool DestroyedInCallee =
2918  RD && RD->hasNonTrivialDestructor() &&
2919  CGM.getCXXABI().getRecordArgABI(RD) == CGCXXABI::RAA_Default;
2920  if (DestroyedInCallee)
2921  Slot.setExternallyDestructed();
2922 
2923  EmitAggExpr(E, Slot);
2924  RValue RV = Slot.asRValue();
2925  args.add(RV, type);
2926 
2927  if (DestroyedInCallee) {
2928  // Create a no-op GEP between the placeholder and the cleanup so we can
2929  // RAUW it successfully. It also serves as a marker of the first
2930  // instruction where the cleanup is active.
2931  pushFullExprCleanup<DestroyUnpassedArg>(EHCleanup, Slot.getAddr(), type);
2932  // This unreachable is a temporary marker which will be removed later.
2933  llvm::Instruction *IsActive = Builder.CreateUnreachable();
2934  args.addArgCleanupDeactivation(EHStack.getInnermostEHScope(), IsActive);
2935  }
2936  return;
2937  }
2938 
2939  if (HasAggregateEvalKind && isa<ImplicitCastExpr>(E) &&
2940  cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
2941  LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
2942  assert(L.isSimple());
2943  if (L.getAlignment() >= getContext().getTypeAlignInChars(type)) {
2944  args.add(L.asAggregateRValue(), type, /*NeedsCopy*/true);
2945  } else {
2946  // We can't represent a misaligned lvalue in the CallArgList, so copy
2947  // to an aligned temporary now.
2948  llvm::Value *tmp = CreateMemTemp(type);
2949  EmitAggregateCopy(tmp, L.getAddress(), type, L.isVolatile(),
2950  L.getAlignment());
2951  args.add(RValue::getAggregate(tmp), type);
2952  }
2953  return;
2954  }
2955 
2956  args.add(EmitAnyExprToTemp(E), type);
2957 }
2958 
2959 QualType CodeGenFunction::getVarArgType(const Expr *Arg) {
2960  // System headers on Windows define NULL to 0 instead of 0LL on Win64. MSVC
2961  // implicitly widens null pointer constants that are arguments to varargs
2962  // functions to pointer-sized ints.
2963  if (!getTarget().getTriple().isOSWindows())
2964  return Arg->getType();
2965 
2966  if (Arg->getType()->isIntegerType() &&
2967  getContext().getTypeSize(Arg->getType()) <
2968  getContext().getTargetInfo().getPointerWidth(0) &&
2969  Arg->isNullPointerConstant(getContext(),
2970  Expr::NPC_ValueDependentIsNotNull)) {
2971  return getContext().getIntPtrType();
2972  }
2973 
2974  return Arg->getType();
2975 }
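// Illustrative example, not part of CGCall.cpp: on Win64, where NULL is
// defined as plain 0, a varargs call such as
//
//   printf("%p\n", NULL);
//
// has its null-pointer-constant argument widened to the pointer-sized
// getIntPtrType() above, matching MSVC's behavior.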
2976 
2977 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
2978 // optimizer it can aggressively ignore unwind edges.
2979 void
2980 CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
2981  if (CGM.getCodeGenOpts().OptimizationLevel != 0 &&
2982  !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
2983  Inst->setMetadata("clang.arc.no_objc_arc_exceptions",
2984  CGM.getNoObjCARCExceptionsMetadata());
2985 }
2986 
2987 /// Emits a call to the given no-arguments nounwind runtime function.
2988 llvm::CallInst *
2989 CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee,
2990  const llvm::Twine &name) {
2991  return EmitNounwindRuntimeCall(callee, None, name);
2992 }
2993 
2994 /// Emits a call to the given nounwind runtime function.
2995 llvm::CallInst *
2996 CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee,
2997  ArrayRef<llvm::Value*> args,
2998  const llvm::Twine &name) {
2999  llvm::CallInst *call = EmitRuntimeCall(callee, args, name);
3000  call->setDoesNotThrow();
3001  return call;
3002 }
3003 
3004 /// Emits a simple call (never an invoke) to the given no-arguments
3005 /// runtime function.
3006 llvm::CallInst *
3007 CodeGenFunction::EmitRuntimeCall(llvm::Value *callee,
3008  const llvm::Twine &name) {
3009  return EmitRuntimeCall(callee, None, name);
3010 }
3011 
3012 /// Emits a simple call (never an invoke) to the given runtime
3013 /// function.
3014 llvm::CallInst *
3015 CodeGenFunction::EmitRuntimeCall(llvm::Value *callee,
3016  ArrayRef<llvm::Value*> args,
3017  const llvm::Twine &name) {
3018  llvm::CallInst *call = Builder.CreateCall(callee, args, name);
3019  call->setCallingConv(getRuntimeCC());
3020  return call;
3021 }
3022 
3023 /// Emits a call or invoke to the given noreturn runtime function.
3024 void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke(llvm::Value *callee,
3025  ArrayRef<llvm::Value*> args) {
3026  if (getInvokeDest()) {
3027  llvm::InvokeInst *invoke =
3028  Builder.CreateInvoke(callee,
3029  getUnreachableBlock(),
3030  getInvokeDest(),
3031  args);
3032  invoke->setDoesNotReturn();
3033  invoke->setCallingConv(getRuntimeCC());
3034  } else {
3035  llvm::CallInst *call = Builder.CreateCall(callee, args);
3036  call->setDoesNotReturn();
3037  call->setCallingConv(getRuntimeCC());
3038  Builder.CreateUnreachable();
3039  }
3040 }
3041 
3042 /// Emits a call or invoke instruction to the given nullary runtime
3043 /// function.
3044 llvm::CallSite
3045 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee,
3046  const Twine &name) {
3047  return EmitRuntimeCallOrInvoke(callee, None, name);
3048 }
3049 
3050 /// Emits a call or invoke instruction to the given runtime function.
3051 llvm::CallSite
3052 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee,
3053  ArrayRef<llvm::Value*> args,
3054  const Twine &name) {
3055  llvm::CallSite callSite = EmitCallOrInvoke(callee, args, name);
3056  callSite.setCallingConv(getRuntimeCC());
3057  return callSite;
3058 }
3059 
3060 llvm::CallSite
3061 CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
3062  const Twine &Name) {
3063  return EmitCallOrInvoke(Callee, None, Name);
3064 }
3065 
3066 /// Emits a call or invoke instruction to the given function, depending
3067 /// on the current state of the EH stack.
3068 llvm::CallSite
3069 CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
3070  ArrayRef<llvm::Value *> Args,
3071  const Twine &Name) {
3072  llvm::BasicBlock *InvokeDest = getInvokeDest();
3073 
3074  llvm::Instruction *Inst;
3075  if (!InvokeDest)
3076  Inst = Builder.CreateCall(Callee, Args, Name);
3077  else {
3078  llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
3079  Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, Name);
3080  EmitBlock(ContBB);
3081  }
3082 
3083  // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
3084  // optimizer it can aggressively ignore unwind edges.
3085  if (CGM.getLangOpts().ObjCAutoRefCount)
3086  AddObjCARCExceptionMetadata(Inst);
3087 
3088  return llvm::CallSite(Inst);
3089 }
3090 
3091 /// \brief Store a non-aggregate value to an address to initialize it. For
3092 /// initialization, a non-atomic store will be used.
3093 static void EmitInitStoreOfNonAggregate(CodeGenFunction &CGF, RValue Src,
3094  LValue Dst) {
3095  if (Src.isScalar())
3096  CGF.EmitStoreOfScalar(Src.getScalarVal(), Dst, /*init=*/true);
3097  else
3098  CGF.EmitStoreOfComplex(Src.getComplexVal(), Dst, /*init=*/true);
3099 }
3100 
3101 void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old,
3102  llvm::Value *New) {
3103  DeferredReplacements.push_back(std::make_pair(Old, New));
3104 }
3105 
3106 RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
3107  llvm::Value *Callee,
3108  ReturnValueSlot ReturnValue,
3109  const CallArgList &CallArgs,
3110  const Decl *TargetDecl,
3111  llvm::Instruction **callOrInvoke) {
3112  // FIXME: We no longer need the types from CallArgs; lift up and simplify.
3113 
3114  // Handle struct-return functions by passing a pointer to the
3115  // location that we would like to return into.
3116  QualType RetTy = CallInfo.getReturnType();
3117  const ABIArgInfo &RetAI = CallInfo.getReturnInfo();
3118 
3119  llvm::FunctionType *IRFuncTy =
3120  cast<llvm::FunctionType>(
3121  cast<llvm::PointerType>(Callee->getType())->getElementType());
3122 
3123  // If we're using inalloca, insert the allocation after the stack save.
3124  // FIXME: Do this earlier rather than hacking it in here!
3125  llvm::AllocaInst *ArgMemory = nullptr;
3126  if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) {
3127  llvm::Instruction *IP = CallArgs.getStackBase();
3128  llvm::AllocaInst *AI;
3129  if (IP) {
3130  IP = IP->getNextNode();
3131  AI = new llvm::AllocaInst(ArgStruct, "argmem", IP);
3132  } else {
3133  AI = CreateTempAlloca(ArgStruct, "argmem");
3134  }
3135  AI->setUsedWithInAlloca(true);
3136  assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca());
3137  ArgMemory = AI;
3138  }
3139 
3140  ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo);
3141  SmallVector<llvm::Value *, 16> IRCallArgs(IRFunctionArgs.totalIRArgs());
3142 
3143  // If the call returns a temporary with struct return, create a temporary
3144  // alloca to hold the result, unless one is given to us.
3145  llvm::Value *SRetPtr = nullptr;
3146  size_t UnusedReturnSize = 0;
3147  if (RetAI.isIndirect() || RetAI.isInAlloca()) {
3148  SRetPtr = ReturnValue.getValue();
3149  if (!SRetPtr) {
3150  SRetPtr = CreateMemTemp(RetTy);
3151  if (HaveInsertPoint() && ReturnValue.isUnused()) {
3152  uint64_t size =
3153  CGM.getDataLayout().getTypeAllocSize(ConvertTypeForMem(RetTy));
3154  if (EmitLifetimeStart(size, SRetPtr))
3155  UnusedReturnSize = size;
3156  }
3157  }
3158  if (IRFunctionArgs.hasSRetArg()) {
3159  IRCallArgs[IRFunctionArgs.getSRetArgNo()] = SRetPtr;
3160  } else {
3161  llvm::Value *Addr =
3162  Builder.CreateStructGEP(ArgMemory->getAllocatedType(), ArgMemory,
3163  RetAI.getInAllocaFieldIndex());
3164  Builder.CreateStore(SRetPtr, Addr);
3165  }
3166  }
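// Illustrative sketch (editor's addition): for a function returning a large
// aggregate indirectly, the temporary created above typically becomes the
// sret argument of the call, e.g.
//   call void @f(%struct.S* sret %agg.tmp)
// unless the ABI instead places the return slot inside the inalloca struct.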
3167 
3168  assert(CallInfo.arg_size() == CallArgs.size() &&
3169  "Mismatch between function signature & arguments.");
3170  unsigned ArgNo = 0;
3171  CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
3172  for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
3173  I != E; ++I, ++info_it, ++ArgNo) {
3174  const ABIArgInfo &ArgInfo = info_it->info;
3175  RValue RV = I->RV;
3176 
3177  CharUnits TypeAlign = getContext().getTypeAlignInChars(I->Ty);
3178 
3179  // Insert a padding argument to ensure proper alignment.
3180  if (IRFunctionArgs.hasPaddingArg(ArgNo))
3181  IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
3182  llvm::UndefValue::get(ArgInfo.getPaddingType());
3183 
3184  unsigned FirstIRArg, NumIRArgs;
3185  std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
3186 
3187  switch (ArgInfo.getKind()) {
3188  case ABIArgInfo::InAlloca: {
3189  assert(NumIRArgs == 0);
3190  assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
3191  if (RV.isAggregate()) {
3192  // Replace the placeholder with the appropriate argument slot GEP.
3193  llvm::Instruction *Placeholder =
3194  cast<llvm::Instruction>(RV.getAggregateAddr());
3195  CGBuilderTy::InsertPoint IP = Builder.saveIP();
3196  Builder.SetInsertPoint(Placeholder);
3197  llvm::Value *Addr =
3198  Builder.CreateStructGEP(ArgMemory->getAllocatedType(), ArgMemory,
3199  ArgInfo.getInAllocaFieldIndex());
3200  Builder.restoreIP(IP);
3201  deferPlaceholderReplacement(Placeholder, Addr);
3202  } else {
3203  // Store the RValue into the argument struct.
3204  llvm::Value *Addr =
3205  Builder.CreateStructGEP(ArgMemory->getAllocatedType(), ArgMemory,
3206  ArgInfo.getInAllocaFieldIndex());
3207  unsigned AS = Addr->getType()->getPointerAddressSpace();
3208  llvm::Type *MemType = ConvertTypeForMem(I->Ty)->getPointerTo(AS);
3209  // There are some cases where a trivial bitcast is unavoidable. The
3210  // definition of a type later in a translation unit may change its type
3211  // from {}* to (%struct.foo*)*.
3212  if (Addr->getType() != MemType)
3213  Addr = Builder.CreateBitCast(Addr, MemType);
3214  LValue argLV = MakeAddrLValue(Addr, I->Ty, TypeAlign);
3215  EmitInitStoreOfNonAggregate(*this, RV, argLV);
3216  }
3217  break;
3218  }
3219 
3220  case ABIArgInfo::Indirect: {
3221  assert(NumIRArgs == 1);
3222  if (RV.isScalar() || RV.isComplex()) {
3223  // Make a temporary alloca to pass the argument.
3224  llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
3225  if (ArgInfo.getIndirectAlign() > AI->getAlignment())
3226  AI->setAlignment(ArgInfo.getIndirectAlign());
3227  IRCallArgs[FirstIRArg] = AI;
3228 
3229  LValue argLV = MakeAddrLValue(AI, I->Ty, TypeAlign);
3230  EmitInitStoreOfNonAggregate(*this, RV, argLV);
3231  } else {
3232  // We want to avoid creating an unnecessary temporary+copy here;
3233  // however, we need one in three cases:
3234  // 1. If the argument is not byval, and we are required to copy the
3235  // source. (This case doesn't occur on any common architecture.)
3236  // 2. If the argument is byval, RV is not sufficiently aligned, and
3237  // we cannot force it to be sufficiently aligned.
3238  // 3. If the argument is byval, but RV is located in an address space
3239  // different than that of the argument (0).
3240  llvm::Value *Addr = RV.getAggregateAddr();
3241  unsigned Align = ArgInfo.getIndirectAlign();
3242  const llvm::DataLayout *TD = &CGM.getDataLayout();
3243  const unsigned RVAddrSpace = Addr->getType()->getPointerAddressSpace();
3244  const unsigned ArgAddrSpace =
3245  (FirstIRArg < IRFuncTy->getNumParams()
3246  ? IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace()
3247  : 0);
3248  if ((!ArgInfo.getIndirectByVal() && I->NeedsCopy) ||
3249  (ArgInfo.getIndirectByVal() && TypeAlign.getQuantity() < Align &&
3250  llvm::getOrEnforceKnownAlignment(Addr, Align, *TD) < Align) ||
3251  (ArgInfo.getIndirectByVal() && (RVAddrSpace != ArgAddrSpace))) {
3252  // Create an aligned temporary, and copy to it.
3253  llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
3254  if (Align > AI->getAlignment())
3255  AI->setAlignment(Align);
3256  IRCallArgs[FirstIRArg] = AI;
3257  EmitAggregateCopy(AI, Addr, I->Ty, RV.isVolatileQualified());
3258  } else {
3259  // Skip the extra memcpy call.
3260  IRCallArgs[FirstIRArg] = Addr;
3261  }
3262  }
3263  break;
3264  }
3265 
3266  case ABIArgInfo::Ignore:
3267  assert(NumIRArgs == 0);
3268  break;
3269 
3270  case ABIArgInfo::Extend:
3271  case ABIArgInfo::Direct: {
3272  if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
3273  ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
3274  ArgInfo.getDirectOffset() == 0) {
3275  assert(NumIRArgs == 1);
3276  llvm::Value *V;
3277  if (RV.isScalar())
3278  V = RV.getScalarVal();
3279  else
3280  V = Builder.CreateLoad(RV.getAggregateAddr());
3281 
3282  // We might have to widen integers, but we should never truncate.
3283  if (ArgInfo.getCoerceToType() != V->getType() &&
3284  V->getType()->isIntegerTy())
3285  V = Builder.CreateZExt(V, ArgInfo.getCoerceToType());
3286 
3287  // If the argument doesn't match, perform a bitcast to coerce it. This
3288  // can happen due to trivial type mismatches.
3289  if (FirstIRArg < IRFuncTy->getNumParams() &&
3290  V->getType() != IRFuncTy->getParamType(FirstIRArg))
3291  V = Builder.CreateBitCast(V, IRFuncTy->getParamType(FirstIRArg));
3292  IRCallArgs[FirstIRArg] = V;
3293  break;
3294  }
3295 
3296  // FIXME: Avoid the conversion through memory if possible.
3297  llvm::Value *SrcPtr;
3298  CharUnits SrcAlign;
3299  if (RV.isScalar() || RV.isComplex()) {
3300  SrcPtr = CreateMemTemp(I->Ty, "coerce");
3301  SrcAlign = TypeAlign;
3302  LValue SrcLV = MakeAddrLValue(SrcPtr, I->Ty, TypeAlign);
3303  EmitInitStoreOfNonAggregate(*this, RV, SrcLV);
3304  } else {
3305  SrcPtr = RV.getAggregateAddr();
3306  // This alignment is guaranteed by EmitCallArg.
3307  SrcAlign = TypeAlign;
3308  }
3309 
3310  // If the value is offset in memory, apply the offset now.
3311  if (unsigned Offs = ArgInfo.getDirectOffset()) {
3312  SrcPtr = Builder.CreateBitCast(SrcPtr, Builder.getInt8PtrTy());
3313  SrcPtr = Builder.CreateConstGEP1_32(Builder.getInt8Ty(), SrcPtr, Offs);
3314  SrcPtr = Builder.CreateBitCast(SrcPtr,
3315  llvm::PointerType::getUnqual(ArgInfo.getCoerceToType()));
3316  SrcAlign = SrcAlign.alignmentAtOffset(CharUnits::fromQuantity(Offs));
3317  }
3318 
3319  // Fast-isel and the optimizer generally like scalar values better than
3320  // FCAs, so we flatten them if this is safe to do for this argument.
3321  llvm::StructType *STy =
3322  dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType());
3323  if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
3324  llvm::Type *SrcTy =
3325  cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
3326  uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
3327  uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy);
3328 
3329  // If the source type is smaller than the destination type of the
3330  // coerce-to logic, copy the source value into a temp alloca the size
3331  // of the destination type to allow loading all of it. The bits past
3332  // the source value are left undef.
3333  if (SrcSize < DstSize) {
3334  llvm::AllocaInst *TempAlloca
3335  = CreateTempAlloca(STy, SrcPtr->getName() + ".coerce");
3336  Builder.CreateMemCpy(TempAlloca, SrcPtr, SrcSize, 0);
3337  SrcPtr = TempAlloca;
3338  } else {
3339  SrcPtr = Builder.CreateBitCast(SrcPtr,
3340  llvm::PointerType::getUnqual(STy));
3341  }
3342 
3343  assert(NumIRArgs == STy->getNumElements());
3344  for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
3345  llvm::Value *EltPtr = Builder.CreateConstGEP2_32(STy, SrcPtr, 0, i);
3346  llvm::LoadInst *LI = Builder.CreateLoad(EltPtr);
3347  // We don't know what we're loading from.
3348  LI->setAlignment(1);
3349  IRCallArgs[FirstIRArg + i] = LI;
3350  }
3351  } else {
3352  // In the simple case, just pass the coerced loaded value.
3353  assert(NumIRArgs == 1);
3354  IRCallArgs[FirstIRArg] =
3355  CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
3356  SrcAlign, *this);
3357  }
3358 
3359  break;
3360  }
3361 
3362  case ABIArgInfo::Expand:
3363  unsigned IRArgPos = FirstIRArg;
3364  ExpandTypeToArgs(I->Ty, RV, IRFuncTy, IRCallArgs, IRArgPos);
3365  assert(IRArgPos == FirstIRArg + NumIRArgs);
3366  break;
3367  }
3368  }
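// Illustrative sketch (editor's addition): for a Direct argument coerced to a
// flattenable struct such as { i32, i32 }, the loop above passes the pieces as
// separate scalar IR arguments, e.g.
//   call void @g(i32 %a.coerce0, i32 %a.coerce1)
// rather than as a single first-class aggregate value.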
3369 
3370  if (ArgMemory) {
3371  llvm::Value *Arg = ArgMemory;
3372  if (CallInfo.isVariadic()) {
3373  // When passing non-POD arguments by value to variadic functions, we will
3374  // end up with a variadic prototype and an inalloca call site. In such
3375  // cases, we can't do any parameter mismatch checks. Give up and bitcast
3376  // the callee.
3377  unsigned CalleeAS =
3378  cast<llvm::PointerType>(Callee->getType())->getAddressSpace();
3379  Callee = Builder.CreateBitCast(
3380  Callee, getTypes().GetFunctionType(CallInfo)->getPointerTo(CalleeAS));
3381  } else {
3382  llvm::Type *LastParamTy =
3383  IRFuncTy->getParamType(IRFuncTy->getNumParams() - 1);
3384  if (Arg->getType() != LastParamTy) {
3385 #ifndef NDEBUG
3386  // Assert that these structs have equivalent element types.
3387  llvm::StructType *FullTy = CallInfo.getArgStruct();
3388  llvm::StructType *DeclaredTy = cast<llvm::StructType>(
3389  cast<llvm::PointerType>(LastParamTy)->getElementType());
3390  assert(DeclaredTy->getNumElements() == FullTy->getNumElements());
3391  for (llvm::StructType::element_iterator DI = DeclaredTy->element_begin(),
3392  DE = DeclaredTy->element_end(),
3393  FI = FullTy->element_begin();
3394  DI != DE; ++DI, ++FI)
3395  assert(*DI == *FI);
3396 #endif
3397  Arg = Builder.CreateBitCast(Arg, LastParamTy);
3398  }
3399  }
3400  assert(IRFunctionArgs.hasInallocaArg());
3401  IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg;
3402  }
3403 
3404  if (!CallArgs.getCleanupsToDeactivate().empty())
3405  deactivateArgCleanupsBeforeCall(*this, CallArgs);
3406 
3407  // If the callee is a bitcast of a function to a varargs pointer to function
3408  // type, check to see if we can remove the bitcast. This handles some cases
3409  // with unprototyped functions.
3410  if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Callee))
3411  if (llvm::Function *CalleeF = dyn_cast<llvm::Function>(CE->getOperand(0))) {
3412  llvm::PointerType *CurPT=cast<llvm::PointerType>(Callee->getType());
3413  llvm::FunctionType *CurFT =
3414  cast<llvm::FunctionType>(CurPT->getElementType());
3415  llvm::FunctionType *ActualFT = CalleeF->getFunctionType();
3416 
3417  if (CE->getOpcode() == llvm::Instruction::BitCast &&
3418  ActualFT->getReturnType() == CurFT->getReturnType() &&
3419  ActualFT->getNumParams() == CurFT->getNumParams() &&
3420  ActualFT->getNumParams() == IRCallArgs.size() &&
3421  (CurFT->isVarArg() || !ActualFT->isVarArg())) {
3422  bool ArgsMatch = true;
3423  for (unsigned i = 0, e = ActualFT->getNumParams(); i != e; ++i)
3424  if (ActualFT->getParamType(i) != CurFT->getParamType(i)) {
3425  ArgsMatch = false;
3426  break;
3427  }
3428 
3429  // Strip the cast if we can get away with it. This is a nice cleanup,
3430  // but also allows us to inline the function at -O0 if it is marked
3431  // always_inline.
3432  if (ArgsMatch)
3433  Callee = CalleeF;
3434  }
3435  }
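// Illustrative sketch (editor's addition): calling an unprototyped C function
//   void f();  ...  f(42);
// is emitted through a bitcast of @f to 'void (i32)*'; when the checks above
// show that the parameter types line up, the cast is stripped so @f can still
// be inlined at -O0 if it is marked always_inline.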
3436 
3437  assert(IRCallArgs.size() == IRFuncTy->getNumParams() || IRFuncTy->isVarArg());
3438  for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
3439  // The inalloca argument can have a different type.
3440  if (IRFunctionArgs.hasInallocaArg() &&
3441  i == IRFunctionArgs.getInallocaArgNo())
3442  continue;
3443  if (i < IRFuncTy->getNumParams())
3444  assert(IRCallArgs[i]->getType() == IRFuncTy->getParamType(i));
3445  }
3446 
3447  unsigned CallingConv;
3448  CodeGen::AttributeListType AttributeList;
3449  CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList,
3450  CallingConv, true);
3451  llvm::AttributeSet Attrs = llvm::AttributeSet::get(getLLVMContext(),
3452  AttributeList);
3453 
3454  llvm::BasicBlock *InvokeDest = nullptr;
3455  if (!Attrs.hasAttribute(llvm::AttributeSet::FunctionIndex,
3456  llvm::Attribute::NoUnwind) ||
3457  currentFunctionUsesSEHTry())
3458  InvokeDest = getInvokeDest();
3459 
3460  llvm::CallSite CS;
3461  if (!InvokeDest) {
3462  CS = Builder.CreateCall(Callee, IRCallArgs);
3463  } else {
3464  llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
3465  CS = Builder.CreateInvoke(Callee, Cont, InvokeDest, IRCallArgs);
3466  EmitBlock(Cont);
3467  }
3468  if (callOrInvoke)
3469  *callOrInvoke = CS.getInstruction();
3470 
3471  if (CurCodeDecl && CurCodeDecl->hasAttr<FlattenAttr>() &&
3472  !CS.hasFnAttr(llvm::Attribute::NoInline))
3473  Attrs =
3474  Attrs.addAttribute(getLLVMContext(), llvm::AttributeSet::FunctionIndex,
3475  llvm::Attribute::AlwaysInline);
3476 
3477  // Disable inlining inside SEH __try blocks.
3478  if (isSEHTryScope())
3479  Attrs =
3480  Attrs.addAttribute(getLLVMContext(), llvm::AttributeSet::FunctionIndex,
3481  llvm::Attribute::NoInline);
3482 
3483  CS.setAttributes(Attrs);
3484  CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
3485 
3486  // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
3487  // optimizer it can aggressively ignore unwind edges.
3488  if (CGM.getLangOpts().ObjCAutoRefCount)
3489  AddObjCARCExceptionMetadata(CS.getInstruction());
3490 
3491  // If the call doesn't return, finish the basic block and clear the
3492  // insertion point; this allows the rest of IRgen to discard
3493  // unreachable code.
3494  if (CS.doesNotReturn()) {
3495  if (UnusedReturnSize)
3496  EmitLifetimeEnd(llvm::ConstantInt::get(Int64Ty, UnusedReturnSize),
3497  SRetPtr);
3498 
3499  Builder.CreateUnreachable();
3500  Builder.ClearInsertionPoint();
3501 
3502  // FIXME: For now, emit a dummy basic block because expr emitters in
3503  // general are not ready to handle emitting expressions at unreachable
3504  // points.
3505  EnsureInsertPoint();
3506 
3507  // Return a reasonable RValue.
3508  return GetUndefRValue(RetTy);
3509  }
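// Illustrative sketch (editor's addition): a call to a noreturn function such
// as abort() ends the current block with 'unreachable' here; the dummy block
// emitted above exists only so expression emitters still have an insert point.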
3510 
3511  llvm::Instruction *CI = CS.getInstruction();
3512  if (Builder.isNamePreserving() && !CI->getType()->isVoidTy())
3513  CI->setName("call");
3514 
3515  // Emit any writebacks immediately. Arguably this should happen
3516  // after any return-value munging.
3517  if (CallArgs.hasWritebacks())
3518  emitWritebacks(*this, CallArgs);
3519 
3520  // The stack cleanup for inalloca arguments has to run out of the normal
3521  // lexical order, so deactivate it and run it manually here.
3522  CallArgs.freeArgumentMemory(*this);
3523 
3524  RValue Ret = [&] {
3525  switch (RetAI.getKind()) {
3526  case ABIArgInfo::InAlloca:
3527  case ABIArgInfo::Indirect: {
3528  RValue ret = convertTempToRValue(SRetPtr, RetTy, SourceLocation());
3529  if (UnusedReturnSize)
3530  EmitLifetimeEnd(llvm::ConstantInt::get(Int64Ty, UnusedReturnSize),
3531  SRetPtr);
3532  return ret;
3533  }
3534 
3535  case ABIArgInfo::Ignore:
3536  // If we are ignoring the result, make sure to construct the
3537  // appropriate return value for our caller.
3538  return GetUndefRValue(RetTy);
3539 
3540  case ABIArgInfo::Extend:
3541  case ABIArgInfo::Direct: {
3542  llvm::Type *RetIRTy = ConvertType(RetTy);
3543  if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
3544  switch (getEvaluationKind(RetTy)) {
3545  case TEK_Complex: {
3546  llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
3547  llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
3548  return RValue::getComplex(std::make_pair(Real, Imag));
3549  }
3550  case TEK_Aggregate: {
3551  llvm::Value *DestPtr = ReturnValue.getValue();
3552  bool DestIsVolatile = ReturnValue.isVolatile();
3553  CharUnits DestAlign = getContext().getTypeAlignInChars(RetTy);
3554 
3555  if (!DestPtr) {
3556  DestPtr = CreateMemTemp(RetTy, "agg.tmp");
3557  DestIsVolatile = false;
3558  }
3559  BuildAggStore(*this, CI, DestPtr, DestIsVolatile, DestAlign);
3560  return RValue::getAggregate(DestPtr);
3561  }
3562  case TEK_Scalar: {
3563  // If the value doesn't match, perform a bitcast to coerce it. This
3564  // can happen due to trivial type mismatches.
3565  llvm::Value *V = CI;
3566  if (V->getType() != RetIRTy)
3567  V = Builder.CreateBitCast(V, RetIRTy);
3568  return RValue::get(V);
3569  }
3570  }
3571  llvm_unreachable("bad evaluation kind");
3572  }
3573 
3574  llvm::Value *DestPtr = ReturnValue.getValue();
3575  bool DestIsVolatile = ReturnValue.isVolatile();
3576  CharUnits DestAlign = getContext().getTypeAlignInChars(RetTy);
3577 
3578  if (!DestPtr) {
3579  DestPtr = CreateMemTemp(RetTy, "coerce");
3580  DestIsVolatile = false;
3581  }
3582 
3583  // If the value is offset in memory, apply the offset now.
3584  llvm::Value *StorePtr = DestPtr;
3585  CharUnits StoreAlign = DestAlign;
3586  if (unsigned Offs = RetAI.getDirectOffset()) {
3587  StorePtr = Builder.CreateBitCast(StorePtr, Builder.getInt8PtrTy());
3588  StorePtr =
3589  Builder.CreateConstGEP1_32(Builder.getInt8Ty(), StorePtr, Offs);
3590  StorePtr = Builder.CreateBitCast(StorePtr,
3591  llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
3592  StoreAlign =
3593  StoreAlign.alignmentAtOffset(CharUnits::fromQuantity(Offs));
3594  }
3595  CreateCoercedStore(CI, StorePtr, DestIsVolatile, StoreAlign, *this);
3596 
3597  return convertTempToRValue(DestPtr, RetTy, SourceLocation());
3598  }
3599 
3600  case ABIArgInfo::Expand:
3601  llvm_unreachable("Invalid ABI kind for return argument");
3602  }
3603 
3604  llvm_unreachable("Unhandled ABIArgInfo::Kind");
3605  } ();
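// Illustrative sketch (editor's addition): a function whose return type is
// coerced, e.g. a two-float struct returned as '<2 x float>' on x86-64 SysV,
// comes back as a single IR value; CreateCoercedStore spills it into the
// destination slot above, and the result is then rebuilt as an RValue of the
// original C type.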
3606 
3607  if (Ret.isScalar() && TargetDecl) {
3608  if (const auto *AA = TargetDecl->getAttr<AssumeAlignedAttr>()) {
3609  llvm::Value *OffsetValue = nullptr;
3610  if (const auto *Offset = AA->getOffset())
3611  OffsetValue = EmitScalarExpr(Offset);
3612 
3613  llvm::Value *Alignment = EmitScalarExpr(AA->getAlignment());
3614  llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(Alignment);
3615  EmitAlignmentAssumption(Ret.getScalarVal(), AlignmentCI->getZExtValue(),
3616  OffsetValue);
3617  }
3618  }
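// Illustrative sketch (editor's addition): for a declaration like
//   void *my_alloc(unsigned n) __attribute__((assume_aligned(64)));
// (a hypothetical example), the scalar result is annotated here with an
// llvm.assume-based alignment assumption of 64 bytes, plus the optional offset
// operand when assume_aligned(align, offset) is used.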
3619 
3620  return Ret;
3621 }
3622 
3623 /* VarArg handling */
3624 
3625 llvm::Value *CodeGenFunction::EmitVAArg(llvm::Value *VAListAddr, QualType Ty) {
3626  return CGM.getTypes().getABIInfo().EmitVAArg(VAListAddr, Ty, *this);
3627 }
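// Illustrative sketch (editor's addition): this is the hook behind
// __builtin_va_arg; each target's ABIInfo supplies EmitVAArg, e.g. the x86-64
// SysV implementation walks the register-save and overflow areas described in
// the psABI.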