#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Triple.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/raw_ostream.h"

using namespace clang;
using namespace CodeGen;
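// Helper used by the DWARF EH register-size tables further down: stores one
// constant byte Value into every i8 cell of Array in the inclusive index
// range [FirstIndex, LastIndex].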
  for (unsigned I = FirstIndex; I <= LastIndex; ++I) {
    llvm::Value *Cell =
        Builder.CreateConstInBoundsGEP1_32(Builder.getInt8Ty(), Array, I);
    Builder.CreateStore(Value, Cell);
  if (UD->hasAttr<TransparentUnionAttr>()) {
    assert(!UD->field_empty() && "sema created an empty transparent union");

  return CGT.getCXXABI();
  return CGT.getContext();
  return CGT.getLLVMContext();
  return CGT.getDataLayout();
  return CGT.getTarget();

                                        uint64_t Members) const {
  raw_ostream &OS = llvm::errs();
  OS << "(ABIArgInfo Kind=";
    OS << "Direct Type=";

      if (AT->getSize() == 0)
      FT = AT->getElementType();
  if (isa<CXXRecordDecl>(RT->getDecl()))

  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (const auto &I : CXXRD->bases())

  for (const auto *I : RD->fields())
  const Type *Found = nullptr;

  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
    for (const auto &I : CXXRD->bases()) {

  for (const auto *FD : RD->fields()) {
      if (AT->getSize().getZExtValue() != 1)
      FT = AT->getElementType();

    Ty = CTy->getElementType();
  return Size == 32 || Size == 64;
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
    if (!CXXRD->isCLike())

  for (const auto *FD : RD->fields()) {
    if (FD->isBitField())
class DefaultABIInfo : public ABIInfo {

  if (!getCXXABI().classifyReturnType(FI))
    I.info = classifyArgumentType(I.type);

    Ty = EnumTy->getDecl()->getIntegerType();
    RetTy = EnumTy->getDecl()->getIntegerType();
class PNaClABIInfo : public ABIInfo {

  if (!getCXXABI().classifyReturnType(FI))
    I.info = classifyArgumentType(I.type);

    Ty = EnumTy->getDecl()->getIntegerType();
    RetTy = EnumTy->getDecl()->getIntegerType();
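// An MMX value is any 64-bit vector of integer elements narrower than 64
// bits, i.e. <2 x i32>, <4 x i16>, or <8 x i8>; <1 x i64> is excluded by the
// scalar-size check below.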
bool IsX86_MMXType(llvm::Type *IRType) {
  return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&
    cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() &&
    IRType->getScalarSizeInBits() != 64;
                                          StringRef Constraint,
  if ((Constraint == "y" || Constraint == "&y") && Ty->isVectorTy()) {
    if (cast<llvm::VectorType>(Ty)->getBitWidth() != 64) {
    if (BT->isFloatingPoint() && BT->getKind() != BuiltinType::Half)
    if (VecSize == 128 || VecSize == 256 || VecSize == 512)

static bool isX86VectorCallAggregateSmallEnough(uint64_t NumMembers) {
  return NumMembers <= 4;
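// CCState carries the calling convention plus the number of integer and SSE
// registers still available while classifying the arguments of one call.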
  CCState(unsigned CC) : CC(CC), FreeRegs(0), FreeSSERegs(0) {}

  unsigned FreeSSERegs;
class X86_32ABIInfo : public ABIInfo {
  static const unsigned MinABIStackAlignInBytes = 4;

  bool IsDarwinVectorABI;
  bool IsSmallStructInRegABI;
  bool IsWin32StructABI;
  unsigned DefaultNumRegisterParameters;

  static bool isRegisterSize(unsigned Size) {
    return (Size == 8 || Size == 16 || Size == 32 || Size == 64);

  bool isHomogeneousAggregateBaseType(QualType Ty) const override {
    return isX86VectorTypeForVectorCall(getContext(), Ty);

  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t NumMembers) const override {
    return isX86VectorCallAggregateSmallEnough(NumMembers);

  unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const;

  bool shouldUseInReg(QualType Ty, CCState &State, bool &NeedsPadding) const;

      : ABIInfo(CGT), IsDarwinVectorABI(d), IsSmallStructInRegABI(p),
        IsWin32StructABI(w), DefaultNumRegisterParameters(r) {}
                          bool d, bool p, bool w, unsigned r)

  static bool isStructReturnInRegABI(

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,

                                StringRef Constraint,
                                llvm::Type* Ty) const override {
    return X86AdjustInlineAsmType(CGF, Constraint, Ty);

                                std::string &Constraints,
                                std::vector<llvm::Type *> &ResultRegTypes,
                                std::vector<llvm::Type *> &ResultTruncRegTypes,
                                std::vector<LValue> &ResultRegDests,
                                std::string &AsmString,
                                unsigned NumOutputs) const override;
    unsigned Sig = (0xeb << 0) |
    return llvm::ConstantInt::get(CGM.Int32Ty, Sig);
                                          std::string &AsmString) {
  llvm::raw_string_ostream OS(Buf);
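  // In LLVM IR inline asm, "$$" escapes a literal '$', so an odd-length run
  // of '$'s means an operand reference such as "$3" follows. Any referenced
  // operand index at or above FirstIn must be shifted up by the number of
  // output operands being prepended.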
  while (Pos < AsmString.size()) {
    size_t DollarStart = AsmString.find('$', Pos);
    if (DollarStart == std::string::npos)
      DollarStart = AsmString.size();
    size_t DollarEnd = AsmString.find_first_not_of('$', DollarStart);
    if (DollarEnd == std::string::npos)
      DollarEnd = AsmString.size();
    OS << StringRef(&AsmString[Pos], DollarEnd - Pos);

    size_t NumDollars = DollarEnd - DollarStart;
    if (NumDollars % 2 != 0 && Pos < AsmString.size()) {
      size_t DigitStart = Pos;
      size_t DigitEnd = AsmString.find_first_not_of("0123456789", DigitStart);
      if (DigitEnd == std::string::npos)
        DigitEnd = AsmString.size();
      StringRef OperandStr(&AsmString[DigitStart], DigitEnd - DigitStart);
      unsigned OperandIndex;
      if (!OperandStr.getAsInteger(10, OperandIndex)) {
        if (OperandIndex >= FirstIn)
          OperandIndex += NumNewOuts;

  AsmString = std::move(OS.str());
void X86_32TargetCodeGenInfo::addReturnRegisterOutputs(
    std::vector<llvm::Type *> &ResultRegTypes,
    std::vector<llvm::Type *> &ResultTruncRegTypes,
    std::vector<LValue> &ResultRegDests, std::string &AsmString,
    unsigned NumOutputs) const {
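  // Values of 32 bits or less come back in %eax; wider values use the
  // 64-bit %eax:%edx pair. The coercion type below truncates the register
  // contents to the actual return width before storing to the return slot.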
  if (!Constraints.empty())

  if (RetWidth <= 32) {
    Constraints += "={eax}";
    ResultRegTypes.push_back(CGF.Int32Ty);
    ResultRegTypes.push_back(CGF.Int64Ty);

  llvm::Type *CoerceTy = llvm::IntegerType::get(CGF.getLLVMContext(), RetWidth);
  ResultTruncRegTypes.push_back(CoerceTy);

      CoerceTy->getPointerTo()));
  ResultRegDests.push_back(ReturnSlot);
bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
  if (!isRegisterSize(Size))
  if (Size == 64 || Size == 128)

    return shouldReturnTypeInRegister(AT->getElementType(), Context);

  if (!RT) return false;
    if (!shouldReturnTypeInRegister(FD->getType(), Context))
ABIArgInfo X86_32ABIInfo::getIndirectReturnResult(CCState &State) const {
  if (State.FreeRegs) {

                                             CCState &State) const {
  uint64_t NumElts = 0;
  if (State.CC == llvm::CallingConv::X86_VectorCall &&
      isHomogeneousAggregate(RetTy, Base, NumElts)) {

    if (IsDarwinVectorABI) {
      uint64_t Size = getContext().getTypeSize(RetTy);
            llvm::Type::getInt64Ty(getVMContext()), 2));

      if ((Size == 8 || Size == 16 || Size == 32) ||
          (Size == 64 && VT->getNumElements() == 1))

    return getIndirectReturnResult(State);
    return getIndirectReturnResult(State);
    return getIndirectReturnResult(State);

    if (shouldReturnTypeInRegister(RetTy, getContext())) {
      uint64_t Size = getContext().getTypeSize(RetTy);
        if ((!IsWin32StructABI && SeltTy->isRealFloatingType())
            || SeltTy->hasPointerRepresentation())
      return getIndirectReturnResult(State);

    RetTy = EnumTy->getDecl()->getIntegerType();
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (const auto &I : CXXRD->bases())

  for (const auto *i : RD->fields()) {
unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty,
                                                 unsigned Align) const {
  if (Align <= MinABIStackAlignInBytes)

  if (!IsDarwinVectorABI) {
    return MinABIStackAlignInBytes;
  return MinABIStackAlignInBytes;
                                            CCState &State) const {
  if (State.FreeRegs) {

  unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
  unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign);
  bool Realign = TypeAlign > StackAlign;
X86_32ABIInfo::Class X86_32ABIInfo::classify(QualType Ty) const {
    if (K == BuiltinType::Float || K == BuiltinType::Double)
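// Decide whether Ty should be passed in the remaining integer registers,
// consuming them from State. When fastcall/vectorcall arguments consume
// registers but still cannot live in them, NeedsPadding requests inreg
// padding so later arguments do not slide into the freed slots.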
bool X86_32ABIInfo::shouldUseInReg(QualType Ty, CCState &State,
                                   bool &NeedsPadding) const {
  NeedsPadding = false;
  Class C = classify(Ty);

  unsigned Size = getContext().getTypeSize(Ty);
  unsigned SizeInRegs = (Size + 31) / 32;

  if (SizeInRegs == 0)
  if (SizeInRegs > State.FreeRegs) {

  State.FreeRegs -= SizeInRegs;

  if (State.CC == llvm::CallingConv::X86_FastCall ||
      State.CC == llvm::CallingConv::X86_VectorCall) {
      NeedsPadding = true;
                                               CCState &State) const {
      return getIndirectResult(Ty, false, State);

    const Type *Base = nullptr;
    uint64_t NumElts = 0;
    if (State.CC == llvm::CallingConv::X86_VectorCall &&
        isHomogeneousAggregate(Ty, Base, NumElts)) {
      if (State.FreeSSERegs >= NumElts) {
        State.FreeSSERegs -= NumElts;
      return getIndirectResult(Ty, false, State);

    if (IsWin32StructABI)
      return getIndirectResult(Ty, true, State);
      return getIndirectResult(Ty, true, State);

    llvm::LLVMContext &LLVMContext = getVMContext();
    llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
    if (shouldUseInReg(Ty, State, NeedsPadding)) {
      unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32;
      llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);

    llvm::IntegerType *PaddingType = NeedsPadding ? Int32 : nullptr;

    if (getContext().getTypeSize(Ty) <= 4*32 &&
          State.CC == llvm::CallingConv::X86_FastCall ||
              State.CC == llvm::CallingConv::X86_VectorCall,

    return getIndirectResult(Ty, true, State);

  if (IsDarwinVectorABI) {
    uint64_t Size = getContext().getTypeSize(Ty);
    if ((Size == 8 || Size == 16 || Size == 32) ||
        (Size == 64 && VT->getNumElements() == 1))

    if (IsX86_MMXType(CGT.ConvertType(Ty)))

    Ty = EnumTy->getDecl()->getIntegerType();

  bool InReg = shouldUseInReg(Ty, State, NeedsPadding);
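  // Per-convention register budgets: fastcall and vectorcall pass the first
  // qualifying integer arguments in ECX and EDX (two registers), vectorcall
  // additionally gets six XMM registers; other conventions fall back to the
  // -mregparm value captured in DefaultNumRegisterParameters.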
  if (State.CC == llvm::CallingConv::X86_FastCall)
  else if (State.CC == llvm::CallingConv::X86_VectorCall) {
    State.FreeSSERegs = 6;
    State.FreeRegs = DefaultNumRegisterParameters;

  if (!getCXXABI().classifyReturnType(FI)) {
    if (State.FreeRegs) {

  bool UsedInAlloca = false;
    I.info = classifyArgumentType(I.type, State);

    rewriteWithInAlloca(FI);
                                          unsigned &StackOffset,
  assert(StackOffset % 4U == 0 && "unaligned inalloca struct");
  FrameFields.push_back(CGT.ConvertTypeForMem(Type));
  StackOffset += getContext().getTypeSizeInChars(Type).getQuantity();

  if (StackOffset % 4U) {
    unsigned OldOffset = StackOffset;
    StackOffset = llvm::RoundUpToAlignment(StackOffset, 4U);
    unsigned NumBytes = StackOffset - OldOffset;
    llvm::Type *Ty = llvm::Type::getInt8Ty(getVMContext());
    Ty = llvm::ArrayType::get(Ty, NumBytes);
    FrameFields.push_back(Ty);

  llvm_unreachable("invalid enum");
void X86_32ABIInfo::rewriteWithInAlloca(CGFunctionInfo &FI) const {
  assert(IsWin32StructABI && "inalloca only supported on win32");

  unsigned StackOffset = 0;
    addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);
    addFieldToArgStruct(FrameFields, StackOffset, Ret, PtrTy);

  for (; I != E; ++I) {
    addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);

  FI.setArgStruct(llvm::StructType::get(getVMContext(), FrameFields,
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");

  Align = getTypeStackAlignInBytes(Ty, Align);
  Align = std::max(Align, 4U);

      llvm::ConstantInt::get(CGF.Int32Ty, Align - 1);
  Addr = CGF.Builder.CreateGEP(Addr, Offset);
  Addr = CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask),

      llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);

      Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);
bool X86_32TargetCodeGenInfo::isStructReturnInRegABI(
  assert(Triple.getArch() == llvm::Triple::x86);

  switch (Opts.getStructReturnConvention()) {

  if (Triple.isOSDarwin())

  switch (Triple.getOS()) {
  case llvm::Triple::DragonFly:
  case llvm::Triple::FreeBSD:
  case llvm::Triple::OpenBSD:
  case llvm::Triple::Bitrig:
  case llvm::Triple::Win32:
void X86_32TargetCodeGenInfo::setTargetAttributes(const Decl *D,
                                                  llvm::GlobalValue *GV,
  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
    if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
      llvm::Function *Fn = cast<llvm::Function>(GV);

      llvm::AttrBuilder B;
      B.addStackAlignmentAttr(16);
      Fn->addAttributes(llvm::AttributeSet::FunctionIndex,
                        llvm::AttributeSet::FunctionIndex,
bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
  Builder.CreateStore(
      Four8, Builder.CreateConstInBoundsGEP1_32(CGF.Int8Ty, Address, 9));
static unsigned getNativeVectorSizeForAVXABI(X86AVXABILevel AVXLevel) {
  case X86AVXABILevel::AVX512:
  case X86AVXABILevel::AVX:
  llvm_unreachable("Unknown AVXLevel");
class X86_64ABIInfo : public ABIInfo {
  static Class merge(Class Accum, Class Field);

  void postMerge(unsigned AggregateSize, Class &Lo, Class &Hi) const;

  void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi,
                bool isNamedArg) const;

  llvm::Type *GetByteVectorType(QualType Ty) const;
  llvm::Type *GetSSETypeAtOffset(llvm::Type *IRType,
                                 unsigned IROffset, QualType SourceTy,
                                 unsigned SourceOffset) const;
  llvm::Type *GetINTEGERTypeAtOffset(llvm::Type *IRType,
                                     unsigned IROffset, QualType SourceTy,
                                     unsigned SourceOffset) const;

                                  unsigned freeIntRegs,
                                  unsigned &neededInt,
                                  unsigned &neededSSE,
                                  bool isNamedArg) const;

  bool IsIllegalVectorType(QualType Ty) const;

  bool honorsRevision0_98() const {
    return !getTarget().getTriple().isOSDarwin();

  bool Has64BitPointers;

      : ABIInfo(CGT), AVXLevel(AVXLevel),
        Has64BitPointers(CGT.getDataLayout().getPointerSize(0) == 8) {

    unsigned neededInt, neededSSE;
    ABIArgInfo info = classifyArgumentType(type, 0, neededInt, neededSSE,

    if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty))
      return (vectorTy->getBitWidth() > 128);

  bool has64BitPointers() const {
    return Has64BitPointers;
class WinX86_64ABIInfo : public ABIInfo {
                       bool IsReturnType) const;

  bool isHomogeneousAggregateBaseType(QualType Ty) const override {
    return isX86VectorTypeForVectorCall(getContext(), Ty);

  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t NumMembers) const override {
    return isX86VectorCallAggregateSmallEnough(NumMembers);

  const X86_64ABIInfo &getABIInfo() const {

                                  StringRef Constraint,
                                  llvm::Type* Ty) const override {
    return X86AdjustInlineAsmType(CGF, Constraint, Ty);
  bool isNoProtoCallVariadic(const CallArgList &args,
    bool HasAVXType = false;
    for (CallArgList::const_iterator
           it = args.begin(), ie = args.end(); it != ie; ++it) {
      if (getABIInfo().isPassedUsingAVXType(it->Ty)) {

    if (getABIInfo().has64BitPointers())

    return llvm::ConstantInt::get(CGM.Int32Ty, Sig);
class PS4TargetCodeGenInfo : public X86_64TargetCodeGenInfo {
      : X86_64TargetCodeGenInfo(CGT, AVXLevel) {}

  void getDependentLibraryOption(llvm::StringRef Lib,

static std::string qualifyWindowsLibrary(llvm::StringRef Lib) {
  bool Quote = (Lib.find(" ") != StringRef::npos);
  std::string ArgStr = Quote ? "\"" : "";
  if (!Lib.endswith_lower(".lib"))
  ArgStr += Quote ? "\"" : "";
class WinX86_32TargetCodeGenInfo : public X86_32TargetCodeGenInfo {
                            bool d, bool p, bool w, unsigned RegParms)
      : X86_32TargetCodeGenInfo(CGT, d, p, w, RegParms) {}

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,

  void getDependentLibraryOption(llvm::StringRef Lib,
    Opt = "/DEFAULTLIB:";
    Opt += qualifyWindowsLibrary(Lib);

  void getDetectMismatchOption(llvm::StringRef Name,
                               llvm::StringRef Value,
    Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";

static void addStackProbeSizeTargetAttribute(const Decl *D,
                                             llvm::GlobalValue *GV,
  if (isa<FunctionDecl>(D)) {
    llvm::Function *Fn = cast<llvm::Function>(GV);
    Fn->addFnAttr("stack-probe-size",

void WinX86_32TargetCodeGenInfo::setTargetAttributes(const Decl *D,
                                                     llvm::GlobalValue *GV,
  X86_32TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
  addStackProbeSizeTargetAttribute(D, GV, CGM);
  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,

  void getDependentLibraryOption(llvm::StringRef Lib,
    Opt = "/DEFAULTLIB:";
    Opt += qualifyWindowsLibrary(Lib);

  void getDetectMismatchOption(llvm::StringRef Name,
                               llvm::StringRef Value,
    Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";

void WinX86_64TargetCodeGenInfo::setTargetAttributes(const Decl *D,
                                                     llvm::GlobalValue *GV,
  addStackProbeSizeTargetAttribute(D, GV, CGM);
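// Post-merger cleanup for the SysV x86-64 classification (AMD64 ABI 3.2.3):
// once every eightbyte of an aggregate has been classified, a few
// whole-aggregate rules can still demote it to MEMORY or repair an
// X87UP/SSEUP half that lost its required partner.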
void X86_64ABIInfo::postMerge(unsigned AggregateSize, Class &Lo,
  if (Hi == X87Up && Lo != X87 && honorsRevision0_98())
  if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp))
  if (Hi == SSEUp && Lo != SSE)
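// Pairwise field merge, per the AMD64 ABI: equal classes and NoClass are
// identities, MEMORY dominates everything, INTEGER wins over SSE, and any
// X87-family class in a mixed pair forces MEMORY; whatever remains merges
// to SSE.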
X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) {
  assert((Accum != Memory && Accum != ComplexX87) &&
         "Invalid accumulated classification during merge.");
  if (Accum == Field || Field == NoClass)
  if (Field == Memory)
  if (Accum == NoClass)
  if (Accum == Integer || Field == Integer)
  if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
      Accum == X87 || Accum == X87Up)
void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
                             Class &Lo, Class &Hi, bool isNamedArg) const {
  Class &Current = OffsetBase < 64 ? Lo : Hi;

    if (k == BuiltinType::Void) {
    } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
    } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
    } else if (k == BuiltinType::Float || k == BuiltinType::Double) {
    } else if (k == BuiltinType::LongDouble) {
      const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
      if (LDF == &llvm::APFloat::IEEEquad) {
      } else if (LDF == &llvm::APFloat::x87DoubleExtended) {
      } else if (LDF == &llvm::APFloat::IEEEdouble) {
        llvm_unreachable("unexpected long double representation!");
    classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi, isNamedArg);

    if (Has64BitPointers) {
      uint64_t EB_FuncPtr = (OffsetBase) / 64;
      uint64_t EB_ThisAdj = (OffsetBase + 64 - 1) / 64;
      if (EB_FuncPtr != EB_ThisAdj) {

    uint64_t Size = getContext().getTypeSize(VT);
      uint64_t EB_Real = (OffsetBase) / 64;
      uint64_t EB_Imag = (OffsetBase + Size - 1) / 64;
      if (EB_Real != EB_Imag)
    } else if (Size == 64) {
      if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double))
      if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::LongLong) ||
          VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULongLong) ||
          VT->getElementType()->isSpecificBuiltinType(BuiltinType::Long) ||
          VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULong))
      if (OffsetBase && OffsetBase != 64)
    } else if (Size == 128 ||
               (isNamedArg && Size <= getNativeVectorSizeForAVXABI(AVXLevel))) {
    uint64_t Size = getContext().getTypeSize(Ty);
    else if (Size <= 128)
    } else if (ET == getContext().FloatTy) {
    } else if (ET == getContext().DoubleTy) {
    } else if (ET == getContext().LongDoubleTy) {
      const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
      if (LDF == &llvm::APFloat::IEEEquad)
      else if (LDF == &llvm::APFloat::x87DoubleExtended)
        Current = ComplexX87;
      else if (LDF == &llvm::APFloat::IEEEdouble)
        llvm_unreachable("unexpected long double representation!");

    uint64_t EB_Real = (OffsetBase) / 64;
    uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64;
    if (Hi == NoClass && EB_Real != EB_Imag)
    uint64_t Size = getContext().getTypeSize(Ty);
    if (OffsetBase % getContext().getTypeAlign(AT->getElementType()))

    uint64_t EltSize = getContext().getTypeSize(AT->getElementType());
    uint64_t ArraySize = AT->getSize().getZExtValue();
    if (Size > 128 && EltSize != 256)

    for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) {
      Class FieldLo, FieldHi;
      classify(AT->getElementType(), Offset, FieldLo, FieldHi, isNamedArg);
      Lo = merge(Lo, FieldLo);
      Hi = merge(Hi, FieldHi);
      if (Lo == Memory || Hi == Memory)

    postMerge(Size, Lo, Hi);
    assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
    uint64_t Size = getContext().getTypeSize(Ty);

    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      for (const auto &I : CXXRD->bases()) {
        assert(!I.isVirtual() && !I.getType()->isDependentType() &&
               "Unexpected base class!");
            cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());

        Class FieldLo, FieldHi;
        classify(I.getType(), Offset, FieldLo, FieldHi, isNamedArg);
        Lo = merge(Lo, FieldLo);
        Hi = merge(Hi, FieldHi);
        if (Lo == Memory || Hi == Memory) {
          postMerge(Size, Lo, Hi);

         i != e; ++i, ++idx) {
      bool BitField = i->isBitField();

      if (Size > 128 && getContext().getTypeSize(i->getType()) != 256) {
        postMerge(Size, Lo, Hi);

      if (!BitField && Offset % getContext().getTypeAlign(i->getType())) {
        postMerge(Size, Lo, Hi);

      Class FieldLo, FieldHi;
      if (i->isUnnamedBitfield())
        uint64_t Size = i->getBitWidthValue(getContext());
        uint64_t EB_Lo = Offset / 64;
        uint64_t EB_Hi = (Offset + Size - 1) / 64;
        assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes.");
        FieldHi = EB_Hi ? Integer : NoClass;
        classify(i->getType(), Offset, FieldLo, FieldHi, isNamedArg);
      Lo = merge(Lo, FieldLo);
      Hi = merge(Hi, FieldHi);
      if (Lo == Memory || Hi == Memory)

    postMerge(Size, Lo, Hi);
      Ty = EnumTy->getDecl()->getIntegerType();

bool X86_64ABIInfo::IsIllegalVectorType(QualType Ty) const {
    uint64_t Size = getContext().getTypeSize(VecTy);
    unsigned LargestVector = getNativeVectorSizeForAVXABI(AVXLevel);
    if (Size <= 64 || Size > LargestVector)

                                            unsigned freeIntRegs) const {
      Ty = EnumTy->getDecl()->getIntegerType();

  unsigned Align = std::max(getContext().getTypeAlign(Ty) / 8, 8U);

  if (freeIntRegs == 0) {
    uint64_t Size = getContext().getTypeSize(Ty);
    if (Align == 8 && Size <= 64)
llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const {
  llvm::Type *IRType = CGT.ConvertType(Ty);
  if (isa<llvm::VectorType>(IRType) ||
      IRType->getTypeID() == llvm::Type::FP128TyID)

  uint64_t Size = getContext().getTypeSize(Ty);
  assert((Size == 128 || Size == 256) && "Invalid type found!");
  return llvm::VectorType::get(llvm::Type::getDoubleTy(getVMContext()),
  if (TySize <= StartBit)

    unsigned NumElts = (unsigned)AT->getSize().getZExtValue();
    for (unsigned i = 0; i != NumElts; ++i) {
      unsigned EltOffset = i*EltSize;
      if (EltOffset >= EndBit) break;
      unsigned EltStart = EltOffset < StartBit ? StartBit-EltOffset :0;

    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      for (const auto &I : CXXRD->bases()) {
        assert(!I.isVirtual() && !I.getType()->isDependentType() &&
               "Unexpected base class!");
            cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());

        if (BaseOffset >= EndBit) continue;
        unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset :0;

         i != e; ++i, ++idx) {
      if (FieldOffset >= EndBit) break;
      unsigned FieldStart = FieldOffset < StartBit ? StartBit-FieldOffset :0;
                                  const llvm::DataLayout &TD) {
  if (IROffset == 0 && IRType->isFloatTy())

  if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
    const llvm::StructLayout *SL = TD.getStructLayout(STy);
    unsigned Elt = SL->getElementContainingOffset(IROffset);
    IROffset -= SL->getElementOffset(Elt);

  if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
    llvm::Type *EltTy = ATy->getElementType();
    unsigned EltSize = TD.getTypeAllocSize(EltTy);
    IROffset -= IROffset/EltSize*EltSize;
llvm::Type *X86_64ABIInfo::
GetSSETypeAtOffset(llvm::Type *IRType, unsigned IROffset,
                   QualType SourceTy, unsigned SourceOffset) const {
                              SourceOffset*8+64, getContext()))
    return llvm::Type::getFloatTy(getVMContext());

    return llvm::VectorType::get(llvm::Type::getFloatTy(getVMContext()), 2);

  return llvm::Type::getDoubleTy(getVMContext());
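// Pick the IR type to pass in an INTEGER eightbyte at IROffset: reuse a
// pointer or iN type that sits exactly there when the bits past it are all
// padding, recurse into structs and arrays, and otherwise fall back to an
// integer just wide enough to cover the tail of the source type.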
llvm::Type *X86_64ABIInfo::
GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset,
                       QualType SourceTy, unsigned SourceOffset) const {
  if (IROffset == 0) {
    if ((isa<llvm::PointerType>(IRType) && Has64BitPointers) ||
        IRType->isIntegerTy(64))

    if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) ||
        IRType->isIntegerTy(32) ||
        (isa<llvm::PointerType>(IRType) && !Has64BitPointers)) {
      unsigned BitWidth = isa<llvm::PointerType>(IRType) ? 32 :
          cast<llvm::IntegerType>(IRType)->getBitWidth();
                                SourceOffset*8+64, getContext()))

  if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
    const llvm::StructLayout *SL = getDataLayout().getStructLayout(STy);
    if (IROffset < SL->getSizeInBytes()) {
      unsigned FieldIdx = SL->getElementContainingOffset(IROffset);
      IROffset -= SL->getElementOffset(FieldIdx);
      return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset,
                                    SourceTy, SourceOffset);

  if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
    llvm::Type *EltTy = ATy->getElementType();
    unsigned EltSize = getDataLayout().getTypeAllocSize(EltTy);
    unsigned EltOffset = IROffset/EltSize*EltSize;
    return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy,

  unsigned TySizeInBytes =
      (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity();
  assert(TySizeInBytes != SourceOffset && "Empty field?");

  return llvm::IntegerType::get(getVMContext(),
                                std::min(TySizeInBytes-SourceOffset, 8U)*8);
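// Combine the lo and hi IR halves of an argument into a two-field struct.
// The hi part must start exactly at byte 8, so a lo float is widened to
// double and a lo integer/pointer to i64 whenever the hi part's natural
// alignment would otherwise place it below offset 8.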
                                           const llvm::DataLayout &TD) {
  unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo);
  unsigned HiAlign = TD.getABITypeAlignment(Hi);
  unsigned HiStart = llvm::RoundUpToAlignment(LoSize, HiAlign);
  assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!");

    if (Lo->isFloatTy())
      Lo = llvm::Type::getDoubleTy(Lo->getContext());
      assert((Lo->isIntegerTy() || Lo->isPointerTy())
             && "Invalid/unknown lo type");
      Lo = llvm::Type::getInt64Ty(Lo->getContext());

  llvm::StructType *Result = llvm::StructType::get(Lo, Hi, nullptr);

  assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 &&
         "Invalid x86-64 argument pair!");
classifyReturnType(QualType RetTy) const {
  X86_64ABIInfo::Class Lo, Hi;
  classify(RetTy, 0, Lo, Hi, true);

  assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
  assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");

  llvm::Type *ResType = nullptr;
    assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
           "Unknown missing lo part");

    llvm_unreachable("Invalid classification for lo word.");

    return getIndirectReturnResult(RetTy);

    ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);

    if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
        RetTy = EnumTy->getDecl()->getIntegerType();

    ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);

    ResType = llvm::Type::getX86_FP80Ty(getVMContext());

    assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
    ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(getVMContext()),
                                    llvm::Type::getX86_FP80Ty(getVMContext()),

  llvm::Type *HighPart = nullptr;
    llvm_unreachable("Invalid classification for hi word.");

    HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
    HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);

    assert(Lo == SSE && "Unexpected SSEUp classification.");
    ResType = GetByteVectorType(RetTy);

    HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
ABIArgInfo X86_64ABIInfo::classifyArgumentType(
    QualType Ty, unsigned freeIntRegs, unsigned &neededInt,
    unsigned &neededSSE,
  X86_64ABIInfo::Class Lo, Hi;
  classify(Ty, 0, Lo, Hi, isNamedArg);

  assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
  assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");

  llvm::Type *ResType = nullptr;
    assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
           "Unknown missing lo part");

    return getIndirectResult(Ty, freeIntRegs);

    llvm_unreachable("Invalid classification for lo word.");

    ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0);

    if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
        Ty = EnumTy->getDecl()->getIntegerType();

    llvm::Type *IRType = CGT.ConvertType(Ty);
    ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0);

  llvm::Type *HighPart = nullptr;
    llvm_unreachable("Invalid classification for hi word.");

  case NoClass: break;

    HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);
    HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);

    assert(Lo == SSE && "Unexpected SSEUp classification");
    ResType = GetByteVectorType(Ty);
  if (!getCXXABI().classifyReturnType(FI))

  unsigned freeIntRegs = 6, freeSSERegs = 8;
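  // SysV x86-64 register budget: six integer registers (rdi, rsi, rdx, rcx,
  // r8, r9) and eight SSE registers (xmm0-xmm7) for argument passing.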
       it != ie; ++it, ++ArgNo) {
    bool IsNamedArg = ArgNo < NumRequiredArgs;

    unsigned neededInt, neededSSE;
    it->info = classifyArgumentType(it->type, freeIntRegs, neededInt,
                                    neededSSE, IsNamedArg);

    if (freeIntRegs >= neededInt && freeSSERegs >= neededSSE) {
      freeIntRegs -= neededInt;
      freeSSERegs -= neededSSE;
      it->info = getIndirectResult(it->type, freeIntRegs);

      nullptr, VAListAddr, 2, "overflow_arg_area_p");
      CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area");

        llvm::ConstantInt::get(CGF.Int64Ty, Align - 1);
    overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset);
                                      overflow_arg_area->getType(),
                                      "overflow_arg_area.align");

      CGF.Builder.CreateBitCast(overflow_arg_area,
                                llvm::PointerType::getUnqual(LTy));

      llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7) & ~7);
  overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset,
                                            "overflow_arg_area.next");
  CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p);
  unsigned neededInt, neededSSE;

  ABIArgInfo AI = classifyArgumentType(Ty, 0, neededInt, neededSSE,

  if (!neededInt && !neededSSE)

  llvm::Value *gp_offset_p = nullptr, *gp_offset = nullptr;
  llvm::Value *fp_offset_p = nullptr, *fp_offset = nullptr;
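  // va_list register save area bounds: gp_offset may grow to 48 bytes
  // (6 GPRs x 8) and fp_offset to 176 (48 + 8 XMM registers x 16); the value
  // fits in registers only if its offset stays within those limits.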
        CGF.Builder.CreateStructGEP(nullptr, VAListAddr, 0, "gp_offset_p");
    gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset");
    InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8);
    InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp");

        CGF.Builder.CreateStructGEP(nullptr, VAListAddr, 1, "fp_offset_p");
    fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset");
        llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16);
    FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp");
    InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;

  CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
      CGF.Builder.CreateStructGEP(nullptr, VAListAddr, 3), "reg_save_area");
  if (neededInt && neededSSE) {
    assert(AI.isDirect() && "Unexpected ABI info for mixed regs");
    Tmp = CGF.Builder.CreateBitCast(Tmp, ST->getPointerTo());
    assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs");
    llvm::Type *TyLo = ST->getElementType(0);
    llvm::Type *TyHi = ST->getElementType(1);
    assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) &&
           "Unexpected ABI info for mixed regs");
    llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo);
    llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi);
    llvm::Value *RegLoAddr = TyLo->isFPOrFPVectorTy() ? FPAddr : GPAddr;
    llvm::Value *RegHiAddr = TyLo->isFPOrFPVectorTy() ? GPAddr : FPAddr;
        CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegLoAddr, PTyLo));
    CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(ST, Tmp, 0));
    V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegHiAddr, PTyHi));
    CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(ST, Tmp, 1));

    RegAddr = CGF.Builder.CreateBitCast(Tmp,
                                        llvm::PointerType::getUnqual(LTy));
  } else if (neededInt) {
    RegAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
    RegAddr = CGF.Builder.CreateBitCast(RegAddr,
                                        llvm::PointerType::getUnqual(LTy));

    std::pair<CharUnits, CharUnits> SizeAlign =
    uint64_t TySize = SizeAlign.first.getQuantity();
    unsigned TyAlign = SizeAlign.second.getQuantity();
      CGF.Builder.CreateMemCpy(Tmp, RegAddr, TySize, 8, false);
  } else if (neededSSE == 1) {
    RegAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
    RegAddr = CGF.Builder.CreateBitCast(RegAddr,
                                        llvm::PointerType::getUnqual(LTy));
    assert(neededSSE == 2 && "Invalid number of needed registers!");

    llvm::Type *DoubleTy = CGF.DoubleTy;
    llvm::Type *DblPtrTy =
        llvm::PointerType::getUnqual(DoubleTy);
    llvm::StructType *ST = llvm::StructType::get(DoubleTy, DoubleTy, nullptr);
    Tmp = CGF.Builder.CreateBitCast(Tmp, ST->getPointerTo());
    CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(ST, Tmp, 0));
    CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(ST, Tmp, 1));
    RegAddr = CGF.Builder.CreateBitCast(Tmp,
                                        llvm::PointerType::getUnqual(LTy));

    CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset),
    CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset),

  llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(RegAddr->getType(), 2,
  ResAddr->addIncoming(RegAddr, InRegBlock);
  ResAddr->addIncoming(MemAddr, InMemBlock);
                                      bool IsReturnType) const {
    Ty = EnumTy->getDecl()->getIntegerType();

  TypeInfo Info = getContext().getTypeInfo(Ty);
  uint64_t Width = Info.Width;
  unsigned Align = getContext().toCharUnitsFromBits(Info.Align).getQuantity();

  if (!IsReturnType) {
    if (Width == 128 && getTarget().getTriple().isWindowsGNUEnvironment())

  const Type *Base = nullptr;
  uint64_t NumElts = 0;
  if (FreeSSERegs && isHomogeneousAggregate(Ty, Base, NumElts)) {
    if (FreeSSERegs >= NumElts) {
      FreeSSERegs -= NumElts;

    llvm::Type *LLTy = CGT.ConvertType(Ty);
    if (LLTy->isPointerTy() || LLTy->isIntegerTy())

  if (Width > 64 || !llvm::isPowerOf2_64(Width))

  if (BT && BT->getKind() == BuiltinType::Bool)
  unsigned FreeSSERegs = IsVectorCall ? 4 : 0;
  if (!getCXXABI().classifyReturnType(FI))

  FreeSSERegs = IsVectorCall ? 6 : 0;
    I.info = classify(I.type, FreeSSERegs, false);

  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
      llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
      Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);
class PPC32_SVR4_ABIInfo : public DefaultABIInfo {

  bool isI64 = Ty->isIntegerType() && getContext().getTypeSize(Ty) == 64;
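  // The PPC32 SVR4 va_list is { i8 gpr, i8 fpr, i8* overflow_arg_area,
  // i8* reg_save_area }: the consumed-register counters sit at byte offsets
  // 0 and 1, and the two area pointers at offsets 4 and 8, which is what the
  // pointer arithmetic below (+1, +3, +4) walks through.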
  llvm::Value *GPRPtr = Builder.CreateBitCast(VAListAddr, CharPtr, "gprptr");
      Builder.CreateAdd(GPRPtrAsInt, Builder.getInt32(1));
  llvm::Value *FPRPtr = Builder.CreateIntToPtr(FPRPtrAsInt, CharPtr);
      Builder.CreateAdd(FPRPtrAsInt, Builder.getInt32(3));
      Builder.CreateIntToPtr(OverflowAreaPtrAsInt, CharPtrPtr);
      Builder.CreateAdd(OverflowAreaPtrAsInt, Builder.getInt32(4));
      Builder.CreateIntToPtr(RegsaveAreaPtrAsInt, CharPtrPtr);

  llvm::Value *GPR = Builder.CreateLoad(GPRPtr, false, "gpr");

  llvm::Value *GPRAnd = Builder.CreateAnd(GPR, Builder.getInt8(1));
  llvm::Value *CC64 = Builder.CreateICmpEQ(GPRAnd, Builder.getInt8(1));
  llvm::Value *GPRPlusOne = Builder.CreateAdd(GPR, Builder.getInt8(1));
  GPR = Builder.CreateSelect(CC64, GPRPlusOne, GPR);

  llvm::Value *FPR = Builder.CreateLoad(FPRPtr, false, "fpr");
      Builder.CreateLoad(OverflowAreaPtr, false, "overflow_area");
      Builder.CreatePtrToInt(OverflowArea, CGF.Int32Ty);
      Builder.CreateLoad(RegsaveAreaPtr, false, "regsave_area");
      Builder.CreatePtrToInt(RegsaveArea, CGF.Int32Ty);

      Builder.CreateICmpULT(isInt ? GPR : FPR, Builder.getInt8(8), "cond");
      Builder.CreateMul(isInt ? GPR : FPR, Builder.getInt8(isInt ? 4 : 8));
      RegsaveAreaAsInt, Builder.CreateSExt(RegConstant, CGF.Int32Ty));
    OurReg = Builder.CreateAdd(OurReg, Builder.getInt32(32));

  Builder.CreateCondBr(CC, UsingRegs, UsingOverflow);

  llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  llvm::Value *Result1 = Builder.CreateIntToPtr(OurReg, PTy);

    GPR = Builder.CreateAdd(GPR, Builder.getInt8(isI64 ? 2 : 1));
    Builder.CreateStore(GPR, GPRPtr);
    FPR = Builder.CreateAdd(FPR, Builder.getInt8(1));
    Builder.CreateStore(FPR, FPRPtr);

  llvm::Value *Result2 = Builder.CreateIntToPtr(OverflowAreaAsInt, PTy);
      Builder.CreateAdd(OverflowAreaAsInt, Builder.getInt32(isInt ? 4 : 8));
  Builder.CreateStore(Builder.CreateIntToPtr(OverflowAreaAsInt, CharPtr),

  llvm::PHINode *Result = CGF.Builder.CreatePHI(PTy, 2, "vaarg.addr");
  Result->addIncoming(Result1, UsingRegs);
  Result->addIncoming(Result2, UsingOverflow);

  llvm::Value *AGGPtr = Builder.CreateBitCast(Result, CharPtrPtr, "aggrptr");
  return Builder.CreateLoad(AGGPtr, false, "aggr");
  llvm::IntegerType *i8 = CGF.Int8Ty;
  llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
  llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
  llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
class PPC64_SVR4_ABIInfo : public DefaultABIInfo {
  static const unsigned GPRBits = 64;

  bool IsQPXVectorTy(const Type *Ty) const {
      unsigned NumElements = VT->getNumElements();
      if (NumElements == 1)

      if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double)) {
        if (getContext().getTypeSize(Ty) <= 256)
      } else if (VT->getElementType()->
                     isSpecificBuiltinType(BuiltinType::Float)) {
        if (getContext().getTypeSize(Ty) <= 128)

  bool IsQPXVectorTy(QualType Ty) const {

      : DefaultABIInfo(CGT), Kind(Kind), HasQPX(HasQPX) {}
  bool isPromotableTypeForABI(QualType Ty) const;
  bool isAlignedParamType(QualType Ty, bool &Align32) const;

  bool isHomogeneousAggregateBaseType(QualType Ty) const override;
  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t Members) const override;

    if (!getCXXABI().classifyReturnType(FI))

        if (IsQPXVectorTy(T) ||
            (T->isVectorType() && getContext().getTypeSize(T) == 128) ||

      I.info = classifyArgumentType(I.type);
                                               PPC64_SVR4_ABIInfo::ABIKind Kind,
                                               bool HasQPX)

class PPC64TargetCodeGenInfo : public DefaultTargetCodeGenInfo {
  PPC64TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {}

PPC64_SVR4_ABIInfo::isPromotableTypeForABI(QualType Ty) const {
    Ty = EnumTy->getDecl()->getIntegerType();

    case BuiltinType::Int:
    case BuiltinType::UInt:

PPC64_SVR4_ABIInfo::isAlignedParamType(QualType Ty, bool &Align32) const {
    Ty = CTy->getElementType();

  if (IsQPXVectorTy(Ty)) {
    if (getContext().getTypeSize(Ty) > 128)

    return getContext().getTypeSize(Ty) == 128;

    const Type *AlignAsType = nullptr;
    if (IsQPXVectorTy(EltType) || (EltType->isVectorType() &&
        getContext().getTypeSize(EltType) == 128) ||
      AlignAsType = EltType;

    const Type *Base = nullptr;
    uint64_t Members = 0;
    if (!AlignAsType && Kind == ELFv2 &&

    if (AlignAsType && IsQPXVectorTy(AlignAsType)) {
      if (getContext().getTypeSize(AlignAsType) > 128)
    } else if (AlignAsType) {

    if (HasQPX && getContext().getTypeAlign(Ty) >= 256)
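// Walk an aggregate recursively to decide whether it is homogeneous: every
// leaf field must share one machine type (Base), and the member count is
// accumulated along the way (maximum across union arms, sum otherwise).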
                                       uint64_t &Members) const {
    uint64_t NElements = AT->getSize().getZExtValue();
    if (!isHomogeneousAggregate(AT->getElementType(), Base, Members))
    Members *= NElements;

    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      for (const auto &I : CXXRD->bases()) {
        uint64_t FldMembers;
        if (!isHomogeneousAggregate(I.getType(), Base, FldMembers))
        Members += FldMembers;

    for (const auto *FD : RD->fields()) {
          getContext().getAsConstantArrayType(FT)) {
        if (AT->getSize().getZExtValue() == 0)
        FT = AT->getElementType();

      if (getContext().getLangOpts().CPlusPlus &&
          FD->isBitField() && FD->getBitWidthValue(getContext()) == 0)

      uint64_t FldMembers;
      if (!isHomogeneousAggregate(FD->getType(), Base, FldMembers))
          std::max(Members, FldMembers) : Members + FldMembers);

    if (getContext().getTypeSize(Base) * Members !=
        getContext().getTypeSize(Ty))

      Ty = CT->getElementType();
    if (!isHomogeneousAggregateBaseType(Ty))

        getContext().getTypeSize(Base) != getContext().getTypeSize(TyPtr))

  return Members > 0 && isHomogeneousAggregateSmallEnough(Base, Members);
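// ELFv2 homogeneous aggregates may be built from float, double, long double,
// or 128-bit vector elements, and must fit in at most eight parameter
// registers in total (a vector element occupies one register).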
bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
    if (BT->getKind() == BuiltinType::Float ||
        BT->getKind() == BuiltinType::Double ||
        BT->getKind() == BuiltinType::LongDouble)
    if (getContext().getTypeSize(VT) == 128 || IsQPXVectorTy(Ty))

bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateSmallEnough(
    const Type *Base, uint64_t Members) const {
      Base->isVectorType() ? 1 : (getContext().getTypeSize(Base) + 63) / 64;
  return Members * NumRegs <= 8;
PPC64_SVR4_ABIInfo::classifyArgumentType(QualType Ty) const {
    uint64_t Size = getContext().getTypeSize(Ty);
    else if (Size < 128) {
      llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size);

    uint64_t ABIAlign = isAlignedParamType(Ty, Align32) ?
                          (Align32 ? 32 : 16) : 8;
    uint64_t TyAlign = getContext().getTypeAlign(Ty) / 8;

    const Type *Base = nullptr;
    uint64_t Members = 0;
    if (Kind == ELFv2 &&
        isHomogeneousAggregate(Ty, Base, Members)) {
      llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0));
      llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members);

    uint64_t Bits = getContext().getTypeSize(Ty);
    if (Bits > 0 && Bits <= 8 * GPRBits) {
      llvm::Type *CoerceTy;
      if (Bits <= GPRBits)
        CoerceTy = llvm::IntegerType::get(getVMContext(),
                                          llvm::RoundUpToAlignment(Bits, 8));
        uint64_t RegBits = ABIAlign * 8;
        uint64_t NumRegs = llvm::RoundUpToAlignment(Bits, RegBits) / RegBits;
        llvm::Type *RegTy = llvm::IntegerType::get(getVMContext(), RegBits);
        CoerceTy = llvm::ArrayType::get(RegTy, NumRegs);

                                   TyAlign > ABIAlign);

  return (isPromotableTypeForABI(Ty) ?
PPC64_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const {
    uint64_t Size = getContext().getTypeSize(RetTy);
    else if (Size < 128) {
      llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size);

    const Type *Base = nullptr;
    uint64_t Members = 0;
    if (Kind == ELFv2 &&
        isHomogeneousAggregate(RetTy, Base, Members)) {
      llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0));
      llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members);

    uint64_t Bits = getContext().getTypeSize(RetTy);
    if (Kind == ELFv2 && Bits <= 2 * GPRBits) {
      llvm::Type *CoerceTy;
      if (Bits > GPRBits) {
        CoerceTy = llvm::IntegerType::get(getVMContext(), GPRBits);
        CoerceTy = llvm::StructType::get(CoerceTy, CoerceTy, nullptr);
        CoerceTy = llvm::IntegerType::get(getVMContext(),
                                          llvm::RoundUpToAlignment(Bits, 8));

  return (isPromotableTypeForABI(RetTy) ?
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");

  if (isAlignedParamType(Ty, Align32)) {
    AddrAsInt = Builder.CreateAdd(AddrAsInt,
                                  Builder.getInt64(Align32 ? 31 : 15));
    AddrAsInt = Builder.CreateAnd(AddrAsInt,
                                  Builder.getInt64(Align32 ? -32 : -16));
    Addr = Builder.CreateIntToPtr(AddrAsInt, BP, "ap.align");

  unsigned CplxBaseSize = 0;
    BaseTy = CTy->getElementType();
    if (CplxBaseSize < 8)

  unsigned Offset = llvm::RoundUpToAlignment(SizeInBytes, 8);
      Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int64Ty, Offset),
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  if (CplxBaseSize && CplxBaseSize < 8) {
        Builder.CreateAdd(RealAddr, Builder.getInt64(8 - CplxBaseSize));
        Builder.CreateAdd(ImagAddr, Builder.getInt64(16 - CplxBaseSize));
      ImagAddr = Builder.CreateAdd(ImagAddr, Builder.getInt64(8));

    llvm::Type *PBaseTy = llvm::PointerType::getUnqual(CGF.ConvertType(BaseTy));
    RealAddr = Builder.CreateIntToPtr(RealAddr, PBaseTy);
    ImagAddr = Builder.CreateIntToPtr(ImagAddr, PBaseTy);
    llvm::Value *Real = Builder.CreateLoad(RealAddr, false, ".vareal");
    llvm::Value *Imag = Builder.CreateLoad(ImagAddr, false, ".vaimag");
    llvm::AllocaInst *Ptr =
        Builder.CreateStructGEP(Ptr->getAllocatedType(), Ptr, 0, ".real");
        Builder.CreateStructGEP(Ptr->getAllocatedType(), Ptr, 1, ".imag");
    Builder.CreateStore(Real, RealPtr, false);
    Builder.CreateStore(Imag, ImagPtr, false);

    AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt64(8 - SizeInBytes));
    Addr = Builder.CreateIntToPtr(AddrAsInt, BP);

  llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  return Builder.CreateBitCast(Addr, PTy);
  llvm::IntegerType *i8 = CGF.Int8Ty;
  llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
  llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
  llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);

PPC64_SVR4_TargetCodeGenInfo::initDwarfEHRegSizeTable(
class AArch64ABIInfo : public ABIInfo {
  ABIKind getABIKind() const { return Kind; }
  bool isDarwinPCS() const { return Kind == DarwinPCS; }

  bool isHomogeneousAggregateBaseType(QualType Ty) const override;
  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t Members) const override;

  bool isIllegalVectorType(QualType Ty) const;

    if (!getCXXABI().classifyReturnType(FI))
      it.info = classifyArgumentType(it.type);

    return isDarwinPCS() ? EmitDarwinVAArg(VAListAddr, Ty, CGF)
                         : EmitAAPCSVAArg(VAListAddr, Ty, CGF);

  StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
    return "mov\tfp, fp\t\t; marker for objc_retainAutoreleaseReturnValue";

  bool doesReturnSlotInterfereWithArgs() const override { return false; }
  if (isIllegalVectorType(Ty)) {
    uint64_t Size = getContext().getTypeSize(Ty);
      llvm::Type *ResType = llvm::Type::getInt32Ty(getVMContext());
      llvm::Type *ResType =
          llvm::VectorType::get(llvm::Type::getInt32Ty(getVMContext()), 2);
      llvm::Type *ResType =
          llvm::VectorType::get(llvm::Type::getInt32Ty(getVMContext()), 4);

      Ty = EnumTy->getDecl()->getIntegerType();

    if (!getContext().getLangOpts().CPlusPlus || isDarwinPCS())

  const Type *Base = nullptr;
  uint64_t Members = 0;
  if (isHomogeneousAggregate(Ty, Base, Members)) {
        llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members));

  uint64_t Size = getContext().getTypeSize(Ty);
    unsigned Alignment = getContext().getTypeAlign(Ty);
    Size = 64 * ((Size + 63) / 64);

    if (Alignment < 128 && Size == 128) {
      llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext());
  if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128)

    RetTy = EnumTy->getDecl()->getIntegerType();

  const Type *Base = nullptr;
  uint64_t Members = 0;
  if (isHomogeneousAggregate(RetTy, Base, Members))

  uint64_t Size = getContext().getTypeSize(RetTy);
    unsigned Alignment = getContext().getTypeAlign(RetTy);
    Size = 64 * ((Size + 63) / 64);

    if (Alignment < 128 && Size == 128) {
      llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext());
bool AArch64ABIInfo::isIllegalVectorType(QualType Ty) const {
    unsigned NumElements = VT->getNumElements();
    uint64_t Size = getContext().getTypeSize(VT);
    if ((NumElements & (NumElements - 1)) != 0 || NumElements > 16)
    return Size != 64 && (Size != 128 || NumElements == 1);

bool AArch64ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
    unsigned VecSize = getContext().getTypeSize(VT);
    if (VecSize == 64 || VecSize == 128)

bool AArch64ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
                                                       uint64_t Members) const {
  return Members <= 4;
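// AAPCS64 va_list is { void *stack, void *gr_top, void *vr_top, int gr_offs,
// int vr_offs }. gr_offs/vr_offs are negative byte offsets from the top of
// the corresponding register save area; a non-negative offset means the
// registers of that class are exhausted and the value lives on the stack.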
    BaseTy = llvm::PointerType::getUnqual(BaseTy);

  unsigned NumRegs = 1;
  if (llvm::ArrayType *ArrTy = dyn_cast<llvm::ArrayType>(BaseTy)) {
    BaseTy = ArrTy->getElementType();
    NumRegs = ArrTy->getNumElements();
  bool IsFPR = BaseTy->isFloatingPointTy() || BaseTy->isVectorTy();

  llvm::Value *reg_offs_p = nullptr, *reg_offs = nullptr;
  int RegSize = IsIndirect ? 8 : getContext().getTypeSize(Ty) / 8;
        CGF.Builder.CreateStructGEP(nullptr, VAListAddr, 3, "gr_offs_p");
    reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "gr_offs");
    RegSize = llvm::RoundUpToAlignment(RegSize, 8);
        CGF.Builder.CreateStructGEP(nullptr, VAListAddr, 4, "vr_offs_p");
    reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "vr_offs");
    RegSize = 16 * NumRegs;

  UsingStack = CGF.Builder.CreateICmpSGE(
      reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, 0));

  CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, MaybeRegBlock);
  if (!IsFPR && !IsIndirect && Ctx.getTypeAlign(Ty) > 64) {
    int Align = Ctx.getTypeAlign(Ty) / 8;

    reg_offs = CGF.Builder.CreateAdd(
        reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, Align - 1),
    reg_offs = CGF.Builder.CreateAnd(
        reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, -Align),

  NewOffset = CGF.Builder.CreateAdd(
      reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, RegSize), "new_reg_offs");
  CGF.Builder.CreateStore(NewOffset, reg_offs_p);

  InRegs = CGF.Builder.CreateICmpSLE(
      NewOffset, llvm::ConstantInt::get(CGF.Int32Ty, 0), "inreg");

  CGF.Builder.CreateCondBr(InRegs, InRegBlock, OnStackBlock);
  llvm::Value *reg_top_p = nullptr, *reg_top = nullptr;
  reg_top_p = CGF.Builder.CreateStructGEP(nullptr, VAListAddr, reg_top_index,
  reg_top = CGF.Builder.CreateLoad(reg_top_p, "reg_top");

    MemTy = llvm::PointerType::getUnqual(MemTy);

  const Type *Base = nullptr;
  uint64_t NumMembers = 0;
  bool IsHFA = isHomogeneousAggregate(Ty, Base, NumMembers);
  if (IsHFA && NumMembers > 1) {
    assert(!IsIndirect && "Homogeneous aggregates should be passed directly");
    llvm::Type *HFATy = llvm::ArrayType::get(BaseTy, NumMembers);

      Offset = 16 - Ctx.getTypeSize(Base) / 8;

    for (unsigned i = 0; i < NumMembers; ++i) {
          llvm::ConstantInt::get(CGF.Int32Ty, 16 * i + Offset);
      LoadAddr = CGF.Builder.CreateBitCast(
          LoadAddr, llvm::PointerType::getUnqual(BaseTy));
          CGF.Builder.CreateStructGEP(Tmp->getAllocatedType(), Tmp, i);
      CGF.Builder.CreateStore(Elem, StoreAddr);

    RegAddr = CGF.Builder.CreateBitCast(Tmp, MemTy);

    unsigned BeAlign = reg_top_index == 2 ? 16 : 8;
        Ctx.getTypeSize(Ty) < (BeAlign * 8)) {
      int Offset = BeAlign - Ctx.getTypeSize(Ty) / 8;

      BaseAddr = CGF.Builder.CreateAdd(
          BaseAddr, llvm::ConstantInt::get(CGF.Int64Ty, Offset), "align_be");

    RegAddr = CGF.Builder.CreateBitCast(BaseAddr, MemTy);
  llvm::Value *stack_p = nullptr, *OnStackAddr = nullptr;
  stack_p = CGF.Builder.CreateStructGEP(nullptr, VAListAddr, 0, "stack_p");
  OnStackAddr = CGF.Builder.CreateLoad(stack_p, "stack");

  if (!IsIndirect && Ctx.getTypeAlign(Ty) > 64) {
    int Align = Ctx.getTypeAlign(Ty) / 8;

    OnStackAddr = CGF.Builder.CreatePtrToInt(OnStackAddr, CGF.Int64Ty);
    OnStackAddr = CGF.Builder.CreateAdd(
        OnStackAddr, llvm::ConstantInt::get(CGF.Int64Ty, Align - 1),
    OnStackAddr = CGF.Builder.CreateAnd(
        OnStackAddr, llvm::ConstantInt::get(CGF.Int64Ty, -Align),

    StackSize = Ctx.getTypeSize(Ty) / 8;
  StackSize = llvm::RoundUpToAlignment(StackSize, 8);

      CGF.Builder.CreateGEP(OnStackAddr, StackSizeC, "new_stack");
  CGF.Builder.CreateStore(NewStack, stack_p);

      Ctx.getTypeSize(Ty) < 64) {
    int Offset = 8 - Ctx.getTypeSize(Ty) / 8;
    OnStackAddr = CGF.Builder.CreatePtrToInt(OnStackAddr, CGF.Int64Ty);
    OnStackAddr = CGF.Builder.CreateAdd(
        OnStackAddr, llvm::ConstantInt::get(CGF.Int64Ty, Offset), "align_be");

  OnStackAddr = CGF.Builder.CreateBitCast(OnStackAddr, MemTy);

  llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(MemTy, 2, "vaarg.addr");
  ResAddr->addIncoming(RegAddr, InRegBlock);
  ResAddr->addIncoming(OnStackAddr, OnStackBlock);

    return CGF.Builder.CreateLoad(ResAddr, "vaarg.addr");
  const Type *Base = nullptr;
  uint64_t Members = 0;
  bool isHA = isHomogeneousAggregate(Ty, Base, Members);

  bool isIndirect = false;
  if (Size > 16 && !isHA) {

  llvm::Type *BPP = llvm::PointerType::getUnqual(BP);

  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");

    llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
    return Builder.CreateBitCast(Addr, PTy);

  const uint64_t MinABIAlign = 8;
  if (Align > MinABIAlign) {
    Addr = Builder.CreateGEP(Addr, Offset);
    llvm::Value *Aligned = Builder.CreateAnd(AsInt, Mask);
    Addr = Builder.CreateIntToPtr(Aligned, BP, "ap.align");

  uint64_t Offset = llvm::RoundUpToAlignment(Size, MinABIAlign);
      Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset), "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

    Addr = Builder.CreateLoad(Builder.CreateBitCast(Addr, BPP));

  llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
class ARMABIInfo : public ABIInfo {
  bool isEABI() const {
    switch (getTarget().getTriple().getEnvironment()) {
    case llvm::Triple::Android:
    case llvm::Triple::EABI:
    case llvm::Triple::EABIHF:
    case llvm::Triple::GNUEABI:
    case llvm::Triple::GNUEABIHF:

  bool isEABIHF() const {
    switch (getTarget().getTriple().getEnvironment()) {
    case llvm::Triple::EABIHF:
    case llvm::Triple::GNUEABIHF:

  ABIKind getABIKind() const { return Kind; }

  bool isIllegalVectorType(QualType Ty) const;

  bool isHomogeneousAggregateBaseType(QualType Ty) const override;
  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t Members) const override;
  ARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K)

  const ARMABIInfo &getABIInfo() const {

  StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
    return "mov\tr7, r7\t\t@ marker for objc_retainAutoreleaseReturnValue";

  unsigned getSizeOfUnwindException() const override {
    if (getABIInfo().isEABI()) return 88;

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
    const ARMInterruptAttr *Attr = FD->getAttr<ARMInterruptAttr>();

    switch (Attr->getInterrupt()) {
    case ARMInterruptAttr::Generic: Kind = ""; break;
    case ARMInterruptAttr::IRQ: Kind = "IRQ"; break;
    case ARMInterruptAttr::FIQ: Kind = "FIQ"; break;
    case ARMInterruptAttr::SWI: Kind = "SWI"; break;
    case ARMInterruptAttr::ABORT: Kind = "ABORT"; break;
    case ARMInterruptAttr::UNDEF: Kind = "UNDEF"; break;

    llvm::Function *Fn = cast<llvm::Function>(GV);
    Fn->addFnAttr("interrupt", Kind);

    if (cast<ARMABIInfo>(getABIInfo()).getABIKind() == ARMABIInfo::APCS)

    llvm::AttrBuilder B;
    B.addStackAlignmentAttr(8);
    Fn->addAttributes(llvm::AttributeSet::FunctionIndex,
                      llvm::AttributeSet::FunctionIndex,
class WindowsARMTargetCodeGenInfo : public ARMTargetCodeGenInfo {
  void addStackProbeSizeTargetAttribute(const Decl *D, llvm::GlobalValue *GV,
  // ...
public:
  WindowsARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K)
      : ARMTargetCodeGenInfo(CGT, K) {}

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
  // ...
};

void WindowsARMTargetCodeGenInfo::addStackProbeSizeTargetAttribute(
    // ...
  if (!isa<FunctionDecl>(D))
    // ...
  llvm::Function *F = cast<llvm::Function>(GV);
  F->addFnAttr("stack-probe-size",
               // ...
}

void WindowsARMTargetCodeGenInfo::setTargetAttributes(
    // ...
  ARMTargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
  addStackProbeSizeTargetAttribute(D, GV, CGM);
}
void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
  if (!getCXXABI().classifyReturnType(FI))
    // ...
    I.info = classifyArgumentType(I.type, FI.isVariadic());
  // ...
}

  // ...
    return llvm::CallingConv::ARM_AAPCS_VFP;
  // ...
    return llvm::CallingConv::ARM_AAPCS;
  // ...
    return llvm::CallingConv::ARM_APCS;
  // ...

  switch (getABIKind()) {
  case APCS:      return llvm::CallingConv::ARM_APCS;
  case AAPCS:     return llvm::CallingConv::ARM_AAPCS;
  case AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
  }
  llvm_unreachable("bad ABI kind");

void ARMABIInfo::setCCs() {
  // ...
  if (abiCC != getLLVMDefaultCC())
    // ...
  BuiltinCC = (getABIKind() == APCS ?
               llvm::CallingConv::ARM_APCS : llvm::CallingConv::ARM_AAPCS);
}

ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty,
                                            bool isVariadic) const {
  // ...
  bool IsEffectivelyAAPCS_VFP = getABIKind() == AAPCS_VFP && !isVariadic;
  // ...
  if (isIllegalVectorType(Ty)) {
    uint64_t Size = getContext().getTypeSize(Ty);
    // ...
      llvm::Type *ResType = llvm::Type::getInt32Ty(getVMContext());
    // ...
      llvm::Type *ResType = llvm::VectorType::get(
          llvm::Type::getInt32Ty(getVMContext()), 2);
    // ...
      llvm::Type *ResType = llvm::VectorType::get(
          llvm::Type::getInt32Ty(getVMContext()), 4);
    // ...
  }

  // ...
    Ty = EnumTy->getDecl()->getIntegerType();
  // ...

  if (IsEffectivelyAAPCS_VFP) {
    // ...
    const Type *Base = nullptr;
    uint64_t Members = 0;
    if (isHomogeneousAggregate(Ty, Base, Members)) {
      assert(Base && "Base class should be set for homogeneous aggregate");
      // ...
    }
  }

  // ...
  uint64_t ABIAlign = 4;
  uint64_t TyAlign = getContext().getTypeAlign(Ty) / 8;
  if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
      getABIKind() == ARMABIInfo::AAPCS)
    ABIAlign = std::min(std::max(TyAlign, (uint64_t)4), (uint64_t)8);
  // ...
    return ABIArgInfo::getIndirect(TyAlign, /*ByVal=*/true,
                                   /*Realign=*/TyAlign > ABIAlign);
  // ...
  if (getContext().getTypeAlign(Ty) <= 32) {
    ElemTy = llvm::Type::getInt32Ty(getVMContext());
    SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32;
  } else {
    ElemTy = llvm::Type::getInt64Ty(getVMContext());
    SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64;
  }
static bool isIntegerLikeType(QualType Ty, ASTContext &Context,
                              llvm::LLVMContext &VMContext) {
  // ...
  if (!RT) return false;
  // ...
  bool HadField = false;
  unsigned idx = 0;
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i, ++idx) {
    // ...
  }

ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy,
                                          bool isVariadic) const {
  bool IsEffectivelyAAPCS_VFP = getABIKind() == AAPCS_VFP && !isVariadic;
  // ...
  if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128) {
    // ...
  }
  // ...
    RetTy = EnumTy->getDecl()->getIntegerType();
  // ...
  if (getABIKind() == APCS) {
    // ...
      return ABIArgInfo::getDirect(llvm::IntegerType::get(
          getVMContext(), getContext().getTypeSize(RetTy)));
    // ...
    uint64_t Size = getContext().getTypeSize(RetTy);
    // ...
  }

  if (IsEffectivelyAAPCS_VFP) {
    const Type *Base = nullptr;
    // ...
    if (isHomogeneousAggregate(RetTy, Base, Members)) {
      assert(Base && "Base class should be set for homogeneous aggregate");
      // ...
    }
  }

  uint64_t Size = getContext().getTypeSize(RetTy);
  // ...
  if (getDataLayout().isBigEndian())
    // ...
}

bool ARMABIInfo::isIllegalVectorType(QualType Ty) const {
  // ...
    unsigned NumElements = VT->getNumElements();
    uint64_t Size = getContext().getTypeSize(VT);
    // ...
    if ((NumElements & (NumElements - 1)) != 0)
      // ...
}

bool ARMABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
  // ...
    if (BT->getKind() == BuiltinType::Float ||
        BT->getKind() == BuiltinType::Double ||
        BT->getKind() == BuiltinType::LongDouble)
      // ...
    unsigned VecSize = getContext().getTypeSize(VT);
    if (VecSize == 64 || VecSize == 128)
      // ...
}

bool ARMABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
                                                   uint64_t Members) const {
  return Members <= 4;
}
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
  // ...
  llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  return Builder.CreateBitCast(Addr, PTy);
  // ...
  bool IsIndirect = false;
  // ...
  if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
      getABIKind() == ARMABIInfo::AAPCS)
    TyAlign = std::min(std::max(TyAlign, (uint64_t)4), (uint64_t)8);
  // ...
  if (isIllegalVectorType(Ty) && Size > 16) {
    // ...
  }

  assert((TyAlign & (TyAlign - 1)) == 0 && "Alignment is not power of 2!");
  // ...
  AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt32(TyAlign - 1));
  AddrAsInt = Builder.CreateAnd(AddrAsInt, Builder.getInt32(~(TyAlign - 1)));
  Addr = Builder.CreateIntToPtr(AddrAsInt, BP, "ap.align");
  // ...
  uint64_t Offset = llvm::RoundUpToAlignment(Size, 4);
  llvm::Value *NextAddr =
      Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
                        "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);
  // ...
  Addr = Builder.CreateLoad(Builder.CreateBitCast(Addr, BPP));
  // ...
  llvm::Type *I8PtrTy = Builder.getInt8PtrTy();
  CharUnits CharSize = getContext().getTypeSizeInChars(Ty);
  // ...
  llvm::Value *Dst = Builder.CreateBitCast(AlignedTemp, I8PtrTy);
  llvm::Value *Src = Builder.CreateBitCast(Addr, I8PtrTy);
  Builder.CreateMemCpy(Dst, Src,
                       // ...
  llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
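
//===----------------------------------------------------------------------===//
// NVPTX ABI Implementation
//===----------------------------------------------------------------------===//
// NVPTX has no varargs support. Kernel entry points and launch bounds are
// recorded as "nvvm.annotations" module metadata rather than as IR attributes.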
class NVPTXABIInfo : public ABIInfo {
  // ...
};

  // ...
  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
  // ...
  static void addNVVMMetadata(llvm::Function *F, StringRef Name, int Operand);
  // ...

    RetTy = EnumTy->getDecl()->getIntegerType();
  // ...
    Ty = EnumTy->getDecl()->getIntegerType();
  // ...
  if (!getCXXABI().classifyReturnType(FI))
    // ...
    I.info = classifyArgumentType(I.type);
  // ...
  llvm_unreachable("NVPTX does not support varargs");

void NVPTXTargetCodeGenInfo::
setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
  // ...
  llvm::Function *F = cast<llvm::Function>(GV);
  // ...
  if (FD->hasAttr<OpenCLKernelAttr>()) {
    // ...
    addNVVMMetadata(F, "kernel", 1);
    // ...
    F->addFnAttr(llvm::Attribute::NoInline);
  }
  // ...
  if (FD->hasAttr<CUDAGlobalAttr>()) {
    // ...
    addNVVMMetadata(F, "kernel", 1);
    // ...
    if (CUDALaunchBoundsAttr *Attr = FD->getAttr<CUDALaunchBoundsAttr>()) {
      // ...
      llvm::APSInt MaxThreads(32);
      MaxThreads = Attr->getMaxThreads()->EvaluateKnownConstInt(M.getContext());
      // ...
      addNVVMMetadata(F, "maxntidx", MaxThreads.getExtValue());
      // ...
      if (Attr->getMinBlocks()) {
        llvm::APSInt MinBlocks(32);
        MinBlocks = Attr->getMinBlocks()->EvaluateKnownConstInt(M.getContext());
        // ...
        addNVVMMetadata(F, "minctasm", MinBlocks.getExtValue());
      }
    }
  }
}

void NVPTXTargetCodeGenInfo::addNVVMMetadata(llvm::Function *F, StringRef Name,
                                             int Operand) {
  llvm::Module *M = F->getParent();
  // ...
  llvm::NamedMDNode *MD = M->getOrInsertNamedMetadata("nvvm.annotations");
  // ...
  llvm::Metadata *MDVals[] = {
      llvm::ConstantAsMetadata::get(F), llvm::MDString::get(Ctx, Name),
      llvm::ConstantAsMetadata::get(
          llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), Operand))};
  // ...
  MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
}
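
//===----------------------------------------------------------------------===//
// SystemZ ABI Implementation
//===----------------------------------------------------------------------===//
// SystemZ passes each argument either in registers or in the overflow
// argument area. EmitVAArg below reads the register count out of the va_list,
// then selects the register save area or the overflow area with a two-way
// PHI, mirroring the fields of the SystemZ va_list structure.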
class SystemZABIInfo : public ABIInfo {
  // ...
public:
  SystemZABIInfo(CodeGenTypes &CGT, bool HV)
      : ABIInfo(CGT), HasVector(HV) {}

  bool isPromotableIntegerType(QualType Ty) const;
  bool isCompoundType(QualType Ty) const;
  bool isVectorArgumentType(QualType Ty) const;
  bool isFPArgumentType(QualType Ty) const;
  // ...
  void computeInfo(CGFunctionInfo &FI) const override {
    if (!getCXXABI().classifyReturnType(FI))
      // ...
      I.info = classifyArgumentType(I.type);
  }
  // ...
};

  SystemZTargetCodeGenInfo(CodeGenTypes &CGT, bool HasVector)
  // ...

bool SystemZABIInfo::isPromotableIntegerType(QualType Ty) const {
  // ...
    Ty = EnumTy->getDecl()->getIntegerType();
  // ...
  case BuiltinType::Int:
  case BuiltinType::UInt:
    // ...
}

bool SystemZABIInfo::isCompoundType(QualType Ty) const {
  // ...
}

bool SystemZABIInfo::isVectorArgumentType(QualType Ty) const {
  return (HasVector &&
          Ty->isVectorType() &&
          getContext().getTypeSize(Ty) <= 128);
}

bool SystemZABIInfo::isFPArgumentType(QualType Ty) const {
  // ...
  case BuiltinType::Float:
  case BuiltinType::Double:
    // ...
}

QualType SystemZABIInfo::GetSingleElementType(QualType Ty) const {
  // ...
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
      for (const auto &I : CXXRD->bases()) {
        // ...
        Found = GetSingleElementType(Base);
        // ...
      }

    for (const auto *FD : RD->fields()) {
      // ...
      if (getContext().getLangOpts().CPlusPlus &&
          FD->isBitField() && FD->getBitWidthValue(getContext()) == 0)
        // ...
      Found = GetSingleElementType(FD->getType());
      // ...
    }

  llvm::Type *APTy = llvm::PointerType::getUnqual(ArgTy);
  // ...
  bool InFPRs = false;
  bool IsVector = false;
  unsigned UnpaddedBitSize;
  // ...
    APTy = llvm::PointerType::getUnqual(APTy);
    UnpaddedBitSize = 64;
  // ...
    InFPRs = ArgTy->isFloatTy() || ArgTy->isDoubleTy();
    IsVector = ArgTy->isVectorTy();
    UnpaddedBitSize = getContext().getTypeSize(Ty);
  // ...
  unsigned PaddedBitSize = (IsVector && UnpaddedBitSize > 64) ? 128 : 64;
  assert((UnpaddedBitSize <= PaddedBitSize) && "Invalid argument size.");
  // ...
  unsigned PaddedSize = PaddedBitSize / 8;
  unsigned Padding = (PaddedBitSize - UnpaddedBitSize) / 8;
  llvm::Type *IndexTy = CGF.Int64Ty;
  llvm::Value *PaddedSizeV = llvm::ConstantInt::get(IndexTy, PaddedSize);
  // ...
    llvm::Value *OverflowArgAreaPtr =
        CGF.Builder.CreateStructGEP(nullptr, VAListAddr, 2,
                                    "overflow_arg_area_ptr");
    llvm::Value *OverflowArgArea =
        CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area");
    llvm::Value *MemAddr =
        CGF.Builder.CreateBitCast(OverflowArgArea, APTy, "mem_addr");
    // ...
    llvm::Value *NewOverflowArgArea =
        CGF.Builder.CreateGEP(OverflowArgArea, PaddedSizeV,
                              "overflow_arg_area");
    CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr);
  // ...

  unsigned MaxRegs, RegCountField, RegSaveIndex, RegPadding;
  // ...
    RegPadding = Padding;
  // ...
  llvm::Value *RegCountPtr = CGF.Builder.CreateStructGEP(
      nullptr, VAListAddr, RegCountField, "reg_count_ptr");
  // ...
  llvm::Value *MaxRegsV = llvm::ConstantInt::get(IndexTy, MaxRegs);
  // ...
  CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
  // ...
  llvm::Value *ScaledRegCount =
      CGF.Builder.CreateMul(RegCount, PaddedSizeV, "scaled_reg_count");
  llvm::Value *RegBase =
      llvm::ConstantInt::get(IndexTy, RegSaveIndex * PaddedSize + RegPadding);
  llvm::Value *RegOffset =
      CGF.Builder.CreateAdd(ScaledRegCount, RegBase, "reg_offset");
  llvm::Value *RegSaveAreaPtr =
      CGF.Builder.CreateStructGEP(nullptr, VAListAddr, 3,
                                  "reg_save_area_ptr");
  llvm::Value *RegSaveArea =
      CGF.Builder.CreateLoad(RegSaveAreaPtr, "reg_save_area");
  llvm::Value *RawRegAddr =
      CGF.Builder.CreateGEP(RegSaveArea, RegOffset, "raw_reg_addr");
  llvm::Value *RegAddr =
      CGF.Builder.CreateBitCast(RawRegAddr, APTy, "reg_addr");
  // ...
  llvm::Value *One = llvm::ConstantInt::get(IndexTy, 1);
  llvm::Value *NewRegCount =
      CGF.Builder.CreateAdd(RegCount, One, "reg_count");
  CGF.Builder.CreateStore(NewRegCount, RegCountPtr);
  // ...
  llvm::Value *OverflowArgAreaPtr = CGF.Builder.CreateStructGEP(
      nullptr, VAListAddr, 2, "overflow_arg_area_ptr");
  llvm::Value *OverflowArgArea =
      CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area");
  llvm::Value *PaddingV = llvm::ConstantInt::get(IndexTy, Padding);
  llvm::Value *RawMemAddr =
      CGF.Builder.CreateGEP(OverflowArgArea, PaddingV, "raw_mem_addr");
  llvm::Value *MemAddr =
      CGF.Builder.CreateBitCast(RawMemAddr, APTy, "mem_addr");
  // ...
  llvm::Value *NewOverflowArgArea =
      CGF.Builder.CreateGEP(OverflowArgArea, PaddedSizeV,
                            "overflow_arg_area");
  CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr);
  // ...
  llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(APTy, 2, "va_arg.addr");
  ResAddr->addIncoming(RegAddr, InRegBlock);
  ResAddr->addIncoming(MemAddr, InMemBlock);
  // ...
  return CGF.Builder.CreateLoad(ResAddr, "indirect_arg");
  if (isVectorArgumentType(RetTy))
    // ...
  if (isCompoundType(RetTy) || getContext().getTypeSize(RetTy) > 64)
    // ...
  return (isPromotableIntegerType(RetTy) ?
          // ...

  if (isPromotableIntegerType(Ty))
    // ...
  uint64_t Size = getContext().getTypeSize(Ty);
  // ...
  QualType SingleElementTy = GetSingleElementType(Ty);
  if (isVectorArgumentType(SingleElementTy) &&
      getContext().getTypeSize(SingleElementTy) == Size)
    // ...
  if (Size != 8 && Size != 16 && Size != 32 && Size != 64)
    // ...
  if (isFPArgumentType(SingleElementTy)) {
    assert(Size == 32 || Size == 64);
    // ...
      PassTy = llvm::Type::getFloatTy(getVMContext());
    // ...
      PassTy = llvm::Type::getDoubleTy(getVMContext());
  } else
    PassTy = llvm::IntegerType::get(getVMContext(), Size);
  // ...
  if (isCompoundType(Ty))
    // ...
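
//===----------------------------------------------------------------------===//
// MSP430 ABI Implementation
//===----------------------------------------------------------------------===//
// The only MSP430-specific lowering here is the interrupt attribute: the
// handler gets the MSP430_INTR calling convention, is never inlined, and is
// exposed through an "__isr_<N>" alias derived from the vector number.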
  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
  // ...

void MSP430TargetCodeGenInfo::setTargetAttributes(const Decl *D,
                                                  llvm::GlobalValue *GV,
  // ...
  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
    if (const MSP430InterruptAttr *attr = FD->getAttr<MSP430InterruptAttr>()) {
      // ...
      llvm::Function *F = cast<llvm::Function>(GV);
      // ...
      F->setCallingConv(llvm::CallingConv::MSP430_INTR);
      // ...
      F->addFnAttr(llvm::Attribute::NoInline);
      // ...
      unsigned Num = attr->getNumber() / 2;
      llvm::GlobalAlias::create(llvm::Function::ExternalLinkage,
                                "__isr_" + Twine(Num), F);
    }
  }
}
class MipsABIInfo : public ABIInfo {
  // ...
  unsigned MinABIStackAlignInBytes, StackAlignInBytes;
  void CoerceToIntArgs(uint64_t TySize,
                       SmallVectorImpl<llvm::Type *> &ArgList) const;
  llvm::Type* HandleAggregates(QualType Ty, uint64_t TySize) const;
  llvm::Type* returnAggregateInRegs(QualType RetTy, uint64_t Size) const;
  llvm::Type* getPaddingType(uint64_t Align, uint64_t Offset) const;
  // ...
  MipsABIInfo(CodeGenTypes &CGT, bool _IsO32) :
      ABIInfo(CGT), IsO32(_IsO32), MinABIStackAlignInBytes(IsO32 ? 4 : 8),
      StackAlignInBytes(IsO32 ? 8 : 16) {}
  // ...
  bool shouldSignExtUnsignedType(QualType Ty) const override;
};

  // ...
  unsigned SizeOfUnwindException;
  // ...
      SizeOfUnwindException(IsO32 ? 24 : 32) {}
  // ...
  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
    // ...
    llvm::Function *Fn = cast<llvm::Function>(GV);
    if (FD->hasAttr<Mips16Attr>()) {
      Fn->addFnAttr("mips16");
    }
    else if (FD->hasAttr<NoMips16Attr>()) {
      Fn->addFnAttr("nomips16");
    }
  // ...
  unsigned getSizeOfUnwindException() const override {
    return SizeOfUnwindException;
  }

void MipsABIInfo::CoerceToIntArgs(
    uint64_t TySize, SmallVectorImpl<llvm::Type *> &ArgList) const {
  llvm::IntegerType *IntTy =
      llvm::IntegerType::get(getVMContext(), MinABIStackAlignInBytes * 8);
  // ...
  for (unsigned N = TySize / (MinABIStackAlignInBytes * 8); N; --N)
    ArgList.push_back(IntTy);
  // ...
  unsigned R = TySize % (MinABIStackAlignInBytes * 8);
  // ...
    ArgList.push_back(llvm::IntegerType::get(getVMContext(), R));
}
llvm::Type* MipsABIInfo::HandleAggregates(QualType Ty, uint64_t TySize) const {
  // ...
    CoerceToIntArgs(TySize, ArgList);
    return llvm::StructType::get(getVMContext(), ArgList);
  // ...
    return CGT.ConvertType(Ty);
  // ...
    CoerceToIntArgs(TySize, ArgList);
    return llvm::StructType::get(getVMContext(), ArgList);
  // ...
  assert(!(TySize % 8) && "Size of structure must be multiple of 8.");
  // ...
  uint64_t LastOffset = 0;
  // ...
  llvm::IntegerType *I64 = llvm::IntegerType::get(getVMContext(), 64);
  // ...
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i, ++idx) {
    // ...
    if (!BT || BT->getKind() != BuiltinType::Double)
      // ...
    for (unsigned j = (Offset - LastOffset) / 64; j > 0; --j)
      ArgList.push_back(I64);
    // ...
    ArgList.push_back(llvm::Type::getDoubleTy(getVMContext()));
    LastOffset = Offset + 64;
  }

  CoerceToIntArgs(TySize - LastOffset, IntArgList);
  ArgList.append(IntArgList.begin(), IntArgList.end());
  // ...
  return llvm::StructType::get(getVMContext(), ArgList);
}

llvm::Type *MipsABIInfo::getPaddingType(uint64_t OrigOffset,
                                        uint64_t Offset) const {
  if (OrigOffset + MinABIStackAlignInBytes > Offset)
    // ...
  return llvm::IntegerType::get(getVMContext(), (Offset - OrigOffset) * 8);
}

ABIArgInfo
MipsABIInfo::classifyArgumentType(QualType Ty, uint64_t &Offset) const {
  // ...
  uint64_t OrigOffset = Offset;
  uint64_t TySize = getContext().getTypeSize(Ty);
  uint64_t Align = getContext().getTypeAlign(Ty) / 8;
  // ...
  Align = std::min(std::max(Align, (uint64_t)MinABIStackAlignInBytes),
                   (uint64_t)StackAlignInBytes);
  unsigned CurrOffset = llvm::RoundUpToAlignment(Offset, Align);
  Offset = CurrOffset + llvm::RoundUpToAlignment(TySize, Align * 8) / 8;
  // ...
    Offset = OrigOffset + MinABIStackAlignInBytes;
    // ...
    return ABIArgInfo::getDirect(HandleAggregates(Ty, TySize), 0,
                                 getPaddingType(OrigOffset, CurrOffset));
  // ...
    Ty = EnumTy->getDecl()->getIntegerType();
  // ...
  return ABIArgInfo::getDirect(
      nullptr, 0, IsO32 ? nullptr : getPaddingType(OrigOffset, CurrOffset));
}

llvm::Type*
MipsABIInfo::returnAggregateInRegs(QualType RetTy, uint64_t Size) const {
  // ...
    for (; b != e; ++b) {
      // ...
      RTList.push_back(CGT.ConvertType(b->getType()));
    }
    // ...
    return llvm::StructType::get(getVMContext(), RTList,
                                 // ...
  CoerceToIntArgs(Size, RTList);
  return llvm::StructType::get(getVMContext(), RTList);
}

  // ...
  uint64_t Size = getContext().getTypeSize(RetTy);
  // ...
  if (!IsO32 && Size == 0)
    // ...
    RetTy = EnumTy->getDecl()->getIntegerType();
  // ...

  if (!getCXXABI().classifyReturnType(FI))
    // ...
  uint64_t Offset = RetInfo.isIndirect() ? MinABIStackAlignInBytes : 0;
  // ...
    I.info = classifyArgumentType(I.type, Offset);
  unsigned SlotSizeInBits = IsO32 ? 32 : 64;
  unsigned PtrWidth = getTarget().getPointerWidth(0);
  // ...
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
  int64_t TypeAlign =
      std::min(getContext().getTypeAlign(Ty) / 8, StackAlignInBytes);
  llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  // ...
  llvm::IntegerType *IntTy = (PtrWidth == 32) ? CGF.Int32Ty : CGF.Int64Ty;
  // ...
  if (TypeAlign > MinABIStackAlignInBytes) {
    // ...
    llvm::Value *Inc = llvm::ConstantInt::get(IntTy, TypeAlign - 1);
    llvm::Value *Mask = llvm::ConstantInt::get(IntTy, -TypeAlign);
    // ...
    AddrTyped = CGF.Builder.CreateIntToPtr(And, PTy);
  }
  else
    AddrTyped = Builder.CreateBitCast(Addr, PTy);
  // ...
  llvm::Value *AlignedAddr = Builder.CreateBitCast(AddrTyped, BP);
  TypeAlign = std::max((unsigned)TypeAlign, MinABIStackAlignInBytes);
  // ...
  uint64_t Offset = llvm::RoundUpToAlignment(ArgSizeInBits / 8, TypeAlign);
  llvm::Value *NextAddr =
      Builder.CreateGEP(AlignedAddr, llvm::ConstantInt::get(IntTy, Offset),
                        // ...
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);
  // ...

bool MipsABIInfo::shouldSignExtUnsignedType(QualType Ty) const {
  int TySize = getContext().getTypeSize(Ty);
  // ...
}
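
//===----------------------------------------------------------------------===//
// TCE ABI Implementation (see http://tce.cs.tut.fi). Uses mostly the defaults.
//===----------------------------------------------------------------------===//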
class TCETargetCodeGenInfo : public DefaultTargetCodeGenInfo {
  // ...
      : DefaultTargetCodeGenInfo(CGT) {}

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
  // ...
};

void TCETargetCodeGenInfo::setTargetAttributes(
    // ...
  llvm::Function *F = cast<llvm::Function>(GV);
  // ...
  if (FD->hasAttr<OpenCLKernelAttr>()) {
    // ...
    F->addFnAttr(llvm::Attribute::NoInline);
    const ReqdWorkGroupSizeAttr *Attr = FD->getAttr<ReqdWorkGroupSizeAttr>();
    // ...
    llvm::LLVMContext &Context = F->getContext();
    llvm::NamedMDNode *OpenCLMetadata =
        M.getModule().getOrInsertNamedMetadata("opencl.kernel_wg_size_info");
    // ...
    Operands.push_back(llvm::ConstantAsMetadata::get(F));
    // ...
        llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
            M.Int32Ty, llvm::APInt(32, Attr->getXDim()))));
    // ...
        llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
            M.Int32Ty, llvm::APInt(32, Attr->getYDim()))));
    // ...
        llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
            M.Int32Ty, llvm::APInt(32, Attr->getZDim()))));
    // ...
        llvm::ConstantAsMetadata::get(llvm::ConstantInt::getTrue(Context)));
    OpenCLMetadata->addOperand(llvm::MDNode::get(Context, Operands));
  }
}
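
//===----------------------------------------------------------------------===//
// Hexagon ABI Implementation
//===----------------------------------------------------------------------===//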
class HexagonABIInfo : public ABIInfo {
  // ...
};

  // ...
  if (!getCXXABI().classifyReturnType(FI))
    // ...
    I.info = classifyArgumentType(I.type);
  // ...
    Ty = EnumTy->getDecl()->getIntegerType();
  // ...
  uint64_t Size = getContext().getTypeSize(Ty);
  // ...

  if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 64)
    // ...
    RetTy = EnumTy->getDecl()->getIntegerType();
  // ...
  uint64_t Size = getContext().getTypeSize(RetTy);
  // ...

  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
  // ...
  llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
  // ...
  llvm::Value *NextAddr =
      Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
                        // ...
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);
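
//===----------------------------------------------------------------------===//
// AMDGPU ABI Implementation
//===----------------------------------------------------------------------===//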
  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
  // ...

void AMDGPUTargetCodeGenInfo::setTargetAttributes(const Decl *D,
                                                  llvm::GlobalValue *GV,
  // ...
  if (const auto Attr = FD->getAttr<AMDGPUNumVGPRAttr>()) {
    llvm::Function *F = cast<llvm::Function>(GV);
    uint32_t NumVGPR = Attr->getNumVGPR();
    // ...
    F->addFnAttr("amdgpu_num_vgpr", llvm::utostr(NumVGPR));
  }
  // ...
  if (const auto Attr = FD->getAttr<AMDGPUNumSGPRAttr>()) {
    llvm::Function *F = cast<llvm::Function>(GV);
    unsigned NumSGPR = Attr->getNumSGPR();
    // ...
    F->addFnAttr("amdgpu_num_sgpr", llvm::utostr(NumSGPR));
  }
}
class SparcV9ABIInfo : public ABIInfo {
  // ...
};

struct CoerceBuilder {
  // ...
  const llvm::DataLayout &DL;
  // ...
  CoerceBuilder(llvm::LLVMContext &c, const llvm::DataLayout &dl)
      : Context(c), DL(dl), Size(0), InReg(false) {}

  void pad(uint64_t ToSize) {
    assert(ToSize >= Size && "Cannot remove elements");
    // ...
    uint64_t Aligned = llvm::RoundUpToAlignment(Size, 64);
    if (Aligned > Size && Aligned <= ToSize) {
      Elems.push_back(llvm::IntegerType::get(Context, Aligned - Size));
      // ...
    }
    // ...
    while (Size + 64 <= ToSize) {
      Elems.push_back(llvm::Type::getInt64Ty(Context));
      // ...
    }
    // ...
    if (Size < ToSize) {
      Elems.push_back(llvm::IntegerType::get(Context, ToSize - Size));
      // ...
    }
  }

  // ...
  void addFloat(uint64_t Offset, llvm::Type *Ty, unsigned Bits) {
    // ...
    Elems.push_back(Ty);
    Size = Offset + Bits;
  }

  // ...
  void addStruct(uint64_t Offset, llvm::StructType *StrTy) {
    const llvm::StructLayout *Layout = DL.getStructLayout(StrTy);
    for (unsigned i = 0, e = StrTy->getNumElements(); i != e; ++i) {
      llvm::Type *ElemTy = StrTy->getElementType(i);
      uint64_t ElemOffset = Offset + Layout->getElementOffsetInBits(i);
      switch (ElemTy->getTypeID()) {
      case llvm::Type::StructTyID:
        addStruct(ElemOffset, cast<llvm::StructType>(ElemTy));
        break;
      case llvm::Type::FloatTyID:
        addFloat(ElemOffset, ElemTy, 32);
        break;
      case llvm::Type::DoubleTyID:
        addFloat(ElemOffset, ElemTy, 64);
        break;
      case llvm::Type::FP128TyID:
        addFloat(ElemOffset, ElemTy, 128);
        break;
      case llvm::Type::PointerTyID:
        if (ElemOffset % 64 == 0) {
          // ...
          Elems.push_back(ElemTy);
          // ...
        }
        break;
      // ...
      }
    }
  }

  bool isUsableType(llvm::StructType *Ty) const {
    return llvm::makeArrayRef(Elems) == Ty->elements();
  }

  llvm::Type *getType() const {
    if (Elems.size() == 1)
      return Elems.front();
    // ...
    return llvm::StructType::get(Context, Elems);
  }
};

ABIArgInfo
SparcV9ABIInfo::classifyType(QualType Ty, unsigned SizeLimit) const {
  // ...
  uint64_t Size = getContext().getTypeSize(Ty);
  // ...
  if (Size > SizeLimit)
    // ...
    Ty = EnumTy->getDecl()->getIntegerType();
  // ...
  if (Size < 64 && Ty->isIntegerType())
    // ...
  llvm::StructType *StrTy = dyn_cast<llvm::StructType>(CGT.ConvertType(Ty));
  // ...
  CoerceBuilder CB(getVMContext(), getDataLayout());
  CB.addStruct(0, StrTy);
  CB.pad(llvm::RoundUpToAlignment(CB.DL.getTypeSizeInBits(StrTy), 64));
  // ...
  llvm::Type *CoerceTy = CB.isUsableType(StrTy) ? StrTy : CB.getType();
  // ...
}

  llvm::Type *ArgTy = CGT.ConvertType(Ty);
  // ...
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
  llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);
  // ...
    llvm_unreachable("Unsupported ABI kind for va_arg");
  // ...
    Addr = Builder
        .CreateConstGEP1_32(Addr, 8 - getDataLayout().getTypeAllocSize(ArgTy),
                            // ...
    ArgAddr = Builder.CreateBitCast(Addr,
                                    llvm::PointerType::getUnqual(ArgPtrTy),
                                    // ...
    ArgAddr = Builder.CreateLoad(ArgAddr, "indirect.arg");
  // ...
    return llvm::UndefValue::get(ArgPtrTy);
  // ...
  Addr = Builder.CreateConstGEP1_32(Addr, Stride, "ap.next");
  Builder.CreateStore(Addr, VAListAddrAsBPP);
  // ...
  return Builder.CreatePointerCast(ArgAddr, ArgPtrTy, "arg.addr");

  // ...
    I.info = classifyType(I.type, 16 * 8);

  // ...
  llvm::IntegerType *i8 = CGF.Int8Ty;
  llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
  llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
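
//===----------------------------------------------------------------------===//
// XCore ABI Implementation
//===----------------------------------------------------------------------===//
// XCore emits an "xcore.typestrings" module-level metadata node encoding the
// types of externally visible symbols. TypeStringCache memoizes encodings and
// tracks incomplete (possibly recursive) record types while they are being
// encoded.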
class TypeStringCache {
  enum Status {NonRecursive, Recursive, Incomplete, IncompleteUsed};
  // ...
    std::string Swapped;
  // ...
  std::map<const IdentifierInfo *, struct Entry> Map;
  unsigned IncompleteCount;
  unsigned IncompleteUsedCount;
public:
  TypeStringCache() : IncompleteCount(0), IncompleteUsedCount(0) {}
  // ...
};

class FieldEncoding {
  // ...
public:
  FieldEncoding(bool b, SmallStringEnc &e) : HasName(b), Enc(e.c_str()) {}
  StringRef str() { return Enc.c_str(); }
  bool operator<(const FieldEncoding &rhs) const {
    if (HasName != rhs.HasName) return HasName;
    return Enc < rhs.Enc;
  }
};

class XCoreABIInfo : public DefaultABIInfo {
  // ...
  mutable TypeStringCache TSC;
  // ...
  void emitTargetMD(const Decl *D, llvm::GlobalValue *GV,
  // ...

  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr,
                                                       CGF.Int8PtrPtrTy);
  llvm::Value *AP = Builder.CreateLoad(VAListAddrAsBPP);
  // ...
  llvm::Type *ArgTy = CGT.ConvertType(Ty);
  // ...
  llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);
  // ...
  uint64_t ArgSize = 0;
  // ...
    llvm_unreachable("Unsupported ABI kind for va_arg");
  // ...
    Val = llvm::UndefValue::get(ArgPtrTy);
  // ...
    Val = Builder.CreatePointerCast(AP, ArgPtrTy);
  // ...
    ArgAddr = Builder.CreateBitCast(AP, llvm::PointerType::getUnqual(ArgPtrTy));
    ArgAddr = Builder.CreateLoad(ArgAddr);
    Val = Builder.CreatePointerCast(ArgAddr, ArgPtrTy);
  // ...
  llvm::Value *APN = Builder.CreateConstGEP1_32(AP, ArgSize);
  Builder.CreateStore(APN, VAListAddrAsBPP);
void TypeStringCache::addIncomplete(const IdentifierInfo *ID,
                                    std::string StubEnc) {
  // ...
  assert( (E.Str.empty() || E.State == Recursive) &&
          "Incorrectly use of addIncomplete");
  assert(!StubEnc.empty() && "Passing an empty string to addIncomplete()");
  E.Swapped.swap(E.Str);
  E.Str.swap(StubEnc);
  E.State = Incomplete;
  // ...
}

bool TypeStringCache::removeIncomplete(const IdentifierInfo *ID) {
  // ...
  auto I = Map.find(ID);
  assert(I != Map.end() && "Entry not present");
  Entry &E = I->second;
  assert( (E.State == Incomplete ||
           E.State == IncompleteUsed) &&
          "Entry must be an incomplete type");
  bool IsRecursive = false;
  if (E.State == IncompleteUsed) {
    // ...
    --IncompleteUsedCount;
  }
  if (E.Swapped.empty())
    // ...
  else {
    E.Swapped.swap(E.Str);
    // ...
    E.State = Recursive;
  }
  // ...
}

void TypeStringCache::addIfComplete(const IdentifierInfo *ID, StringRef Str,
                                    bool IsRecursive) {
  if (!ID || IncompleteUsedCount)
    // ...
  if (IsRecursive && !E.Str.empty()) {
    assert(E.State == Recursive && E.Str.size() == Str.size() &&
           "This is not the same Recursive entry");
    // ...
  }
  assert(E.Str.empty() && "Entry already present");
  // ...
  E.State = IsRecursive ? Recursive : NonRecursive;
}

StringRef TypeStringCache::lookupStr(const IdentifierInfo *ID) {
  auto I = Map.find(ID);
  // ...
  Entry &E = I->second;
  if (E.State == Recursive && IncompleteCount)
    // ...
  if (E.State == Incomplete) {
    // ...
    E.State = IncompleteUsed;
    ++IncompleteUsedCount;
  }
  return E.Str.c_str();
}

void XCoreTargetCodeGenInfo::emitTargetMD(const Decl *D, llvm::GlobalValue *GV,
  // ...
  llvm::LLVMContext &Ctx = CGM.getModule().getContext();
  // ...
  MDVals.push_back(llvm::ConstantAsMetadata::get(GV));
  MDVals.push_back(llvm::MDString::get(Ctx, Enc.str()));
  llvm::NamedMDNode *MD =
      CGM.getModule().getOrInsertNamedMetadata("xcore.typestrings");
  MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
}
static bool appendType(SmallStringEnc &Enc, QualType QType,
                       const CodeGen::CodeGenModule &CGM,
                       TypeStringCache &TSC);

static bool extractFieldType(SmallVectorImpl<FieldEncoding> &FE,
                             const RecordDecl *RD,
                             const CodeGen::CodeGenModule &CGM,
                             TypeStringCache &TSC) {
  for (const auto *Field : RD->fields()) {
    // ...
    Enc += Field->getName();
    // ...
    if (Field->isBitField()) {
      // ...
      llvm::raw_svector_ostream OS(Enc);
      // ...
      OS << Field->getBitWidthValue(CGM.getContext());
    }
    // ...
    if (!appendType(Enc, Field->getType(), CGM, TSC))
      // ...
    if (Field->isBitField())
      // ...
    FE.emplace_back(!Field->getName().empty(), Enc);
  }
  // ...
}

static bool appendRecordType(SmallStringEnc &Enc, const RecordType *RT,
                             const CodeGen::CodeGenModule &CGM,
                             TypeStringCache &TSC, const IdentifierInfo *ID) {
  // ...
  StringRef TypeString = TSC.lookupStr(ID);
  if (!TypeString.empty()) {
    // ...
  }
  // ...
  size_t Start = Enc.size();
  // ...
  bool IsRecursive = false;
  // ...
    std::string StubEnc(Enc.substr(Start).str());
    // ...
    TSC.addIncomplete(ID, std::move(StubEnc));
    // ...
      (void) TSC.removeIncomplete(ID);
    // ...
    IsRecursive = TSC.removeIncomplete(ID);
    // ...
    std::sort(FE.begin(), FE.end());
    // ...
    unsigned E = FE.size();
    for (unsigned I = 0; I != E; ++I) {
      // ...
    }
  // ...
  TSC.addIfComplete(ID, Enc.substr(Start), IsRecursive);
  // ...
}

static bool appendEnumType(SmallStringEnc &Enc, const EnumType *ET,
                           TypeStringCache &TSC,
                           const IdentifierInfo *ID) {
  // ...
  StringRef TypeString = TSC.lookupStr(ID);
  if (!TypeString.empty()) {
    // ...
  }
  // ...
  size_t Start = Enc.size();
  // ...
    for (auto I = ED->enumerator_begin(), E = ED->enumerator_end(); I != E;
         // ...
      SmallStringEnc EnumEnc;
      // ...
      EnumEnc += I->getName();
      // ...
      I->getInitVal().toString(EnumEnc);
      // ...
      FE.push_back(FieldEncoding(!I->getName().empty(), EnumEnc));
    }
    std::sort(FE.begin(), FE.end());
    unsigned E = FE.size();
    for (unsigned I = 0; I != E; ++I) {
      // ...
    }
  // ...
  TSC.addIfComplete(ID, Enc.substr(Start), false);
  // ...
}

  // ...
  static const char *Table[] = {"", "c:", "r:", "cr:", "v:", "cv:", "rv:",
                                "crv:"};
  // ...
  Enc += Table[Lookup];
  const char *EncType;
  // ...
  case BuiltinType::Void:
    // ...
  case BuiltinType::Bool:
    // ...
  case BuiltinType::Char_U:
    // ...
  case BuiltinType::UChar:
    // ...
  case BuiltinType::SChar:
    // ...
  case BuiltinType::UShort:
    // ...
  case BuiltinType::Short:
    // ...
  case BuiltinType::UInt:
    // ...
  case BuiltinType::Int:
    // ...
  case BuiltinType::ULong:
    // ...
  case BuiltinType::Long:
    // ...
  case BuiltinType::ULongLong:
    // ...
  case BuiltinType::LongLong:
    // ...
  case BuiltinType::Float:
    // ...
  case BuiltinType::Double:
    // ...
  case BuiltinType::LongDouble:
    // ...

static bool appendPointerType(SmallStringEnc &Enc, const PointerType *PT,
                              const CodeGen::CodeGenModule &CGM,
                              TypeStringCache &TSC) {
  // ...
}

static bool appendArrayType(SmallStringEnc &Enc, QualType QT,
                            const ArrayType *AT,
                            const CodeGen::CodeGenModule &CGM,
                            TypeStringCache &TSC, StringRef NoSizeEnc) {
  // ...
    CAT->getSize().toStringUnsigned(Enc);
  // ...
}

static bool appendFunctionType(SmallStringEnc &Enc, const FunctionType *FT,
                               const CodeGen::CodeGenModule &CGM,
                               TypeStringCache &TSC) {
  // ...
  auto I = FPT->param_type_begin();
  auto E = FPT->param_type_end();
  // ...
  if (FPT->isVariadic())
    // ...
  if (FPT->isVariadic())
    // ...
}

static bool appendType(SmallStringEnc &Enc, QualType QType,
                       const CodeGen::CodeGenModule &CGM,
                       TypeStringCache &TSC) {
  // ...
}

static bool getTypeString(SmallStringEnc &Enc, const Decl *D,
                          CodeGen::CodeGenModule &CGM, TypeStringCache &TSC) {
  // ...
  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
    // ...
  }
  if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
    // ...
    QualType QT = VD->getType().getCanonicalType();
    // ...
  }
  // ...
}

  // ...
  return !getTriple().isOSBinFormatMachO();
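
//===----------------------------------------------------------------------===//
// Driver code
//===----------------------------------------------------------------------===//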
const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
  if (TheTargetCodeGenInfo)
    return *TheTargetCodeGenInfo;
  // ...
  switch (Triple.getArch()) {
  default:
    return *(TheTargetCodeGenInfo = new DefaultTargetCodeGenInfo(Types));

  case llvm::Triple::le32:
    return *(TheTargetCodeGenInfo = new PNaClTargetCodeGenInfo(Types));
  case llvm::Triple::mips:
  case llvm::Triple::mipsel:
    if (Triple.getOS() == llvm::Triple::NaCl)
      return *(TheTargetCodeGenInfo = new PNaClTargetCodeGenInfo(Types));
    return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, true));

  case llvm::Triple::mips64:
  case llvm::Triple::mips64el:
    return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, false));

  case llvm::Triple::aarch64:
  case llvm::Triple::aarch64_be: {
    AArch64ABIInfo::ABIKind Kind = AArch64ABIInfo::AAPCS;
    // ...
      Kind = AArch64ABIInfo::DarwinPCS;
    // ...
    return *(TheTargetCodeGenInfo = new AArch64TargetCodeGenInfo(Types, Kind));
  }

  case llvm::Triple::arm:
  case llvm::Triple::armeb:
  case llvm::Triple::thumb:
  case llvm::Triple::thumbeb: {
    if (Triple.getOS() == llvm::Triple::Win32) {
      TheTargetCodeGenInfo =
          new WindowsARMTargetCodeGenInfo(Types, ARMABIInfo::AAPCS_VFP);
      return *TheTargetCodeGenInfo;
    }

    ARMABIInfo::ABIKind Kind = ARMABIInfo::AAPCS;
    if (/* ... */)
      Kind = ARMABIInfo::APCS;
    else if (CodeGenOpts.FloatABI == "hard" ||
             (/* ... */
              Triple.getEnvironment() == llvm::Triple::GNUEABIHF))
      Kind = ARMABIInfo::AAPCS_VFP;

    return *(TheTargetCodeGenInfo = new ARMTargetCodeGenInfo(Types, Kind));
  }

  case llvm::Triple::ppc:
    return *(TheTargetCodeGenInfo = new PPC32TargetCodeGenInfo(Types));
  case llvm::Triple::ppc64:
    if (Triple.isOSBinFormatELF()) {
      PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv1;
      // ...
        Kind = PPC64_SVR4_ABIInfo::ELFv2;
      // ...
      return *(TheTargetCodeGenInfo =
                   new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, HasQPX));
    }
    return *(TheTargetCodeGenInfo = new PPC64TargetCodeGenInfo(Types));
  case llvm::Triple::ppc64le: {
    assert(Triple.isOSBinFormatELF() && "PPC64 LE non-ELF not supported!");
    PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv2;
    // ...
      Kind = PPC64_SVR4_ABIInfo::ELFv1;
    // ...
    return *(TheTargetCodeGenInfo =
                 new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, HasQPX));
  }

  case llvm::Triple::nvptx:
  case llvm::Triple::nvptx64:
    return *(TheTargetCodeGenInfo = new NVPTXTargetCodeGenInfo(Types));

  case llvm::Triple::msp430:
    return *(TheTargetCodeGenInfo = new MSP430TargetCodeGenInfo(Types));

  case llvm::Triple::systemz: {
    // ...
    return *(TheTargetCodeGenInfo = new SystemZTargetCodeGenInfo(Types,
                                                                 HasVector));
  }

  case llvm::Triple::tce:
    return *(TheTargetCodeGenInfo = new TCETargetCodeGenInfo(Types));

  case llvm::Triple::x86: {
    bool IsDarwinVectorABI = Triple.isOSDarwin();
    bool IsSmallStructInRegABI =
        X86_32TargetCodeGenInfo::isStructReturnInRegABI(Triple, CodeGenOpts);
    bool IsWin32FloatStructABI = Triple.isOSWindows() && !Triple.isOSCygMing();

    if (Triple.getOS() == llvm::Triple::Win32) {
      return *(TheTargetCodeGenInfo = new WinX86_32TargetCodeGenInfo(
                   Types, IsDarwinVectorABI, IsSmallStructInRegABI,
                   IsWin32FloatStructABI, CodeGenOpts.NumRegisterParameters));
    }
    return *(TheTargetCodeGenInfo = new X86_32TargetCodeGenInfo(
                 Types, IsDarwinVectorABI, IsSmallStructInRegABI,
                 IsWin32FloatStructABI, CodeGenOpts.NumRegisterParameters));
  }

  case llvm::Triple::x86_64: {
    StringRef ABI = getTarget().getABI();
    X86AVXABILevel AVXLevel = (ABI == "avx512" ? X86AVXABILevel::AVX512 :
                               ABI == "avx" ? X86AVXABILevel::AVX :
                               X86AVXABILevel::None);

    switch (Triple.getOS()) {
    case llvm::Triple::Win32:
      return *(TheTargetCodeGenInfo =
                   new WinX86_64TargetCodeGenInfo(Types, AVXLevel));
    case llvm::Triple::PS4:
      return *(TheTargetCodeGenInfo =
                   new PS4TargetCodeGenInfo(Types, AVXLevel));
    default:
      return *(TheTargetCodeGenInfo =
                   new X86_64TargetCodeGenInfo(Types, AVXLevel));
    }
  }
  case llvm::Triple::hexagon:
    return *(TheTargetCodeGenInfo = new HexagonTargetCodeGenInfo(Types));
  case llvm::Triple::r600:
    return *(TheTargetCodeGenInfo = new AMDGPUTargetCodeGenInfo(Types));
  case llvm::Triple::amdgcn:
    return *(TheTargetCodeGenInfo = new AMDGPUTargetCodeGenInfo(Types));
  case llvm::Triple::sparcv9:
    return *(TheTargetCodeGenInfo = new SparcV9TargetCodeGenInfo(Types));
  case llvm::Triple::xcore:
    return *(TheTargetCodeGenInfo = new XCoreTargetCodeGenInfo(Types));
  }
}