23 #include "llvm/ADT/StringExtras.h"
24 #include "llvm/ADT/Triple.h"
25 #include "llvm/IR/DataLayout.h"
26 #include "llvm/IR/Type.h"
27 #include "llvm/Support/raw_ostream.h"
30 using namespace clang;
31 using namespace CodeGen;
39 for (
unsigned I = FirstIndex;
I <= LastIndex; ++
I) {
41 Builder.CreateConstInBoundsGEP1_32(Builder.getInt8Ty(), Array,
I);
55 ByRef, Realign, Padding);
92 if (UD->
hasAttr<TransparentUnionAttr>()) {
93 assert(!UD->
field_empty() &&
"sema created an empty transparent union");
101 return CGT.getCXXABI();
105 return CGT.getContext();
109 return CGT.getLLVMContext();
113 return CGT.getDataLayout();
117 return CGT.getTarget();
125 uint64_t Members)
const {
134 raw_ostream &OS = llvm::errs();
135 OS <<
"(ABIArgInfo Kind=";
138 OS <<
"Direct Type=";
172 PtrAsInt = CGF.
Builder.CreateAdd(PtrAsInt,
174 PtrAsInt = CGF.
Builder.CreateAnd(PtrAsInt,
176 PtrAsInt = CGF.
Builder.CreateIntToPtr(PtrAsInt,
178 Ptr->getName() +
".aligned");
202 bool AllowHigherAlign) {
212 if (AllowHigherAlign && DirectAlign > SlotSize) {
251 std::pair<CharUnits, CharUnits> ValueInfo,
253 bool AllowHigherAlign) {
260 DirectSize = ValueInfo.first;
261 DirectAlign = ValueInfo.second;
267 DirectTy = DirectTy->getPointerTo(0);
270 DirectSize, DirectAlign,
283 Address Addr1, llvm::BasicBlock *Block1,
284 Address Addr2, llvm::BasicBlock *Block2,
285 const llvm::Twine &
Name =
"") {
342 if (AT->getSize() == 0)
344 FT = AT->getElementType();
355 if (isa<CXXRecordDecl>(RT->
getDecl()))
373 if (
const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
374 for (
const auto &
I : CXXRD->bases())
378 for (
const auto *
I : RD->
fields())
401 const Type *Found =
nullptr;
404 if (
const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
405 for (
const auto &
I : CXXRD->bases()) {
423 for (
const auto *FD : RD->
fields()) {
437 if (AT->getSize().getZExtValue() != 1)
439 FT = AT->getElementType();
462 Ty = CTy->getElementType();
472 return Size == 32 || Size == 64;
498 if (
const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
499 if (!CXXRD->isCLike())
505 for (
const auto *FD : RD->
fields()) {
512 if (FD->isBitField())
530 class DefaultABIInfo :
public ABIInfo {
538 if (!getCXXABI().classifyReturnType(FI))
541 I.info = classifyArgumentType(
I.type);
568 return getNaturalAlignIndirect(Ty);
573 Ty = EnumTy->getDecl()->getIntegerType();
584 return getNaturalAlignIndirect(RetTy);
588 RetTy = EnumTy->getDecl()->getIntegerType();
600 class WebAssemblyABIInfo final :
public DefaultABIInfo {
603 : DefaultABIInfo(CGT) {}
612 if (!getCXXABI().classifyReturnType(FI))
615 Arg.info = classifyArgumentType(Arg.type);
645 return DefaultABIInfo::classifyArgumentType(Ty);
665 return DefaultABIInfo::classifyReturnType(RetTy);
675 class PNaClABIInfo :
public ABIInfo {
694 if (!getCXXABI().classifyReturnType(FI))
698 I.info = classifyArgumentType(
I.type);
711 return getNaturalAlignIndirect(Ty);
714 Ty = EnumTy->getDecl()->getIntegerType();
730 return getNaturalAlignIndirect(RetTy);
734 RetTy = EnumTy->getDecl()->getIntegerType();
743 return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&
744 cast<llvm::VectorType>(IRType)->
getElementType()->isIntegerTy() &&
745 IRType->getScalarSizeInBits() != 64;
749 StringRef Constraint,
751 if ((Constraint ==
"y" || Constraint ==
"&y") && Ty->isVectorTy()) {
752 if (cast<llvm::VectorType>(Ty)->getBitWidth() != 64) {
768 if (BT->isFloatingPoint() && BT->getKind() != BuiltinType::Half)
774 if (VecSize == 128 || VecSize == 256 || VecSize == 512)
782 static bool isX86VectorCallAggregateSmallEnough(uint64_t NumMembers) {
783 return NumMembers <= 4;
792 CCState(
unsigned CC) : CC(CC), FreeRegs(0), FreeSSERegs(0) {}
796 unsigned FreeSSERegs;
800 class X86_32ABIInfo :
public ABIInfo {
806 static const unsigned MinABIStackAlignInBytes = 4;
808 bool IsDarwinVectorABI;
809 bool IsRetSmallStructInRegABI;
810 bool IsWin32StructABI;
813 unsigned DefaultNumRegisterParameters;
815 static bool isRegisterSize(
unsigned Size) {
816 return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
819 bool isHomogeneousAggregateBaseType(
QualType Ty)
const override {
821 return isX86VectorTypeForVectorCall(getContext(), Ty);
824 bool isHomogeneousAggregateSmallEnough(
const Type *Ty,
825 uint64_t NumMembers)
const override {
827 return isX86VectorCallAggregateSmallEnough(NumMembers);
839 unsigned getTypeStackAlignInBytes(
QualType Ty,
unsigned Align)
const;
848 bool shouldAggregateUseDirect(
QualType Ty, CCState &
State,
bool &InReg,
849 bool &NeedsPadding)
const;
850 bool shouldPrimitiveUseInReg(
QualType Ty, CCState &
State)
const;
867 bool RetSmallStructInRegABI,
bool Win32StructABI,
868 unsigned NumRegisterParameters,
bool SoftFloatABI)
869 :
ABIInfo(CGT), IsDarwinVectorABI(DarwinVectorABI),
870 IsRetSmallStructInRegABI(RetSmallStructInRegABI),
871 IsWin32StructABI(Win32StructABI),
872 IsSoftFloatABI(SoftFloatABI),
873 IsMCUABI(CGT.getTarget().getTriple().isOSIAMCU()),
874 DefaultNumRegisterParameters(NumRegisterParameters) {}
880 bool RetSmallStructInRegABI,
bool Win32StructABI,
881 unsigned NumRegisterParameters,
bool SoftFloatABI)
883 CGT, DarwinVectorABI, RetSmallStructInRegABI, Win32StructABI,
884 NumRegisterParameters, SoftFloatABI)) {}
886 static bool isStructReturnInRegABI(
889 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
902 StringRef Constraint,
904 return X86AdjustInlineAsmType(CGF, Constraint, Ty);
908 std::string &Constraints,
909 std::vector<llvm::Type *> &ResultRegTypes,
910 std::vector<llvm::Type *> &ResultTruncRegTypes,
911 std::vector<LValue> &ResultRegDests,
912 std::string &AsmString,
913 unsigned NumOutputs)
const override;
917 unsigned Sig = (0xeb << 0) |
921 return llvm::ConstantInt::get(CGM.
Int32Ty, Sig);
937 std::string &AsmString) {
939 llvm::raw_string_ostream OS(Buf);
941 while (Pos < AsmString.size()) {
942 size_t DollarStart = AsmString.find(
'$', Pos);
943 if (DollarStart == std::string::npos)
944 DollarStart = AsmString.size();
945 size_t DollarEnd = AsmString.find_first_not_of(
'$', DollarStart);
946 if (DollarEnd == std::string::npos)
947 DollarEnd = AsmString.size();
948 OS << StringRef(&AsmString[Pos], DollarEnd - Pos);
950 size_t NumDollars = DollarEnd - DollarStart;
951 if (NumDollars % 2 != 0 && Pos < AsmString.size()) {
953 size_t DigitStart = Pos;
954 size_t DigitEnd = AsmString.find_first_not_of(
"0123456789", DigitStart);
955 if (DigitEnd == std::string::npos)
956 DigitEnd = AsmString.size();
957 StringRef OperandStr(&AsmString[DigitStart], DigitEnd - DigitStart);
958 unsigned OperandIndex;
959 if (!OperandStr.getAsInteger(10, OperandIndex)) {
960 if (OperandIndex >= FirstIn)
961 OperandIndex += NumNewOuts;
969 AsmString = std::move(OS.str());
973 void X86_32TargetCodeGenInfo::addReturnRegisterOutputs(
975 std::vector<llvm::Type *> &ResultRegTypes,
976 std::vector<llvm::Type *> &ResultTruncRegTypes,
977 std::vector<LValue> &ResultRegDests, std::string &AsmString,
978 unsigned NumOutputs)
const {
983 if (!Constraints.empty())
985 if (RetWidth <= 32) {
986 Constraints +=
"={eax}";
987 ResultRegTypes.push_back(CGF.
Int32Ty);
991 ResultRegTypes.push_back(CGF.
Int64Ty);
996 ResultTruncRegTypes.push_back(CoerceTy);
1000 CoerceTy->getPointerTo()));
1001 ResultRegDests.push_back(ReturnSlot);
1008 bool X86_32ABIInfo::shouldReturnTypeInRegister(
QualType Ty,
1014 if ((IsMCUABI && Size > 64) || (!IsMCUABI && !isRegisterSize(Size)))
1020 if (Size == 64 || Size == 128)
1035 return shouldReturnTypeInRegister(AT->getElementType(),
Context);
1039 if (!RT)
return false;
1051 if (!shouldReturnTypeInRegister(FD->getType(),
Context))
1060 if (State.FreeRegs) {
1063 return getNaturalAlignIndirectInReg(RetTy);
1065 return getNaturalAlignIndirect(RetTy,
false);
1069 CCState &State)
const {
1074 uint64_t NumElts = 0;
1075 if (State.CC == llvm::CallingConv::X86_VectorCall &&
1076 isHomogeneousAggregate(RetTy, Base, NumElts)) {
1083 if (IsDarwinVectorABI) {
1084 uint64_t Size = getContext().getTypeSize(RetTy);
1091 llvm::Type::getInt64Ty(getVMContext()), 2));
1095 if ((Size == 8 || Size == 16 || Size == 32) ||
1096 (Size == 64 && VT->getNumElements() == 1))
1100 return getIndirectReturnResult(RetTy, State);
1110 return getIndirectReturnResult(RetTy, State);
1115 return getIndirectReturnResult(RetTy, State);
1119 if (shouldReturnTypeInRegister(RetTy, getContext())) {
1120 uint64_t Size = getContext().getTypeSize(RetTy);
1128 if ((!IsWin32StructABI && SeltTy->isRealFloatingType())
1129 || SeltTy->hasPointerRepresentation())
1137 return getIndirectReturnResult(RetTy, State);
1142 RetTy = EnumTy->getDecl()->getIntegerType();
1159 if (
const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
1160 for (
const auto &
I : CXXRD->bases())
1164 for (
const auto *i : RD->
fields()) {
1177 unsigned X86_32ABIInfo::getTypeStackAlignInBytes(
QualType Ty,
1178 unsigned Align)
const {
1181 if (Align <= MinABIStackAlignInBytes)
1185 if (!IsDarwinVectorABI) {
1187 return MinABIStackAlignInBytes;
1195 return MinABIStackAlignInBytes;
1199 CCState &State)
const {
1201 if (State.FreeRegs) {
1204 return getNaturalAlignIndirectInReg(Ty);
1206 return getNaturalAlignIndirect(Ty,
false);
1210 unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
1211 unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign);
1212 if (StackAlign == 0)
1217 bool Realign = TypeAlign > StackAlign;
1222 X86_32ABIInfo::Class X86_32ABIInfo::classify(
QualType Ty)
const {
1229 if (K == BuiltinType::Float || K == BuiltinType::Double)
1235 bool X86_32ABIInfo::updateFreeRegs(
QualType Ty, CCState &State)
const {
1236 if (!IsSoftFloatABI) {
1237 Class
C = classify(Ty);
1242 unsigned Size = getContext().getTypeSize(Ty);
1243 unsigned SizeInRegs = (Size + 31) / 32;
1245 if (SizeInRegs == 0)
1249 if (SizeInRegs > State.FreeRegs) {
1258 if (SizeInRegs > State.FreeRegs || SizeInRegs > 2)
1262 State.FreeRegs -= SizeInRegs;
1266 bool X86_32ABIInfo::shouldAggregateUseDirect(
QualType Ty, CCState &State,
1268 bool &NeedsPadding)
const {
1269 NeedsPadding =
false;
1272 if (!updateFreeRegs(Ty, State))
1278 if (State.CC == llvm::CallingConv::X86_FastCall ||
1279 State.CC == llvm::CallingConv::X86_VectorCall) {
1280 if (getContext().
getTypeSize(Ty) <= 32 && State.FreeRegs)
1281 NeedsPadding =
true;
1289 bool X86_32ABIInfo::shouldPrimitiveUseInReg(
QualType Ty, CCState &State)
const {
1290 if (!updateFreeRegs(Ty, State))
1296 if (State.CC == llvm::CallingConv::X86_FastCall ||
1297 State.CC == llvm::CallingConv::X86_VectorCall) {
1309 CCState &State)
const {
1319 return getIndirectResult(Ty,
false, State);
1328 const Type *Base =
nullptr;
1329 uint64_t NumElts = 0;
1330 if (State.CC == llvm::CallingConv::X86_VectorCall &&
1331 isHomogeneousAggregate(Ty, Base, NumElts)) {
1332 if (State.FreeSSERegs >= NumElts) {
1333 State.FreeSSERegs -= NumElts;
1338 return getIndirectResult(Ty,
false, State);
1344 if (IsWin32StructABI)
1345 return getIndirectResult(Ty,
true, State);
1349 return getIndirectResult(Ty,
true, State);
1356 llvm::LLVMContext &LLVMContext = getVMContext();
1357 llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
1358 bool NeedsPadding, InReg;
1359 if (shouldAggregateUseDirect(Ty, State, InReg, NeedsPadding)) {
1360 unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32;
1368 llvm::IntegerType *PaddingType = NeedsPadding ? Int32 :
nullptr;
1378 (!IsMCUABI || State.FreeRegs == 0))
1380 State.CC == llvm::CallingConv::X86_FastCall ||
1381 State.CC == llvm::CallingConv::X86_VectorCall,
1384 return getIndirectResult(Ty,
true, State);
1390 if (IsDarwinVectorABI) {
1391 uint64_t Size = getContext().getTypeSize(Ty);
1392 if ((Size == 8 || Size == 16 || Size == 32) ||
1393 (Size == 64 && VT->getNumElements() == 1))
1398 if (IsX86_MMXType(CGT.ConvertType(Ty)))
1406 Ty = EnumTy->getDecl()->getIntegerType();
1408 bool InReg = shouldPrimitiveUseInReg(Ty, State);
1425 else if (State.CC == llvm::CallingConv::X86_FastCall)
1427 else if (State.CC == llvm::CallingConv::X86_VectorCall) {
1429 State.FreeSSERegs = 6;
1433 State.FreeRegs = DefaultNumRegisterParameters;
1435 if (!getCXXABI().classifyReturnType(FI)) {
1440 if (State.FreeRegs) {
1451 bool UsedInAlloca =
false;
1453 I.info = classifyArgumentType(
I.type, State);
1460 rewriteWithInAlloca(FI);
1470 assert(StackOffset.
isMultipleOf(FieldAlign) &&
"unaligned inalloca struct");
1472 FrameFields.push_back(CGT.ConvertTypeForMem(Type));
1473 StackOffset += getContext().getTypeSizeInChars(Type);
1478 if (StackOffset != FieldEnd) {
1479 CharUnits NumBytes = StackOffset - FieldEnd;
1480 llvm::Type *Ty = llvm::Type::getInt8Ty(getVMContext());
1481 Ty = llvm::ArrayType::get(Ty, NumBytes.
getQuantity());
1482 FrameFields.push_back(Ty);
1503 llvm_unreachable(
"invalid enum");
1506 void X86_32ABIInfo::rewriteWithInAlloca(
CGFunctionInfo &FI)
const {
1507 assert(IsWin32StructABI &&
"inalloca only supported on win32");
1524 addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);
1531 addFieldToArgStruct(FrameFields, StackOffset, Ret, PtrTy);
1541 for (; I !=
E; ++
I) {
1543 addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);
1546 FI.
setArgStruct(llvm::StructType::get(getVMContext(), FrameFields,
1554 auto TypeInfo = getContext().getTypeInfoInChars(Ty);
1561 getTypeStackAlignInBytes(Ty,
TypeInfo.second.getQuantity()));
1568 bool X86_32TargetCodeGenInfo::isStructReturnInRegABI(
1570 assert(Triple.getArch() == llvm::Triple::x86);
1572 switch (Opts.getStructReturnConvention()) {
1581 if (Triple.isOSDarwin() || Triple.isOSIAMCU())
1584 switch (Triple.getOS()) {
1585 case llvm::Triple::DragonFly:
1586 case llvm::Triple::FreeBSD:
1587 case llvm::Triple::OpenBSD:
1588 case llvm::Triple::Bitrig:
1589 case llvm::Triple::Win32:
1596 void X86_32TargetCodeGenInfo::setTargetAttributes(
const Decl *D,
1597 llvm::GlobalValue *GV,
1599 if (
const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
1600 if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
1602 llvm::Function *Fn = cast<llvm::Function>(GV);
1605 llvm::AttrBuilder B;
1606 B.addStackAlignmentAttr(16);
1607 Fn->addAttributes(llvm::AttributeSet::FunctionIndex,
1609 llvm::AttributeSet::FunctionIndex,
1615 bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
1638 Four8, Builder.CreateConstInBoundsGEP1_32(CGF.
Int8Ty, Address, 9),
1665 static unsigned getNativeVectorSizeForAVXABI(
X86AVXABILevel AVXLevel) {
1667 case X86AVXABILevel::AVX512:
1669 case X86AVXABILevel::AVX:
1674 llvm_unreachable(
"Unknown AVXLevel");
1678 class X86_64ABIInfo :
public ABIInfo {
1699 static Class merge(Class Accum, Class Field);
1715 void postMerge(
unsigned AggregateSize, Class &Lo, Class &Hi)
const;
1741 void classify(
QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi,
1742 bool isNamedArg)
const;
1746 unsigned IROffset,
QualType SourceTy,
1747 unsigned SourceOffset)
const;
1749 unsigned IROffset,
QualType SourceTy,
1750 unsigned SourceOffset)
const;
1766 unsigned freeIntRegs,
1767 unsigned &neededInt,
1768 unsigned &neededSSE,
1769 bool isNamedArg)
const;
1771 bool IsIllegalVectorType(
QualType Ty)
const;
1778 bool honorsRevision0_98()
const {
1779 return !getTarget().getTriple().isOSDarwin();
1785 bool Has64BitPointers;
1789 ABIInfo(CGT), AVXLevel(AVXLevel),
1790 Has64BitPointers(CGT.getDataLayout().getPointerSize(0) == 8) {
1794 unsigned neededInt, neededSSE;
1796 ABIArgInfo info = classifyArgumentType(type, 0, neededInt, neededSSE,
1800 if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty))
1801 return (vectorTy->getBitWidth() > 128);
1813 bool has64BitPointers()
const {
1814 return Has64BitPointers;
1819 class WinX86_64ABIInfo :
public ABIInfo {
1823 IsMingw64(getTarget().getTriple().isWindowsGNUEnvironment()) {}
1830 bool isHomogeneousAggregateBaseType(
QualType Ty)
const override {
1832 return isX86VectorTypeForVectorCall(getContext(), Ty);
1835 bool isHomogeneousAggregateSmallEnough(
const Type *Ty,
1836 uint64_t NumMembers)
const override {
1838 return isX86VectorCallAggregateSmallEnough(NumMembers);
1843 bool IsReturnType)
const;
1853 const X86_64ABIInfo &getABIInfo()
const {
1872 StringRef Constraint,
1874 return X86AdjustInlineAsmType(CGF, Constraint, Ty);
1877 bool isNoProtoCallVariadic(
const CallArgList &args,
1886 bool HasAVXType =
false;
1887 for (CallArgList::const_iterator
1888 it = args.begin(), ie = args.end(); it != ie; ++it) {
1889 if (getABIInfo().isPassedUsingAVXType(it->Ty)) {
1905 if (getABIInfo().has64BitPointers())
1915 return llvm::ConstantInt::get(CGM.
Int32Ty, Sig);
1919 class PS4TargetCodeGenInfo :
public X86_64TargetCodeGenInfo {
1922 : X86_64TargetCodeGenInfo(CGT, AVXLevel) {}
1924 void getDependentLibraryOption(llvm::StringRef Lib,
1928 if (Lib.find(
" ") != StringRef::npos)
1929 Opt +=
"\"" + Lib.str() +
"\"";
1935 static std::string qualifyWindowsLibrary(llvm::StringRef Lib) {
1939 bool Quote = (Lib.find(
" ") != StringRef::npos);
1940 std::string ArgStr = Quote ?
"\"" :
"";
1942 if (!Lib.endswith_lower(
".lib"))
1944 ArgStr += Quote ?
"\"" :
"";
1948 class WinX86_32TargetCodeGenInfo :
public X86_32TargetCodeGenInfo {
1951 bool DarwinVectorABI,
bool RetSmallStructInRegABI,
bool Win32StructABI,
1952 unsigned NumRegisterParameters)
1953 : X86_32TargetCodeGenInfo(CGT, DarwinVectorABI, RetSmallStructInRegABI,
1954 Win32StructABI, NumRegisterParameters,
false) {}
1956 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
1959 void getDependentLibraryOption(llvm::StringRef Lib,
1961 Opt =
"/DEFAULTLIB:";
1962 Opt += qualifyWindowsLibrary(Lib);
1965 void getDetectMismatchOption(llvm::StringRef
Name,
1966 llvm::StringRef
Value,
1968 Opt =
"/FAILIFMISMATCH:\"" + Name.str() +
"=" + Value.str() +
"\"";
1972 static void addStackProbeSizeTargetAttribute(
const Decl *D,
1973 llvm::GlobalValue *GV,
1975 if (D && isa<FunctionDecl>(D)) {
1977 llvm::Function *Fn = cast<llvm::Function>(GV);
1979 Fn->addFnAttr(
"stack-probe-size",
1985 void WinX86_32TargetCodeGenInfo::setTargetAttributes(
const Decl *D,
1986 llvm::GlobalValue *GV,
1988 X86_32TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
1990 addStackProbeSizeTargetAttribute(D, GV, CGM);
1999 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
2016 void getDependentLibraryOption(llvm::StringRef Lib,
2018 Opt =
"/DEFAULTLIB:";
2019 Opt += qualifyWindowsLibrary(Lib);
2022 void getDetectMismatchOption(llvm::StringRef Name,
2023 llvm::StringRef Value,
2025 Opt =
"/FAILIFMISMATCH:\"" + Name.str() +
"=" + Value.str() +
"\"";
2029 void WinX86_64TargetCodeGenInfo::setTargetAttributes(
const Decl *D,
2030 llvm::GlobalValue *GV,
2034 addStackProbeSizeTargetAttribute(D, GV, CGM);
2038 void X86_64ABIInfo::postMerge(
unsigned AggregateSize, Class &Lo,
2063 if (Hi == X87Up && Lo != X87 && honorsRevision0_98())
2065 if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp))
2067 if (Hi == SSEUp && Lo != SSE)
2071 X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) {
2095 assert((Accum != Memory && Accum != ComplexX87) &&
2096 "Invalid accumulated classification during merge.");
2097 if (Accum == Field || Field == NoClass)
2099 if (Field == Memory)
2101 if (Accum == NoClass)
2103 if (Accum == Integer || Field == Integer)
2105 if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
2106 Accum == X87 || Accum == X87Up)
2111 void X86_64ABIInfo::classify(
QualType Ty, uint64_t OffsetBase,
2112 Class &Lo, Class &Hi,
bool isNamedArg)
const {
2123 Class &
Current = OffsetBase < 64 ? Lo : Hi;
2129 if (k == BuiltinType::Void) {
2131 }
else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
2134 }
else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
2136 }
else if (k == BuiltinType::Float || k == BuiltinType::Double) {
2138 }
else if (k == BuiltinType::LongDouble) {
2139 const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
2140 if (LDF == &llvm::APFloat::IEEEquad) {
2143 }
else if (LDF == &llvm::APFloat::x87DoubleExtended) {
2146 }
else if (LDF == &llvm::APFloat::IEEEdouble) {
2149 llvm_unreachable(
"unexpected long double representation!");
2158 classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi, isNamedArg);
2169 if (Has64BitPointers) {
2176 uint64_t EB_FuncPtr = (OffsetBase) / 64;
2177 uint64_t EB_ThisAdj = (OffsetBase + 64 - 1) / 64;
2178 if (EB_FuncPtr != EB_ThisAdj) {
2191 uint64_t Size = getContext().getTypeSize(VT);
2192 if (Size == 1 || Size == 8 || Size == 16 || Size == 32) {
2201 uint64_t EB_Lo = (OffsetBase) / 64;
2202 uint64_t EB_Hi = (OffsetBase + Size - 1) / 64;
2205 }
else if (Size == 64) {
2207 if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double))
2211 if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::LongLong) ||
2212 VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULongLong) ||
2213 VT->getElementType()->isSpecificBuiltinType(BuiltinType::Long) ||
2214 VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULong))
2221 if (OffsetBase && OffsetBase != 64)
2223 }
else if (Size == 128 ||
2224 (isNamedArg && Size <= getNativeVectorSizeForAVXABI(AVXLevel))) {
2248 uint64_t Size = getContext().getTypeSize(Ty);
2252 else if (Size <= 128)
2254 }
else if (ET == getContext().FloatTy) {
2256 }
else if (ET == getContext().DoubleTy) {
2258 }
else if (ET == getContext().LongDoubleTy) {
2259 const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
2260 if (LDF == &llvm::APFloat::IEEEquad)
2262 else if (LDF == &llvm::APFloat::x87DoubleExtended)
2263 Current = ComplexX87;
2264 else if (LDF == &llvm::APFloat::IEEEdouble)
2267 llvm_unreachable(
"unexpected long double representation!");
2272 uint64_t EB_Real = (OffsetBase) / 64;
2273 uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64;
2274 if (Hi == NoClass && EB_Real != EB_Imag)
2283 uint64_t Size = getContext().getTypeSize(Ty);
2294 if (OffsetBase % getContext().getTypeAlign(AT->getElementType()))
2300 uint64_t EltSize = getContext().getTypeSize(AT->getElementType());
2301 uint64_t ArraySize = AT->getSize().getZExtValue();
2306 if (Size > 128 && EltSize != 256)
2309 for (uint64_t i=0,
Offset=OffsetBase; i<ArraySize; ++i,
Offset += EltSize) {
2310 Class FieldLo, FieldHi;
2311 classify(AT->getElementType(),
Offset, FieldLo, FieldHi, isNamedArg);
2312 Lo = merge(Lo, FieldLo);
2313 Hi = merge(Hi, FieldHi);
2314 if (Lo == Memory || Hi == Memory)
2318 postMerge(Size, Lo, Hi);
2319 assert((Hi != SSEUp || Lo == SSE) &&
"Invalid SSEUp array classification.");
2324 uint64_t Size = getContext().getTypeSize(Ty);
2349 if (
const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
2350 for (
const auto &I : CXXRD->bases()) {
2351 assert(!I.isVirtual() && !I.getType()->isDependentType() &&
2352 "Unexpected base class!");
2354 cast<CXXRecordDecl>(I.getType()->getAs<
RecordType>()->getDecl());
2361 Class FieldLo, FieldHi;
2364 classify(I.getType(),
Offset, FieldLo, FieldHi, isNamedArg);
2365 Lo = merge(Lo, FieldLo);
2366 Hi = merge(Hi, FieldHi);
2367 if (Lo == Memory || Hi == Memory) {
2368 postMerge(Size, Lo, Hi);
2377 i != e; ++i, ++idx) {
2379 bool BitField = i->isBitField();
2388 if (Size > 128 && getContext().
getTypeSize(i->getType()) != 256) {
2390 postMerge(Size, Lo, Hi);
2394 if (!BitField && Offset % getContext().getTypeAlign(i->getType())) {
2396 postMerge(Size, Lo, Hi);
2406 Class FieldLo, FieldHi;
2413 if (i->isUnnamedBitfield())
2417 uint64_t Size = i->getBitWidthValue(getContext());
2419 uint64_t EB_Lo = Offset / 64;
2420 uint64_t EB_Hi = (Offset + Size - 1) / 64;
2423 assert(EB_Hi == EB_Lo &&
"Invalid classification, type > 16 bytes.");
2428 FieldHi = EB_Hi ? Integer : NoClass;
2431 classify(i->getType(),
Offset, FieldLo, FieldHi, isNamedArg);
2432 Lo = merge(Lo, FieldLo);
2433 Hi = merge(Hi, FieldHi);
2434 if (Lo == Memory || Hi == Memory)
2438 postMerge(Size, Lo, Hi);
2448 Ty = EnumTy->getDecl()->getIntegerType();
2454 return getNaturalAlignIndirect(Ty);
2457 bool X86_64ABIInfo::IsIllegalVectorType(
QualType Ty)
const {
2459 uint64_t Size = getContext().getTypeSize(VecTy);
2460 unsigned LargestVector = getNativeVectorSizeForAVXABI(AVXLevel);
2461 if (Size <= 64 || Size > LargestVector)
2469 unsigned freeIntRegs)
const {
2481 Ty = EnumTy->getDecl()->getIntegerType();
2492 unsigned Align = std::max(getContext().getTypeAlign(Ty) / 8, 8U);
2515 if (freeIntRegs == 0) {
2516 uint64_t Size = getContext().getTypeSize(Ty);
2520 if (Align == 8 && Size <= 64)
2537 if (isa<llvm::VectorType>(IRType) ||
2538 IRType->getTypeID() == llvm::Type::FP128TyID)
2542 uint64_t Size = getContext().getTypeSize(Ty);
2543 assert((Size == 128 || Size == 256) &&
"Invalid type found!");
2546 return llvm::VectorType::get(llvm::Type::getDoubleTy(getVMContext()),
2563 if (TySize <= StartBit)
2568 unsigned NumElts = (
unsigned)AT->getSize().getZExtValue();
2571 for (
unsigned i = 0; i != NumElts; ++i) {
2573 unsigned EltOffset = i*EltSize;
2574 if (EltOffset >= EndBit)
break;
2576 unsigned EltStart = EltOffset < StartBit ? StartBit-EltOffset :0;
2590 if (
const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
2591 for (
const auto &I : CXXRD->bases()) {
2592 assert(!I.isVirtual() && !I.getType()->isDependentType() &&
2593 "Unexpected base class!");
2595 cast<CXXRecordDecl>(I.getType()->getAs<
RecordType>()->getDecl());
2599 if (BaseOffset >= EndBit)
continue;
2601 unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset :0;
2614 i != e; ++i, ++idx) {
2618 if (FieldOffset >= EndBit)
break;
2620 unsigned FieldStart = FieldOffset < StartBit ? StartBit-FieldOffset :0;
2639 const llvm::DataLayout &TD) {
2641 if (IROffset == 0 && IRType->isFloatTy())
2645 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
2646 const llvm::StructLayout *SL = TD.getStructLayout(STy);
2647 unsigned Elt = SL->getElementContainingOffset(IROffset);
2648 IROffset -= SL->getElementOffset(Elt);
2653 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
2655 unsigned EltSize = TD.getTypeAllocSize(EltTy);
2656 IROffset -= IROffset/EltSize*EltSize;
2667 GetSSETypeAtOffset(
llvm::Type *IRType,
unsigned IROffset,
2668 QualType SourceTy,
unsigned SourceOffset)
const {
2673 SourceOffset*8+64, getContext()))
2674 return llvm::Type::getFloatTy(getVMContext());
2681 return llvm::VectorType::get(llvm::Type::getFloatTy(getVMContext()), 2);
2683 return llvm::Type::getDoubleTy(getVMContext());
2702 GetINTEGERTypeAtOffset(
llvm::Type *IRType,
unsigned IROffset,
2703 QualType SourceTy,
unsigned SourceOffset)
const {
2706 if (IROffset == 0) {
2708 if ((isa<llvm::PointerType>(IRType) && Has64BitPointers) ||
2709 IRType->isIntegerTy(64))
2718 if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) ||
2719 IRType->isIntegerTy(32) ||
2720 (isa<llvm::PointerType>(IRType) && !Has64BitPointers)) {
2721 unsigned BitWidth = isa<llvm::PointerType>(IRType) ? 32 :
2722 cast<llvm::IntegerType>(IRType)->getBitWidth();
2725 SourceOffset*8+64, getContext()))
2730 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
2732 const llvm::StructLayout *SL = getDataLayout().getStructLayout(STy);
2733 if (IROffset < SL->getSizeInBytes()) {
2734 unsigned FieldIdx = SL->getElementContainingOffset(IROffset);
2735 IROffset -= SL->getElementOffset(FieldIdx);
2737 return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset,
2738 SourceTy, SourceOffset);
2742 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
2744 unsigned EltSize = getDataLayout().getTypeAllocSize(EltTy);
2745 unsigned EltOffset = IROffset/EltSize*EltSize;
2746 return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy,
2752 unsigned TySizeInBytes =
2753 (
unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity();
2755 assert(TySizeInBytes != SourceOffset &&
"Empty field?");
2759 return llvm::IntegerType::get(getVMContext(),
2760 std::min(TySizeInBytes-SourceOffset, 8U)*8);
2771 const llvm::DataLayout &TD) {
2776 unsigned LoSize = (
unsigned)TD.getTypeAllocSize(Lo);
2777 unsigned HiAlign = TD.getABITypeAlignment(Hi);
2778 unsigned HiStart = llvm::RoundUpToAlignment(LoSize, HiAlign);
2779 assert(HiStart != 0 && HiStart <= 8 &&
"Invalid x86-64 argument pair!");
2791 if (Lo->isFloatTy())
2792 Lo = llvm::Type::getDoubleTy(Lo->getContext());
2794 assert((Lo->isIntegerTy() || Lo->isPointerTy())
2795 &&
"Invalid/unknown lo type");
2796 Lo = llvm::Type::getInt64Ty(Lo->getContext());
2800 llvm::StructType *Result = llvm::StructType::get(Lo, Hi,
nullptr);
2804 assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 &&
2805 "Invalid x86-64 argument pair!");
2810 classifyReturnType(
QualType RetTy)
const {
2813 X86_64ABIInfo::Class Lo, Hi;
2814 classify(RetTy, 0, Lo, Hi,
true);
2817 assert((Hi != Memory || Lo == Memory) &&
"Invalid memory classification.");
2818 assert((Hi != SSEUp || Lo == SSE) &&
"Invalid SSEUp classification.");
2827 assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
2828 "Unknown missing lo part");
2833 llvm_unreachable(
"Invalid classification for lo word.");
2838 return getIndirectReturnResult(RetTy);
2843 ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
2847 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
2850 RetTy = EnumTy->getDecl()->getIntegerType();
2861 ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
2867 ResType = llvm::Type::getX86_FP80Ty(getVMContext());
2874 assert(Hi == ComplexX87 &&
"Unexpected ComplexX87 classification.");
2875 ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(getVMContext()),
2876 llvm::Type::getX86_FP80Ty(getVMContext()),
2887 llvm_unreachable(
"Invalid classification for hi word.");
2894 HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
2899 HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
2910 assert(Lo == SSE &&
"Unexpected SSEUp classification.");
2911 ResType = GetByteVectorType(RetTy);
2922 HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
// NOTE(review): fragmentary extract — documented in place only.
// Classifies one x86-64 argument into Lo/Hi classes; reports how many integer
// and SSE registers it would consume via the neededInt/neededSSE out-params.
2938 ABIArgInfo X86_64ABIInfo::classifyArgumentType(
2939 QualType Ty,
unsigned freeIntRegs,
unsigned &neededInt,
unsigned &neededSSE,
2945 X86_64ABIInfo::Class Lo, Hi;
2946 classify(Ty, 0, Lo, Hi, isNamedArg);
// Same (Lo, Hi) pair invariants as in classifyReturnType.
2950 assert((Hi != Memory || Lo == Memory) &&
"Invalid memory classification.");
2951 assert((Hi != SSEUp || Lo == SSE) &&
"Invalid SSEUp classification.");
2962 assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
2963 "Unknown missing lo part");
// Memory class: pass on the stack/indirectly.
2976 return getIndirectResult(Ty, freeIntRegs);
2980 llvm_unreachable(
"Invalid classification for lo word.");
2989 ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0);
2993 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
2996 Ty = EnumTy->getDecl()->getIntegerType();
3010 ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0);
3024 llvm_unreachable(
"Invalid classification for hi word.");
3026 case NoClass:
break;
// Coercion type for the second eightbyte (offset 8).
3031 HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);
3041 HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);
3053 assert(Lo == SSE &&
"Unexpected SSEUp classification");
3054 ResType = GetByteVectorType(Ty);
// NOTE(review): fragmentary extract of two routines — the whole-signature
// computeInfo pass (register budgeting) and part of the va_arg-from-memory
// path. Interior lines are missing; documented in place only.
// Register budget: 6 integer regs, 8 SSE regs, per the x86-64 calling
// convention constants used below.
3069 if (!getCXXABI().classifyReturnType(FI))
3073 unsigned freeIntRegs = 6, freeSSERegs = 8;
3089 it != ie; ++it, ++ArgNo) {
3090 bool IsNamedArg = ArgNo < NumRequiredArgs;
3092 unsigned neededInt, neededSSE;
3093 it->info = classifyArgumentType(it->type, freeIntRegs, neededInt,
3094 neededSSE, IsNamedArg);
// Deduct from the budget only if the argument fully fits in registers;
// otherwise fall back to an indirect (memory) result.
3100 if (freeIntRegs >= neededInt && freeSSERegs >= neededSSE) {
3101 freeIntRegs -= neededInt;
3102 freeSSERegs -= neededSSE;
3104 it->info = getIndirectResult(it->type, freeIntRegs);
// Overflow-area path: advance overflow_arg_area by the size rounded up
// to 8 bytes ((SizeInBytes + 7) & ~7).
3130 llvm::PointerType::getUnqual(LTy));
3139 llvm::ConstantInt::get(CGF.
Int32Ty, (SizeInBytes + 7) & ~7);
3140 overflow_arg_area = CGF.
Builder.CreateGEP(overflow_arg_area, Offset,
3141 "overflow_arg_area.next");
3145 return Address(Res, Align);
// NOTE(review): fragmentary extract — documented in place only.
// Emits the x86-64 va_arg sequence: classify the type, test whether it still
// fits in the register save area (gp_offset/fp_offset), then branch to an
// in-register or in-memory load and merge the two addresses.
3148 Address X86_64ABIInfo::EmitVAArg(
CodeGenFunction &CGF, Address VAListAddr,
3157 unsigned neededInt, neededSSE;
3160 ABIArgInfo AI = classifyArgumentType(Ty, 0, neededInt, neededSSE,
// Needs no registers at all -> purely a memory argument.
3165 if (!neededInt && !neededSSE)
3181 llvm::Value *gp_offset =
nullptr, *fp_offset =
nullptr;
// 48 = 6 GP regs * 8 bytes; 176 = 48 + 8 SSE regs * 16 bytes — the save-area
// layout constants tested against the current offsets.
3187 InRegs = llvm::ConstantInt::get(CGF.
Int32Ty, 48 - neededInt * 8);
3188 InRegs = CGF.
Builder.CreateICmpULE(gp_offset, InRegs,
"fits_in_gp");
3197 llvm::ConstantInt::get(CGF.
Int32Ty, 176 - neededSSE * 16);
3198 FitsInFP = CGF.
Builder.CreateICmpULE(fp_offset, FitsInFP,
"fits_in_fp");
3199 InRegs = InRegs ? CGF.
Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
3205 CGF.
Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
// Mixed case: one eightbyte in a GP reg, the other in an SSE reg; the direct
// coercion type must be a two-element struct with exactly one FP half.
3227 if (neededInt && neededSSE) {
3229 assert(AI.
isDirect() &&
"Unexpected ABI info for mixed regs");
3233 assert(ST->getNumElements() == 2 &&
"Unexpected ABI info for mixed regs");
3236 assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) &&
3237 "Unexpected ABI info for mixed regs");
3238 llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo);
3239 llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi);
// Route each half to the FP or GP save area depending on which half is FP.
3242 llvm::Value *RegLoAddr = TyLo->isFPOrFPVectorTy() ? FPAddr : GPAddr;
3243 llvm::Value *RegHiAddr = TyLo->isFPOrFPVectorTy() ? GPAddr : FPAddr;
3256 getDataLayout().getStructLayout(ST)->getElementOffset(1));
3260 }
else if (neededInt) {
3261 RegAddr = Address(CGF.
Builder.CreateGEP(RegSaveArea, gp_offset),
3266 std::pair<CharUnits, CharUnits> SizeAlign =
3267 getContext().getTypeInfoInChars(Ty);
3268 uint64_t TySize = SizeAlign.first.getQuantity();
3279 }
else if (neededSSE == 1) {
3280 RegAddr = Address(CGF.
Builder.CreateGEP(RegSaveArea, fp_offset),
// Two SSE registers: assemble the value as a {double, double}-shaped pair.
3284 assert(neededSSE == 2 &&
"Invalid number of needed registers!");
3291 Address RegAddrLo = Address(CGF.
Builder.CreateGEP(RegSaveArea, fp_offset),
3297 llvm::StructType *ST = llvm::StructType::get(DoubleTy, DoubleTy,
nullptr);
// Merge the in-register and in-memory addresses with a PHI.
3336 Address ResAddr =
emitMergePHI(CGF, RegAddr, InRegBlock, MemAddr, InMemBlock,
// NOTE(review): fragmentary extract covering X86_64ABIInfo::EmitMSVAArg and
// the Win64 classify/computeInfo/EmitVAArg routines. Documented in place only.
3341 Address X86_64ABIInfo::EmitMSVAArg(
CodeGenFunction &CGF, Address VAListAddr,
// Win64 classification: promote enums, then size/power-of-two checks decide
// direct vs. indirect passing.
3350 bool IsReturnType)
const {
3356 Ty = EnumTy->getDecl()->getIntegerType();
3358 TypeInfo Info = getContext().getTypeInfo(Ty);
3359 uint64_t Width = Info.
Width;
3364 if (!IsReturnType) {
3370 return getNaturalAlignIndirect(Ty,
false);
// Homogeneous aggregates consume SSE registers while any remain.
3376 const Type *Base =
nullptr;
3377 uint64_t NumElts = 0;
3378 if (FreeSSERegs && isHomogeneousAggregate(Ty, Base, NumElts)) {
3379 if (FreeSSERegs >= NumElts) {
3380 FreeSSERegs -= NumElts;
3393 if (LLTy->isPointerTy() || LLTy->isIntegerTy())
// Win64 passes only power-of-two sizes up to 64 bits directly.
3400 if (Width > 64 || !llvm::isPowerOf2_64(Width))
3401 return getNaturalAlignIndirect(Ty,
false);
3410 if (BT && BT->
getKind() == BuiltinType::Bool)
// MinGW long double is x87 80-bit; handled specially below.
3415 if (IsMingw64 && BT && BT->
getKind() == BuiltinType::LongDouble) {
3416 const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
3417 if (LDF == &llvm::APFloat::x87DoubleExtended)
// computeInfo: vectorcall gets an SSE-register budget (4 for return
// classification, 6 for arguments), otherwise 0.
3429 unsigned FreeSSERegs = IsVectorCall ? 4 : 0;
3430 if (!getCXXABI().classifyReturnType(FI))
3434 FreeSSERegs = IsVectorCall ? 6 : 0;
3436 I.info = classify(I.type, FreeSSERegs,
false);
3439 Address WinX86_64ABIInfo::EmitVAArg(
CodeGenFunction &CGF, Address VAListAddr,
// NOTE(review): fragmentary extract of the PPC32 SVR4 ABI section: the ABIInfo
// class, its EmitVAArg, and DWARF EH register-size constants. Documented in
// place only; interior lines are missing.
3450 class PPC32_SVR4_ABIInfo :
public DefaultABIInfo {
3451 bool IsSoftFloatABI;
3454 : DefaultABIInfo(CGT), IsSoftFloatABI(SoftFloatABI) {}
3462 PPC32TargetCodeGenInfo(
CodeGenTypes &CGT,
bool SoftFloatABI)
// EmitVAArg: up to OverflowLimit (8) argument registers before spilling to
// the overflow area.
3476 Address PPC32_SVR4_ABIInfo::EmitVAArg(
CodeGenFunction &CGF, Address VAList,
3478 const unsigned OverflowLimit = 8;
3493 bool isI64 = Ty->
isIntegerType() && getContext().getTypeSize(Ty) == 64;
3496 bool isF64 = Ty->
isFloatingType() && getContext().getTypeSize(Ty) == 64;
3506 if (isInt || IsSoftFloatABI) {
// 64-bit values occupy an even-aligned register pair: round NumRegs up to
// the next even number.
3515 if (isI64 || (isF64 && IsSoftFloatABI)) {
3516 NumRegs = Builder.CreateAdd(NumRegs, Builder.getInt8(1));
3517 NumRegs = Builder.CreateAnd(NumRegs, Builder.getInt8((uint8_t) ~1U));
3521 Builder.CreateICmpULT(NumRegs, Builder.getInt8(OverflowLimit),
"cond");
3527 Builder.CreateCondBr(CC, UsingRegs, UsingOverflow);
3530 if (isIndirect) DirectTy = DirectTy->getPointerTo(0);
// Register path: index into the register save area.
3537 Address RegSaveAreaPtr =
3539 RegAddr = Address(Builder.
CreateLoad(RegSaveAreaPtr),
3544 if (!(isInt || IsSoftFloatABI)) {
3553 Builder.CreateMul(NumRegs, Builder.getInt8(RegSize.
getQuantity()));
3554 RegAddr = Address(Builder.CreateInBoundsGEP(CGF.
Int8Ty,
// Consume 2 registers for 64-bit values, 1 otherwise.
3561 Builder.CreateAdd(NumRegs,
3562 Builder.getInt8((isI64 || (isF64 && IsSoftFloatABI)) ? 2 : 1));
// Overflow path: mark registers exhausted, then bump the overflow area.
3573 Builder.
CreateStore(Builder.getInt8(OverflowLimit), NumRegsAddr);
3581 Size =
TypeInfo.first.RoundUpToAlignment(OverflowAreaAlign);
3586 Address OverflowAreaAddr =
3588 Address OverflowArea(Builder.
CreateLoad(OverflowAreaAddr,
"argp.cur"),
3592 if (Align > OverflowAreaAlign) {
3602 Builder.
CreateStore(OverflowArea.getPointer(), OverflowAreaAddr);
3609 Address Result =
emitMergePHI(CGF, RegAddr, UsingRegs, MemAddr, UsingOverflow,
// Indirect aggregates are returned via an extra pointer load.
3614 Result = Address(Builder.
CreateLoad(Result,
"aggr"),
3615 getContext().getTypeAlignInChars(Ty));
// DWARF EH register-size table constants (4/8/16-byte register classes).
3629 llvm::IntegerType *i8 = CGF.
Int8Ty;
3630 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
3631 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
3632 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
// NOTE(review): fragmentary extract of the PPC64 SVR4 ABIInfo class
// declaration, its QPX-vector predicate, computeInfo, isPromotableTypeForABI,
// and part of the parameter-alignment logic. Documented in place only.
3666 class PPC64_SVR4_ABIInfo :
public DefaultABIInfo {
3674 static const unsigned GPRBits = 64;
// QPX predicate: double vectors up to 256 bits, float vectors up to 128 bits,
// excluding single-element vectors.
3680 bool IsQPXVectorTy(
const Type *Ty)
const {
3685 unsigned NumElements = VT->getNumElements();
3686 if (NumElements == 1)
3689 if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double)) {
3690 if (getContext().getTypeSize(Ty) <= 256)
3692 }
else if (VT->getElementType()->
3693 isSpecificBuiltinType(BuiltinType::Float)) {
3694 if (getContext().getTypeSize(Ty) <= 128)
3702 bool IsQPXVectorTy(
QualType Ty)
const {
3708 : DefaultABIInfo(CGT), Kind(Kind), HasQPX(HasQPX) {}
3710 bool isPromotableTypeForABI(
QualType Ty)
const;
3716 bool isHomogeneousAggregateBaseType(
QualType Ty)
const override;
3717 bool isHomogeneousAggregateSmallEnough(
const Type *Ty,
3718 uint64_t Members)
const override;
// computeInfo fragment: 128-bit vectors and QPX vectors take a special path.
3727 if (!getCXXABI().classifyReturnType(FI))
3736 if (IsQPXVectorTy(T) ||
3737 (T->
isVectorType() && getContext().getTypeSize(T) == 128) ||
3744 I.info = classifyArgumentType(I.type);
3756 PPC64_SVR4_ABIInfo::ABIKind
Kind,
bool HasQPX)
3768 class PPC64TargetCodeGenInfo :
public DefaultTargetCodeGenInfo {
3770 PPC64TargetCodeGenInfo(
CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {}
// isPromotableTypeForABI: enums use their underlying integer type; small
// builtin integers (Int/UInt cases visible) are promotable.
3786 PPC64_SVR4_ABIInfo::isPromotableTypeForABI(
QualType Ty)
const {
3789 Ty = EnumTy->getDecl()->getIntegerType();
3799 case BuiltinType::Int:
3800 case BuiltinType::UInt:
// Parameter-alignment fragment: complex types align as their element type;
// QPX and 128-bit vector element types drive aggregate alignment.
3814 Ty = CTy->getElementType();
3818 if (IsQPXVectorTy(Ty)) {
3829 const Type *AlignAsType =
nullptr;
3833 if (IsQPXVectorTy(EltType) || (EltType->
isVectorType() &&
3834 getContext().getTypeSize(EltType) == 128) ||
3836 AlignAsType = EltType;
3840 const Type *Base =
nullptr;
3841 uint64_t Members = 0;
3842 if (!AlignAsType &&
Kind == ELFv2 &&
3847 if (AlignAsType && IsQPXVectorTy(AlignAsType)) {
3848 if (getContext().getTypeSize(AlignAsType) > 128)
3852 }
else if (AlignAsType) {
3859 if (HasQPX && getContext().getTypeAlign(Ty) >= 256)
// NOTE(review): fragmentary extract of the shared isHomogeneousAggregate
// recursion plus the PPC64 base-type/small-enough overrides. Documented in
// place only.
// Arrays multiply the member count by the element count.
3871 uint64_t &Members)
const {
3873 uint64_t NElements = AT->getSize().getZExtValue();
3876 if (!isHomogeneousAggregate(AT->getElementType(), Base, Members))
3878 Members *= NElements;
// Records: recurse into C++ bases, accumulating their member counts.
3887 if (
const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
3888 for (
const auto &I : CXXRD->bases()) {
3893 uint64_t FldMembers;
3894 if (!isHomogeneousAggregate(I.getType(), Base, FldMembers))
3897 Members += FldMembers;
3901 for (
const auto *FD : RD->
fields()) {
3905 getContext().getAsConstantArrayType(FT)) {
3906 if (AT->getSize().getZExtValue() == 0)
3908 FT = AT->getElementType();
// Zero-width C++ bit-fields are ignored (they add no data).
3914 if (getContext().getLangOpts().
CPlusPlus &&
3915 FD->isBitField() && FD->getBitWidthValue(getContext()) == 0)
3918 uint64_t FldMembers;
3919 if (!isHomogeneousAggregate(FD->getType(), Base, FldMembers))
// Unions take the max over fields; structs sum them (per the ?: below).
3923 std::max(Members, FldMembers) : Members + FldMembers);
// Reject padding: member size times count must equal the aggregate size.
3930 if (getContext().getTypeSize(Base) * Members !=
3931 getContext().getTypeSize(Ty))
3937 Ty = CT->getElementType();
3941 if (!isHomogeneousAggregateBaseType(Ty))
3952 getContext().getTypeSize(Base) != getContext().getTypeSize(TyPtr))
3955 return Members > 0 && isHomogeneousAggregateSmallEnough(Base, Members);
// PPC64: base type may be float/double/long double or a 128-bit/QPX vector.
3958 bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateBaseType(
QualType Ty)
const {
3962 if (BT->
getKind() == BuiltinType::Float ||
3963 BT->
getKind() == BuiltinType::Double ||
3964 BT->
getKind() == BuiltinType::LongDouble)
3968 if (getContext().getTypeSize(VT) == 128 || IsQPXVectorTy(Ty))
// PPC64: total register footprint limited to 8 (vectors count as 1 register,
// scalars as ceil(size/64)).
3974 bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateSmallEnough(
3975 const Type *Base, uint64_t Members)
const {
3979 Base->
isVectorType() ? 1 : (getContext().getTypeSize(Base) + 63) / 64;
3982 return Members * NumRegs <= 8;
// NOTE(review): fragmentary extract of PPC64 classifyArgumentType and
// classifyReturnType. Documented in place only.
3986 PPC64_SVR4_ABIInfo::classifyArgumentType(
QualType Ty)
const {
// Vectors: >=128-bit handled indirectly; smaller ones coerced to an integer
// of the same bit width.
3995 uint64_t Size = getContext().getTypeSize(Ty);
3997 return getNaturalAlignIndirect(Ty,
false);
3998 else if (Size < 128) {
3999 llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size);
4008 uint64_t ABIAlign = getParamTypeAlignment(Ty).getQuantity();
4009 uint64_t TyAlign = getContext().getTypeAlignInChars(Ty).
getQuantity();
// ELFv2 homogeneous aggregates pass as an array of the base type.
4012 const Type *Base =
nullptr;
4013 uint64_t Members = 0;
4014 if (
Kind == ELFv2 &&
4015 isHomogeneousAggregate(Ty, Base, Members)) {
4017 llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members);
// Other aggregates up to 8 GPRs (8 * 64 bits) are coerced to an integer or
// an array of register-sized integers.
4025 uint64_t Bits = getContext().getTypeSize(Ty);
4026 if (Bits > 0 && Bits <= 8 * GPRBits) {
4031 if (Bits <= GPRBits)
4032 CoerceTy = llvm::IntegerType::get(getVMContext(),
4033 llvm::RoundUpToAlignment(Bits, 8));
4037 uint64_t RegBits = ABIAlign * 8;
4038 uint64_t NumRegs = llvm::RoundUpToAlignment(Bits, RegBits) / RegBits;
4039 llvm::Type *RegTy = llvm::IntegerType::get(getVMContext(), RegBits);
4040 CoerceTy = llvm::ArrayType::get(RegTy, NumRegs);
4049 TyAlign > ABIAlign);
4052 return (isPromotableTypeForABI(Ty) ?
// classifyReturnType mirrors the argument logic for returns.
4057 PPC64_SVR4_ABIInfo::classifyReturnType(
QualType RetTy)
const {
4067 uint64_t Size = getContext().getTypeSize(RetTy);
4069 return getNaturalAlignIndirect(RetTy);
4070 else if (Size < 128) {
4071 llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size);
4078 const Type *Base =
nullptr;
4079 uint64_t Members = 0;
4080 if (
Kind == ELFv2 &&
4081 isHomogeneousAggregate(RetTy, Base, Members)) {
4083 llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members);
// ELFv2 small aggregate returns (<= 2 GPRs) come back in registers, as one
// integer or a {i64, i64} pair.
4088 uint64_t Bits = getContext().getTypeSize(RetTy);
4089 if (
Kind == ELFv2 && Bits <= 2 * GPRBits) {
4094 if (Bits > GPRBits) {
4095 CoerceTy = llvm::IntegerType::get(getVMContext(), GPRBits);
4096 CoerceTy = llvm::StructType::get(CoerceTy, CoerceTy,
nullptr);
4098 CoerceTy = llvm::IntegerType::get(getVMContext(),
4099 llvm::RoundUpToAlignment(Bits, 8));
4104 return getNaturalAlignIndirect(RetTy);
4107 return (isPromotableTypeForABI(RetTy) ?
// NOTE(review): fragmentary extract of PPC64 EmitVAArg (complex-value slot
// adjustment for values smaller than a slot) and the DWARF EH register-size
// constants. Documented in place only.
4112 Address PPC64_SVR4_ABIInfo::EmitVAArg(
CodeGenFunction &CGF, Address VAListAddr,
4114 auto TypeInfo = getContext().getTypeInfoInChars(Ty);
4115 TypeInfo.second = getParamTypeAlignment(Ty);
// Complex elements narrower than a slot sit at the high end of each slot;
// the real/imag addresses are offset accordingly.
4127 if (EltSize < SlotSize) {
4129 SlotSize * 2, SlotSize,
4132 Address RealAddr = Addr;
4133 Address ImagAddr = RealAddr;
4136 SlotSize - EltSize);
4138 2 * SlotSize - EltSize);
// DWARF EH register-size table constants (4/8/16-byte register classes).
4169 llvm::IntegerType *i8 = CGF.
Int8Ty;
4170 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
4171 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
4172 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
4203 PPC64_SVR4_TargetCodeGenInfo::initDwarfEHRegSizeTable(
// NOTE(review): fragmentary extract of the AArch64ABIInfo class declaration
// (and part of its target-codegen-info companion). Documented in place only.
4223 class AArch64ABIInfo :
public ABIInfo {
4237 ABIKind getABIKind()
const {
return Kind; }
// DarwinPCS selects the Darwin variant of the calling convention; used below
// to pick between the two va_arg emitters.
4238 bool isDarwinPCS()
const {
return Kind == DarwinPCS; }
4242 bool isHomogeneousAggregateBaseType(
QualType Ty)
const override;
4243 bool isHomogeneousAggregateSmallEnough(
const Type *Ty,
4244 uint64_t Members)
const override;
4246 bool isIllegalVectorType(
QualType Ty)
const;
4249 if (!getCXXABI().classifyReturnType(FI))
4253 it.info = classifyArgumentType(it.type);
4256 Address EmitDarwinVAArg(Address VAListAddr,
QualType Ty,
4259 Address EmitAAPCSVAArg(Address VAListAddr,
QualType Ty,
// EmitVAArg dispatches on the PCS variant.
4264 return isDarwinPCS() ? EmitDarwinVAArg(VAListAddr, Ty, CGF)
4265 : EmitAAPCSVAArg(VAListAddr, Ty, CGF);
4274 StringRef getARCRetainAutoreleasedReturnValueMarker()
const override {
4275 return "mov\tfp, fp\t\t; marker for objc_retainAutoreleaseReturnValue";
4282 bool doesReturnSlotInterfereWithArgs()
const override {
return false; }
// NOTE(review): fragmentary extract of AArch64 classifyArgumentType (the
// signature line itself is missing from the extract). Documented in place.
// Illegal vectors are coerced to i32, <2 x i32>, <4 x i32>, or passed
// indirectly depending on size.
4290 if (isIllegalVectorType(Ty)) {
4291 uint64_t Size = getContext().getTypeSize(Ty);
4293 llvm::Type *ResType = llvm::Type::getInt32Ty(getVMContext());
4298 llvm::VectorType::get(llvm::Type::getInt32Ty(getVMContext()), 2);
4303 llvm::VectorType::get(llvm::Type::getInt32Ty(getVMContext()), 4);
4306 return getNaturalAlignIndirect(Ty,
false);
// Enums are classified as their underlying integer type.
4312 Ty = EnumTy->getDecl()->getIntegerType();
4322 return getNaturalAlignIndirect(Ty, RAA ==
4329 if (!getContext().getLangOpts().
CPlusPlus || isDarwinPCS())
// Homogeneous aggregates pass as an array of the base type.
4336 const Type *Base =
nullptr;
4337 uint64_t Members = 0;
4338 if (isHomogeneousAggregate(Ty, Base, Members)) {
4340 llvm::ArrayType::get(CGT.ConvertType(
QualType(Base, 0)), Members));
// Other small aggregates: round size up to 64-bit units; a 128-bit value
// with alignment < 128 is coerced to [2 x i64].
4344 uint64_t Size = getContext().getTypeSize(Ty);
4346 unsigned Alignment = getContext().getTypeAlign(Ty);
4347 Size = 64 * ((Size + 63) / 64);
4351 if (Alignment < 128 && Size == 128) {
4352 llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext());
4358 return getNaturalAlignIndirect(Ty,
false);
// NOTE(review): fragmentary extract of AArch64 classifyReturnType (signature
// line missing from the extract). Documented in place only.
// Over-large vectors (>128 bits) are returned indirectly.
4366 if (RetTy->
isVectorType() && getContext().getTypeSize(RetTy) > 128)
4367 return getNaturalAlignIndirect(RetTy);
4372 RetTy = EnumTy->getDecl()->getIntegerType();
// Homogeneous aggregates are returned in registers.
4382 const Type *Base =
nullptr;
4383 uint64_t Members = 0;
4384 if (isHomogeneousAggregate(RetTy, Base, Members))
// Small aggregates: same round-up-to-64-bit / [2 x i64] shaping as the
// argument path.
4389 uint64_t Size = getContext().getTypeSize(RetTy);
4391 unsigned Alignment = getContext().getTypeAlign(RetTy);
4392 Size = 64 * ((Size + 63) / 64);
4396 if (Alignment < 128 && Size == 128) {
4397 llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext());
4403 return getNaturalAlignIndirect(RetTy);
// NOTE(review): fragmentary extract of the AArch64 vector/HFA predicates.
// Legal vectors: power-of-two element count (<= 16), total size 64 bits, or
// 128 bits with more than one element.
4407 bool AArch64ABIInfo::isIllegalVectorType(
QualType Ty)
const {
4410 unsigned NumElements = VT->getNumElements();
4411 uint64_t Size = getContext().getTypeSize(VT);
4413 if ((NumElements & (NumElements - 1)) != 0 || NumElements > 16)
4415 return Size != 64 && (Size != 128 || NumElements == 1);
// HFA/HVA base types include 64- and 128-bit vectors.
4420 bool AArch64ABIInfo::isHomogeneousAggregateBaseType(
QualType Ty)
const {
4429 unsigned VecSize = getContext().getTypeSize(VT);
4430 if (VecSize == 64 || VecSize == 128)
// At most 4 members qualify as a homogeneous aggregate.
4436 bool AArch64ABIInfo::isHomogeneousAggregateSmallEnough(
const Type *Base,
4437 uint64_t Members)
const {
4438 return Members <= 4;
// NOTE(review): fragmentary extract — documented in place only.
// AAPCS va_arg: test the (negative) register offset in the va_list, branch to
// an in-register block (GP or FP save area) or the on-stack block, then merge.
4441 Address AArch64ABIInfo::EmitAAPCSVAArg(Address VAListAddr,
4449 BaseTy = llvm::PointerType::getUnqual(BaseTy);
// Array coercion types represent multi-register values (one per element).
4453 unsigned NumRegs = 1;
4454 if (llvm::ArrayType *ArrTy = dyn_cast<llvm::ArrayType>(BaseTy)) {
4455 BaseTy = ArrTy->getElementType();
4456 NumRegs = ArrTy->getNumElements();
4458 bool IsFPR = BaseTy->isFloatingPointTy() || BaseTy->isVectorTy();
4476 auto TyInfo = getContext().getTypeInfoInChars(Ty);
4483 int RegSize = IsIndirect ? 8 : TyInfo.first.
getQuantity();
// GP registers are consumed in 8-byte units; FP/SIMD in 16-byte units.
4492 RegSize = llvm::RoundUpToAlignment(RegSize, 8);
4501 RegSize = 16 * NumRegs;
// reg_offs >= 0 means the register save area is already exhausted.
4513 UsingStack = CGF.
Builder.CreateICmpSGE(
4514 reg_offs, llvm::ConstantInt::get(CGF.
Int32Ty, 0));
4516 CGF.
Builder.CreateCondBr(UsingStack, OnStackBlock, MaybeRegBlock);
// Over-aligned GP arguments: round the (negative) offset up to the alignment.
4525 if (!IsFPR && !IsIndirect && TyAlign.
getQuantity() > 8) {
4528 reg_offs = CGF.
Builder.CreateAdd(
4529 reg_offs, llvm::ConstantInt::get(CGF.
Int32Ty, Align - 1),
4531 reg_offs = CGF.
Builder.CreateAnd(
4532 reg_offs, llvm::ConstantInt::get(CGF.
Int32Ty, -Align),
// Advance the offset; if it is still <= 0 the value fits in registers.
4541 NewOffset = CGF.
Builder.CreateAdd(
4542 reg_offs, llvm::ConstantInt::get(CGF.
Int32Ty, RegSize),
"new_reg_offs");
4548 InRegs = CGF.
Builder.CreateICmpSLE(
4549 NewOffset, llvm::ConstantInt::get(CGF.
Int32Ty, 0),
"inreg");
4551 CGF.
Builder.CreateCondBr(InRegs, InRegBlock, OnStackBlock);
// In-register block: address = reg_top + reg_offs.
4563 reg_top_offset,
"reg_top_p");
4565 Address BaseAddr(CGF.
Builder.CreateInBoundsGEP(reg_top, reg_offs),
4573 MemTy = llvm::PointerType::getUnqual(MemTy);
// HFAs with >1 member are copied element-by-element out of the 16-byte
// register slots into a contiguous temporary.
4576 const Type *Base =
nullptr;
4577 uint64_t NumMembers = 0;
4578 bool IsHFA = isHomogeneousAggregate(Ty, Base, NumMembers);
4579 if (IsHFA && NumMembers > 1) {
4584 assert(!IsIndirect &&
"Homogeneous aggregates should be passed directly");
4585 auto BaseTyInfo = getContext().getTypeInfoInChars(
QualType(Base, 0));
4587 llvm::Type *HFATy = llvm::ArrayType::get(BaseTy, NumMembers);
4589 std::max(TyAlign, BaseTyInfo.second));
// Big-endian adjustment: sub-16-byte members sit at the high end of a slot.
4594 BaseTyInfo.first.getQuantity() < 16)
4595 Offset = 16 - BaseTyInfo.first.getQuantity();
4597 for (
unsigned i = 0; i < NumMembers; ++i) {
4615 CharUnits SlotSize = BaseAddr.getAlignment();
4618 TyInfo.first < SlotSize) {
4619 CharUnits Offset = SlotSize - TyInfo.first;
// On-stack block: align the stack pointer if needed (integer round-up).
4642 OnStackPtr = CGF.
Builder.CreatePtrToInt(OnStackPtr, CGF.
Int64Ty);
4644 OnStackPtr = CGF.
Builder.CreateAdd(
4645 OnStackPtr, llvm::ConstantInt::get(CGF.
Int64Ty, Align - 1),
4647 OnStackPtr = CGF.
Builder.CreateAnd(
4648 OnStackPtr, llvm::ConstantInt::get(CGF.
Int64Ty, -Align),
4653 Address OnStackAddr(OnStackPtr,
4660 StackSize = StackSlotSize;
4666 CGF.
Builder.CreateInBoundsGEP(OnStackPtr, StackSizeC,
"new_stack");
4672 TyInfo.first < StackSlotSize) {
4673 CharUnits Offset = StackSlotSize - TyInfo.first;
// Merge the register and stack addresses.
4686 Address ResAddr =
emitMergePHI(CGF, RegAddr, InRegBlock,
4687 OnStackAddr, OnStackBlock,
"vaargs.addr");
// NOTE(review): fragmentary extract — documented in place only.
// Darwin va_arg: values larger than 16 bytes that are not homogeneous
// aggregates are passed indirectly; the actual load is delegated (the call at
// the end is truncated in this extract).
4696 Address AArch64ABIInfo::EmitDarwinVAArg(Address VAListAddr,
QualType Ty,
4715 auto TyInfo = getContext().getTypeInfoInChars(Ty);
4719 bool IsIndirect =
false;
4720 if (TyInfo.first.getQuantity() > 16) {
4721 const Type *Base =
nullptr;
4722 uint64_t Members = 0;
4723 IsIndirect = !isHomogeneousAggregate(Ty, Base, Members);
4727 TyInfo, SlotSize,
true);
// NOTE(review): fragmentary extract of the ARMABIInfo class declaration,
// ARMTargetCodeGenInfo (interrupt-attribute handling), and the Windows ARM
// subclass. Documented in place only.
4736 class ARMABIInfo :
public ABIInfo {
// EABI/EABIHF detection from the target triple's environment.
4753 bool isEABI()
const {
4754 switch (getTarget().getTriple().getEnvironment()) {
4755 case llvm::Triple::Android:
4756 case llvm::Triple::EABI:
4757 case llvm::Triple::EABIHF:
4758 case llvm::Triple::GNUEABI:
4759 case llvm::Triple::GNUEABIHF:
4766 bool isEABIHF()
const {
4767 switch (getTarget().getTriple().getEnvironment()) {
4768 case llvm::Triple::EABIHF:
4769 case llvm::Triple::GNUEABIHF:
4776 bool isAndroid()
const {
4777 return (getTarget().getTriple().getEnvironment() ==
4778 llvm::Triple::Android);
4781 ABIKind getABIKind()
const {
return Kind; }
4786 bool isIllegalVectorType(
QualType Ty)
const;
4788 bool isHomogeneousAggregateBaseType(
QualType Ty)
const override;
4789 bool isHomogeneousAggregateSmallEnough(
const Type *Ty,
4790 uint64_t Members)
const override;
4804 ARMTargetCodeGenInfo(
CodeGenTypes &CGT, ARMABIInfo::ABIKind K)
4807 const ARMABIInfo &getABIInfo()
const {
4815 StringRef getARCRetainAutoreleasedReturnValueMarker()
const override {
4816 return "mov\tr7, r7\t\t@ marker for objc_retainAutoreleaseReturnValue";
// EABI unwind exception object is 88 bytes; the non-EABI size is outside
// this extract.
4828 unsigned getSizeOfUnwindException()
const override {
4829 if (getABIInfo().isEABI())
return 88;
// Maps the ARMInterruptAttr kind to the "interrupt" function attribute.
4833 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
4835 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
4839 const ARMInterruptAttr *
Attr = FD->
getAttr<ARMInterruptAttr>();
4844 switch (Attr->getInterrupt()) {
4845 case ARMInterruptAttr::Generic: Kind =
"";
break;
4846 case ARMInterruptAttr::IRQ: Kind =
"IRQ";
break;
4847 case ARMInterruptAttr::FIQ: Kind =
"FIQ";
break;
4848 case ARMInterruptAttr::SWI: Kind =
"SWI";
break;
4849 case ARMInterruptAttr::ABORT: Kind =
"ABORT";
break;
4850 case ARMInterruptAttr::UNDEF: Kind =
"UNDEF";
break;
4853 llvm::Function *Fn = cast<llvm::Function>(GV);
4855 Fn->addFnAttr(
"interrupt", Kind);
4857 ARMABIInfo::ABIKind ABI = cast<ARMABIInfo>(getABIInfo()).getABIKind();
4858 if (ABI == ARMABIInfo::APCS)
// Non-APCS interrupt handlers get an 8-byte stack-alignment attribute.
4864 llvm::AttrBuilder B;
4865 B.addStackAlignmentAttr(8);
4866 Fn->addAttributes(llvm::AttributeSet::FunctionIndex,
4868 llvm::AttributeSet::FunctionIndex,
// Windows variant: chains to the base implementation, then adds the
// stack-probe-size attribute.
4873 class WindowsARMTargetCodeGenInfo :
public ARMTargetCodeGenInfo {
4875 WindowsARMTargetCodeGenInfo(
CodeGenTypes &CGT, ARMABIInfo::ABIKind K)
4876 : ARMTargetCodeGenInfo(CGT, K) {}
4878 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
4882 void WindowsARMTargetCodeGenInfo::setTargetAttributes(
4884 ARMTargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
4885 addStackProbeSizeTargetAttribute(D, GV, CGM);
// NOTE(review): fragmentary extract of ARM computeInfo and the calling-
// convention helpers (default CC and ABI-kind-specific CC). Documented in
// place only.
4890 if (!getCXXABI().classifyReturnType(FI))
4895 I.info = classifyArgumentType(I.type, FI.
isVariadic());
// Default LLVM CC: hard-float targets (EABIHF / watchOS) use AAPCS_VFP.
4909 if (isEABIHF() || getTarget().getTriple().isWatchOS())
4910 return llvm::CallingConv::ARM_AAPCS_VFP;
4912 return llvm::CallingConv::ARM_AAPCS;
4914 return llvm::CallingConv::ARM_APCS;
// ABI kind -> LLVM calling convention mapping.
4920 switch (getABIKind()) {
4921 case APCS:
return llvm::CallingConv::ARM_APCS;
4922 case AAPCS:
return llvm::CallingConv::ARM_AAPCS;
4923 case AAPCS_VFP:
return llvm::CallingConv::ARM_AAPCS_VFP;
4924 case AAPCS16_VFP:
return llvm::CallingConv::ARM_AAPCS_VFP;
4926 llvm_unreachable(
"bad ABI kind");
// setCCs: record the runtime/builtin CCs when they differ from the default.
4929 void ARMABIInfo::setCCs() {
4935 if (abiCC != getLLVMDefaultCC())
4941 switch (getABIKind()) {
4944 if (abiCC != getLLVMDefaultCC())
4949 BuiltinCC = llvm::CallingConv::ARM_AAPCS;
// NOTE(review): fragmentary extract of ARM classifyArgumentType (signature
// partially missing) plus the start of an integer-like-type scan. Documented
// in place only.
4955 bool isVariadic)
const {
// VFP rules only apply to non-variadic AAPCS_VFP calls.
4963 bool IsEffectivelyAAPCS_VFP = getABIKind() == AAPCS_VFP && !isVariadic;
// Illegal vectors coerced to i32 / <2 x i32> / <4 x i32> or made indirect.
4968 if (isIllegalVectorType(Ty)) {
4969 uint64_t Size = getContext().getTypeSize(Ty);
4972 llvm::Type::getInt32Ty(getVMContext());
4977 llvm::Type::getInt32Ty(getVMContext()), 2);
4982 llvm::Type::getInt32Ty(getVMContext()), 4);
4985 return getNaturalAlignIndirect(Ty,
false);
// __fp16 outside OpenCL: passed as float (VFP) or i32 (soft).
4991 if (Ty->
isHalfType() && !getContext().getLangOpts().OpenCL) {
4992 llvm::Type *ResType = IsEffectivelyAAPCS_VFP ?
4993 llvm::Type::getFloatTy(getVMContext()) :
4994 llvm::Type::getInt32Ty(getVMContext());
5001 Ty = EnumTy->getDecl()->getIntegerType();
// Homogeneous aggregates pass in VFP registers under AAPCS_VFP/AAPCS16_VFP.
5016 if (IsEffectivelyAAPCS_VFP) {
5019 const Type *Base =
nullptr;
5020 uint64_t Members = 0;
5021 if (isHomogeneousAggregate(Ty, Base, Members)) {
5022 assert(Base &&
"Base class should be set for homogeneous aggregate");
5026 }
else if (getABIKind() == ARMABIInfo::AAPCS16_VFP) {
5030 const Type *Base =
nullptr;
5031 uint64_t Members = 0;
5032 if (isHomogeneousAggregate(Ty, Base, Members)) {
5033 assert(Base && Members <= 4 &&
"unexpected homogeneous aggregate");
5035 llvm::ArrayType::get(CGT.ConvertType(
QualType(Base, 0)), Members);
5040 if (getABIKind() == ARMABIInfo::AAPCS16_VFP &&
// byval alignment: clamped to [4, 8] bytes for AAPCS/AAPCS_VFP.
5053 uint64_t ABIAlign = 4;
5054 uint64_t TyAlign = getContext().getTypeAlign(Ty) / 8;
5055 if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
5056 getABIKind() == ARMABIInfo::AAPCS)
5057 ABIAlign = std::min(std::max(TyAlign, (uint64_t)4), (uint64_t)8);
5060 assert(getABIKind() != ARMABIInfo::AAPCS16_VFP &&
"unexpected byval");
5063 TyAlign > ABIAlign);
// Remaining aggregates are coerced to arrays of i32 or i64 words.
5071 if (getContext().getTypeAlign(Ty) <= 32) {
5072 ElemTy = llvm::Type::getInt32Ty(getVMContext());
5073 SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32;
5075 ElemTy = llvm::Type::getInt64Ty(getVMContext());
5076 SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64;
// Start of a record-scan helper (integer-like check); body truncated here.
5083 llvm::LLVMContext &VMContext) {
5115 if (!RT)
return false;
5126 bool HadField =
false;
5129 i != e; ++i, ++idx) {
// NOTE(review): fragmentary extract of ARM classifyReturnType, the vector/HFA
// predicates, and EmitVAArg. Documented in place only.
5168 bool isVariadic)
const {
5169 bool IsEffectivelyAAPCS_VFP =
5170 (getABIKind() == AAPCS_VFP || getABIKind() == AAPCS16_VFP) && !isVariadic;
// Vectors >128 bits are returned indirectly.
5176 if (RetTy->
isVectorType() && getContext().getTypeSize(RetTy) > 128) {
5177 return getNaturalAlignIndirect(RetTy);
// __fp16 outside OpenCL: returned as float (VFP) or i32 (soft).
5183 if (RetTy->
isHalfType() && !getContext().getLangOpts().OpenCL) {
5184 llvm::Type *ResType = IsEffectivelyAAPCS_VFP ?
5185 llvm::Type::getFloatTy(getVMContext()) :
5186 llvm::Type::getInt32Ty(getVMContext());
5193 RetTy = EnumTy->getDecl()->getIntegerType();
5200 if (getABIKind() == APCS) {
5210 getVMContext(), getContext().
getTypeSize(RetTy)));
5215 uint64_t Size = getContext().getTypeSize(RetTy);
5224 return getNaturalAlignIndirect(RetTy);
// Homogeneous aggregates are returned in VFP registers.
5233 if (IsEffectivelyAAPCS_VFP) {
5234 const Type *Base =
nullptr;
5235 uint64_t Members = 0;
5236 if (isHomogeneousAggregate(RetTy, Base, Members)) {
5237 assert(Base &&
"Base class should be set for homogeneous aggregate");
5245 uint64_t Size = getContext().getTypeSize(RetTy);
5247 if (getDataLayout().isBigEndian())
// AAPCS16_VFP: values up to 128 bits return as an array of i32 words.
5257 }
else if (Size <= 128 && getABIKind() == AAPCS16_VFP) {
5258 llvm::Type *Int32Ty = llvm::Type::getInt32Ty(getVMContext());
5260 llvm::ArrayType::get(Int32Ty, llvm::RoundUpToAlignment(Size, 32) / 32);
5264 return getNaturalAlignIndirect(RetTy);
// Vector legality: element counts must be powers of two (3 allowed in the
// first case visible here).
5268 bool ARMABIInfo::isIllegalVectorType(
QualType Ty)
const {
5276 unsigned NumElements = VT->getNumElements();
5278 if (!llvm::isPowerOf2_32(NumElements) && NumElements != 3)
5282 unsigned NumElements = VT->getNumElements();
5283 uint64_t Size = getContext().getTypeSize(VT);
5285 if (!llvm::isPowerOf2_32(NumElements))
// HFA base types: float/double/long double and 64/128-bit vectors.
5294 bool ARMABIInfo::isHomogeneousAggregateBaseType(
QualType Ty)
const {
5298 if (BT->
getKind() == BuiltinType::Float ||
5299 BT->
getKind() == BuiltinType::Double ||
5300 BT->
getKind() == BuiltinType::LongDouble)
5303 unsigned VecSize = getContext().getTypeSize(VT);
5304 if (VecSize == 64 || VecSize == 128)
5310 bool ARMABIInfo::isHomogeneousAggregateSmallEnough(
const Type *Base,
5311 uint64_t Members)
const {
5312 return Members <= 4;
// EmitVAArg: the ABI kind selects indirection and slot alignment.
5315 Address ARMABIInfo::EmitVAArg(
CodeGenFunction &CGF, Address VAListAddr,
5326 auto TyInfo = getContext().getTypeInfoInChars(Ty);
5327 CharUnits TyAlignForABI = TyInfo.second;
5330 bool IsIndirect =
false;
5331 const Type *Base =
nullptr;
5332 uint64_t Members = 0;
// AAPCS16_VFP non-HFA large values go indirect.
5339 getABIKind() == ARMABIInfo::AAPCS16_VFP &&
5340 !isHomogeneousAggregate(Ty, Base, Members)) {
5347 }
else if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
5348 getABIKind() == ARMABIInfo::AAPCS) {
5351 }
else if (getABIKind() == ARMABIInfo::AAPCS16_VFP) {
5358 TyInfo.second = TyAlignForABI;
// NOTE(review): fragmentary extract of the NVPTX ABI section: classification,
// the varargs stub, kernel-attribute handling, and NVVM metadata emission.
// Documented in place only.
5370 class NVPTXABIInfo :
public ABIInfo {
5387 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
5392 static void addNVVMMetadata(llvm::Function *F, StringRef Name,
int Operand);
// Enums classify as their underlying integer type; aggregates go indirect.
5405 RetTy = EnumTy->getDecl()->getIntegerType();
5414 Ty = EnumTy->getDecl()->getIntegerType();
5418 return getNaturalAlignIndirect(Ty,
true);
5425 if (!getCXXABI().classifyReturnType(FI))
5428 I.info = classifyArgumentType(I.type);
// NVPTX has no varargs support; reaching this is a front-end bug.
5437 Address NVPTXABIInfo::EmitVAArg(
CodeGenFunction &CGF, Address VAListAddr,
5439 llvm_unreachable(
"NVPTX does not support varargs");
5442 void NVPTXTargetCodeGenInfo::
5443 setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
5445 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
5448 llvm::Function *F = cast<llvm::Function>(GV);
// OpenCL kernels: tagged via NVVM metadata and kept out-of-line.
5454 if (FD->
hasAttr<OpenCLKernelAttr>()) {
5457 addNVVMMetadata(F,
"kernel", 1);
5459 F->addFnAttr(llvm::Attribute::NoInline);
// CUDA __global__ functions: kernel metadata plus launch-bounds annotations.
5468 if (FD->
hasAttr<CUDAGlobalAttr>()) {
5470 addNVVMMetadata(F,
"kernel", 1);
5472 if (CUDALaunchBoundsAttr *Attr = FD->
getAttr<CUDALaunchBoundsAttr>()) {
5474 llvm::APSInt MaxThreads(32);
5475 MaxThreads = Attr->getMaxThreads()->EvaluateKnownConstInt(M.
getContext());
5477 addNVVMMetadata(F,
"maxntidx", MaxThreads.getExtValue());
5482 if (Attr->getMinBlocks()) {
5483 llvm::APSInt MinBlocks(32);
5484 MinBlocks = Attr->getMinBlocks()->EvaluateKnownConstInt(M.
getContext());
5487 addNVVMMetadata(F,
"minctasm", MinBlocks.getExtValue());
// Appends a (function, name, i32 operand) triple to !nvvm.annotations.
5493 void NVPTXTargetCodeGenInfo::addNVVMMetadata(llvm::Function *F, StringRef Name,
5495 llvm::Module *M = F->getParent();
5499 llvm::NamedMDNode *MD = M->getOrInsertNamedMetadata(
"nvvm.annotations");
5501 llvm::Metadata *MDVals[] = {
5502 llvm::ConstantAsMetadata::get(F), llvm::MDString::get(Ctx, Name),
5503 llvm::ConstantAsMetadata::get(
5504 llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), Operand))};
5506 MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
// NOTE(review): fragmentary extract of the SystemZ ABIInfo class, its type
// predicates, and part of GetSingleElementType. Documented in place only.
5516 class SystemZABIInfo :
public ABIInfo {
5521 :
ABIInfo(CGT), HasVector(HV) {}
5523 bool isPromotableIntegerType(
QualType Ty)
const;
5524 bool isCompoundType(
QualType Ty)
const;
5525 bool isVectorArgumentType(
QualType Ty)
const;
5526 bool isFPArgumentType(
QualType Ty)
const;
5533 if (!getCXXABI().classifyReturnType(FI))
5536 I.info = classifyArgumentType(I.type);
5545 SystemZTargetCodeGenInfo(
CodeGenTypes &CGT,
bool HasVector)
// Promotable: enums (via underlying type) and small builtin ints (Int/UInt
// cases visible).
5551 bool SystemZABIInfo::isPromotableIntegerType(
QualType Ty)
const {
5554 Ty = EnumTy->getDecl()->getIntegerType();
5563 case BuiltinType::Int:
5564 case BuiltinType::UInt:
5572 bool SystemZABIInfo::isCompoundType(
QualType Ty)
const {
// Vector arguments only when the target has vector support and the type is
// at most 128 bits.
5578 bool SystemZABIInfo::isVectorArgumentType(
QualType Ty)
const {
5579 return (HasVector &&
5581 getContext().getTypeSize(Ty) <= 128);
5584 bool SystemZABIInfo::isFPArgumentType(
QualType Ty)
const {
5587 case BuiltinType::Float:
5588 case BuiltinType::Double:
// GetSingleElementType fragment: recurses through bases and fields to find a
// record's single non-empty element; zero-width C++ bit-fields are skipped.
5603 if (
const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
5604 for (
const auto &I : CXXRD->bases()) {
5613 Found = GetSingleElementType(Base);
5617 for (
const auto *FD : RD->
fields()) {
5621 if (getContext().getLangOpts().CPlusPlus &&
5622 FD->isBitField() && FD->getBitWidthValue(getContext()) == 0)
5629 Found = GetSingleElementType(FD->
getType());
// NOTE(review): fragmentary extract of SystemZ EmitVAArg and the return/
// argument classification. Documented in place only.
5641 Address SystemZABIInfo::EmitVAArg(
CodeGenFunction &CGF, Address VAListAddr,
5655 auto TyInfo = getContext().getTypeInfoInChars(Ty);
5660 bool InFPRs =
false;
5661 bool IsVector =
false;
5665 DirectTy = llvm::PointerType::getUnqual(DirectTy);
// FPR eligibility is decided from the IR type (float/double); vectors are
// tracked separately.
5670 InFPRs = ArgTy->isFloatTy() || ArgTy->isDoubleTy();
5671 IsVector = ArgTy->isVectorTy();
5672 UnpaddedSize = TyInfo.first;
5673 DirectAlign = TyInfo.second;
5676 if (IsVector && UnpaddedSize > PaddedSize)
5678 assert((UnpaddedSize <= PaddedSize) &&
"Invalid argument size.");
// Values narrower than their padded slot are right-justified (Padding).
5680 CharUnits Padding = (PaddedSize - UnpaddedSize);
5684 llvm::ConstantInt::get(IndexTy, PaddedSize.
getQuantity());
5690 Address OverflowArgAreaPtr =
5692 "overflow_arg_area_ptr");
5693 Address OverflowArgArea =
5702 "overflow_arg_area");
// Register path: compare the reg count field against MaxRegs, then index the
// register save area at RegSaveIndex * slot + RegCount * slot.
5710 unsigned MaxRegs, RegCountField, RegSaveIndex;
5721 RegPadding = Padding;
5728 llvm::Value *MaxRegsV = llvm::ConstantInt::get(IndexTy, MaxRegs);
5735 CGF.
Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
5742 CGF.
Builder.CreateMul(RegCount, PaddedSizeV,
"scaled_reg_count");
5744 llvm::ConstantInt::get(IndexTy, RegSaveIndex * PaddedSize.
getQuantity()
5747 CGF.
Builder.CreateAdd(ScaledRegCount, RegBase,
"reg_offset");
5748 Address RegSaveAreaPtr =
5750 "reg_save_area_ptr");
5753 Address RawRegAddr(CGF.
Builder.CreateGEP(RegSaveArea, RegOffset,
// Consume one register slot.
5760 llvm::Value *One = llvm::ConstantInt::get(IndexTy, 1);
5762 CGF.
Builder.CreateAdd(RegCount, One,
"reg_count");
// Memory path: read from (and advance) the overflow area, then merge.
5772 Address OverflowArgArea =
5775 Address RawMemAddr =
5783 "overflow_arg_area");
5789 Address ResAddr =
emitMergePHI(CGF, RegAddr, InRegBlock,
5790 MemAddr, InMemBlock,
"va_arg.addr");
// Return classification: vectors direct; compound or >64-bit indirect;
// promotable integers extended.
5802 if (isVectorArgumentType(RetTy))
5804 if (isCompoundType(RetTy) || getContext().getTypeSize(RetTy) > 64)
5805 return getNaturalAlignIndirect(RetTy);
5806 return (isPromotableIntegerType(RetTy) ?
// Argument classification fragment: single-element records may pass as their
// element; only sizes 8/16/32/64 pass directly.
5816 if (isPromotableIntegerType(Ty))
5822 uint64_t Size = getContext().getTypeSize(Ty);
5823 QualType SingleElementTy = GetSingleElementType(Ty);
5824 if (isVectorArgumentType(SingleElementTy) &&
5825 getContext().
getTypeSize(SingleElementTy) == Size)
5829 if (Size != 8 && Size != 16 && Size != 32 && Size != 64)
5830 return getNaturalAlignIndirect(Ty,
false);
5838 return getNaturalAlignIndirect(Ty,
false);
// FP single-element records pass as float/double; others as an integer of
// the full size.
5842 if (isFPArgumentType(SingleElementTy)) {
5843 assert(Size == 32 || Size == 64);
5845 PassTy = llvm::Type::getFloatTy(getVMContext());
5847 PassTy = llvm::Type::getDoubleTy(getVMContext());
5849 PassTy = llvm::IntegerType::get(getVMContext(), Size);
5854 if (isCompoundType(Ty))
5855 return getNaturalAlignIndirect(Ty,
false);
// NOTE(review): fragmentary extract of MSP430 interrupt-attribute handling and
// the start of the MipsABIInfo class declaration. Documented in place only.
5870 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
// MSP430 interrupt handlers: MSP430_INTR calling convention, no inlining,
// and an "__isr_<N>" alias derived from the attribute's interrupt number.
5876 void MSP430TargetCodeGenInfo::setTargetAttributes(
const Decl *D,
5877 llvm::GlobalValue *GV,
5879 if (
const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
5880 if (
const MSP430InterruptAttr *attr = FD->
getAttr<MSP430InterruptAttr>()) {
5882 llvm::Function *F = cast<llvm::Function>(GV);
5885 F->setCallingConv(llvm::CallingConv::MSP430_INTR);
5888 F->addFnAttr(llvm::Attribute::NoInline);
5891 unsigned Num = attr->getNumber() / 2;
5893 "__isr_" + Twine(Num), F);
// Mips ABI: O32 uses 4/8-byte min/stack alignment, N32/N64 use 8/16; the
// unwind exception size is 24 (O32) or 32 bytes.
5904 class MipsABIInfo :
public ABIInfo {
5906 unsigned MinABIStackAlignInBytes, StackAlignInBytes;
5907 void CoerceToIntArgs(uint64_t TySize,
5911 llvm::Type* getPaddingType(uint64_t Align, uint64_t Offset)
const;
5914 ABIInfo(CGT), IsO32(_IsO32), MinABIStackAlignInBytes(IsO32 ? 4 : 8),
5915 StackAlignInBytes(IsO32 ? 8 : 16) {}
5922 bool shouldSignExtUnsignedType(
QualType Ty)
const override;
5926 unsigned SizeOfUnwindException;
5930 SizeOfUnwindException(IsO32 ? 24 : 32) {}
5936 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
5938 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
5940 llvm::Function *Fn = cast<llvm::Function>(GV);
5941 if (FD->
hasAttr<Mips16Attr>()) {
5942 Fn->addFnAttr(
"mips16");
5944 else if (FD->
hasAttr<NoMips16Attr>()) {
5945 Fn->addFnAttr(
"nomips16");
5948 const MipsInterruptAttr *Attr = FD->
getAttr<MipsInterruptAttr>();
5953 switch (Attr->getInterrupt()) {
5954 case MipsInterruptAttr::eic: Kind =
"eic";
break;
5955 case MipsInterruptAttr::sw0: Kind =
"sw0";
break;
5956 case MipsInterruptAttr::sw1: Kind =
"sw1";
break;
5957 case MipsInterruptAttr::hw0: Kind =
"hw0";
break;
5958 case MipsInterruptAttr::hw1: Kind =
"hw1";
break;
5959 case MipsInterruptAttr::hw2: Kind =
"hw2";
break;
5960 case MipsInterruptAttr::hw3: Kind =
"hw3";
break;
5961 case MipsInterruptAttr::hw4: Kind =
"hw4";
break;
5962 case MipsInterruptAttr::hw5: Kind =
"hw5";
break;
5965 Fn->addFnAttr(
"interrupt", Kind);
5972 unsigned getSizeOfUnwindException()
const override {
5973 return SizeOfUnwindException;
5978 void MipsABIInfo::CoerceToIntArgs(
5980 llvm::IntegerType *IntTy =
5981 llvm::IntegerType::get(getVMContext(), MinABIStackAlignInBytes * 8);
5984 for (
unsigned N = TySize / (MinABIStackAlignInBytes * 8); N; --N)
5985 ArgList.push_back(IntTy);
5988 unsigned R = TySize % (MinABIStackAlignInBytes * 8);
5991 ArgList.push_back(llvm::IntegerType::get(getVMContext(), R));
6000 CoerceToIntArgs(TySize, ArgList);
6001 return llvm::StructType::get(getVMContext(), ArgList);
6005 return CGT.ConvertType(Ty);
6011 CoerceToIntArgs(TySize, ArgList);
6012 return llvm::StructType::get(getVMContext(), ArgList);
6017 assert(!(TySize % 8) &&
"Size of structure must be multiple of 8.");
6019 uint64_t LastOffset = 0;
6021 llvm::IntegerType *I64 = llvm::IntegerType::get(getVMContext(), 64);
6026 i != e; ++i, ++idx) {
6030 if (!BT || BT->
getKind() != BuiltinType::Double)
6038 for (
unsigned j = (Offset - LastOffset) / 64; j > 0; --j)
6039 ArgList.push_back(I64);
6042 ArgList.push_back(llvm::Type::getDoubleTy(getVMContext()));
6043 LastOffset = Offset + 64;
6046 CoerceToIntArgs(TySize - LastOffset, IntArgList);
6047 ArgList.append(IntArgList.begin(), IntArgList.end());
6049 return llvm::StructType::get(getVMContext(), ArgList);
6052 llvm::Type *MipsABIInfo::getPaddingType(uint64_t OrigOffset,
6053 uint64_t Offset)
const {
6054 if (OrigOffset + MinABIStackAlignInBytes > Offset)
6057 return llvm::IntegerType::get(getVMContext(), (Offset - OrigOffset) * 8);
6061 MipsABIInfo::classifyArgumentType(
QualType Ty, uint64_t &Offset)
const {
6064 uint64_t OrigOffset =
Offset;
6065 uint64_t TySize = getContext().getTypeSize(Ty);
6066 uint64_t Align = getContext().getTypeAlign(Ty) / 8;
6068 Align = std::min(std::max(Align, (uint64_t)MinABIStackAlignInBytes),
6069 (uint64_t)StackAlignInBytes);
6070 unsigned CurrOffset = llvm::RoundUpToAlignment(Offset, Align);
6071 Offset = CurrOffset + llvm::RoundUpToAlignment(TySize, Align * 8) / 8;
6079 Offset = OrigOffset + MinABIStackAlignInBytes;
6088 getPaddingType(OrigOffset, CurrOffset));
6095 Ty = EnumTy->getDecl()->getIntegerType();
6102 nullptr, 0, IsO32 ?
nullptr : getPaddingType(OrigOffset, CurrOffset));
6106 MipsABIInfo::returnAggregateInRegs(
QualType RetTy, uint64_t Size)
const {
6126 for (; b != e; ++b) {
6132 RTList.push_back(CGT.ConvertType(b->getType()));
6136 return llvm::StructType::get(getVMContext(), RTList,
6143 CoerceToIntArgs(Size, RTList);
6144 return llvm::StructType::get(getVMContext(), RTList);
6148 uint64_t Size = getContext().getTypeSize(RetTy);
6155 if (!IsO32 && Size == 0)
6174 return getNaturalAlignIndirect(RetTy);
6179 RetTy = EnumTy->getDecl()->getIntegerType();
6187 if (!getCXXABI().classifyReturnType(FI))
6191 uint64_t Offset = RetInfo.
isIndirect() ? MinABIStackAlignInBytes : 0;
6194 I.info = classifyArgumentType(I.type, Offset);
6197 Address MipsABIInfo::EmitVAArg(
CodeGenFunction &CGF, Address VAListAddr,
6203 unsigned SlotSizeInBits = IsO32 ? 32 : 64;
6204 unsigned PtrWidth = getTarget().getPointerWidth(0);
6205 bool DidPromote =
false;
6207 getContext().getIntWidth(Ty) < SlotSizeInBits) ||
6210 Ty = getContext().getIntTypeForBitwidth(SlotSizeInBits,
6214 auto TyInfo = getContext().getTypeInfoInChars(Ty);
6225 TyInfo, ArgSlotSize,
true);
6231 Address Temp = CGF.
CreateMemTemp(OrigTy,
"vaarg.promotion-temp");
6248 bool MipsABIInfo::shouldSignExtUnsignedType(
QualType Ty)
const {
6249 int TySize = getContext().getTypeSize(Ty);
6294 class TCETargetCodeGenInfo :
public DefaultTargetCodeGenInfo {
6297 : DefaultTargetCodeGenInfo(CGT) {}
6299 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
6303 void TCETargetCodeGenInfo::setTargetAttributes(
6305 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
6308 llvm::Function *F = cast<llvm::Function>(GV);
6311 if (FD->
hasAttr<OpenCLKernelAttr>()) {
6313 F->addFnAttr(llvm::Attribute::NoInline);
6314 const ReqdWorkGroupSizeAttr *Attr = FD->
getAttr<ReqdWorkGroupSizeAttr>();
6317 llvm::LLVMContext &Context = F->getContext();
6318 llvm::NamedMDNode *OpenCLMetadata =
6320 "opencl.kernel_wg_size_info");
6323 Operands.push_back(llvm::ConstantAsMetadata::get(F));
6326 llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
6327 M.
Int32Ty, llvm::APInt(32, Attr->getXDim()))));
6329 llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
6330 M.
Int32Ty, llvm::APInt(32, Attr->getYDim()))));
6332 llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
6333 M.
Int32Ty, llvm::APInt(32, Attr->getZDim()))));
6339 llvm::ConstantAsMetadata::get(llvm::ConstantInt::getTrue(Context)));
6340 OpenCLMetadata->addOperand(llvm::MDNode::get(Context, Operands));
6354 class HexagonABIInfo :
public ABIInfo {
6384 if (!getCXXABI().classifyReturnType(FI))
6387 I.info = classifyArgumentType(I.type);
6394 Ty = EnumTy->getDecl()->getIntegerType();
6407 uint64_t Size = getContext().getTypeSize(Ty);
6409 return getNaturalAlignIndirect(Ty,
true);
6426 if (RetTy->
isVectorType() && getContext().getTypeSize(RetTy) > 64)
6427 return getNaturalAlignIndirect(RetTy);
6432 RetTy = EnumTy->getDecl()->getIntegerType();
6443 uint64_t Size = getContext().getTypeSize(RetTy);
6455 return getNaturalAlignIndirect(RetTy,
true);
6458 Address HexagonABIInfo::EmitVAArg(
CodeGenFunction &CGF, Address VAListAddr,
6462 getContext().getTypeInfoInChars(Ty),
6477 void setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV,
6483 void AMDGPUTargetCodeGenInfo::setTargetAttributes(
6485 llvm::GlobalValue *GV,
6487 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
6491 if (
const auto Attr = FD->
getAttr<AMDGPUNumVGPRAttr>()) {
6492 llvm::Function *F = cast<llvm::Function>(GV);
6493 uint32_t NumVGPR = Attr->getNumVGPR();
6495 F->addFnAttr(
"amdgpu_num_vgpr", llvm::utostr(NumVGPR));
6498 if (
const auto Attr = FD->
getAttr<AMDGPUNumSGPRAttr>()) {
6499 llvm::Function *F = cast<llvm::Function>(GV);
6500 unsigned NumSGPR = Attr->getNumSGPR();
6502 F->addFnAttr(
"amdgpu_num_sgpr", llvm::utostr(NumSGPR));
6534 class SparcV9ABIInfo :
public ABIInfo {
6555 struct CoerceBuilder {
6557 const llvm::DataLayout &DL;
6562 CoerceBuilder(llvm::LLVMContext &c,
const llvm::DataLayout &dl)
6563 : Context(c), DL(dl), Size(0), InReg(
false) {}
6566 void pad(uint64_t ToSize) {
6567 assert(ToSize >= Size &&
"Cannot remove elements");
6572 uint64_t Aligned = llvm::RoundUpToAlignment(Size, 64);
6573 if (Aligned > Size && Aligned <= ToSize) {
6574 Elems.push_back(llvm::IntegerType::get(Context, Aligned - Size));
6579 while (Size + 64 <= ToSize) {
6580 Elems.push_back(llvm::Type::getInt64Ty(Context));
6585 if (Size < ToSize) {
6586 Elems.push_back(llvm::IntegerType::get(Context, ToSize - Size));
6592 void addFloat(uint64_t Offset,
llvm::Type *Ty,
unsigned Bits) {
6600 Elems.push_back(Ty);
6601 Size = Offset + Bits;
6605 void addStruct(uint64_t Offset, llvm::StructType *StrTy) {
6606 const llvm::StructLayout *Layout = DL.getStructLayout(StrTy);
6607 for (
unsigned i = 0, e = StrTy->getNumElements(); i != e; ++i) {
6608 llvm::Type *ElemTy = StrTy->getElementType(i);
6609 uint64_t ElemOffset = Offset + Layout->getElementOffsetInBits(i);
6610 switch (ElemTy->getTypeID()) {
6611 case llvm::Type::StructTyID:
6612 addStruct(ElemOffset, cast<llvm::StructType>(ElemTy));
6614 case llvm::Type::FloatTyID:
6615 addFloat(ElemOffset, ElemTy, 32);
6617 case llvm::Type::DoubleTyID:
6618 addFloat(ElemOffset, ElemTy, 64);
6620 case llvm::Type::FP128TyID:
6621 addFloat(ElemOffset, ElemTy, 128);
6623 case llvm::Type::PointerTyID:
6624 if (ElemOffset % 64 == 0) {
6626 Elems.push_back(ElemTy);
6637 bool isUsableType(llvm::StructType *Ty)
const {
6638 return llvm::makeArrayRef(Elems) == Ty->elements();
6643 if (Elems.size() == 1)
6644 return Elems.front();
6646 return llvm::StructType::get(Context, Elems);
6653 SparcV9ABIInfo::classifyType(
QualType Ty,
unsigned SizeLimit)
const {
6657 uint64_t Size = getContext().getTypeSize(Ty);
6661 if (Size > SizeLimit)
6662 return getNaturalAlignIndirect(Ty,
false);
6666 Ty = EnumTy->getDecl()->getIntegerType();
6669 if (Size < 64 && Ty->isIntegerType())
6683 llvm::StructType *StrTy = dyn_cast<llvm::StructType>(CGT.ConvertType(Ty));
6687 CoerceBuilder CB(getVMContext(), getDataLayout());
6688 CB.addStruct(0, StrTy);
6689 CB.pad(llvm::RoundUpToAlignment(CB.DL.getTypeSizeInBits(StrTy), 64));
6692 llvm::Type *CoerceTy = CB.isUsableType(StrTy) ? StrTy : CB.getType();
6700 Address SparcV9ABIInfo::EmitVAArg(
CodeGenFunction &CGF, Address VAListAddr,
6710 Address Addr(Builder.
CreateLoad(VAListAddr,
"ap.cur"), SlotSize);
6711 llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);
6713 auto TypeInfo = getContext().getTypeInfoInChars(Ty);
6720 llvm_unreachable(
"Unsupported ABI kind for va_arg");
6730 auto AllocSize = getDataLayout().getTypeAllocSize(AI.
getCoerceToType());
6739 ArgAddr = Address(Builder.
CreateLoad(ArgAddr,
"indirect.arg"),
6744 return Address(llvm::UndefValue::get(ArgPtrTy),
TypeInfo.second);
6752 return Builder.
CreateBitCast(ArgAddr, ArgPtrTy,
"arg.addr");
6758 I.info = classifyType(I.type, 16 * 8);
6784 llvm::IntegerType *i8 = CGF.
Int8Ty;
6785 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
6786 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
6875 class TypeStringCache {
6876 enum Status {NonRecursive, Recursive, Incomplete, IncompleteUsed};
6880 std::string Swapped;
6883 std::map<const IdentifierInfo *, struct Entry>
Map;
6884 unsigned IncompleteCount;
6885 unsigned IncompleteUsedCount;
6887 TypeStringCache() : IncompleteCount(0), IncompleteUsedCount(0) {}
6897 class FieldEncoding {
6901 FieldEncoding(
bool b, SmallStringEnc &e) : HasName(b), Enc(e.c_str()) {}
6902 StringRef str() {
return Enc.c_str();}
6903 bool operator<(
const FieldEncoding &rhs)
const {
6904 if (HasName != rhs.HasName)
return HasName;
6905 return Enc < rhs.Enc;
6909 class XCoreABIInfo :
public DefaultABIInfo {
6917 mutable TypeStringCache TSC;
6921 void emitTargetMD(
const Decl *D, llvm::GlobalValue *GV,
6927 Address XCoreABIInfo::EmitVAArg(
CodeGenFunction &CGF, Address VAListAddr,
6933 Address AP(Builder.
CreateLoad(VAListAddr), SlotSize);
6937 CharUnits TypeAlign = getContext().getTypeAlignInChars(Ty);
6939 if (AI.canHaveCoerceToType() && !AI.getCoerceToType())
6940 AI.setCoerceToType(ArgTy);
6941 llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);
6945 switch (AI.getKind()) {
6948 llvm_unreachable(
"Unsupported ABI kind for va_arg");
6950 Val = Address(llvm::UndefValue::get(ArgPtrTy), TypeAlign);
6957 getDataLayout().getTypeAllocSize(AI.getCoerceToType()));
6962 Val = Address(Builder.
CreateLoad(Val), TypeAlign);
6983 std::string StubEnc) {
6987 assert( (E.Str.empty() || E.State == Recursive) &&
6988 "Incorrectly use of addIncomplete");
6989 assert(!StubEnc.empty() &&
"Passing an empty string to addIncomplete()");
6990 E.Swapped.swap(E.Str);
6991 E.Str.swap(StubEnc);
6992 E.State = Incomplete;
7000 bool TypeStringCache::removeIncomplete(
const IdentifierInfo *ID) {
7003 auto I =
Map.find(ID);
7004 assert(I !=
Map.end() &&
"Entry not present");
7005 Entry &E = I->second;
7006 assert( (E.State == Incomplete ||
7007 E.State == IncompleteUsed) &&
7008 "Entry must be an incomplete type");
7009 bool IsRecursive =
false;
7010 if (E.State == IncompleteUsed) {
7013 --IncompleteUsedCount;
7015 if (E.Swapped.empty())
7019 E.Swapped.swap(E.Str);
7021 E.State = Recursive;
7029 void TypeStringCache::addIfComplete(
const IdentifierInfo *ID, StringRef Str,
7031 if (!ID || IncompleteUsedCount)
7034 if (IsRecursive && !E.Str.empty()) {
7035 assert(E.State==Recursive && E.Str.size() == Str.size() &&
7036 "This is not the same Recursive entry");
7042 assert(E.Str.empty() &&
"Entry already present");
7044 E.State = IsRecursive? Recursive : NonRecursive;
7053 auto I =
Map.find(ID);
7056 Entry &E = I->second;
7057 if (E.State == Recursive && IncompleteCount)
7060 if (E.State == Incomplete) {
7062 E.State = IncompleteUsed;
7063 ++IncompleteUsedCount;
7065 return E.Str.c_str();
7084 void XCoreTargetCodeGenInfo::emitTargetMD(
const Decl *D, llvm::GlobalValue *GV,
7088 llvm::LLVMContext &Ctx = CGM.
getModule().getContext();
7090 MDVals.push_back(llvm::ConstantAsMetadata::get(GV));
7091 MDVals.push_back(llvm::MDString::get(Ctx, Enc.str()));
7092 llvm::NamedMDNode *MD =
7093 CGM.
getModule().getOrInsertNamedMetadata(
"xcore.typestrings");
7094 MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
7100 TypeStringCache &TSC);
7108 TypeStringCache &TSC) {
7109 for (
const auto *Field : RD->
fields()) {
7112 Enc += Field->getName();
7114 if (Field->isBitField()) {
7116 llvm::raw_svector_ostream OS(Enc);
7117 OS << Field->getBitWidthValue(CGM.
getContext());
7120 if (!
appendType(Enc, Field->getType(), CGM, TSC))
7122 if (Field->isBitField())
7125 FE.emplace_back(!Field->getName().empty(), Enc);
7137 StringRef TypeString = TSC.lookupStr(ID);
7138 if (!TypeString.empty()) {
7144 size_t Start = Enc.size();
7152 bool IsRecursive =
false;
7159 std::string StubEnc(Enc.substr(Start).str());
7161 TSC.addIncomplete(ID, std::move(StubEnc));
7163 (void) TSC.removeIncomplete(ID);
7166 IsRecursive = TSC.removeIncomplete(ID);
7170 std::sort(FE.begin(), FE.end());
7172 unsigned E = FE.size();
7173 for (
unsigned I = 0; I !=
E; ++
I) {
7180 TSC.addIfComplete(ID, Enc.substr(Start), IsRecursive);
7186 TypeStringCache &TSC,
7189 StringRef TypeString = TSC.lookupStr(ID);
7190 if (!TypeString.empty()) {
7195 size_t Start = Enc.size();
7204 for (
auto I = ED->enumerator_begin(), E = ED->enumerator_end(); I !=
E;
7206 SmallStringEnc EnumEnc;
7208 EnumEnc += I->getName();
7210 I->getInitVal().toString(EnumEnc);
7212 FE.push_back(FieldEncoding(!I->getName().empty(), EnumEnc));
7214 std::sort(FE.begin(), FE.end());
7215 unsigned E = FE.size();
7216 for (
unsigned I = 0; I !=
E; ++
I) {
7223 TSC.addIfComplete(ID, Enc.substr(Start),
false);
7231 static const char *
const Table[]={
"",
"c:",
"r:",
"cr:",
"v:",
"cv:",
"rv:",
"crv:"};
7239 Enc += Table[Lookup];
7244 const char *EncType;
7246 case BuiltinType::Void:
7249 case BuiltinType::Bool:
7252 case BuiltinType::Char_U:
7255 case BuiltinType::UChar:
7258 case BuiltinType::SChar:
7261 case BuiltinType::UShort:
7264 case BuiltinType::Short:
7267 case BuiltinType::UInt:
7270 case BuiltinType::Int:
7273 case BuiltinType::ULong:
7276 case BuiltinType::Long:
7279 case BuiltinType::ULongLong:
7282 case BuiltinType::LongLong:
7285 case BuiltinType::Float:
7288 case BuiltinType::Double:
7291 case BuiltinType::LongDouble:
7304 TypeStringCache &TSC) {
7316 TypeStringCache &TSC, StringRef NoSizeEnc) {
7321 CAT->getSize().toStringUnsigned(Enc);
7337 TypeStringCache &TSC) {
7344 auto I = FPT->param_type_begin();
7345 auto E = FPT->param_type_end();
7354 if (FPT->isVariadic())
7357 if (FPT->isVariadic())
7371 TypeStringCache &TSC) {
7408 if (
const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
7414 if (
const VarDecl *VD = dyn_cast<VarDecl>(D)) {
7417 QualType QT = VD->getType().getCanonicalType();
7439 return !
getTriple().isOSBinFormatMachO();
7443 if (TheTargetCodeGenInfo)
7444 return *TheTargetCodeGenInfo;
7447 switch (Triple.getArch()) {
7449 return *(TheTargetCodeGenInfo =
new DefaultTargetCodeGenInfo(Types));
7451 case llvm::Triple::le32:
7452 return *(TheTargetCodeGenInfo =
new PNaClTargetCodeGenInfo(Types));
7453 case llvm::Triple::mips:
7454 case llvm::Triple::mipsel:
7455 if (Triple.getOS() == llvm::Triple::NaCl)
7456 return *(TheTargetCodeGenInfo =
new PNaClTargetCodeGenInfo(Types));
7457 return *(TheTargetCodeGenInfo =
new MIPSTargetCodeGenInfo(Types,
true));
7459 case llvm::Triple::mips64:
7460 case llvm::Triple::mips64el:
7461 return *(TheTargetCodeGenInfo =
new MIPSTargetCodeGenInfo(Types,
false));
7463 case llvm::Triple::aarch64:
7464 case llvm::Triple::aarch64_be: {
7465 AArch64ABIInfo::ABIKind Kind = AArch64ABIInfo::AAPCS;
7467 Kind = AArch64ABIInfo::DarwinPCS;
7469 return *(TheTargetCodeGenInfo =
new AArch64TargetCodeGenInfo(Types, Kind));
7472 case llvm::Triple::wasm32:
7473 case llvm::Triple::wasm64:
7474 return *(TheTargetCodeGenInfo =
new WebAssemblyTargetCodeGenInfo(Types));
7476 case llvm::Triple::arm:
7477 case llvm::Triple::armeb:
7478 case llvm::Triple::thumb:
7479 case llvm::Triple::thumbeb:
7481 if (Triple.getOS() == llvm::Triple::Win32) {
7482 TheTargetCodeGenInfo =
7483 new WindowsARMTargetCodeGenInfo(Types, ARMABIInfo::AAPCS_VFP);
7484 return *TheTargetCodeGenInfo;
7487 ARMABIInfo::ABIKind Kind = ARMABIInfo::AAPCS;
7489 if (ABIStr ==
"apcs-gnu")
7490 Kind = ARMABIInfo::APCS;
7491 else if (ABIStr ==
"aapcs16")
7492 Kind = ARMABIInfo::AAPCS16_VFP;
7493 else if (CodeGenOpts.
FloatABI ==
"hard" ||
7495 Triple.getEnvironment() == llvm::Triple::GNUEABIHF))
7496 Kind = ARMABIInfo::AAPCS_VFP;
7498 return *(TheTargetCodeGenInfo =
new ARMTargetCodeGenInfo(Types, Kind));
7501 case llvm::Triple::ppc:
7502 return *(TheTargetCodeGenInfo =
7503 new PPC32TargetCodeGenInfo(Types, CodeGenOpts.
FloatABI ==
"soft"));
7504 case llvm::Triple::ppc64:
7505 if (Triple.isOSBinFormatELF()) {
7506 PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv1;
7508 Kind = PPC64_SVR4_ABIInfo::ELFv2;
7511 return *(TheTargetCodeGenInfo =
7512 new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, HasQPX));
7514 return *(TheTargetCodeGenInfo =
new PPC64TargetCodeGenInfo(Types));
7515 case llvm::Triple::ppc64le: {
7516 assert(Triple.isOSBinFormatELF() &&
"PPC64 LE non-ELF not supported!");
7517 PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv2;
7519 Kind = PPC64_SVR4_ABIInfo::ELFv1;
7522 return *(TheTargetCodeGenInfo =
7523 new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, HasQPX));
7526 case llvm::Triple::nvptx:
7527 case llvm::Triple::nvptx64:
7528 return *(TheTargetCodeGenInfo =
new NVPTXTargetCodeGenInfo(Types));
7530 case llvm::Triple::msp430:
7531 return *(TheTargetCodeGenInfo =
new MSP430TargetCodeGenInfo(Types));
7533 case llvm::Triple::systemz: {
7535 return *(TheTargetCodeGenInfo =
new SystemZTargetCodeGenInfo(Types,
7539 case llvm::Triple::tce:
7540 return *(TheTargetCodeGenInfo =
new TCETargetCodeGenInfo(Types));
7542 case llvm::Triple::x86: {
7543 bool IsDarwinVectorABI = Triple.isOSDarwin();
7544 bool RetSmallStructInRegABI =
7545 X86_32TargetCodeGenInfo::isStructReturnInRegABI(Triple, CodeGenOpts);
7546 bool IsWin32FloatStructABI = Triple.isOSWindows() && !Triple.isOSCygMing();
7548 if (Triple.getOS() == llvm::Triple::Win32) {
7549 return *(TheTargetCodeGenInfo =
new WinX86_32TargetCodeGenInfo(
7550 Types, IsDarwinVectorABI, RetSmallStructInRegABI,
7551 IsWin32FloatStructABI, CodeGenOpts.NumRegisterParameters));
7553 return *(TheTargetCodeGenInfo =
new X86_32TargetCodeGenInfo(
7554 Types, IsDarwinVectorABI, RetSmallStructInRegABI,
7555 IsWin32FloatStructABI, CodeGenOpts.NumRegisterParameters,
7560 case llvm::Triple::x86_64: {
7562 X86AVXABILevel AVXLevel = (ABI ==
"avx512" ? X86AVXABILevel::AVX512 :
7563 ABI ==
"avx" ? X86AVXABILevel::AVX :
7566 switch (Triple.getOS()) {
7567 case llvm::Triple::Win32:
7568 return *(TheTargetCodeGenInfo =
7569 new WinX86_64TargetCodeGenInfo(Types, AVXLevel));
7570 case llvm::Triple::PS4:
7571 return *(TheTargetCodeGenInfo =
7572 new PS4TargetCodeGenInfo(Types, AVXLevel));
7574 return *(TheTargetCodeGenInfo =
7575 new X86_64TargetCodeGenInfo(Types, AVXLevel));
7578 case llvm::Triple::hexagon:
7579 return *(TheTargetCodeGenInfo =
new HexagonTargetCodeGenInfo(Types));
7580 case llvm::Triple::r600:
7581 return *(TheTargetCodeGenInfo =
new AMDGPUTargetCodeGenInfo(Types));
7582 case llvm::Triple::amdgcn:
7583 return *(TheTargetCodeGenInfo =
new AMDGPUTargetCodeGenInfo(Types));
7584 case llvm::Triple::sparcv9:
7585 return *(TheTargetCodeGenInfo =
new SparcV9TargetCodeGenInfo(Types));
7586 case llvm::Triple::xcore:
7587 return *(TheTargetCodeGenInfo =
new XCoreTargetCodeGenInfo(Types));
Ignore - Ignore the argument (treat as void).
FunctionDecl - An instance of this class is created to represent a function declaration or definition...
const ABIInfo & getABIInfo() const
getABIInfo() - Returns ABI info helper for the target.
void setEffectiveCallingConvention(unsigned Value)
External linkage, which indicates that the entity can be referred to from other translation units...
static ABIArgInfo getExtend(llvm::Type *T=nullptr)
PointerType - C99 6.7.5.1 - Pointer Declarators.
A (possibly-)qualified type.
llvm::Type * ConvertTypeForMem(QualType T)
unsigned getFieldCount() const
getFieldCount - Get the number of fields in the layout.
CanQualType getReturnType() const
bool hasFloatingRepresentation() const
Determine whether this type has a floating-point representation of some sort, e.g., it is a floating-point type or a vector thereof.
bool isBitField() const
Determines whether this field is a bitfield.
bool isMemberPointerType() const
unsigned getInAllocaFieldIndex() const
llvm::Module & getModule() const
llvm::LLVMContext & getLLVMContext()
const TargetInfo & getTarget() const
FunctionType - C99 6.7.5.3 - Function Declarators.
llvm::ConstantInt * getSize(CharUnits N)
llvm::AllocaInst * CreateTempAlloca(llvm::Type *Ty, const Twine &Name="tmp")
CreateTempAlloca - This creates an alloca and inserts it into the entry block.
Extend - Valid only for integer argument types.
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
llvm::LoadInst * CreateDefaultAlignedLoad(llvm::Value *Addr, const llvm::Twine &Name="")
Decl - This represents one declaration (or definition), e.g.
Address getAddress() const
static bool appendEnumType(SmallStringEnc &Enc, const EnumType *ET, TypeStringCache &TSC, const IdentifierInfo *ID)
Appends enum types to Enc and adds the encoding to the cache.
Direct - Pass the argument directly using the normal converted LLVM type, or by coercing to another s...
static const Type * isSingleElementStruct(QualType T, ASTContext &Context)
isSingleElementStruct - Determine if a structure is a "single element struct", i.e.
bool hasFlexibleArrayMember() const
bool isEnumeralType() const
const llvm::DataLayout & getDataLayout() const
The base class of the type hierarchy.
Represents an array type, per C99 6.7.5.2 - Array Declarators.
static bool appendType(SmallStringEnc &Enc, QualType QType, const CodeGen::CodeGenModule &CGM, TypeStringCache &TSC)
Handles the type's qualifier before dispatching a call to handle specific type encodings.
bool isBlockPointerType() const
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
VarDecl - An instance of this class is created to represent a variable declaration or definition...
llvm::Type * getElementType() const
Return the type of the values stored in this address.
virtual ~TargetCodeGenInfo()
CallingConv getCallConv() const
field_iterator field_begin() const
void setCoerceToType(llvm::Type *T)
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
Address CreateConstInBoundsByteGEP(Address Addr, CharUnits Offset, const llvm::Twine &Name="")
Given a pointer to i8, adjust it by a given constant offset.
static ABIArgInfo getIgnore()
static bool isAggregateTypeForABI(QualType T)
static llvm::Value * getTypeSize(CodeGenFunction &CGF, QualType Ty)
RecordDecl - Represents a struct/union/class.
const_arg_iterator arg_end() const
One of these records is kept for each identifier that is lexed.
bool isScalarType() const
class LLVM_ALIGNAS(8) DependentTemplateSpecializationType const IdentifierInfo * Name
Represents a template specialization type whose template cannot be resolved, e.g. ...
Indirect - Pass the argument indirectly via a hidden pointer with the specified alignment (0 indicate...
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic analysis of a file.
llvm::IntegerType * Int64Ty
static llvm::Type * GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi, const llvm::DataLayout &TD)
GetX86_64ByValArgumentPair - Given a high and low type that can ideally be used as elements of a two ...
static Address EmitX86_64VAArgFromMemory(CodeGenFunction &CGF, Address VAListAddr, QualType Ty)
bool isReferenceType() const
bool isStructureOrClassType() const
FieldDecl - An instance of this class is created by Sema::ActOnField to represent a member of a struct.
static ABIArgInfo getExtendInReg(llvm::Type *T=nullptr)
const RecordType * getAsUnionType() const
NOTE: getAs*ArrayType are methods on ASTContext.
static CharUnits Zero()
Zero - Construct a CharUnits quantity of zero.
llvm::Type * getCoerceToType() const
static ABIArgInfo getIndirectInReg(CharUnits Alignment, bool ByVal=true, bool Realign=false)
static ABIArgInfo getDirect(llvm::Type *T=nullptr, unsigned Offset=0, llvm::Type *Padding=nullptr, bool CanBeFlattened=true)
static bool hasScalarEvaluationKind(QualType T)
static ABIArgInfo getExpandWithPadding(bool PaddingInReg, llvm::Type *Padding)
static bool appendRecordType(SmallStringEnc &Enc, const RecordType *RT, const CodeGen::CodeGenModule &CGM, TypeStringCache &TSC, const IdentifierInfo *ID)
Appends structure and union types to Enc and adds encoding to cache.
Address CreateElementBitCast(Address Addr, llvm::Type *Ty, const llvm::Twine &Name="")
Cast the element type of the given address to a different type, preserving information like the align...
CharUnits - This is an opaque type for sizes expressed in character units.
QualType getReturnType() const
virtual CodeGen::Address EmitMSVAArg(CodeGen::CodeGenFunction &CGF, CodeGen::Address VAListAddr, QualType Ty) const
Emit the target dependent code to load a value of.
virtual unsigned getSizeOfUnwindException() const
Determines the size of struct _Unwind_Exception on this platform, in 8-bit units. ...
field_range fields() const
static void rewriteInputConstraintReferences(unsigned FirstIn, unsigned NumNewOuts, std::string &AsmString)
Rewrite input constraint references after adding some output constraints.
static bool isRecordWithSSEVectorType(ASTContext &Context, QualType Ty)
RecordDecl * getDecl() const
CharUnits getPointerSize() const
llvm::CallInst * CreateMemCpy(Address Dest, Address Src, llvm::Value *Size, bool IsVolatile=false)
ABIArgInfo - Helper class to encapsulate information about how a specific C type should be passed to or returned from a function.
unsigned getCallingConvention() const
getCallingConvention - Return the user specified calling convention, which has been translated into a...
const ASTRecordLayout & getASTRecordLayout(const RecordDecl *D) const
Get or compute information about the layout of the specified record (struct/union/class) D...
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
static bool BitsContainNoUserData(QualType Ty, unsigned StartBit, unsigned EndBit, ASTContext &Context)
BitsContainNoUserData - Return true if the specified [start,end) bit range is known to either be off ...
static ABIArgInfo getExpand()
virtual StringRef getABI() const
Get the ABI currently in use.
detail::InMemoryDirectory::const_iterator I
static QualType useFirstFieldIfTransparentUnion(QualType Ty)
Pass transparent unions as if they were the type of the first element.
std::pair< CharUnits, CharUnits > getTypeInfoInChars(const Type *T) const
bool isUnnamedBitfield() const
Determines whether this is an unnamed bitfield.
std::string FloatABI
The ABI to use for passing floating point arguments.
field_iterator field_end() const
ABIInfo - Target specific hooks for defining how a type should be passed or returned from functions...
const ArrayType * getAsArrayTypeUnsafe() const
A variant of getAs<> for array types which silently discards qualifiers from the outermost type...
EnumDecl * getDecl() const
Represents a K&R-style 'int foo()' function, which has no information available about its arguments...
unsigned getNumRequiredArgs() const
static bool ContainsFloatAtOffset(llvm::Type *IRType, unsigned IROffset, const llvm::DataLayout &TD)
ContainsFloatAtOffset - Return true if the specified LLVM IR type has a float member at the specified...
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource AlignSource=AlignmentSource::Type)
CodeGen::ABIArgInfo getNaturalAlignIndirectInReg(QualType Ty, bool Realign=false) const
static CharUnits One()
One - Construct a CharUnits quantity of one.
Represents a prototype with parameter type info, e.g.
bool isFloatingPoint() const
const TargetCodeGenInfo & getTargetCodeGenInfo()
static bool extractFieldType(SmallVectorImpl< FieldEncoding > &FE, const RecordDecl *RD, const CodeGen::CodeGenModule &CGM, TypeStringCache &TSC)
Helper function for appendRecordType().
const TargetInfo & getTarget() const
static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder, llvm::Value *Array, llvm::Value *Value, unsigned FirstIndex, unsigned LastIndex)
static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context)
void setAddress(Address address)
ID
Defines the set of possible language-specific address spaces.
bool isRealFloatingType() const
Floating point categories.
ASTRecordLayout - This class contains layout information for one RecordDecl, which is a struct/union/class.
Exposes information about the current target.
llvm::Value * getPointer() const
StringRef getName() const
Return the actual identifier string.
bool isAnyComplexType() const
llvm::LLVMContext & getVMContext() const
static bool appendBuiltinType(SmallStringEnc &Enc, const BuiltinType *BT)
Appends built-in types to Enc.
CharUnits getIndirectAlign() const
virtual void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const
setTargetAttributes - Provides a convenient hook to handle extra target-specific attributes for the given global.
ASTContext & getContext() const
CharUnits getBaseClassOffset(const CXXRecordDecl *Base) const
getBaseClassOffset - Get the offset, in chars, for the given base class.
bool isFloatingType() const
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
static bool getTypeString(SmallStringEnc &Enc, const Decl *D, CodeGen::CodeGenModule &CGM, TypeStringCache &TSC)
The XCore ABI includes a type information section that communicates symbol type information to the li...
llvm::LLVMContext & getLLVMContext()
llvm::IntegerType * Int32Ty
virtual void getDependentLibraryOption(llvm::StringRef Lib, llvm::SmallString< 24 > &Opt) const
Gets the linker options necessary to link a dependent library on this platform.
TargetCodeGenInfo - This class organizes various target-specific codegeneration issues, like target-specific attributes, builtins and so on.
static Address emitVoidPtrDirectVAArg(CodeGenFunction &CGF, Address VAListAddr, llvm::Type *DirectTy, CharUnits DirectSize, CharUnits DirectAlign, CharUnits SlotSize, bool AllowHigherAlign)
Emit va_arg for a platform using the common void* representation, where arguments are simply emitted ...
const IdentifierInfo * getBaseTypeIdentifier() const
Retrieves a pointer to the name of the base type.
Represents a GCC generic vector type.
ASTContext & getContext() const
Implements C++ ABI-specific semantic analysis functions.
unsigned getRegParm() const
The result type of a method or function.
RecordDecl * getDefinition() const
getDefinition - Returns the RecordDecl that actually defines this struct/union/class.
bool isUnsignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is unsigned or an enumeration types whose underlying ...
The l-value was considered opaque, so the alignment was determined from a type.
Pass it as a pointer to temporary memory.
static void appendQualifier(SmallStringEnc &Enc, QualType QT)
Appends type's qualifier to Enc.
static Address emitMergePHI(CodeGenFunction &CGF, Address Addr1, llvm::BasicBlock *Block1, Address Addr2, llvm::BasicBlock *Block2, const llvm::Twine &Name="")
static bool isEmptyField(ASTContext &Context, const FieldDecl *FD, bool AllowArrays)
isEmptyField - Return true iff a the field is "empty", that is it is an unnamed bit-field or an (arra...
Address CreateBitCast(Address Addr, llvm::Type *Ty, const llvm::Twine &Name="")
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
bool isIntegralOrEnumerationType() const
Determine whether this type is an integral or enumeration type.
virtual bool shouldSignExtUnsignedType(QualType Ty) const
ASTContext & getContext() const
CharUnits getPointerAlign() const
A helper class that allows the use of isa/cast/dyncast to detect TagType objects of enums...
const Type * getTypePtr() const
Retrieves a pointer to the underlying (unqualified) type.
bool isComplexType() const
isComplexType() does not include complex integers (a GCC extension).
bool isBuiltinType() const
Helper methods to distinguish type categories.
static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays)
isEmptyRecord - Return true iff a structure contains only empty fields.
virtual bool isNoProtoCallVariadic(const CodeGen::CallArgList &args, const FunctionNoProtoType *fnType) const
Determine whether a call to an unprototyped functions under the given calling convention should use t...
static bool appendFunctionType(SmallStringEnc &Enc, const FunctionType *FT, const CodeGen::CodeGenModule &CGM, TypeStringCache &TSC)
Appends a function encoding to Enc, calling appendType for the return type and the arguments...
bool isHomogeneousAggregate(QualType Ty, const Type *&Base, uint64_t &Members) const
isHomogeneousAggregate - Return true if a type is an ELFv2 homogeneous aggregate. ...
void setArgStruct(llvm::StructType *Ty, CharUnits Align)
const ConstantArrayType * getAsConstantArrayType(QualType T) const
bool isMultipleOf(CharUnits N) const
Test whether this is a multiple of the other value.
CharUnits alignmentOfArrayElement(CharUnits elementSize) const
Given that this is the alignment of the first element of an array, return the minimum alignment of an...
const CodeGenOptions & getCodeGenOpts() const
const LangOptions & getLangOpts() const
bool operator<(DeclarationName LHS, DeclarationName RHS)
Ordering on two declaration names.
bool isVectorType() const
bool isPromotableIntegerType() const
More type predicates useful for type checking/promotion.
bool isMemberFunctionPointerType() const
InAlloca - Pass the argument directly using the LLVM inalloca attribute.
X86AVXABILevel
The AVX ABI level for X86 targets.
bool isVolatileQualified() const
Determine whether this type is volatile-qualified.
static llvm::Value * emitRoundPointerUpToAlignment(CodeGenFunction &CGF, llvm::Value *Ptr, CharUnits Align)
QualType getPointeeType() const
bool isSRetAfterThis() const
CGFunctionInfo - Class to encapsulate the information about a function definition.
static bool canExpandIndirectArgument(QualType Ty, ASTContext &Context)
canExpandIndirectArgument - Test whether an argument type which is to be passed indirectly (on the st...
CharUnits getAlignment() const
Return the alignment of this pointer.
This class organizes the cross-function state that is used while generating LLVM code.
bool canHaveCoerceToType() const
static const Type * getElementType(const Expr *BaseExpr)
bool isAggregateType() const
Determines whether the type is a C++ aggregate type or C aggregate or union type. ...
bool isZero() const
isZero - Test whether the quantity equals zero.
static ABIArgInfo getDirectInReg(llvm::Type *T=nullptr)
Address CreateMemTemp(QualType T, const Twine &Name="tmp")
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment...
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
Address CreateStructGEP(Address Addr, unsigned Index, CharUnits Offset, const llvm::Twine &Name="")
static bool appendArrayType(SmallStringEnc &Enc, QualType QT, const ArrayType *AT, const CodeGen::CodeGenModule &CGM, TypeStringCache &TSC, StringRef NoSizeEnc)
Appends array encoding to Enc before calling appendType for the element.
std::unique_ptr< DiagnosticConsumer > create(StringRef OutputFile, DiagnosticOptions *Diags, bool MergeChildRecords=false)
Returns a DiagnosticConsumer that serializes diagnostics to a bitcode file.
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
llvm::IntegerType * IntPtrTy
const llvm::Triple & getTriple() const
Returns the target triple of the primary target.
EnumDecl - Represents an enum.
detail::InMemoryDirectory::const_iterator E
for(auto typeArg:T->getTypeArgsAsWritten())
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
const llvm::Triple & getTriple() const
specific_decl_iterator - Iterates over a subrange of declarations stored in a DeclContext, providing only those that are of type SpecificDecl (or a class derived from it).
unsigned Map[Count]
The type of a lookup table which maps from language-specific address spaces to target-specific ones...
const RecordType * getAsStructureType() const
llvm::PointerType * getType() const
Return the type of the pointer value.
A helper class that allows the use of isa/cast/dyncast to detect TagType objects of structs/unions/cl...
Complex values, per C99 6.2.5p11.
const T * getAs() const
Member-template getAs<specific type>'.
QualType getCanonicalType() const
Pass it using the normal C aggregate rules for the ABI, potentially introducing extra copies and pass...
Address CreateConstArrayGEP(Address Addr, uint64_t Index, CharUnits EltSize, const llvm::Twine &Name="")
Given addr = [n x T]* ...
void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit)
EmitStoreOfComplex - Store a complex number into the specified l-value.
LanguageLinkage getLanguageLinkage() const
Compute the language linkage.
Implements C++ ABI-specific code generation functions.
This class organizes the cross-module state that is used while lowering AST types to LLVM types...
llvm::PointerType * Int8PtrTy
CodeGenOptions - Track various options which control how the code is optimized and passed to the back...
Expand - Only valid for aggregate argument types.
virtual bool isHomogeneousAggregateSmallEnough(const Type *Base, uint64_t Members) const
bool isRestrictQualified() const
Determine whether this type is restrict-qualified.
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
static bool isArgInAlloca(const ABIArgInfo &Info)
static ABIArgInfo getInAlloca(unsigned FieldIndex)
ABIArgInfo & getReturnInfo()
Pass it on the stack using its defined layout.
static CGCXXABI::RecordArgABI getRecordArgABI(const RecordType *RT, CGCXXABI &CXXABI)
Address CreateConstByteGEP(Address Addr, CharUnits Offset, const llvm::Twine &Name="")
Represents a C++ struct/union/class.
BoundNodesTreeBuilder *const Builder
void EmitBranch(llvm::BasicBlock *Block)
EmitBranch - Emit a branch to the specified basic block from the current insert block, taking care to avoid creation of branches from dummy blocks.
llvm::Type * ConvertType(QualType T)
CodeGen::CGCXXABI & getCXXABI() const
bool getHasRegParm() const
ArraySizeModifier getSizeModifier() const
virtual RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const =0
Returns how an argument of the given record type should be passed.
This class is used for builtin types like 'int'.
static Address emitVoidPtrVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType ValueTy, bool IsIndirect, std::pair< CharUnits, CharUnits > ValueInfo, CharUnits SlotSizeAndAlign, bool AllowHigherAlign)
Emit va_arg for a platform using the common void* representation, where arguments are simply emitted ...
const_arg_iterator arg_begin() const
bool getIndirectByVal() const
CodeGen::ABIArgInfo getNaturalAlignIndirect(QualType Ty, bool ByRef=true, bool Realign=false, llvm::Type *Padding=nullptr) const
A convenience method to return an indirect ABIArgInfo with an expected alignment equal to the ABI ali...
static bool appendPointerType(SmallStringEnc &Enc, const PointerType *PT, const CodeGen::CodeGenModule &CGM, TypeStringCache &TSC)
Appends a pointer encoding to Enc before calling appendType for the pointee.
QualType getElementType() const
llvm::StoreInst * CreateAlignedStore(llvm::Value *Val, llvm::Value *Addr, CharUnits Align, bool IsVolatile=false)
bool getIndirectRealign() const
int64_t toBits(CharUnits CharSize) const
Convert a size in characters to a size in bits.
LValue - This represents an lvalue references.
void setInAllocaSRet(bool SRet)
const llvm::DataLayout & getDataLayout() const
EnumDecl * getDefinition() const
bool isSignedIntegerType() const
Return true if this is an integer type that is signed, according to C99 6.2.5p4 [char, signed char, short, int, long..], or an enum decl which has a signed representation.
bool isConstQualified() const
Determine whether this type is const-qualified.
RecordArgABI
Specify how one should pass an argument of a record type.
bool isNull() const
Return true if this QualType doesn't point to a type yet.
static bool isIntegerLikeType(QualType Ty, ASTContext &Context, llvm::LLVMContext &VMContext)
static bool isSSEVectorType(ASTContext &Context, QualType Ty)
CharUnits RoundUpToAlignment(const CharUnits &Align) const
RoundUpToAlignment - Returns the next integer (mod 2**64) that is greater than or equal to this quant...
CallArgList - Type for representing both the value and type of arguments in a call.
static bool PPC64_initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, llvm::Value *Address)
Represents the canonical version of C arrays with a specified constant size.
static ABIArgInfo getIndirect(CharUnits Alignment, bool ByVal=true, bool Realign=false, llvm::Type *Padding=nullptr)
virtual bool isHomogeneousAggregateBaseType(QualType Ty) const
Attr - This represents one attribute.
bool supportsCOMDAT() const
bool hasPointerRepresentation() const
Whether this type is represented natively as a pointer.
bool isIntegerType() const
isIntegerType() does not include complex integers (a GCC extension).
bool isPointerType() const