#include "llvm/IR/IntrinsicsLoongArch.h"

#define DEBUG_TYPE "loongarch-isel-lowering"

    cl::desc("Trap on integer division by zero."),

  if (Subtarget.hasBasicF())
  if (Subtarget.hasBasicD())

      MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64, MVT::v4f32, MVT::v2f64};
      MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64, MVT::v8f32, MVT::v4f64};

  if (Subtarget.hasExtLSX())
  if (Subtarget.hasExtLASX())
    for (MVT VT : LASXVTs)
  if (Subtarget.hasBasicF()) {
  if (!Subtarget.hasBasicD()) {
  if (Subtarget.hasBasicD()) {

  if (Subtarget.hasExtLSX()) {
    for (MVT VT : LSXVTs) {
    for (MVT VT : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64}) {
    for (MVT VT : {MVT::v16i8, MVT::v8i16, MVT::v4i32})
    for (MVT VT : {MVT::v8i16, MVT::v4i32, MVT::v2i64})
    for (MVT VT : {MVT::v4i32, MVT::v2i64}) {
    for (MVT VT : {MVT::v4f32, MVT::v2f64}) {

  if (Subtarget.hasExtLASX()) {
    for (MVT VT : LASXVTs) {
    for (MVT VT : {MVT::v4i64, MVT::v8i32, MVT::v16i16, MVT::v32i8}) {
    for (MVT VT : {MVT::v32i8, MVT::v16i16, MVT::v8i32})
    for (MVT VT : {MVT::v16i16, MVT::v8i32, MVT::v4i64})
    for (MVT VT : {MVT::v8i32, MVT::v4i32, MVT::v4i64}) {
    for (MVT VT : {MVT::v8f32, MVT::v4f64}) {

  if (Subtarget.hasExtLSX())
  if (Subtarget.hasLAMCAS())

  switch (Op.getOpcode()) {
    return lowerATOMIC_FENCE(Op, DAG);
    return lowerEH_DWARF_CFA(Op, DAG);
    return lowerGlobalAddress(Op, DAG);
    return lowerGlobalTLSAddress(Op, DAG);
    return lowerINTRINSIC_WO_CHAIN(Op, DAG);
    return lowerINTRINSIC_W_CHAIN(Op, DAG);
    return lowerINTRINSIC_VOID(Op, DAG);
    return lowerBlockAddress(Op, DAG);
    return lowerJumpTable(Op, DAG);
    return lowerShiftLeftParts(Op, DAG);
    return lowerShiftRightParts(Op, DAG, true);
    return lowerShiftRightParts(Op, DAG, false);
    return lowerConstantPool(Op, DAG);
    return lowerFP_TO_SINT(Op, DAG);
    return lowerBITCAST(Op, DAG);
    return lowerUINT_TO_FP(Op, DAG);
    return lowerSINT_TO_FP(Op, DAG);
    return lowerVASTART(Op, DAG);
    return lowerFRAMEADDR(Op, DAG);
    return lowerRETURNADDR(Op, DAG);
    return lowerWRITE_REGISTER(Op, DAG);
    return lowerINSERT_VECTOR_ELT(Op, DAG);
    return lowerEXTRACT_VECTOR_ELT(Op, DAG);
    return lowerBUILD_VECTOR(Op, DAG);
    return lowerVECTOR_SHUFFLE(Op, DAG);
    return lowerBITREVERSE(Op, DAG);
  EVT ResTy = Op->getValueType(0);

  for (unsigned int i = 0; i < NewEltNum; i++) {
    SDValue RevOp = DAG.getNode((ResTy == MVT::v16i8 || ResTy == MVT::v32i8)

  for (unsigned int i = 0; i < NewEltNum; i++)
    for (int j = OrigEltNum / NewEltNum - 1; j >= 0; j--)
      Mask.push_back(j + (OrigEltNum / NewEltNum) * i);
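// fitsRegularPattern<ValType>: walks the mask from Begin to End in steps of
// CheckStride; every inspected element must be -1 (undef) or equal
// ExpectedIndex, and ExpectedIndex advances by ExpectedIndexStride after each
// check. For example, a mask of <0, 2, 4, 6> satisfies
// fitsRegularPattern<int>(Begin, 1, End, 0, 2).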
template <typename ValType>
                               unsigned CheckStride,
                               ValType ExpectedIndex,
                               unsigned ExpectedIndexStride) {
    if (*I != -1 && *I != ExpectedIndex)
    ExpectedIndex += ExpectedIndexStride;
    for (unsigned n = 0; n < CheckStride && I != End; ++n, ++I)

  for (const auto &M : Mask) {
  if (SplatIndex == -1)

  assert(SplatIndex < (int)Mask.size() && "Out of bounds mask index");
  if (fitsRegularPattern<int>(Mask.begin(), 1, Mask.end(), SplatIndex, 0)) {
    APInt Imm(64, SplatIndex);
  int SubMask[4] = {-1, -1, -1, -1};
  for (unsigned i = 0; i < 4; ++i) {
    for (unsigned j = i; j < Mask.size(); j += 4) {
      if (Idx < 0 || Idx >= 4)
      if (SubMask[i] == -1)
      else if (Idx != -1 && Idx != SubMask[i])

  for (int i = 3; i >= 0; --i) {
    int Idx = SubMask[i];
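// Each of the candidate shuffle lowerings below follows the same scheme: the
// even-position mask entries (Begin, stride 2) and the odd-position entries
// (Begin + 1, stride 2) are tested against a regular index pattern drawn
// either from V1 (indices starting at 0) or from V2 (indices offset by
// Mask.size()), with OriV1/OriV2 preserving the original operands.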
  const auto &Begin = Mask.begin();
  const auto &End = Mask.end();
  SDValue OriV1 = V1, OriV2 = V2;

  if (fitsRegularPattern<int>(Begin, 2, End, 0, 2))
  else if (fitsRegularPattern<int>(Begin, 2, End, Mask.size(), 2))

  if (fitsRegularPattern<int>(Begin + 1, 2, End, 0, 2))
  else if (fitsRegularPattern<int>(Begin + 1, 2, End, Mask.size(), 2))

  const auto &Begin = Mask.begin();
  const auto &End = Mask.end();
  SDValue OriV1 = V1, OriV2 = V2;

  if (fitsRegularPattern<int>(Begin, 2, End, 1, 2))
  else if (fitsRegularPattern<int>(Begin, 2, End, Mask.size() + 1, 2))

  if (fitsRegularPattern<int>(Begin + 1, 2, End, 1, 2))
  else if (fitsRegularPattern<int>(Begin + 1, 2, End, Mask.size() + 1, 2))

  const auto &Begin = Mask.begin();
  const auto &End = Mask.end();
  unsigned HalfSize = Mask.size() / 2;
  SDValue OriV1 = V1, OriV2 = V2;

  if (fitsRegularPattern<int>(Begin, 2, End, HalfSize, 1))
  else if (fitsRegularPattern<int>(Begin, 2, End, Mask.size() + HalfSize, 1))

  if (fitsRegularPattern<int>(Begin + 1, 2, End, HalfSize, 1))
  else if (fitsRegularPattern<int>(Begin + 1, 2, End, Mask.size() + HalfSize,

  const auto &Begin = Mask.begin();
  const auto &End = Mask.end();
  SDValue OriV1 = V1, OriV2 = V2;

  if (fitsRegularPattern<int>(Begin, 2, End, 0, 1))
  else if (fitsRegularPattern<int>(Begin, 2, End, Mask.size(), 1))

  if (fitsRegularPattern<int>(Begin + 1, 2, End, 0, 1))
  else if (fitsRegularPattern<int>(Begin + 1, 2, End, Mask.size(), 1))

  const auto &Begin = Mask.begin();
  const auto &Mid = Mask.begin() + Mask.size() / 2;
  const auto &End = Mask.end();
  SDValue OriV1 = V1, OriV2 = V2;

  if (fitsRegularPattern<int>(Begin, 1, Mid, 0, 2))
  else if (fitsRegularPattern<int>(Begin, 1, Mid, Mask.size(), 2))

  if (fitsRegularPattern<int>(Mid, 1, End, 0, 2))
  else if (fitsRegularPattern<int>(Mid, 1, End, Mask.size(), 2))

  const auto &Begin = Mask.begin();
  const auto &Mid = Mask.begin() + Mask.size() / 2;
  const auto &End = Mask.end();
  SDValue OriV1 = V1, OriV2 = V2;

  if (fitsRegularPattern<int>(Begin, 1, Mid, 1, 2))
  else if (fitsRegularPattern<int>(Begin, 1, Mid, Mask.size() + 1, 2))

  if (fitsRegularPattern<int>(Mid, 1, End, 1, 2))
  else if (fitsRegularPattern<int>(Mid, 1, End, Mask.size() + 1, 2))

         "Vector type is unsupported for lsx!");
         "Two operands have different types!");
         "Unexpected mask size for shuffle!");
  assert(Mask.size() % 2 == 0 && "Expected even mask size.");
  for (const auto &M : Mask) {
  if (SplatIndex == -1)

  const auto &Begin = Mask.begin();
  const auto &End = Mask.end();
  unsigned HalfSize = Mask.size() / 2;

  assert(SplatIndex < (int)Mask.size() && "Out of bounds mask index");
  if (fitsRegularPattern<int>(Begin, 1, End - HalfSize, SplatIndex, 0) &&
      fitsRegularPattern<int>(Begin + HalfSize, 1, End, SplatIndex + HalfSize,
    APInt Imm(64, SplatIndex);

  if (Mask.size() <= 4)
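// The 256-bit (lasx) variants below run each pattern check twice: once over
// the low half of the mask, [Begin, End - HalfSize), and once over the high
// half, [Begin + HalfSize, End), so the pattern must hold within each 128-bit
// half independently.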
  const auto &Begin = Mask.begin();
  const auto &End = Mask.end();
  unsigned HalfSize = Mask.size() / 2;
  unsigned LeftSize = HalfSize / 2;
  SDValue OriV1 = V1, OriV2 = V2;

  if (fitsRegularPattern<int>(Begin, 2, End - HalfSize, HalfSize - LeftSize,
      fitsRegularPattern<int>(Begin + HalfSize, 2, End, HalfSize + LeftSize, 1))
  else if (fitsRegularPattern<int>(Begin, 2, End - HalfSize,
                                   Mask.size() + HalfSize - LeftSize, 1) &&
           fitsRegularPattern<int>(Begin + HalfSize, 2, End,
                                   Mask.size() + HalfSize + LeftSize, 1))

  if (fitsRegularPattern<int>(Begin + 1, 2, End - HalfSize, HalfSize - LeftSize,
      fitsRegularPattern<int>(Begin + 1 + HalfSize, 2, End, HalfSize + LeftSize,
  else if (fitsRegularPattern<int>(Begin + 1, 2, End - HalfSize,
                                   Mask.size() + HalfSize - LeftSize, 1) &&
           fitsRegularPattern<int>(Begin + 1 + HalfSize, 2, End,
                                   Mask.size() + HalfSize + LeftSize, 1))

  const auto &Begin = Mask.begin();
  const auto &End = Mask.end();
  unsigned HalfSize = Mask.size() / 2;
  SDValue OriV1 = V1, OriV2 = V2;

  if (fitsRegularPattern<int>(Begin, 2, End - HalfSize, 0, 1) &&
      fitsRegularPattern<int>(Begin + HalfSize, 2, End, HalfSize, 1))
  else if (fitsRegularPattern<int>(Begin, 2, End - HalfSize, Mask.size(), 1) &&
           fitsRegularPattern<int>(Begin + HalfSize, 2, End,
                                   Mask.size() + HalfSize, 1))

  if (fitsRegularPattern<int>(Begin + 1, 2, End - HalfSize, 0, 1) &&
      fitsRegularPattern<int>(Begin + 1 + HalfSize, 2, End, HalfSize, 1))
  else if (fitsRegularPattern<int>(Begin + 1, 2, End - HalfSize, Mask.size(),
           fitsRegularPattern<int>(Begin + 1 + HalfSize, 2, End,
                                   Mask.size() + HalfSize, 1))

  const auto &Begin = Mask.begin();
  const auto &LeftMid = Mask.begin() + Mask.size() / 4;
  const auto &Mid = Mask.begin() + Mask.size() / 2;
  const auto &RightMid = Mask.end() - Mask.size() / 4;
  const auto &End = Mask.end();
  unsigned HalfSize = Mask.size() / 2;
  SDValue OriV1 = V1, OriV2 = V2;

  if (fitsRegularPattern<int>(Begin, 1, LeftMid, 0, 2) &&
      fitsRegularPattern<int>(Mid, 1, RightMid, HalfSize, 2))
  else if (fitsRegularPattern<int>(Begin, 1, LeftMid, Mask.size(), 2) &&
           fitsRegularPattern<int>(Mid, 1, RightMid, Mask.size() + HalfSize, 2))

  if (fitsRegularPattern<int>(LeftMid, 1, Mid, 0, 2) &&
      fitsRegularPattern<int>(RightMid, 1, End, HalfSize, 2))
  else if (fitsRegularPattern<int>(LeftMid, 1, Mid, Mask.size(), 2) &&
           fitsRegularPattern<int>(RightMid, 1, End, Mask.size() + HalfSize, 2))

  const auto &Begin = Mask.begin();
  const auto &LeftMid = Mask.begin() + Mask.size() / 4;
  const auto &Mid = Mask.begin() + Mask.size() / 2;
  const auto &RightMid = Mask.end() - Mask.size() / 4;
  const auto &End = Mask.end();
  unsigned HalfSize = Mask.size() / 2;
  SDValue OriV1 = V1, OriV2 = V2;

  if (fitsRegularPattern<int>(Begin, 1, LeftMid, 1, 2) &&
      fitsRegularPattern<int>(Mid, 1, RightMid, HalfSize + 1, 2))
  else if (fitsRegularPattern<int>(Begin, 1, LeftMid, Mask.size() + 1, 2) &&
           fitsRegularPattern<int>(Mid, 1, RightMid, Mask.size() + HalfSize + 1,

  if (fitsRegularPattern<int>(LeftMid, 1, Mid, 1, 2) &&
      fitsRegularPattern<int>(RightMid, 1, End, HalfSize + 1, 2))
  else if (fitsRegularPattern<int>(LeftMid, 1, Mid, Mask.size() + 1, 2) &&
           fitsRegularPattern<int>(RightMid, 1, End, Mask.size() + HalfSize + 1,

  int MaskSize = Mask.size();
  int HalfSize = Mask.size() / 2;
  const auto &Begin = Mask.begin();
  const auto &Mid = Mask.begin() + HalfSize;
  const auto &End = Mask.end();

  for (auto it = Begin; it < Mid; it++) {
    else if ((*it >= 0 && *it < HalfSize) ||
             (*it >= MaskSize && *it <= MaskSize + HalfSize)) {
      int M = *it < HalfSize ? *it : *it - HalfSize;
  assert((int)MaskAlloc.size() == HalfSize && "xvshuf convert failed!");

  for (auto it = Mid; it < End; it++) {
    else if ((*it >= HalfSize && *it < MaskSize) ||
             (*it >= MaskSize + HalfSize && *it < MaskSize * 2)) {
      int M = *it < MaskSize ? *it - HalfSize : *it - MaskSize;
  assert((int)MaskAlloc.size() == MaskSize && "xvshuf convert failed!");
  enum HalfMaskType { HighLaneTy, LowLaneTy, None };

  int MaskSize = Mask.size();
  int HalfSize = Mask.size() / 2;

  HalfMaskType preMask = None, postMask = None;

  if (std::all_of(Mask.begin(), Mask.begin() + HalfSize, [&](int M) {
        return M < 0 || (M >= 0 && M < HalfSize) ||
               (M >= MaskSize && M < MaskSize + HalfSize);
    preMask = HighLaneTy;
  else if (std::all_of(Mask.begin(), Mask.begin() + HalfSize, [&](int M) {
             return M < 0 || (M >= HalfSize && M < MaskSize) ||
                    (M >= MaskSize + HalfSize && M < MaskSize * 2);
    preMask = LowLaneTy;

  if (std::all_of(Mask.begin() + HalfSize, Mask.end(), [&](int M) {
        return M < 0 || (M >= 0 && M < HalfSize) ||
               (M >= MaskSize && M < MaskSize + HalfSize);
    postMask = HighLaneTy;
  else if (std::all_of(Mask.begin() + HalfSize, Mask.end(), [&](int M) {
             return M < 0 || (M >= HalfSize && M < MaskSize) ||
                    (M >= MaskSize + HalfSize && M < MaskSize * 2);
    postMask = LowLaneTy;

  if (preMask == HighLaneTy && postMask == LowLaneTy) {
  if (preMask == LowLaneTy && postMask == HighLaneTy) {
    if (!V2.isUndef()) {
    for (auto it = Mask.begin(); it < Mask.begin() + HalfSize; it++) {
      *it = *it < 0 ? *it : *it - HalfSize;
    for (auto it = Mask.begin() + HalfSize; it < Mask.end(); it++) {
      *it = *it < 0 ? *it : *it + HalfSize;
  } else if (preMask == LowLaneTy && postMask == LowLaneTy) {
    if (!V2.isUndef()) {
    for (auto it = Mask.begin(); it < Mask.begin() + HalfSize; it++) {
      *it = *it < 0 ? *it : *it - HalfSize;
  } else if (preMask == HighLaneTy && postMask == HighLaneTy) {
    if (!V2.isUndef()) {
    for (auto it = Mask.begin() + HalfSize; it < Mask.end(); it++) {
      *it = *it < 0 ? *it : *it + HalfSize;

         "Vector type is unsupported for lasx!");
         "Two operands have different types!");
         "Unexpected mask size for shuffle!");
  assert(Mask.size() % 2 == 0 && "Expected even mask size.");
  assert(Mask.size() >= 4 && "Mask size is less than 4.");

  MVT VT = Op.getSimpleValueType();

  bool V1IsUndef = V1.isUndef();
  bool V2IsUndef = V2.isUndef();
  if (V1IsUndef && V2IsUndef)

      any_of(OrigMask, [NumElements](int M) { return M >= NumElements; })) {
    for (int &M : NewMask)
      if (M >= NumElements)

  int MaskUpperLimit = OrigMask.size() * (V2IsUndef ? 1 : 2);
  (void)MaskUpperLimit;
         [&](int M) { return -1 <= M && M < MaskUpperLimit; }) &&
         "Out of bounds shuffle index");

  if (isa<ConstantSDNode>(Op))
  if (isa<ConstantFPSDNode>(Op))
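// lowerBUILD_VECTOR (fragment below): constant splats detected with
// isConstantSplat are re-materialized through a "via" integer vector type
// chosen from the splat bit size (v16i8/v32i8 for 8-bit splats up to
// v2i64/v4i64 for 64-bit), and bitcast back whenever ViaVecTy differs from
// ResTy; non-splat vectors are built element by element below that.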
  EVT ResTy = Op->getValueType(0);

  APInt SplatValue, SplatUndef;
  unsigned SplatBitSize;

  if ((!Subtarget.hasExtLSX() || !Is128Vec) &&
      (!Subtarget.hasExtLASX() || !Is256Vec))

  if (Node->isConstantSplat(SplatValue, SplatUndef, SplatBitSize, HasAnyUndefs,
      SplatBitSize <= 64) {
    if (SplatBitSize != 8 && SplatBitSize != 16 && SplatBitSize != 32 &&

    switch (SplatBitSize) {
      ViaVecTy = Is128Vec ? MVT::v16i8 : MVT::v32i8;
      ViaVecTy = Is128Vec ? MVT::v8i16 : MVT::v16i16;
      ViaVecTy = Is128Vec ? MVT::v4i32 : MVT::v8i32;
      ViaVecTy = Is128Vec ? MVT::v2i64 : MVT::v4i64;

    if (ViaVecTy != ResTy)

  EVT ResTy = Node->getValueType(0);

  for (unsigned i = 0; i < NumElts; ++i) {
                Node->getOperand(i),

LoongArchTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
  EVT VecTy = Op->getOperand(0)->getValueType(0);

  if (isa<ConstantSDNode>(Idx) &&
      (EltTy == MVT::i32 || EltTy == MVT::i64 || EltTy == MVT::f32 ||
       EltTy == MVT::f64 || Idx->getAsZExtVal() < NumElts / 2))

LoongArchTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
  if (isa<ConstantSDNode>(Op->getOperand(2)))

  if (Subtarget.is64Bit() && Op.getOperand(2).getValueType() == MVT::i32) {
            "On LA64, only 64-bit registers can be written.");
    return Op.getOperand(0);

  if (!Subtarget.is64Bit() && Op.getOperand(2).getValueType() == MVT::i64) {
            "On LA32, only 32-bit registers can be written.");
    return Op.getOperand(0);

  if (!isa<ConstantSDNode>(Op.getOperand(0))) {
            "be a constant integer");

  EVT VT = Op.getValueType();
  unsigned Depth = Op.getConstantOperandVal(0);
  int GRLenInBytes = Subtarget.getGRLen() / 8;
    int Offset = -(GRLenInBytes * 2);

  if (Op.getConstantOperandVal(0) != 0) {
        "return address can only be determined for the current frame");

  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();

         !Subtarget.hasBasicD() && "unexpected target features");

    auto *C = dyn_cast<ConstantSDNode>(Op0.getOperand(1));
    if (C && C->getZExtValue() < UINT64_C(0xFFFFFFFF))

      dyn_cast<VTSDNode>(Op0.getOperand(1))->getVT().bitsLT(MVT::i32))

  EVT RetVT = Op.getValueType();
  MakeLibCallOptions CallOptions;
  CallOptions.setTypeListBeforeSoften(OpVT, RetVT, true);
  std::tie(Result, Chain) =

         !Subtarget.hasBasicD() && "unexpected target features");

      dyn_cast<VTSDNode>(Op0.getOperand(1))->getVT().bitsLE(MVT::i32))

  EVT RetVT = Op.getValueType();
  MakeLibCallOptions CallOptions;
  CallOptions.setTypeListBeforeSoften(OpVT, RetVT, true);
  std::tie(Result, Chain) =

  if (Op.getValueType() == MVT::f32 && Op0.getValueType() == MVT::i32 &&
      Subtarget.is64Bit() && Subtarget.hasBasicF()) {

  if (Op.getValueSizeInBits() > 32 && Subtarget.hasBasicF() &&
      !Subtarget.hasBasicD()) {

                                  N->getOffset(), Flags);

template <class NodeTy>
                                         bool IsLocal) const {
    assert(Subtarget.is64Bit() && "Large code model requires LA64");

  return getAddr(cast<BlockAddressSDNode>(Op), DAG,
  return getAddr(cast<JumpTableSDNode>(Op), DAG,
  return getAddr(cast<ConstantPoolSDNode>(Op), DAG,

  assert(N->getOffset() == 0 && "unexpected offset in global node");

  if (GV->isDSOLocal() && isa<GlobalVariable>(GV)) {
    if (auto GCM = dyn_cast<GlobalVariable>(GV)->getCodeModel())

                                                  unsigned Opc, bool UseGOT,
  Args.push_back(Entry);

LoongArchTargetLowering::lowerGlobalTLSAddress(SDValue Op,
  assert((!Large || Subtarget.is64Bit()) && "Large code model requires LA64");

  assert(N->getOffset() == 0 && "unexpected offset in global node");

    return getDynamicTLSAddr(N, DAG,
                             Large ? LoongArch::PseudoLA_TLS_GD_LARGE
                                   : LoongArch::PseudoLA_TLS_GD,
    return getDynamicTLSAddr(N, DAG,
                             Large ? LoongArch::PseudoLA_TLS_LD_LARGE
                                   : LoongArch::PseudoLA_TLS_LD,
    return getStaticTLSAddr(N, DAG,
                            Large ? LoongArch::PseudoLA_TLS_IE_LARGE
                                  : LoongArch::PseudoLA_TLS_IE,
    return getStaticTLSAddr(N, DAG, LoongArch::PseudoLA_TLS_LE,
    return getTLSDescAddr(N, DAG,
                          Large ? LoongArch::PseudoLA_TLS_DESC_LARGE
                                : LoongArch::PseudoLA_TLS_DESC,

template <unsigned N>
  auto *CImm = cast<ConstantSDNode>(Op->getOperand(ImmOp));
  if ((IsSigned && !isInt<N>(CImm->getSExtValue())) ||
      (!IsSigned && !isUInt<N>(CImm->getZExtValue()))) {
                       ": argument out of range.");
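// lowerINTRINSIC_WO_CHAIN dispatches on the intrinsic ID; every LSX/LASX
// intrinsic carrying an immediate operand is funnelled through
// checkIntrinsicImmArg<N>(Op, ImmOp, DAG), which reports "argument out of
// range" when the constant does not fit in N signed or unsigned bits. For
// instance, loongarch_lsx_vsat_b takes a 3-bit immediate in operand 2, so it
// goes through checkIntrinsicImmArg<3>(Op, 2, DAG).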
LoongArchTargetLowering::lowerINTRINSIC_WO_CHAIN(SDValue Op,
  switch (Op.getConstantOperandVal(0)) {
  case Intrinsic::thread_pointer: {
  case Intrinsic::loongarch_lsx_vpickve2gr_d:
  case Intrinsic::loongarch_lsx_vpickve2gr_du:
  case Intrinsic::loongarch_lsx_vreplvei_d:
  case Intrinsic::loongarch_lasx_xvrepl128vei_d:
    return checkIntrinsicImmArg<1>(Op, 2, DAG);
  case Intrinsic::loongarch_lsx_vreplvei_w:
  case Intrinsic::loongarch_lasx_xvrepl128vei_w:
  case Intrinsic::loongarch_lasx_xvpickve2gr_d:
  case Intrinsic::loongarch_lasx_xvpickve2gr_du:
  case Intrinsic::loongarch_lasx_xvpickve_d:
  case Intrinsic::loongarch_lasx_xvpickve_d_f:
    return checkIntrinsicImmArg<2>(Op, 2, DAG);
  case Intrinsic::loongarch_lasx_xvinsve0_d:
    return checkIntrinsicImmArg<2>(Op, 3, DAG);
  case Intrinsic::loongarch_lsx_vsat_b:
  case Intrinsic::loongarch_lsx_vsat_bu:
  case Intrinsic::loongarch_lsx_vrotri_b:
  case Intrinsic::loongarch_lsx_vsllwil_h_b:
  case Intrinsic::loongarch_lsx_vsllwil_hu_bu:
  case Intrinsic::loongarch_lsx_vsrlri_b:
  case Intrinsic::loongarch_lsx_vsrari_b:
  case Intrinsic::loongarch_lsx_vreplvei_h:
  case Intrinsic::loongarch_lasx_xvsat_b:
  case Intrinsic::loongarch_lasx_xvsat_bu:
  case Intrinsic::loongarch_lasx_xvrotri_b:
  case Intrinsic::loongarch_lasx_xvsllwil_h_b:
  case Intrinsic::loongarch_lasx_xvsllwil_hu_bu:
  case Intrinsic::loongarch_lasx_xvsrlri_b:
  case Intrinsic::loongarch_lasx_xvsrari_b:
  case Intrinsic::loongarch_lasx_xvrepl128vei_h:
  case Intrinsic::loongarch_lasx_xvpickve_w:
  case Intrinsic::loongarch_lasx_xvpickve_w_f:
    return checkIntrinsicImmArg<3>(Op, 2, DAG);
  case Intrinsic::loongarch_lasx_xvinsve0_w:
    return checkIntrinsicImmArg<3>(Op, 3, DAG);
  case Intrinsic::loongarch_lsx_vsat_h:
  case Intrinsic::loongarch_lsx_vsat_hu:
  case Intrinsic::loongarch_lsx_vrotri_h:
  case Intrinsic::loongarch_lsx_vsllwil_w_h:
  case Intrinsic::loongarch_lsx_vsllwil_wu_hu:
  case Intrinsic::loongarch_lsx_vsrlri_h:
  case Intrinsic::loongarch_lsx_vsrari_h:
  case Intrinsic::loongarch_lsx_vreplvei_b:
  case Intrinsic::loongarch_lasx_xvsat_h:
  case Intrinsic::loongarch_lasx_xvsat_hu:
  case Intrinsic::loongarch_lasx_xvrotri_h:
  case Intrinsic::loongarch_lasx_xvsllwil_w_h:
  case Intrinsic::loongarch_lasx_xvsllwil_wu_hu:
  case Intrinsic::loongarch_lasx_xvsrlri_h:
  case Intrinsic::loongarch_lasx_xvsrari_h:
  case Intrinsic::loongarch_lasx_xvrepl128vei_b:
    return checkIntrinsicImmArg<4>(Op, 2, DAG);
  case Intrinsic::loongarch_lsx_vsrlni_b_h:
  case Intrinsic::loongarch_lsx_vsrani_b_h:
  case Intrinsic::loongarch_lsx_vsrlrni_b_h:
  case Intrinsic::loongarch_lsx_vsrarni_b_h:
  case Intrinsic::loongarch_lsx_vssrlni_b_h:
  case Intrinsic::loongarch_lsx_vssrani_b_h:
  case Intrinsic::loongarch_lsx_vssrlni_bu_h:
  case Intrinsic::loongarch_lsx_vssrani_bu_h:
  case Intrinsic::loongarch_lsx_vssrlrni_b_h:
  case Intrinsic::loongarch_lsx_vssrarni_b_h:
  case Intrinsic::loongarch_lsx_vssrlrni_bu_h:
  case Intrinsic::loongarch_lsx_vssrarni_bu_h:
  case Intrinsic::loongarch_lasx_xvsrlni_b_h:
  case Intrinsic::loongarch_lasx_xvsrani_b_h:
  case Intrinsic::loongarch_lasx_xvsrlrni_b_h:
  case Intrinsic::loongarch_lasx_xvsrarni_b_h:
  case Intrinsic::loongarch_lasx_xvssrlni_b_h:
  case Intrinsic::loongarch_lasx_xvssrani_b_h:
  case Intrinsic::loongarch_lasx_xvssrlni_bu_h:
  case Intrinsic::loongarch_lasx_xvssrani_bu_h:
  case Intrinsic::loongarch_lasx_xvssrlrni_b_h:
  case Intrinsic::loongarch_lasx_xvssrarni_b_h:
  case Intrinsic::loongarch_lasx_xvssrlrni_bu_h:
  case Intrinsic::loongarch_lasx_xvssrarni_bu_h:
    return checkIntrinsicImmArg<4>(Op, 3, DAG);
  case Intrinsic::loongarch_lsx_vsat_w:
  case Intrinsic::loongarch_lsx_vsat_wu:
  case Intrinsic::loongarch_lsx_vrotri_w:
  case Intrinsic::loongarch_lsx_vsllwil_d_w:
  case Intrinsic::loongarch_lsx_vsllwil_du_wu:
  case Intrinsic::loongarch_lsx_vsrlri_w:
  case Intrinsic::loongarch_lsx_vsrari_w:
  case Intrinsic::loongarch_lsx_vslei_bu:
  case Intrinsic::loongarch_lsx_vslei_hu:
  case Intrinsic::loongarch_lsx_vslei_wu:
  case Intrinsic::loongarch_lsx_vslei_du:
  case Intrinsic::loongarch_lsx_vslti_bu:
  case Intrinsic::loongarch_lsx_vslti_hu:
  case Intrinsic::loongarch_lsx_vslti_wu:
  case Intrinsic::loongarch_lsx_vslti_du:
  case Intrinsic::loongarch_lsx_vbsll_v:
  case Intrinsic::loongarch_lsx_vbsrl_v:
  case Intrinsic::loongarch_lasx_xvsat_w:
  case Intrinsic::loongarch_lasx_xvsat_wu:
  case Intrinsic::loongarch_lasx_xvrotri_w:
  case Intrinsic::loongarch_lasx_xvsllwil_d_w:
  case Intrinsic::loongarch_lasx_xvsllwil_du_wu:
  case Intrinsic::loongarch_lasx_xvsrlri_w:
  case Intrinsic::loongarch_lasx_xvsrari_w:
  case Intrinsic::loongarch_lasx_xvslei_bu:
  case Intrinsic::loongarch_lasx_xvslei_hu:
  case Intrinsic::loongarch_lasx_xvslei_wu:
  case Intrinsic::loongarch_lasx_xvslei_du:
  case Intrinsic::loongarch_lasx_xvslti_bu:
  case Intrinsic::loongarch_lasx_xvslti_hu:
  case Intrinsic::loongarch_lasx_xvslti_wu:
  case Intrinsic::loongarch_lasx_xvslti_du:
  case Intrinsic::loongarch_lasx_xvbsll_v:
  case Intrinsic::loongarch_lasx_xvbsrl_v:
    return checkIntrinsicImmArg<5>(Op, 2, DAG);
  case Intrinsic::loongarch_lsx_vseqi_b:
  case Intrinsic::loongarch_lsx_vseqi_h:
  case Intrinsic::loongarch_lsx_vseqi_w:
  case Intrinsic::loongarch_lsx_vseqi_d:
  case Intrinsic::loongarch_lsx_vslei_b:
  case Intrinsic::loongarch_lsx_vslei_h:
  case Intrinsic::loongarch_lsx_vslei_w:
  case Intrinsic::loongarch_lsx_vslei_d:
  case Intrinsic::loongarch_lsx_vslti_b:
  case Intrinsic::loongarch_lsx_vslti_h:
  case Intrinsic::loongarch_lsx_vslti_w:
  case Intrinsic::loongarch_lsx_vslti_d:
  case Intrinsic::loongarch_lasx_xvseqi_b:
  case Intrinsic::loongarch_lasx_xvseqi_h:
  case Intrinsic::loongarch_lasx_xvseqi_w:
  case Intrinsic::loongarch_lasx_xvseqi_d:
  case Intrinsic::loongarch_lasx_xvslei_b:
  case Intrinsic::loongarch_lasx_xvslei_h:
  case Intrinsic::loongarch_lasx_xvslei_w:
  case Intrinsic::loongarch_lasx_xvslei_d:
  case Intrinsic::loongarch_lasx_xvslti_b:
  case Intrinsic::loongarch_lasx_xvslti_h:
  case Intrinsic::loongarch_lasx_xvslti_w:
  case Intrinsic::loongarch_lasx_xvslti_d:
    return checkIntrinsicImmArg<5>(Op, 2, DAG, true);
  case Intrinsic::loongarch_lsx_vsrlni_h_w:
  case Intrinsic::loongarch_lsx_vsrani_h_w:
  case Intrinsic::loongarch_lsx_vsrlrni_h_w:
  case Intrinsic::loongarch_lsx_vsrarni_h_w:
  case Intrinsic::loongarch_lsx_vssrlni_h_w:
  case Intrinsic::loongarch_lsx_vssrani_h_w:
  case Intrinsic::loongarch_lsx_vssrlni_hu_w:
  case Intrinsic::loongarch_lsx_vssrani_hu_w:
  case Intrinsic::loongarch_lsx_vssrlrni_h_w:
  case Intrinsic::loongarch_lsx_vssrarni_h_w:
  case Intrinsic::loongarch_lsx_vssrlrni_hu_w:
  case Intrinsic::loongarch_lsx_vssrarni_hu_w:
  case Intrinsic::loongarch_lsx_vfrstpi_b:
  case Intrinsic::loongarch_lsx_vfrstpi_h:
  case Intrinsic::loongarch_lasx_xvsrlni_h_w:
  case Intrinsic::loongarch_lasx_xvsrani_h_w:
  case Intrinsic::loongarch_lasx_xvsrlrni_h_w:
  case Intrinsic::loongarch_lasx_xvsrarni_h_w:
  case Intrinsic::loongarch_lasx_xvssrlni_h_w:
  case Intrinsic::loongarch_lasx_xvssrani_h_w:
  case Intrinsic::loongarch_lasx_xvssrlni_hu_w:
  case Intrinsic::loongarch_lasx_xvssrani_hu_w:
  case Intrinsic::loongarch_lasx_xvssrlrni_h_w:
  case Intrinsic::loongarch_lasx_xvssrarni_h_w:
  case Intrinsic::loongarch_lasx_xvssrlrni_hu_w:
  case Intrinsic::loongarch_lasx_xvssrarni_hu_w:
  case Intrinsic::loongarch_lasx_xvfrstpi_b:
  case Intrinsic::loongarch_lasx_xvfrstpi_h:
    return checkIntrinsicImmArg<5>(Op, 3, DAG);
  case Intrinsic::loongarch_lsx_vsat_d:
  case Intrinsic::loongarch_lsx_vsat_du:
  case Intrinsic::loongarch_lsx_vrotri_d:
  case Intrinsic::loongarch_lsx_vsrlri_d:
  case Intrinsic::loongarch_lsx_vsrari_d:
  case Intrinsic::loongarch_lasx_xvsat_d:
  case Intrinsic::loongarch_lasx_xvsat_du:
  case Intrinsic::loongarch_lasx_xvrotri_d:
  case Intrinsic::loongarch_lasx_xvsrlri_d:
  case Intrinsic::loongarch_lasx_xvsrari_d:
    return checkIntrinsicImmArg<6>(Op, 2, DAG);
  case Intrinsic::loongarch_lsx_vsrlni_w_d:
  case Intrinsic::loongarch_lsx_vsrani_w_d:
  case Intrinsic::loongarch_lsx_vsrlrni_w_d:
  case Intrinsic::loongarch_lsx_vsrarni_w_d:
  case Intrinsic::loongarch_lsx_vssrlni_w_d:
  case Intrinsic::loongarch_lsx_vssrani_w_d:
  case Intrinsic::loongarch_lsx_vssrlni_wu_d:
  case Intrinsic::loongarch_lsx_vssrani_wu_d:
  case Intrinsic::loongarch_lsx_vssrlrni_w_d:
  case Intrinsic::loongarch_lsx_vssrarni_w_d:
  case Intrinsic::loongarch_lsx_vssrlrni_wu_d:
  case Intrinsic::loongarch_lsx_vssrarni_wu_d:
  case Intrinsic::loongarch_lasx_xvsrlni_w_d:
  case Intrinsic::loongarch_lasx_xvsrani_w_d:
  case Intrinsic::loongarch_lasx_xvsrlrni_w_d:
  case Intrinsic::loongarch_lasx_xvsrarni_w_d:
  case Intrinsic::loongarch_lasx_xvssrlni_w_d:
  case Intrinsic::loongarch_lasx_xvssrani_w_d:
  case Intrinsic::loongarch_lasx_xvssrlni_wu_d:
  case Intrinsic::loongarch_lasx_xvssrani_wu_d:
  case Intrinsic::loongarch_lasx_xvssrlrni_w_d:
  case Intrinsic::loongarch_lasx_xvssrarni_w_d:
  case Intrinsic::loongarch_lasx_xvssrlrni_wu_d:
  case Intrinsic::loongarch_lasx_xvssrarni_wu_d:
    return checkIntrinsicImmArg<6>(Op, 3, DAG);
  case Intrinsic::loongarch_lsx_vsrlni_d_q:
  case Intrinsic::loongarch_lsx_vsrani_d_q:
  case Intrinsic::loongarch_lsx_vsrlrni_d_q:
  case Intrinsic::loongarch_lsx_vsrarni_d_q:
  case Intrinsic::loongarch_lsx_vssrlni_d_q:
  case Intrinsic::loongarch_lsx_vssrani_d_q:
  case Intrinsic::loongarch_lsx_vssrlni_du_q:
  case Intrinsic::loongarch_lsx_vssrani_du_q:
  case Intrinsic::loongarch_lsx_vssrlrni_d_q:
  case Intrinsic::loongarch_lsx_vssrarni_d_q:
  case Intrinsic::loongarch_lsx_vssrlrni_du_q:
  case Intrinsic::loongarch_lsx_vssrarni_du_q:
  case Intrinsic::loongarch_lasx_xvsrlni_d_q:
  case Intrinsic::loongarch_lasx_xvsrani_d_q:
  case Intrinsic::loongarch_lasx_xvsrlrni_d_q:
  case Intrinsic::loongarch_lasx_xvsrarni_d_q:
  case Intrinsic::loongarch_lasx_xvssrlni_d_q:
  case Intrinsic::loongarch_lasx_xvssrani_d_q:
  case Intrinsic::loongarch_lasx_xvssrlni_du_q:
  case Intrinsic::loongarch_lasx_xvssrani_du_q:
  case Intrinsic::loongarch_lasx_xvssrlrni_d_q:
  case Intrinsic::loongarch_lasx_xvssrarni_d_q:
  case Intrinsic::loongarch_lasx_xvssrlrni_du_q:
  case Intrinsic::loongarch_lasx_xvssrarni_du_q:
    return checkIntrinsicImmArg<7>(Op, 3, DAG);
  case Intrinsic::loongarch_lsx_vnori_b:
  case Intrinsic::loongarch_lsx_vshuf4i_b:
  case Intrinsic::loongarch_lsx_vshuf4i_h:
  case Intrinsic::loongarch_lsx_vshuf4i_w:
  case Intrinsic::loongarch_lasx_xvnori_b:
  case Intrinsic::loongarch_lasx_xvshuf4i_b:
  case Intrinsic::loongarch_lasx_xvshuf4i_h:
  case Intrinsic::loongarch_lasx_xvshuf4i_w:
  case Intrinsic::loongarch_lasx_xvpermi_d:
    return checkIntrinsicImmArg<8>(Op, 2, DAG);
  case Intrinsic::loongarch_lsx_vshuf4i_d:
  case Intrinsic::loongarch_lsx_vpermi_w:
  case Intrinsic::loongarch_lsx_vbitseli_b:
  case Intrinsic::loongarch_lsx_vextrins_b:
  case Intrinsic::loongarch_lsx_vextrins_h:
  case Intrinsic::loongarch_lsx_vextrins_w:
  case Intrinsic::loongarch_lsx_vextrins_d:
  case Intrinsic::loongarch_lasx_xvshuf4i_d:
  case Intrinsic::loongarch_lasx_xvpermi_w:
  case Intrinsic::loongarch_lasx_xvpermi_q:
  case Intrinsic::loongarch_lasx_xvbitseli_b:
  case Intrinsic::loongarch_lasx_xvextrins_b:
  case Intrinsic::loongarch_lasx_xvextrins_h:
  case Intrinsic::loongarch_lasx_xvextrins_w:
  case Intrinsic::loongarch_lasx_xvextrins_d:
    return checkIntrinsicImmArg<8>(Op, 3, DAG);
  case Intrinsic::loongarch_lsx_vrepli_b:
  case Intrinsic::loongarch_lsx_vrepli_h:
  case Intrinsic::loongarch_lsx_vrepli_w:
  case Intrinsic::loongarch_lsx_vrepli_d:
  case Intrinsic::loongarch_lasx_xvrepli_b:
  case Intrinsic::loongarch_lasx_xvrepli_h:
  case Intrinsic::loongarch_lasx_xvrepli_w:
  case Intrinsic::loongarch_lasx_xvrepli_d:
    return checkIntrinsicImmArg<10>(Op, 1, DAG, true);
  case Intrinsic::loongarch_lsx_vldi:
  case Intrinsic::loongarch_lasx_xvldi:
    return checkIntrinsicImmArg<13>(Op, 1, DAG, true);
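// The chained intrinsics (lowerINTRINSIC_W_CHAIN / lowerINTRINSIC_VOID below)
// apply the same style of range checking to CSR, IOCSR, barrier and vector
// load/store immediates, and additionally emit feature diagnostics through
// ErrorMsgOOR, ErrorMsgReqLA64, ErrorMsgReqLA32 and ErrorMsgReqF.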
LoongArchTargetLowering::lowerINTRINSIC_W_CHAIN(SDValue Op,
  EVT VT = Op.getValueType();

  const StringRef ErrorMsgOOR = "argument out of range";
  const StringRef ErrorMsgReqLA64 = "requires loongarch64";
  const StringRef ErrorMsgReqF = "requires basic 'f' target feature";

  switch (Op.getConstantOperandVal(1)) {
  case Intrinsic::loongarch_crc_w_b_w:
  case Intrinsic::loongarch_crc_w_h_w:
  case Intrinsic::loongarch_crc_w_w_w:
  case Intrinsic::loongarch_crc_w_d_w:
  case Intrinsic::loongarch_crcc_w_b_w:
  case Intrinsic::loongarch_crcc_w_h_w:
  case Intrinsic::loongarch_crcc_w_w_w:
  case Intrinsic::loongarch_crcc_w_d_w:
  case Intrinsic::loongarch_csrrd_w:
  case Intrinsic::loongarch_csrrd_d: {
    unsigned Imm = Op.getConstantOperandVal(2);
    return !isUInt<14>(Imm)
  case Intrinsic::loongarch_csrwr_w:
  case Intrinsic::loongarch_csrwr_d: {
    unsigned Imm = Op.getConstantOperandVal(3);
    return !isUInt<14>(Imm)
                         {Chain, Op.getOperand(2),
  case Intrinsic::loongarch_csrxchg_w:
  case Intrinsic::loongarch_csrxchg_d: {
    unsigned Imm = Op.getConstantOperandVal(4);
    return !isUInt<14>(Imm)
                         {Chain, Op.getOperand(2), Op.getOperand(3),
  case Intrinsic::loongarch_iocsrrd_d: {
#define IOCSRRD_CASE(NAME, NODE) \
  case Intrinsic::loongarch_##NAME: { \
    return DAG.getNode(LoongArchISD::NODE, DL, {GRLenVT, MVT::Other}, \
                       {Chain, Op.getOperand(2)}); \
  case Intrinsic::loongarch_cpucfg: {
                       {Chain, Op.getOperand(2)});
  case Intrinsic::loongarch_lddir_d: {
    unsigned Imm = Op.getConstantOperandVal(3);
    return !isUInt<8>(Imm)
  case Intrinsic::loongarch_movfcsr2gr: {
    if (!Subtarget.hasBasicF())
    unsigned Imm = Op.getConstantOperandVal(2);
    return !isUInt<2>(Imm)
  case Intrinsic::loongarch_lsx_vld:
  case Intrinsic::loongarch_lsx_vldrepl_b:
  case Intrinsic::loongarch_lasx_xvld:
  case Intrinsic::loongarch_lasx_xvldrepl_b:
    return !isInt<12>(cast<ConstantSDNode>(Op.getOperand(3))->getSExtValue())
  case Intrinsic::loongarch_lsx_vldrepl_h:
  case Intrinsic::loongarch_lasx_xvldrepl_h:
    return !isShiftedInt<11, 1>(
               cast<ConstantSDNode>(Op.getOperand(3))->getSExtValue())
                     Op, "argument out of range or not a multiple of 2", DAG)
  case Intrinsic::loongarch_lsx_vldrepl_w:
  case Intrinsic::loongarch_lasx_xvldrepl_w:
    return !isShiftedInt<10, 2>(
               cast<ConstantSDNode>(Op.getOperand(3))->getSExtValue())
                     Op, "argument out of range or not a multiple of 4", DAG)
  case Intrinsic::loongarch_lsx_vldrepl_d:
  case Intrinsic::loongarch_lasx_xvldrepl_d:
    return !isShiftedInt<9, 3>(
               cast<ConstantSDNode>(Op.getOperand(3))->getSExtValue())
                     Op, "argument out of range or not a multiple of 8", DAG)

    return Op.getOperand(0);

  uint64_t IntrinsicEnum = Op.getConstantOperandVal(1);

  const StringRef ErrorMsgOOR = "argument out of range";
  const StringRef ErrorMsgReqLA64 = "requires loongarch64";
  const StringRef ErrorMsgReqLA32 = "requires loongarch32";
  const StringRef ErrorMsgReqF = "requires basic 'f' target feature";

  switch (IntrinsicEnum) {
  case Intrinsic::loongarch_cacop_d:
  case Intrinsic::loongarch_cacop_w: {
    if (IntrinsicEnum == Intrinsic::loongarch_cacop_d && !Subtarget.is64Bit())
    if (IntrinsicEnum == Intrinsic::loongarch_cacop_w && Subtarget.is64Bit())
    int Imm2 = cast<ConstantSDNode>(Op.getOperand(4))->getSExtValue();
    if (!isUInt<5>(Imm1) || !isInt<12>(Imm2))
  case Intrinsic::loongarch_dbar: {
    return !isUInt<15>(Imm)
  case Intrinsic::loongarch_ibar: {
    return !isUInt<15>(Imm)
  case Intrinsic::loongarch_break: {
    return !isUInt<15>(Imm)
  case Intrinsic::loongarch_movgr2fcsr: {
    if (!Subtarget.hasBasicF())
    return !isUInt<2>(Imm)
  case Intrinsic::loongarch_syscall: {
    return !isUInt<15>(Imm)
#define IOCSRWR_CASE(NAME, NODE) \
  case Intrinsic::loongarch_##NAME: { \
    SDValue Op3 = Op.getOperand(3); \
    return Subtarget.is64Bit() \
               ? DAG.getNode(LoongArchISD::NODE, DL, MVT::Other, Chain, \
                             DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op2), \
                             DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op3)) \
               : DAG.getNode(LoongArchISD::NODE, DL, MVT::Other, Chain, Op2, \
  case Intrinsic::loongarch_iocsrwr_d: {
#define ASRT_LE_GT_CASE(NAME) \
  case Intrinsic::loongarch_##NAME: { \
    return !Subtarget.is64Bit() \
               ? emitIntrinsicErrorMessage(Op, ErrorMsgReqLA64, DAG) \
#undef ASRT_LE_GT_CASE
  case Intrinsic::loongarch_ldpte_d: {
    unsigned Imm = Op.getConstantOperandVal(3);
  case Intrinsic::loongarch_lsx_vst:
  case Intrinsic::loongarch_lasx_xvst:
    return !isInt<12>(cast<ConstantSDNode>(Op.getOperand(4))->getSExtValue())
  case Intrinsic::loongarch_lasx_xvstelm_b:
    return (!isInt<8>(cast<ConstantSDNode>(Op.getOperand(4))->getSExtValue()) ||
            !isUInt<5>(Op.getConstantOperandVal(5)))
  case Intrinsic::loongarch_lsx_vstelm_b:
    return (!isInt<8>(cast<ConstantSDNode>(Op.getOperand(4))->getSExtValue()) ||
            !isUInt<4>(Op.getConstantOperandVal(5)))
  case Intrinsic::loongarch_lasx_xvstelm_h:
    return (!isShiftedInt<8, 1>(
                cast<ConstantSDNode>(Op.getOperand(4))->getSExtValue()) ||
            !isUInt<4>(Op.getConstantOperandVal(5)))
                     Op, "argument out of range or not a multiple of 2", DAG)
  case Intrinsic::loongarch_lsx_vstelm_h:
    return (!isShiftedInt<8, 1>(
                cast<ConstantSDNode>(Op.getOperand(4))->getSExtValue()) ||
            !isUInt<3>(Op.getConstantOperandVal(5)))
                     Op, "argument out of range or not a multiple of 2", DAG)
  case Intrinsic::loongarch_lasx_xvstelm_w:
    return (!isShiftedInt<8, 2>(
                cast<ConstantSDNode>(Op.getOperand(4))->getSExtValue()) ||
            !isUInt<3>(Op.getConstantOperandVal(5)))
                     Op, "argument out of range or not a multiple of 4", DAG)
  case Intrinsic::loongarch_lsx_vstelm_w:
    return (!isShiftedInt<8, 2>(
                cast<ConstantSDNode>(Op.getOperand(4))->getSExtValue()) ||
            !isUInt<2>(Op.getConstantOperandVal(5)))
                     Op, "argument out of range or not a multiple of 4", DAG)
  case Intrinsic::loongarch_lasx_xvstelm_d:
    return (!isShiftedInt<8, 3>(
                cast<ConstantSDNode>(Op.getOperand(4))->getSExtValue()) ||
            !isUInt<2>(Op.getConstantOperandVal(5)))
                     Op, "argument out of range or not a multiple of 8", DAG)
  case Intrinsic::loongarch_lsx_vstelm_d:
    return (!isShiftedInt<8, 3>(
                cast<ConstantSDNode>(Op.getOperand(4))->getSExtValue()) ||
            !isUInt<1>(Op.getConstantOperandVal(5)))
                     Op, "argument out of range or not a multiple of 8", DAG)

  EVT VT = Lo.getValueType();
  EVT VT = Lo.getValueType();

    NewOp0 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(0));
    NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0);
    NewOp0 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(0));
    NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1);

                                                StringRef ErrorMsg,
                                                bool WithChain = true) {
    Results.push_back(N->getOperand(0));
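// replaceVPICKVE2GRResults<N> (used from the intrinsic result-replacement
// switch below) first checks that the element-index immediate in operand 2
// fits in N bits, reporting "argument out of range" otherwise; vpickve2gr_b
// gets N=4, vpickve2gr_h/xvpickve2gr_w get N=3, and vpickve2gr_w gets N=2.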
template <unsigned N>
  const StringRef ErrorMsgOOR = "argument out of range";
  unsigned Imm = Node->getConstantOperandVal(2);
  if (!isUInt<N>(Imm)) {

  switch (N->getConstantOperandVal(0)) {
  case Intrinsic::loongarch_lsx_vpickve2gr_b:
    replaceVPICKVE2GRResults<4>(N, Results, DAG, Subtarget,
  case Intrinsic::loongarch_lsx_vpickve2gr_h:
  case Intrinsic::loongarch_lasx_xvpickve2gr_w:
    replaceVPICKVE2GRResults<3>(N, Results, DAG, Subtarget,
  case Intrinsic::loongarch_lsx_vpickve2gr_w:
    replaceVPICKVE2GRResults<2>(N, Results, DAG, Subtarget,
  case Intrinsic::loongarch_lsx_vpickve2gr_bu:
    replaceVPICKVE2GRResults<4>(N, Results, DAG, Subtarget,
  case Intrinsic::loongarch_lsx_vpickve2gr_hu:
  case Intrinsic::loongarch_lasx_xvpickve2gr_wu:
    replaceVPICKVE2GRResults<3>(N, Results, DAG, Subtarget,
  case Intrinsic::loongarch_lsx_vpickve2gr_wu:
    replaceVPICKVE2GRResults<2>(N, Results, DAG, Subtarget,
  case Intrinsic::loongarch_lsx_bz_b:
  case Intrinsic::loongarch_lsx_bz_h:
  case Intrinsic::loongarch_lsx_bz_w:
  case Intrinsic::loongarch_lsx_bz_d:
  case Intrinsic::loongarch_lasx_xbz_b:
  case Intrinsic::loongarch_lasx_xbz_h:
  case Intrinsic::loongarch_lasx_xbz_w:
  case Intrinsic::loongarch_lasx_xbz_d:
  case Intrinsic::loongarch_lsx_bz_v:
  case Intrinsic::loongarch_lasx_xbz_v:
  case Intrinsic::loongarch_lsx_bnz_b:
  case Intrinsic::loongarch_lsx_bnz_h:
  case Intrinsic::loongarch_lsx_bnz_w:
  case Intrinsic::loongarch_lsx_bnz_d:
  case Intrinsic::loongarch_lasx_xbnz_b:
  case Intrinsic::loongarch_lasx_xbnz_h:
  case Intrinsic::loongarch_lasx_xbnz_w:
  case Intrinsic::loongarch_lasx_xbnz_d:
  case Intrinsic::loongarch_lsx_bnz_v:
  case Intrinsic::loongarch_lasx_xbnz_v:

  EVT VT = N->getValueType(0);
  switch (N->getOpcode()) {
           "Unexpected custom legalisation");
           "Unexpected custom legalisation");
        Subtarget.hasDiv32() && VT == MVT::i32
           "Unexpected custom legalisation");
           "Unexpected custom legalisation");
           "Unexpected custom legalisation");
    if (Src.getValueType() == MVT::f16)
    EVT OpVT = Src.getValueType();
    std::tie(Result, Chain) =
    EVT SrcVT = Src.getValueType();
    if (VT == MVT::i32 && SrcVT == MVT::f32 && Subtarget.is64Bit() &&
        Subtarget.hasBasicF()) {
           "Unexpected custom legalisation");
    TLI.expandFP_TO_UINT(N, Tmp1, Tmp2, DAG);
    assert((VT == MVT::i16 || VT == MVT::i32) &&
           "Unexpected custom legalization");
    assert((VT == MVT::i8 || (VT == MVT::i32 && Subtarget.is64Bit())) &&
           "Unexpected custom legalization");
           "Unexpected custom legalisation");

    const StringRef ErrorMsgOOR = "argument out of range";
    const StringRef ErrorMsgReqLA64 = "requires loongarch64";
    const StringRef ErrorMsgReqF = "requires basic 'f' target feature";

    switch (N->getConstantOperandVal(1)) {
    case Intrinsic::loongarch_movfcsr2gr: {
      if (!Subtarget.hasBasicF()) {
      if (!isUInt<2>(Imm)) {
#define CRC_CASE_EXT_BINARYOP(NAME, NODE) \
  case Intrinsic::loongarch_##NAME: { \
    SDValue NODE = DAG.getNode( \
        LoongArchISD::NODE, DL, {MVT::i64, MVT::Other}, \
        {Chain, DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op2), \
         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(3))}); \
    Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, NODE.getValue(0))); \
    Results.push_back(NODE.getValue(1)); \
#undef CRC_CASE_EXT_BINARYOP
#define CRC_CASE_EXT_UNARYOP(NAME, NODE) \
  case Intrinsic::loongarch_##NAME: { \
    SDValue NODE = DAG.getNode( \
        LoongArchISD::NODE, DL, {MVT::i64, MVT::Other}, \
        DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(3))}); \
    Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, NODE.getValue(0))); \
    Results.push_back(NODE.getValue(1)); \
#undef CRC_CASE_EXT_UNARYOP
#define CSR_CASE(ID) \
  case Intrinsic::loongarch_##ID: { \
    if (!Subtarget.is64Bit()) \
      emitErrorAndReplaceIntrinsicResults(N, Results, DAG, ErrorMsgReqLA64); \
    case Intrinsic::loongarch_csrrd_w: {
      if (!isUInt<14>(Imm)) {
    case Intrinsic::loongarch_csrwr_w: {
      unsigned Imm = N->getConstantOperandVal(3);
      if (!isUInt<14>(Imm)) {
    case Intrinsic::loongarch_csrxchg_w: {
      unsigned Imm = N->getConstantOperandVal(4);
      if (!isUInt<14>(Imm)) {
#define IOCSRRD_CASE(NAME, NODE) \
  case Intrinsic::loongarch_##NAME: { \
    SDValue IOCSRRDResults = \
        DAG.getNode(LoongArchISD::NODE, DL, {MVT::i64, MVT::Other}, \
                    {Chain, DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op2)}); \
    Results.push_back( \
        DAG.getNode(ISD::TRUNCATE, DL, VT, IOCSRRDResults.getValue(0))); \
    Results.push_back(IOCSRRDResults.getValue(1)); \
    case Intrinsic::loongarch_cpucfg: {
    case Intrinsic::loongarch_lddir_d: {
           "On LA64, only 64-bit registers can be read.");
           "On LA32, only 32-bit registers can be read.");
    Results.push_back(N->getOperand(0));

        OpVT == MVT::f64 ? RTLIB::LROUND_F64 : RTLIB::LROUND_F32;

  SDValue FirstOperand = N->getOperand(0);
  SDValue SecondOperand = N->getOperand(1);
  unsigned FirstOperandOpc = FirstOperand.getOpcode();
  EVT ValTy = N->getValueType(0);
  unsigned SMIdx, SMLen;

  if (!(CN = dyn_cast<ConstantSDNode>(SecondOperand)) ||
  if (!(CN = dyn_cast<ConstantSDNode>(FirstOperand.getOperand(1))))
    NewOperand = FirstOperand;
  msb = lsb + SMLen - 1;
  if (FirstOperandOpc == ISD::SRA || FirstOperandOpc == ISD::SRL || lsb == 0)
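// The DAG combines in this region pattern-match AND/OR/SHL trees over a
// contiguous bit mask (SMIdx/SMLen above, MaskIdx/MaskLen below) so that,
// when the mask and shift amounts line up, the node can be rewritten as a
// single bit-field extract/insert style operation; SwapAndRetried lets the
// OR combine retry with the operands swapped.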
  SDValue FirstOperand = N->getOperand(0);
  EVT ValTy = N->getValueType(0);
  unsigned MaskIdx, MaskLen;

      !(CN = dyn_cast<ConstantSDNode>(FirstOperand.getOperand(1))) ||
  if (!(CN = dyn_cast<ConstantSDNode>(N->getOperand(1))))
  if (MaskIdx <= Shamt && Shamt <= MaskIdx + MaskLen - 1)

  EVT ValTy = N->getValueType(0);
  SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);
  unsigned MaskIdx0, MaskLen0, MaskIdx1, MaskLen1;
  bool SwapAndRetried = false;

  if (ValBits != 32 && ValBits != 64)

      (CN0 = dyn_cast<ConstantSDNode>(N0.getOperand(1))) &&
      (CN1 = dyn_cast<ConstantSDNode>(N1.getOperand(1))) &&
      MaskIdx0 == MaskIdx1 && MaskLen0 == MaskLen1 &&
      (CN1 = dyn_cast<ConstantSDNode>(N1.getOperand(0).getOperand(1))) &&
      (MaskIdx0 + MaskLen0 <= ValBits)) {

      (CN0 = dyn_cast<ConstantSDNode>(N0.getOperand(1))) &&
      (CN1 = dyn_cast<ConstantSDNode>(N1.getOperand(1))) &&
      (CN1 = dyn_cast<ConstantSDNode>(N1.getOperand(0).getOperand(1))) &&
      MaskLen0 == MaskLen1 && MaskIdx1 == 0 &&
      (MaskIdx0 + MaskLen0 <= ValBits)) {

      (CN0 = dyn_cast<ConstantSDNode>(N0.getOperand(1))) &&
      (MaskIdx0 + MaskLen0 <= 64) &&
      (CN1 = dyn_cast<ConstantSDNode>(N1->getOperand(1))) &&
                        ? (MaskIdx0 + (MaskLen0 & 31) - 1)
                        : (MaskIdx0 + MaskLen0 - 1),

      (CN0 = dyn_cast<ConstantSDNode>(N0.getOperand(1))) &&
      MaskIdx0 == 0 && (CN1 = dyn_cast<ConstantSDNode>(N1.getOperand(1))) &&
      (MaskIdx0 + MaskLen0 <= ValBits)) {

      (CN0 = dyn_cast<ConstantSDNode>(N0.getOperand(1))) &&
      (CN1 = dyn_cast<ConstantSDNode>(N1)) &&
        DAG.getConstant(ValBits == 32 ? (MaskIdx0 + (MaskLen0 & 31) - 1)
                                      : (MaskIdx0 + MaskLen0 - 1),

  unsigned MaskIdx, MaskLen;
  if (N1.getOpcode() == ISD::SHL && N1.getOperand(0).getOpcode() == ISD::AND &&
      (CNMask = dyn_cast<ConstantSDNode>(N1.getOperand(0).getOperand(1))) &&
      MaskIdx == 0 && (CNShamt = dyn_cast<ConstantSDNode>(N1.getOperand(1))) &&
      (CNMask = dyn_cast<ConstantSDNode>(N1.getOperand(1))) &&
      N1.getOperand(0).getOpcode() == ISD::SHL &&
      (CNShamt = dyn_cast<ConstantSDNode>(N1.getOperand(0).getOperand(1))) &&

  if (!SwapAndRetried) {
    SwapAndRetried = true;
  SwapAndRetried = false;

      (CNMask = dyn_cast<ConstantSDNode>(N1.getOperand(1))) &&
  if (!SwapAndRetried) {
    SwapAndRetried = true;

  switch (V.getNode()->getOpcode()) {
    LoadSDNode *LoadNode = cast<LoadSDNode>(V.getNode());
    VTSDNode *TypeNode = cast<VTSDNode>(V.getNode()->getOperand(1));
    if ((TypeNode->getVT() == MVT::i8) || (TypeNode->getVT() == MVT::i16)) {
    VTSDNode *TypeNode = cast<VTSDNode>(V.getNode()->getOperand(1));
    if ((TypeNode->getVT() == MVT::i8) || (TypeNode->getVT() == MVT::i16)) {

  SDNode *AndNode = N->getOperand(0).getNode();
  SDValue CmpInputValue = N->getOperand(1);
    CN = dyn_cast<ConstantSDNode>(CmpInputValue);
    AndInputValue1 = AndInputValue1.getOperand(0);
  if (AndInputValue2 != CmpInputValue)
                              TruncInputValue1, TruncInputValue2);

template <unsigned N>
                                   bool IsSigned = false) {
  auto *CImm = cast<ConstantSDNode>(Node->getOperand(ImmOp));
  if ((IsSigned && !isInt<N>(CImm->getSExtValue())) ||
      (!IsSigned && !isUInt<N>(CImm->getZExtValue()))) {
                       ": argument out of range.");

template <unsigned N>
  EVT ResTy = Node->getValueType(0);
  auto *CImm = cast<ConstantSDNode>(Node->getOperand(ImmOp));
  if ((IsSigned && !isInt<N>(CImm->getSExtValue())) ||
      (!IsSigned && !isUInt<N>(CImm->getZExtValue()))) {
                       ": argument out of range.");
          IsSigned ? CImm->getSExtValue() : CImm->getZExtValue(), IsSigned),

  EVT ResTy = Node->getValueType(0);

  EVT ResTy = Node->getValueType(0);

template <unsigned N>
  EVT ResTy = Node->getValueType(0);
  auto *CImm = cast<ConstantSDNode>(Node->getOperand(2));
  if (!isUInt<N>(CImm->getZExtValue())) {
                       ": argument out of range.");

template <unsigned N>
  EVT ResTy = Node->getValueType(0);
  auto *CImm = cast<ConstantSDNode>(Node->getOperand(2));
  if (!isUInt<N>(CImm->getZExtValue())) {
                       ": argument out of range.");

template <unsigned N>
  EVT ResTy = Node->getValueType(0);
  auto *CImm = cast<ConstantSDNode>(Node->getOperand(2));
  if (!isUInt<N>(CImm->getZExtValue())) {
                       ": argument out of range.");
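// The intrinsic DAG combine below maps each LSX/LASX arithmetic, logic and
// shift intrinsic onto an equivalent generic DAG node; the immediate forms
// (vaddi, vmaxi, vslli, ...) first splat their constant operand into a vector
// via lowerVectorSplatImm<Bits>(N, 2, DAG), with Bits matching the immediate
// field width (3/4/5/6/8 bits in the cases below).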
3834 switch (
N->getConstantOperandVal(0)) {
3837 case Intrinsic::loongarch_lsx_vadd_b:
3838 case Intrinsic::loongarch_lsx_vadd_h:
3839 case Intrinsic::loongarch_lsx_vadd_w:
3840 case Intrinsic::loongarch_lsx_vadd_d:
3841 case Intrinsic::loongarch_lasx_xvadd_b:
3842 case Intrinsic::loongarch_lasx_xvadd_h:
3843 case Intrinsic::loongarch_lasx_xvadd_w:
3844 case Intrinsic::loongarch_lasx_xvadd_d:
3847 case Intrinsic::loongarch_lsx_vaddi_bu:
3848 case Intrinsic::loongarch_lsx_vaddi_hu:
3849 case Intrinsic::loongarch_lsx_vaddi_wu:
3850 case Intrinsic::loongarch_lsx_vaddi_du:
3851 case Intrinsic::loongarch_lasx_xvaddi_bu:
3852 case Intrinsic::loongarch_lasx_xvaddi_hu:
3853 case Intrinsic::loongarch_lasx_xvaddi_wu:
3854 case Intrinsic::loongarch_lasx_xvaddi_du:
3856 lowerVectorSplatImm<5>(
N, 2, DAG));
3857 case Intrinsic::loongarch_lsx_vsub_b:
3858 case Intrinsic::loongarch_lsx_vsub_h:
3859 case Intrinsic::loongarch_lsx_vsub_w:
3860 case Intrinsic::loongarch_lsx_vsub_d:
3861 case Intrinsic::loongarch_lasx_xvsub_b:
3862 case Intrinsic::loongarch_lasx_xvsub_h:
3863 case Intrinsic::loongarch_lasx_xvsub_w:
3864 case Intrinsic::loongarch_lasx_xvsub_d:
3867 case Intrinsic::loongarch_lsx_vsubi_bu:
3868 case Intrinsic::loongarch_lsx_vsubi_hu:
3869 case Intrinsic::loongarch_lsx_vsubi_wu:
3870 case Intrinsic::loongarch_lsx_vsubi_du:
3871 case Intrinsic::loongarch_lasx_xvsubi_bu:
3872 case Intrinsic::loongarch_lasx_xvsubi_hu:
3873 case Intrinsic::loongarch_lasx_xvsubi_wu:
3874 case Intrinsic::loongarch_lasx_xvsubi_du:
3876 lowerVectorSplatImm<5>(
N, 2, DAG));
3877 case Intrinsic::loongarch_lsx_vneg_b:
3878 case Intrinsic::loongarch_lsx_vneg_h:
3879 case Intrinsic::loongarch_lsx_vneg_w:
3880 case Intrinsic::loongarch_lsx_vneg_d:
3881 case Intrinsic::loongarch_lasx_xvneg_b:
3882 case Intrinsic::loongarch_lasx_xvneg_h:
3883 case Intrinsic::loongarch_lasx_xvneg_w:
3884 case Intrinsic::loongarch_lasx_xvneg_d:
3888 APInt(
N->getValueType(0).getScalarType().getSizeInBits(), 0,
3890 SDLoc(
N),
N->getValueType(0)),
3892 case Intrinsic::loongarch_lsx_vmax_b:
3893 case Intrinsic::loongarch_lsx_vmax_h:
3894 case Intrinsic::loongarch_lsx_vmax_w:
3895 case Intrinsic::loongarch_lsx_vmax_d:
3896 case Intrinsic::loongarch_lasx_xvmax_b:
3897 case Intrinsic::loongarch_lasx_xvmax_h:
3898 case Intrinsic::loongarch_lasx_xvmax_w:
3899 case Intrinsic::loongarch_lasx_xvmax_d:
3902 case Intrinsic::loongarch_lsx_vmax_bu:
3903 case Intrinsic::loongarch_lsx_vmax_hu:
3904 case Intrinsic::loongarch_lsx_vmax_wu:
3905 case Intrinsic::loongarch_lsx_vmax_du:
3906 case Intrinsic::loongarch_lasx_xvmax_bu:
3907 case Intrinsic::loongarch_lasx_xvmax_hu:
3908 case Intrinsic::loongarch_lasx_xvmax_wu:
3909 case Intrinsic::loongarch_lasx_xvmax_du:
3912 case Intrinsic::loongarch_lsx_vmaxi_b:
3913 case Intrinsic::loongarch_lsx_vmaxi_h:
3914 case Intrinsic::loongarch_lsx_vmaxi_w:
3915 case Intrinsic::loongarch_lsx_vmaxi_d:
3916 case Intrinsic::loongarch_lasx_xvmaxi_b:
3917 case Intrinsic::loongarch_lasx_xvmaxi_h:
3918 case Intrinsic::loongarch_lasx_xvmaxi_w:
3919 case Intrinsic::loongarch_lasx_xvmaxi_d:
3921 lowerVectorSplatImm<5>(
N, 2, DAG,
true));
3922 case Intrinsic::loongarch_lsx_vmaxi_bu:
3923 case Intrinsic::loongarch_lsx_vmaxi_hu:
3924 case Intrinsic::loongarch_lsx_vmaxi_wu:
3925 case Intrinsic::loongarch_lsx_vmaxi_du:
3926 case Intrinsic::loongarch_lasx_xvmaxi_bu:
3927 case Intrinsic::loongarch_lasx_xvmaxi_hu:
3928 case Intrinsic::loongarch_lasx_xvmaxi_wu:
3929 case Intrinsic::loongarch_lasx_xvmaxi_du:
3931 lowerVectorSplatImm<5>(
N, 2, DAG));
3932 case Intrinsic::loongarch_lsx_vmin_b:
3933 case Intrinsic::loongarch_lsx_vmin_h:
3934 case Intrinsic::loongarch_lsx_vmin_w:
3935 case Intrinsic::loongarch_lsx_vmin_d:
3936 case Intrinsic::loongarch_lasx_xvmin_b:
3937 case Intrinsic::loongarch_lasx_xvmin_h:
3938 case Intrinsic::loongarch_lasx_xvmin_w:
3939 case Intrinsic::loongarch_lasx_xvmin_d:
3942 case Intrinsic::loongarch_lsx_vmin_bu:
3943 case Intrinsic::loongarch_lsx_vmin_hu:
3944 case Intrinsic::loongarch_lsx_vmin_wu:
3945 case Intrinsic::loongarch_lsx_vmin_du:
3946 case Intrinsic::loongarch_lasx_xvmin_bu:
3947 case Intrinsic::loongarch_lasx_xvmin_hu:
3948 case Intrinsic::loongarch_lasx_xvmin_wu:
3949 case Intrinsic::loongarch_lasx_xvmin_du:
3952 case Intrinsic::loongarch_lsx_vmini_b:
3953 case Intrinsic::loongarch_lsx_vmini_h:
3954 case Intrinsic::loongarch_lsx_vmini_w:
3955 case Intrinsic::loongarch_lsx_vmini_d:
3956 case Intrinsic::loongarch_lasx_xvmini_b:
3957 case Intrinsic::loongarch_lasx_xvmini_h:
3958 case Intrinsic::loongarch_lasx_xvmini_w:
3959 case Intrinsic::loongarch_lasx_xvmini_d:
3961 lowerVectorSplatImm<5>(
N, 2, DAG,
true));
3962 case Intrinsic::loongarch_lsx_vmini_bu:
3963 case Intrinsic::loongarch_lsx_vmini_hu:
3964 case Intrinsic::loongarch_lsx_vmini_wu:
3965 case Intrinsic::loongarch_lsx_vmini_du:
3966 case Intrinsic::loongarch_lasx_xvmini_bu:
3967 case Intrinsic::loongarch_lasx_xvmini_hu:
3968 case Intrinsic::loongarch_lasx_xvmini_wu:
3969 case Intrinsic::loongarch_lasx_xvmini_du:
3971 lowerVectorSplatImm<5>(
N, 2, DAG));
3972 case Intrinsic::loongarch_lsx_vmul_b:
3973 case Intrinsic::loongarch_lsx_vmul_h:
3974 case Intrinsic::loongarch_lsx_vmul_w:
3975 case Intrinsic::loongarch_lsx_vmul_d:
3976 case Intrinsic::loongarch_lasx_xvmul_b:
3977 case Intrinsic::loongarch_lasx_xvmul_h:
3978 case Intrinsic::loongarch_lasx_xvmul_w:
3979 case Intrinsic::loongarch_lasx_xvmul_d:
3982 case Intrinsic::loongarch_lsx_vmadd_b:
3983 case Intrinsic::loongarch_lsx_vmadd_h:
3984 case Intrinsic::loongarch_lsx_vmadd_w:
3985 case Intrinsic::loongarch_lsx_vmadd_d:
3986 case Intrinsic::loongarch_lasx_xvmadd_b:
3987 case Intrinsic::loongarch_lasx_xvmadd_h:
3988 case Intrinsic::loongarch_lasx_xvmadd_w:
3989 case Intrinsic::loongarch_lasx_xvmadd_d: {
3990 EVT ResTy =
N->getValueType(0);
3995 case Intrinsic::loongarch_lsx_vmsub_b:
3996 case Intrinsic::loongarch_lsx_vmsub_h:
3997 case Intrinsic::loongarch_lsx_vmsub_w:
3998 case Intrinsic::loongarch_lsx_vmsub_d:
3999 case Intrinsic::loongarch_lasx_xvmsub_b:
4000 case Intrinsic::loongarch_lasx_xvmsub_h:
4001 case Intrinsic::loongarch_lasx_xvmsub_w:
4002 case Intrinsic::loongarch_lasx_xvmsub_d: {
4003 EVT ResTy =
N->getValueType(0);
4008 case Intrinsic::loongarch_lsx_vdiv_b:
4009 case Intrinsic::loongarch_lsx_vdiv_h:
4010 case Intrinsic::loongarch_lsx_vdiv_w:
4011 case Intrinsic::loongarch_lsx_vdiv_d:
4012 case Intrinsic::loongarch_lasx_xvdiv_b:
4013 case Intrinsic::loongarch_lasx_xvdiv_h:
4014 case Intrinsic::loongarch_lasx_xvdiv_w:
4015 case Intrinsic::loongarch_lasx_xvdiv_d:
4018 case Intrinsic::loongarch_lsx_vdiv_bu:
4019 case Intrinsic::loongarch_lsx_vdiv_hu:
4020 case Intrinsic::loongarch_lsx_vdiv_wu:
4021 case Intrinsic::loongarch_lsx_vdiv_du:
4022 case Intrinsic::loongarch_lasx_xvdiv_bu:
4023 case Intrinsic::loongarch_lasx_xvdiv_hu:
4024 case Intrinsic::loongarch_lasx_xvdiv_wu:
4025 case Intrinsic::loongarch_lasx_xvdiv_du:
4028 case Intrinsic::loongarch_lsx_vmod_b:
4029 case Intrinsic::loongarch_lsx_vmod_h:
4030 case Intrinsic::loongarch_lsx_vmod_w:
4031 case Intrinsic::loongarch_lsx_vmod_d:
4032 case Intrinsic::loongarch_lasx_xvmod_b:
4033 case Intrinsic::loongarch_lasx_xvmod_h:
4034 case Intrinsic::loongarch_lasx_xvmod_w:
4035 case Intrinsic::loongarch_lasx_xvmod_d:
4038 case Intrinsic::loongarch_lsx_vmod_bu:
4039 case Intrinsic::loongarch_lsx_vmod_hu:
4040 case Intrinsic::loongarch_lsx_vmod_wu:
4041 case Intrinsic::loongarch_lsx_vmod_du:
4042 case Intrinsic::loongarch_lasx_xvmod_bu:
4043 case Intrinsic::loongarch_lasx_xvmod_hu:
4044 case Intrinsic::loongarch_lasx_xvmod_wu:
4045 case Intrinsic::loongarch_lasx_xvmod_du:
4048 case Intrinsic::loongarch_lsx_vand_v:
4049 case Intrinsic::loongarch_lasx_xvand_v:
4052 case Intrinsic::loongarch_lsx_vor_v:
4053 case Intrinsic::loongarch_lasx_xvor_v:
4056 case Intrinsic::loongarch_lsx_vxor_v:
4057 case Intrinsic::loongarch_lasx_xvxor_v:
4060 case Intrinsic::loongarch_lsx_vnor_v:
4061 case Intrinsic::loongarch_lasx_xvnor_v: {
4066 case Intrinsic::loongarch_lsx_vandi_b:
4067 case Intrinsic::loongarch_lasx_xvandi_b:
4069 lowerVectorSplatImm<8>(
N, 2, DAG));
4070 case Intrinsic::loongarch_lsx_vori_b:
4071 case Intrinsic::loongarch_lasx_xvori_b:
4073 lowerVectorSplatImm<8>(
N, 2, DAG));
4074 case Intrinsic::loongarch_lsx_vxori_b:
4075 case Intrinsic::loongarch_lasx_xvxori_b:
4077 lowerVectorSplatImm<8>(
N, 2, DAG));
4078 case Intrinsic::loongarch_lsx_vsll_b:
4079 case Intrinsic::loongarch_lsx_vsll_h:
4080 case Intrinsic::loongarch_lsx_vsll_w:
4081 case Intrinsic::loongarch_lsx_vsll_d:
4082 case Intrinsic::loongarch_lasx_xvsll_b:
4083 case Intrinsic::loongarch_lasx_xvsll_h:
4084 case Intrinsic::loongarch_lasx_xvsll_w:
4085 case Intrinsic::loongarch_lasx_xvsll_d:
4088 case Intrinsic::loongarch_lsx_vslli_b:
4089 case Intrinsic::loongarch_lasx_xvslli_b:
4091 lowerVectorSplatImm<3>(N, 2, DAG));
4092 case Intrinsic::loongarch_lsx_vslli_h:
4093 case Intrinsic::loongarch_lasx_xvslli_h:
4095 lowerVectorSplatImm<4>(N, 2, DAG));
4096 case Intrinsic::loongarch_lsx_vslli_w:
4097 case Intrinsic::loongarch_lasx_xvslli_w:
4099 lowerVectorSplatImm<5>(N, 2, DAG));
4100 case Intrinsic::loongarch_lsx_vslli_d:
4101 case Intrinsic::loongarch_lasx_xvslli_d:
4103 lowerVectorSplatImm<6>(N, 2, DAG));
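// The template argument is the width of the shift-amount immediate: 3 bits
// for i8 lanes (shifts 0..7), 4 for i16, 5 for i32 and 6 for i64 lanes.
// A rough sketch of what a helper like lowerVectorSplatImm<5> is assumed to
// produce for vslli.w (names here are illustrative only):
//   SDValue SplatAmt = DAG.getConstant(Imm5, DL, N->getValueType(0));
//   return DAG.getNode(ISD::SHL, DL, N->getValueType(0), N->getOperand(1), SplatAmt);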
4104 case Intrinsic::loongarch_lsx_vsrl_b:
4105 case Intrinsic::loongarch_lsx_vsrl_h:
4106 case Intrinsic::loongarch_lsx_vsrl_w:
4107 case Intrinsic::loongarch_lsx_vsrl_d:
4108 case Intrinsic::loongarch_lasx_xvsrl_b:
4109 case Intrinsic::loongarch_lasx_xvsrl_h:
4110 case Intrinsic::loongarch_lasx_xvsrl_w:
4111 case Intrinsic::loongarch_lasx_xvsrl_d:
4114 case Intrinsic::loongarch_lsx_vsrli_b:
4115 case Intrinsic::loongarch_lasx_xvsrli_b:
4117 lowerVectorSplatImm<3>(N, 2, DAG));
4118 case Intrinsic::loongarch_lsx_vsrli_h:
4119 case Intrinsic::loongarch_lasx_xvsrli_h:
4121 lowerVectorSplatImm<4>(N, 2, DAG));
4122 case Intrinsic::loongarch_lsx_vsrli_w:
4123 case Intrinsic::loongarch_lasx_xvsrli_w:
4125 lowerVectorSplatImm<5>(N, 2, DAG));
4126 case Intrinsic::loongarch_lsx_vsrli_d:
4127 case Intrinsic::loongarch_lasx_xvsrli_d:
4129 lowerVectorSplatImm<6>(N, 2, DAG));
4130 case Intrinsic::loongarch_lsx_vsra_b:
4131 case Intrinsic::loongarch_lsx_vsra_h:
4132 case Intrinsic::loongarch_lsx_vsra_w:
4133 case Intrinsic::loongarch_lsx_vsra_d:
4134 case Intrinsic::loongarch_lasx_xvsra_b:
4135 case Intrinsic::loongarch_lasx_xvsra_h:
4136 case Intrinsic::loongarch_lasx_xvsra_w:
4137 case Intrinsic::loongarch_lasx_xvsra_d:
4140 case Intrinsic::loongarch_lsx_vsrai_b:
4141 case Intrinsic::loongarch_lasx_xvsrai_b:
4143 lowerVectorSplatImm<3>(N, 2, DAG));
4144 case Intrinsic::loongarch_lsx_vsrai_h:
4145 case Intrinsic::loongarch_lasx_xvsrai_h:
4147 lowerVectorSplatImm<4>(N, 2, DAG));
4148 case Intrinsic::loongarch_lsx_vsrai_w:
4149 case Intrinsic::loongarch_lasx_xvsrai_w:
4151 lowerVectorSplatImm<5>(N, 2, DAG));
4152 case Intrinsic::loongarch_lsx_vsrai_d:
4153 case Intrinsic::loongarch_lasx_xvsrai_d:
4155 lowerVectorSplatImm<6>(N, 2, DAG));
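// Same pattern as the logical shifts above; for vsrai/xvsrai the splat of the
// immediate is presumably fed to the arithmetic form (ISD::SRA) rather than
// the logical form (ISD::SRL), with the 3/4/5/6-bit immediate again bounded
// by the lane width.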
4156 case Intrinsic::loongarch_lsx_vclz_b:
4157 case Intrinsic::loongarch_lsx_vclz_h:
4158 case Intrinsic::loongarch_lsx_vclz_w:
4159 case Intrinsic::loongarch_lsx_vclz_d:
4160 case Intrinsic::loongarch_lasx_xvclz_b:
4161 case Intrinsic::loongarch_lasx_xvclz_h:
4162 case Intrinsic::loongarch_lasx_xvclz_w:
4163 case Intrinsic::loongarch_lasx_xvclz_d:
4165 case Intrinsic::loongarch_lsx_vpcnt_b:
4166 case Intrinsic::loongarch_lsx_vpcnt_h:
4167 case Intrinsic::loongarch_lsx_vpcnt_w:
4168 case Intrinsic::loongarch_lsx_vpcnt_d:
4169 case Intrinsic::loongarch_lasx_xvpcnt_b:
4170 case Intrinsic::loongarch_lasx_xvpcnt_h:
4171 case Intrinsic::loongarch_lasx_xvpcnt_w:
4172 case Intrinsic::loongarch_lasx_xvpcnt_d:
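// Assumed mapping for the two groups above: vclz_* becomes a per-lane
// ISD::CTLZ and vpcnt_* a per-lane ISD::CTPOP on N->getValueType(0), so the
// generic legalizer and combiner can handle them like any other count node.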
4174 case Intrinsic::loongarch_lsx_vbitclr_b:
4175 case Intrinsic::loongarch_lsx_vbitclr_h:
4176 case Intrinsic::loongarch_lsx_vbitclr_w:
4177 case Intrinsic::loongarch_lsx_vbitclr_d:
4178 case Intrinsic::loongarch_lasx_xvbitclr_b:
4179 case Intrinsic::loongarch_lasx_xvbitclr_h:
4180 case Intrinsic::loongarch_lasx_xvbitclr_w:
4181 case Intrinsic::loongarch_lasx_xvbitclr_d:
4183 case Intrinsic::loongarch_lsx_vbitclri_b:
4184 case Intrinsic::loongarch_lasx_xvbitclri_b:
4185 return lowerVectorBitClearImm<3>(N, DAG);
4186 case Intrinsic::loongarch_lsx_vbitclri_h:
4187 case Intrinsic::loongarch_lasx_xvbitclri_h:
4188 return lowerVectorBitClearImm<4>(N, DAG);
4189 case Intrinsic::loongarch_lsx_vbitclri_w:
4190 case Intrinsic::loongarch_lasx_xvbitclri_w:
4191 return lowerVectorBitClearImm<5>(N, DAG);
4192 case Intrinsic::loongarch_lsx_vbitclri_d:
4193 case Intrinsic::loongarch_lasx_xvbitclri_d:
4194 return lowerVectorBitClearImm<6>(N, DAG);
4195 case Intrinsic::loongarch_lsx_vbitset_b:
4196 case Intrinsic::loongarch_lsx_vbitset_h:
4197 case Intrinsic::loongarch_lsx_vbitset_w:
4198 case Intrinsic::loongarch_lsx_vbitset_d:
4199 case Intrinsic::loongarch_lasx_xvbitset_b:
4200 case Intrinsic::loongarch_lasx_xvbitset_h:
4201 case Intrinsic::loongarch_lasx_xvbitset_w:
4202 case Intrinsic::loongarch_lasx_xvbitset_d: {
4203 EVT VecTy = N->getValueType(0);
4209 case Intrinsic::loongarch_lsx_vbitseti_b:
4210 case Intrinsic::loongarch_lasx_xvbitseti_b:
4211 return lowerVectorBitSetImm<3>(N, DAG);
4212 case Intrinsic::loongarch_lsx_vbitseti_h:
4213 case Intrinsic::loongarch_lasx_xvbitseti_h:
4214 return lowerVectorBitSetImm<4>(N, DAG);
4215 case Intrinsic::loongarch_lsx_vbitseti_w:
4216 case Intrinsic::loongarch_lasx_xvbitseti_w:
4217 return lowerVectorBitSetImm<5>(N, DAG);
4218 case Intrinsic::loongarch_lsx_vbitseti_d:
4219 case Intrinsic::loongarch_lasx_xvbitseti_d:
4220 return lowerVectorBitSetImm<6>(N, DAG);
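// Worked example (illustrative): vbitseti.w with immediate 31 sets bit 31 of
// every i32 lane, i.e. it behaves like an OR with the splat constant
// 1u << 31 = 0x80000000; the <3>/<4>/<5>/<6> template arguments bound the
// bit index to the lane width exactly as for the shift immediates.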
4221 case Intrinsic::loongarch_lsx_vbitrev_b:
4222 case Intrinsic::loongarch_lsx_vbitrev_h:
4223 case Intrinsic::loongarch_lsx_vbitrev_w:
4224 case Intrinsic::loongarch_lsx_vbitrev_d:
4225 case Intrinsic::loongarch_lasx_xvbitrev_b:
4226 case Intrinsic::loongarch_lasx_xvbitrev_h:
4227 case Intrinsic::loongarch_lasx_xvbitrev_w:
4228 case Intrinsic::loongarch_lasx_xvbitrev_d: {
4229 EVT VecTy = N->getValueType(0);
4235 case Intrinsic::loongarch_lsx_vbitrevi_b:
4236 case Intrinsic::loongarch_lasx_xvbitrevi_b:
4237 return lowerVectorBitRevImm<3>(N, DAG);
4238 case Intrinsic::loongarch_lsx_vbitrevi_h:
4239 case Intrinsic::loongarch_lasx_xvbitrevi_h:
4240 return lowerVectorBitRevImm<4>(N, DAG);
4241 case Intrinsic::loongarch_lsx_vbitrevi_w:
4242 case Intrinsic::loongarch_lasx_xvbitrevi_w:
4243 return lowerVectorBitRevImm<5>(N, DAG);
4244 case Intrinsic::loongarch_lsx_vbitrevi_d:
4245 case Intrinsic::loongarch_lasx_xvbitrevi_d:
4246 return lowerVectorBitRevImm<6>(N, DAG);
4247 case Intrinsic::loongarch_lsx_vfadd_s:
4248 case Intrinsic::loongarch_lsx_vfadd_d:
4249 case Intrinsic::loongarch_lasx_xvfadd_s:
4250 case Intrinsic::loongarch_lasx_xvfadd_d:
4253 case Intrinsic::loongarch_lsx_vfsub_s:
4254 case Intrinsic::loongarch_lsx_vfsub_d:
4255 case Intrinsic::loongarch_lasx_xvfsub_s:
4256 case Intrinsic::loongarch_lasx_xvfsub_d:
4259 case Intrinsic::loongarch_lsx_vfmul_s:
4260 case Intrinsic::loongarch_lsx_vfmul_d:
4261 case Intrinsic::loongarch_lasx_xvfmul_s:
4262 case Intrinsic::loongarch_lasx_xvfmul_d:
4265 case Intrinsic::loongarch_lsx_vfdiv_s:
4266 case Intrinsic::loongarch_lsx_vfdiv_d:
4267 case Intrinsic::loongarch_lasx_xvfdiv_s:
4268 case Intrinsic::loongarch_lasx_xvfdiv_d:
4271 case Intrinsic::loongarch_lsx_vfmadd_s:
4272 case Intrinsic::loongarch_lsx_vfmadd_d:
4273 case Intrinsic::loongarch_lasx_xvfmadd_s:
4274 case Intrinsic::loongarch_lasx_xvfmadd_d:
4276 N->getOperand(2), N->getOperand(3));
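// The three value operands (operands 1..3 of the intrinsic node) are passed
// through unchanged; the fused multiply-add intrinsics are assumed to become
// a generic ISD::FMA so the usual FMA-formation and contraction rules apply.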
4277 case Intrinsic::loongarch_lsx_vinsgr2vr_b:
4279 N->getOperand(1), N->getOperand(2),
4280 legalizeIntrinsicImmArg<4>(N, 3, DAG, Subtarget));
4281 case Intrinsic::loongarch_lsx_vinsgr2vr_h:
4282 case Intrinsic::loongarch_lasx_xvinsgr2vr_w:
4284 N->getOperand(1), N->getOperand(2),
4285 legalizeIntrinsicImmArg<3>(N, 3, DAG, Subtarget));
4286 case Intrinsic::loongarch_lsx_vinsgr2vr_w:
4287 case Intrinsic::loongarch_lasx_xvinsgr2vr_d:
4289 N->getOperand(1), N->getOperand(2),
4290 legalizeIntrinsicImmArg<2>(N, 3, DAG, Subtarget));
4291 case Intrinsic::loongarch_lsx_vinsgr2vr_d:
4293 N->getOperand(1), N->getOperand(2),
4294 legalizeIntrinsicImmArg<1>(N, 3, DAG, Subtarget));
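// The immediate here is a lane index, so the legalized width is
// log2(lane count): 4 bits for v16i8, 3 for v8i16 (shared with
// xvinsgr2vr.w's v8i32), 2 for v4i32 (shared with xvinsgr2vr.d's v4i64),
// and 1 for v2i64.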
4295 case Intrinsic::loongarch_lsx_vreplgr2vr_b:
4296 case Intrinsic::loongarch_lsx_vreplgr2vr_h:
4297 case Intrinsic::loongarch_lsx_vreplgr2vr_w:
4298 case Intrinsic::loongarch_lsx_vreplgr2vr_d:
4299 case Intrinsic::loongarch_lasx_xvreplgr2vr_b:
4300 case Intrinsic::loongarch_lasx_xvreplgr2vr_h:
4301 case Intrinsic::loongarch_lasx_xvreplgr2vr_w:
4302 case Intrinsic::loongarch_lasx_xvreplgr2vr_d:
4306 case Intrinsic::loongarch_lsx_vreplve_b:
4307 case Intrinsic::loongarch_lsx_vreplve_h:
4308 case Intrinsic::loongarch_lsx_vreplve_w:
4309 case Intrinsic::loongarch_lsx_vreplve_d:
4310 case Intrinsic::loongarch_lasx_xvreplve_b:
4311 case Intrinsic::loongarch_lasx_xvreplve_h:
4312 case Intrinsic::loongarch_lasx_xvreplve_w:
4313 case Intrinsic::loongarch_lasx_xvreplve_d:
4325 switch (N->getOpcode()) {
4362 MF->insert(It, BreakMBB);
4366 SinkMBB->splice(SinkMBB->end(), MBB, std::next(MI.getIterator()), MBB->end());
4367 SinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
4385 BreakMBB->addSuccessor(SinkMBB);
4397 switch (MI.getOpcode()) {
4400 case LoongArch::PseudoVBZ:
4401 CondOpc = LoongArch::VSETEQZ_V;
4403 case LoongArch::PseudoVBZ_B:
4404 CondOpc = LoongArch::VSETANYEQZ_B;
4406 case LoongArch::PseudoVBZ_H:
4407 CondOpc = LoongArch::VSETANYEQZ_H;
4409 case LoongArch::PseudoVBZ_W:
4410 CondOpc = LoongArch::VSETANYEQZ_W;
4412 case LoongArch::PseudoVBZ_D:
4413 CondOpc = LoongArch::VSETANYEQZ_D;
4415 case LoongArch::PseudoVBNZ:
4416 CondOpc = LoongArch::VSETNEZ_V;
4418 case LoongArch::PseudoVBNZ_B:
4419 CondOpc = LoongArch::VSETALLNEZ_B;
4421 case LoongArch::PseudoVBNZ_H:
4422 CondOpc = LoongArch::VSETALLNEZ_H;
4424 case LoongArch::PseudoVBNZ_W:
4425 CondOpc = LoongArch::VSETALLNEZ_W;
4427 case LoongArch::PseudoVBNZ_D:
4428 CondOpc = LoongArch::VSETALLNEZ_D;
4430 case LoongArch::PseudoXVBZ:
4431 CondOpc = LoongArch::XVSETEQZ_V;
4433 case LoongArch::PseudoXVBZ_B:
4434 CondOpc = LoongArch::XVSETANYEQZ_B;
4436 case LoongArch::PseudoXVBZ_H:
4437 CondOpc = LoongArch::XVSETANYEQZ_H;
4439 case LoongArch::PseudoXVBZ_W:
4440 CondOpc = LoongArch::XVSETANYEQZ_W;
4442 case LoongArch::PseudoXVBZ_D:
4443 CondOpc = LoongArch::XVSETANYEQZ_D;
4445 case LoongArch::PseudoXVBNZ:
4446 CondOpc = LoongArch::XVSETNEZ_V;
4448 case LoongArch::PseudoXVBNZ_B:
4449 CondOpc = LoongArch::XVSETALLNEZ_B;
4451 case LoongArch::PseudoXVBNZ_H:
4452 CondOpc = LoongArch::XVSETALLNEZ_H;
4454 case LoongArch::PseudoXVBNZ_W:
4455 CondOpc = LoongArch::XVSETALLNEZ_W;
4457 case LoongArch::PseudoXVBNZ_D:
4458 CondOpc = LoongArch::XVSETALLNEZ_D;
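// Rough shape of the expansion built below (assumed): CondOpc writes an
// LSX/LASX condition flag register (FCC) from the vector operand, the flag
// is branched on, and the two GPR temporaries RD1/RD2 holding 0 and 1 are
// merged by a PHI in SinkBB so the pseudo's GPR result carries the boolean.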
4473 F->insert(It, FalseBB);
4474 F->insert(It, TrueBB);
4475 F->insert(It, SinkBB);
4478 SinkBB->splice(SinkBB->end(), BB, std::next(MI.getIterator()), BB->end());
4482 Register FCC = MRI.createVirtualRegister(&LoongArch::CFRRegClass);
4491 Register RD1 = MRI.createVirtualRegister(&LoongArch::GPRRegClass);
4499 Register RD2 = MRI.createVirtualRegister(&LoongArch::GPRRegClass);
4507 MI.getOperand(0).getReg())
4514 MI.eraseFromParent();
4523 switch (MI.getOpcode()) {
4526 case LoongArch::PseudoXVINSGR2VR_B:
4528 InsOp = LoongArch::VINSGR2VR_B;
4530 case LoongArch::PseudoXVINSGR2VR_H:
4532 InsOp = LoongArch::VINSGR2VR_H;
4544 unsigned Idx = MI.getOperand(3).getImm();
4547 if (Idx >= HalfSize) {
4548 ScratchReg1 = MRI.createVirtualRegister(RC);
4549 BuildMI(*BB, MI, DL, TII->get(LoongArch::XVPERMI_Q), ScratchReg1)
4555 Register ScratchSubReg1 = MRI.createVirtualRegister(SubRC);
4556 Register ScratchSubReg2 = MRI.createVirtualRegister(SubRC);
4558 .addReg(ScratchReg1, 0, LoongArch::sub_128);
4565 if (Idx >= HalfSize)
4566 ScratchReg2 = MRI.createVirtualRegister(RC);
4568 BuildMI(*BB, MI, DL, TII->get(LoongArch::SUBREG_TO_REG), ScratchReg2)
4571 .addImm(LoongArch::sub_128);
4573 if (Idx >= HalfSize)
4579 MI.eraseFromParent();
4586 assert(Subtarget.hasExtLSX());
4593 Register ScratchReg1 = MRI.createVirtualRegister(RC);
4594 Register ScratchReg2 = MRI.createVirtualRegister(RC);
4595 Register ScratchReg3 = MRI.createVirtualRegister(RC);
4599 TII->get(Subtarget.is64Bit() ? LoongArch::VINSGR2VR_D
4600 : LoongArch::VINSGR2VR_W),
4607 TII->get(Subtarget.is64Bit() ? LoongArch::VPCNT_D : LoongArch::VPCNT_W),
4611 TII->get(Subtarget.is64Bit() ? LoongArch::VPICKVE2GR_D
4612 : LoongArch::VPICKVE2GR_W),
4617 MI.eraseFromParent();
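// There is no scalar population-count instruction used here, so PseudoCTPOP
// bounces through LSX: move the GPR into lane 0, run vpcnt on the vector,
// then move lane 0 back to a GPR. Roughly, on LA64 (illustrative):
//   vinsgr2vr.d   $vr, $rj, 0
//   vpcnt.d       $vr, $vr
//   vpickve2gr.d  $rd, $vr, 0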
4626 switch (MI.getOpcode()) {
4629 case LoongArch::DIV_W:
4630 case LoongArch::DIV_WU:
4631 case LoongArch::MOD_W:
4632 case LoongArch::MOD_WU:
4633 case LoongArch::DIV_D:
4634 case LoongArch::DIV_DU:
4635 case LoongArch::MOD_D:
4636 case LoongArch::MOD_DU:
4639 case LoongArch::WRFCSR: {
4641 LoongArch::FCSR0 + MI.getOperand(0).getImm())
4642 .addReg(MI.getOperand(1).getReg());
4643 MI.eraseFromParent();
4646 case LoongArch::RDFCSR: {
4649 MI.getOperand(0).getReg())
4650 .addReg(LoongArch::FCSR0 + MI.getOperand(1).getImm());
4652 MI.eraseFromParent();
4655 case LoongArch::PseudoVBZ:
4656 case LoongArch::PseudoVBZ_B:
4657 case LoongArch::PseudoVBZ_H:
4658 case LoongArch::PseudoVBZ_W:
4659 case LoongArch::PseudoVBZ_D:
4660 case LoongArch::PseudoVBNZ:
4661 case LoongArch::PseudoVBNZ_B:
4662 case LoongArch::PseudoVBNZ_H:
4663 case LoongArch::PseudoVBNZ_W:
4664 case LoongArch::PseudoVBNZ_D:
4665 case LoongArch::PseudoXVBZ:
4666 case LoongArch::PseudoXVBZ_B:
4667 case LoongArch::PseudoXVBZ_H:
4668 case LoongArch::PseudoXVBZ_W:
4669 case LoongArch::PseudoXVBZ_D:
4670 case LoongArch::PseudoXVBNZ:
4671 case LoongArch::PseudoXVBNZ_B:
4672 case LoongArch::PseudoXVBNZ_H:
4673 case LoongArch::PseudoXVBNZ_W:
4674 case LoongArch::PseudoXVBNZ_D:
4676 case LoongArch::PseudoXVINSGR2VR_B:
4677 case LoongArch::PseudoXVINSGR2VR_H:
4679 case LoongArch::PseudoCTPOP:
4681 case TargetOpcode::STATEPOINT:
4687 MI.addOperand(*MI.getMF(),
4689 LoongArch::R1, true,
4700 unsigned *Fast) const {
4701 if (!Subtarget.hasUAL())
4715#define NODE_NAME_CASE(node) \
4716 case LoongArchISD::node: \
4717 return "LoongArchISD::" #node;
4797#undef NODE_NAME_CASE
4810 LoongArch::R7, LoongArch::R8, LoongArch::R9,
4811 LoongArch::R10, LoongArch::R11};
4815 LoongArch::F3, LoongArch::F4, LoongArch::F5,
4816 LoongArch::F6, LoongArch::F7};
4819 LoongArch::F0_64, LoongArch::F1_64, LoongArch::F2_64, LoongArch::F3_64,
4820 LoongArch::F4_64, LoongArch::F5_64, LoongArch::F6_64, LoongArch::F7_64};
4823 LoongArch::VR3, LoongArch::VR4, LoongArch::VR5,
4824 LoongArch::VR6, LoongArch::VR7};
4827 LoongArch::XR3, LoongArch::XR4, LoongArch::XR5,
4828 LoongArch::XR6, LoongArch::XR7};
4834 unsigned ValNo2, MVT ValVT2, MVT LocVT2,
4836 unsigned GRLenInBytes = GRLen / 8;
4869 unsigned ValNo, MVT ValVT,
4871 CCState &State, bool IsFixed, bool IsRet,
4873 unsigned GRLen = DL.getLargestLegalIntTypeSizeInBits();
4874 assert((GRLen == 32 || GRLen == 64) && "Unsupported GRLen");
4875 MVT GRLenVT = GRLen == 32 ? MVT::i32 : MVT::i64;
4880 if (IsRet && ValNo > 1)
4884 bool UseGPRForFloat = true;
4894 UseGPRForFloat = !IsFixed;
4903 UseGPRForFloat = true;
4905 if (UseGPRForFloat && ValVT == MVT::f32) {
4908 } else if (UseGPRForFloat && GRLen == 64 && ValVT == MVT::f64) {
4911 } else if (UseGPRForFloat && GRLen == 32 && ValVT == MVT::f64) {
4922 unsigned TwoGRLenInBytes = (2 * GRLen) / 8;
4924 DL.getTypeAllocSize(OrigTy) == TwoGRLenInBytes) {
4927 if (RegIdx != std::size(ArgGPRs) && RegIdx % 2 == 1)
4936 "PendingLocs and PendingArgFlags out of sync");
4954 PendingLocs.size() <= 2) {
4955 assert(PendingLocs.size() == 2 && "Unexpected PendingLocs.size()");
4960 PendingLocs.clear();
4961 PendingArgFlags.clear();
4968 unsigned StoreSizeBytes = GRLen / 8;
4971 if (ValVT == MVT::f32 && !UseGPRForFloat)
4973 else if (ValVT == MVT::f64 && !UseGPRForFloat)
4987 if (!PendingLocs.empty()) {
4989 assert(PendingLocs.size() > 2 && "Unexpected PendingLocs.size()");
4990 for (auto &It : PendingLocs) {
4992 It.convertToReg(Reg);
4997 PendingLocs.clear();
4998 PendingArgFlags.clear();
5001 assert((!UseGPRForFloat || LocVT == GRLenVT) &&
5002 "Expected an GRLenVT at this stage");
5019void LoongArchTargetLowering::analyzeInputArgs(
5022 LoongArchCCAssignFn Fn) const {
5024 for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
5026 Type *ArgTy = nullptr;
5028 ArgTy = FType->getReturnType();
5029 else if (Ins[i].isOrigArg())
5030 ArgTy = FType->getParamType(Ins[i].getOrigArgIndex());
5034 CCInfo, true, IsRet, ArgTy)) {
5035 LLVM_DEBUG(dbgs() << "InputArg #" << i << " has unhandled type " << ArgVT
5042void LoongArchTargetLowering::analyzeOutputArgs(
5045 CallLoweringInfo *CLI, LoongArchCCAssignFn Fn) const {
5046 for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
5047 MVT ArgVT = Outs[i].VT;
5048 Type *OrigTy = CLI ? CLI->getArgs()[Outs[i].OrigArgIndex].Ty : nullptr;
5052 CCInfo, Outs[i].IsFixed, IsRet, OrigTy)) {
5053 LLVM_DEBUG(dbgs() << "OutputArg #" << i << " has unhandled type " << ArgVT
5094 if (In.isOrigArg()) {
5099 if ((BitWidth <= 32 && In.Flags.isSExt()) ||
5100 (BitWidth < 32 && In.Flags.isZExt())) {
5160 if (LocVT == MVT::i32 || LocVT == MVT::i64) {
5164 LoongArch::R23, LoongArch::R24, LoongArch::R25,
5165 LoongArch::R26, LoongArch::R27, LoongArch::R28,
5166 LoongArch::R29, LoongArch::R30, LoongArch::R31};
5173 if (LocVT == MVT::f32) {
5176 static const MCPhysReg FPR32List[] = {LoongArch::F24, LoongArch::F25,
5177 LoongArch::F26, LoongArch::F27};
5184 if (LocVT == MVT::f64) {
5187 static const MCPhysReg FPR64List[] = {LoongArch::F28_64, LoongArch::F29_64,
5188 LoongArch::F30_64, LoongArch::F31_64};
5217 "GHC calling convention requires the F and D extensions");
5222 unsigned GRLenInBytes = Subtarget.getGRLen() / 8;
5224 std::vector<SDValue> OutChains;
5233 analyzeInputArgs(MF, CCInfo, Ins, false, CC_LoongArch);
5235 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
5247 unsigned ArgIndex = Ins[i].OrigArgIndex;
5248 unsigned ArgPartOffset = Ins[i].PartOffset;
5249 assert(ArgPartOffset == 0);
5250 while (i + 1 != e && Ins[i + 1].OrigArgIndex == ArgIndex) {
5252 unsigned PartOffset = Ins[i + 1].PartOffset - ArgPartOffset;
5275 int VaArgOffset, VarArgsSaveSize;
5281 VarArgsSaveSize = 0;
5283 VarArgsSaveSize = GRLenInBytes * (ArgRegs.size() - Idx);
5284 VaArgOffset = -VarArgsSaveSize;
5290 LoongArchFI->setVarArgsFrameIndex(FI);
5298 VarArgsSaveSize += GRLenInBytes;
5303 for (unsigned I = Idx; I < ArgRegs.size();
5304 ++I, VaArgOffset += GRLenInBytes) {
5312 cast<StoreSDNode>(Store.getNode())
5314 ->setValue((Value *)nullptr);
5315 OutChains.push_back(Store);
5317 LoongArchFI->setVarArgsSaveSize(VarArgsSaveSize);
5322 if (!OutChains.empty()) {
5323 OutChains.push_back(Chain);
5338 if (N->getNumValues() != 1)
5340 if (!N->hasNUsesOfValue(1, 0))
5343 SDNode *Copy = *N->user_begin();
5349 if (Copy->getGluedNode())
5353 bool HasRet = false;
5354 for (SDNode *Node : Copy->users()) {
5363 Chain = Copy->getOperand(0);
5368bool LoongArchTargetLowering::isEligibleForTailCallOptimization(
5372 auto CalleeCC = CLI.CallConv;
5373 auto &Outs = CLI.Outs;
5375 auto CallerCC = Caller.getCallingConv();
5382 for (auto &VA : ArgLocs)
5388 auto IsCallerStructRet = Caller.hasStructRetAttr();
5389 auto IsCalleeStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet();
5390 if (IsCallerStructRet || IsCalleeStructRet)
5394 for (auto &Arg : Outs)
5395 if (Arg.Flags.isByVal())
5400 const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
5401 if (CalleeCC != CallerCC) {
5402 const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
5403 if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
5441 analyzeOutputArgs(MF, ArgCCInfo, Outs, false, &CLI, CC_LoongArch);
5445 IsTailCall = isEligibleForTailCallOptimization(ArgCCInfo, CLI, MF, ArgLocs);
5451 "site marked musttail");
5458 for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
5460 if (!Flags.isByVal())
5464 unsigned Size = Flags.getByValSize();
5465 Align Alignment = Flags.getNonZeroByValAlign();
5472 Chain = DAG.getMemcpy(Chain, DL, FIPtr, Arg, SizeNode, Alignment,
5474 false, nullptr, std::nullopt,
5486 for (unsigned i = 0, j = 0, e = ArgLocs.size(); i != e; ++i) {
5488 SDValue ArgValue = OutVals[i];
5501 unsigned ArgIndex = Outs[i].OrigArgIndex;
5502 unsigned ArgPartOffset = Outs[i].PartOffset;
5503 assert(ArgPartOffset == 0);
5508 while (i + 1 != e && Outs[i + 1].OrigArgIndex == ArgIndex) {
5509 SDValue PartValue = OutVals[i + 1];
5510 unsigned PartOffset = Outs[i + 1].PartOffset - ArgPartOffset;
5520 int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
5524 for (const auto &Part : Parts) {
5525 SDValue PartValue = Part.first;
5526 SDValue PartOffset = Part.second;
5533 ArgValue = SpillSlot;
5539 if (Flags.isByVal())
5540 ArgValue = ByValArgs[j++];
5547 assert(!IsTailCall && "Tail call not allowed if stack is used "
5548 "for passing parameters");
5551 if (!StackPtr.getNode())
5564 if (!MemOpChains.empty())
5570 for (auto &Reg : RegsToPass) {
5571 Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, Glue);
5598 for (auto &Reg : RegsToPass)
5604 const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
5605 assert(Mask && "Missing call preserved mask for calling convention");
5623 assert(Subtarget.is64Bit() && "Medium code model requires LA64");
5627 assert(Subtarget.is64Bit() && "Large code model requires LA64");
5650 analyzeInputArgs(MF, RetCCInfo, Ins, true, CC_LoongArch);
5653 for (auto &VA : RVLocs) {
5673 CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
5675 for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
5679 Outs[i].Flags, CCInfo, true, true,
5706 for (unsigned i = 0, e = RVLocs.size(); i < e; ++i) {
5730 if (!Subtarget.hasExtLSX())
5733 if (VT == MVT::f32) {
5734 uint64_t masked = Imm.bitcastToAPInt().getZExtValue() & 0x7e07ffff;
5735 return (masked == 0x3e000000 || masked == 0x40000000);
5738 if (VT == MVT::f64) {
5739 uint64_t masked = Imm.bitcastToAPInt().getZExtValue() & 0x7fc0ffffffffffff;
5740 return (masked == 0x3fc0000000000000 || masked == 0x4000000000000000);
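// Worked example for the f32 mask above (arithmetic only; the masks are
// assumed to describe the immediates vldi can materialize): 2.0f bitcasts to
// 0x40000000 and 0x40000000 & 0x7e07ffff == 0x40000000, so it is accepted;
// the mask discards the sign bit, so -2.0f (0xc0000000) is accepted as well,
// while 32.0f (0x42000000) masks to 0x42000000 and is rejected.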
5746bool LoongArchTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
5747 bool ForCodeSize) const {
5749 if (VT == MVT::f32 && !Subtarget.hasBasicF())
5751 if (VT == MVT::f64 && !Subtarget.hasBasicD())
5753 return (Imm.isZero() || Imm.isExactlyValue(1.0) || isFPImmVLDILegal(Imm, VT));
5764bool LoongArchTargetLowering::shouldInsertFencesForAtomic(
5767 return isa<LoadInst>(I) || isa<StoreInst>(I);
5769 if (isa<LoadInst>(I))
5774 Type *Ty = I->getOperand(0)->getType();
5793 return Y.getValueType().isScalarInteger() && !isa<ConstantSDNode>(Y);
5799 unsigned Intrinsic) const {
5800 switch (Intrinsic) {
5803 case Intrinsic::loongarch_masked_atomicrmw_xchg_i32:
5804 case Intrinsic::loongarch_masked_atomicrmw_add_i32:
5805 case Intrinsic::loongarch_masked_atomicrmw_sub_i32:
5806 case Intrinsic::loongarch_masked_atomicrmw_nand_i32:
5808 Info.memVT = MVT::i32;
5809 Info.ptrVal = I.getArgOperand(0);
5828 "Unable to expand");
5829 unsigned MinWordSize = 4;
5842 Intrinsic::ptrmask, {PtrTy, IntTy},
5843 {Addr, ConstantInt::get(IntTy, ~(uint64_t)(MinWordSize - 1))}, nullptr,
5847 Value *PtrLSB = Builder.CreateAnd(AddrInt, MinWordSize - 1, "PtrLSB");
5849 ShiftAmt = Builder.CreateTrunc(ShiftAmt, WordType, "ShiftAmt");
5851 ConstantInt::get(WordType,
5855 Value *ValOperand_Shifted =
5857 ShiftAmt, "ValOperand_Shifted");
5860 NewOperand = Builder.CreateOr(ValOperand_Shifted, Inv_Mask, "AndOperand");
5862 NewOperand = ValOperand_Shifted;
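// The code above widens a sub-word atomicrmw to a naturally aligned word:
// llvm.ptrmask rounds the address down to MinWordSize, the low address bits
// become a bit shift, and the operand and mask are shifted into position.
// Worked example (assuming little-endian layout): a byte at Addr with
// Addr & 3 == 3 gives PtrLSB = 3, ShiftAmt = 3 * 8 = 24, and a lane mask of
// 0xff << 24, so only the top byte of the 32-bit word is actually modified.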
5888 if (Subtarget.hasLAM_BH() && Subtarget.is64Bit() &&
5896 if (Subtarget.hasLAMCAS()) {
5918 return Intrinsic::loongarch_masked_atomicrmw_xchg_i64;
5920 return Intrinsic::loongarch_masked_atomicrmw_add_i64;
5922 return Intrinsic::loongarch_masked_atomicrmw_sub_i64;
5924 return Intrinsic::loongarch_masked_atomicrmw_nand_i64;
5926 return Intrinsic::loongarch_masked_atomicrmw_umax_i64;
5928 return Intrinsic::loongarch_masked_atomicrmw_umin_i64;
5930 return Intrinsic::loongarch_masked_atomicrmw_max_i64;
5932 return Intrinsic::loongarch_masked_atomicrmw_min_i64;
5942 return Intrinsic::loongarch_masked_atomicrmw_xchg_i32;
5944 return Intrinsic::loongarch_masked_atomicrmw_add_i32;
5946 return Intrinsic::loongarch_masked_atomicrmw_sub_i32;
5948 return Intrinsic::loongarch_masked_atomicrmw_nand_i32;
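// GRLen selects the intrinsic family: the *_i64 variants above on LA64 and
// the *_i32 variants on LA32. Judging from the call below, the expanded
// intrinsic receives the word-aligned address, the shifted operand, the lane
// mask and the ordering, plus what appears to be a sign-extension shift
// amount (SextShamt) used by the signed min/max forms.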
5960 if (Subtarget.hasLAMCAS())
5973 Value *FailureOrdering =
5977 Intrinsic::ID CmpXchgIntrID = Intrinsic::loongarch_masked_cmpxchg_i64;
5983 CmpXchgIntrID, Tys, {AlignedAddr, CmpVal, NewVal, Mask, FailureOrdering});
6007 unsigned GRLen = Subtarget.getGRLen();
6036 {AlignedAddr, Incr, Mask, SextShamt, Ordering});
6039 Builder.CreateCall(LlwOpScwLoop, {AlignedAddr, Incr, Mask, Ordering});
6066 const Constant *PersonalityFn) const {
6067 return LoongArch::R4;
6071 const Constant *PersonalityFn) const {
6072 return LoongArch::R5;
6083 int RefinementSteps = VT.getScalarType() == MVT::f64 ? 2 : 1;
6084 return RefinementSteps;
6089 int &RefinementSteps,
6090 bool &UseOneConstNR,
6091 bool Reciprocal) const {
6092 if (Subtarget.hasFrecipe()) {
6096 if (VT == MVT::f32 || (VT == MVT::f64 && Subtarget.hasBasicD()) ||
6097 (VT == MVT::v4f32 && Subtarget.hasExtLSX()) ||
6098 (VT == MVT::v2f64 && Subtarget.hasExtLSX()) ||
6099 (VT == MVT::v8f32 && Subtarget.hasExtLASX()) ||
6100 (VT == MVT::v4f64 && Subtarget.hasExtLASX())) {
6119 int &RefinementSteps) const {
6120 if (Subtarget.hasFrecipe()) {
6124 if (VT == MVT::f32 || (VT == MVT::f64 && Subtarget.hasBasicD()) ||
6125 (VT == MVT::v4f32 && Subtarget.hasExtLSX()) ||
6126 (VT == MVT::v2f64 && Subtarget.hasExtLSX()) ||
6127 (VT == MVT::v8f32 && Subtarget.hasExtLASX()) ||
6128 (VT == MVT::v4f64 && Subtarget.hasExtLASX())) {
6145LoongArchTargetLowering::getConstraintType(StringRef Constraint) const {
6163 if (Constraint.size() == 1) {
6164 switch (Constraint[0]) {
6179 if (Constraint == "ZC" || Constraint == "ZB")
6195 std::pair<unsigned, const TargetRegisterClass *>
6196 LoongArchTargetLowering::getRegForInlineAsmConstraint(
6200 if (Constraint.size() == 1) {
6201 switch (Constraint[0]) {
6206 return std::make_pair(0U, &LoongArch::GPRRegClass);
6208 if (Subtarget.hasBasicF() && VT == MVT::f32)
6209 return std::make_pair(0U, &LoongArch::FPR32RegClass);
6210 if (Subtarget.hasBasicD() && VT == MVT::f64)
6211 return std::make_pair(0U, &LoongArch::FPR64RegClass);
6212 if (Subtarget.hasExtLSX() &&
6213 TRI->isTypeLegalForClass(LoongArch::LSX128RegClass, VT))
6214 return std::make_pair(0U, &LoongArch::LSX128RegClass);
6215 if (Subtarget.hasExtLASX() &&
6216 TRI->isTypeLegalForClass(LoongArch::LASX256RegClass, VT))
6217 return std::make_pair(0U, &LoongArch::LASX256RegClass);
6237 bool IsFP = Constraint[2] == 'f';
6238 std::pair<StringRef, StringRef> Temp = Constraint.split('$');
6239 std::pair<unsigned, const TargetRegisterClass *> R;
6241 TRI, join_items("", Temp.first, Temp.second), VT);
6244 unsigned RegNo = R.first;
6245 if (LoongArch::F0 <= RegNo && RegNo <= LoongArch::F31) {
6246 if (Subtarget.hasBasicD() && (VT == MVT::f64 || VT == MVT::Other)) {
6247 unsigned DReg = RegNo - LoongArch::F0 + LoongArch::F0_64;
6248 return std::make_pair(DReg, &LoongArch::FPR64RegClass);
6258void LoongArchTargetLowering::LowerAsmOperandForConstraint(
6262 if (Constraint.size() == 1) {
6263 switch (Constraint[0]) {
6266 if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
6268 if (isInt<16>(CVal))
6275 if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
6277 if (isInt<12>(CVal))
6284 if (auto *C = dyn_cast<ConstantSDNode>(Op))
6285 if (C->getZExtValue() == 0)
6291 if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
6293 if (isUInt<12>(CVal))
6305#define GET_REGISTER_MATCHER
6306#include "LoongArchGenAsmMatcher.inc"
6312 std::string NewRegName = Name.second.str();
6314 if (Reg == LoongArch::NoRegister)
6316 if (Reg == LoongArch::NoRegister)
6320 if (!ReservedRegs.test(Reg))
6336 if (auto *ConstNode = dyn_cast<ConstantSDNode>(C.getNode())) {
6337 const APInt &Imm = ConstNode->getAPIntValue();
6339 if ((Imm + 1).isPowerOf2() || (Imm - 1).isPowerOf2() ||
6340 (1 - Imm).isPowerOf2() || (-1 - Imm).isPowerOf2())
6343 if (ConstNode->hasOneUse() &&
6344 ((Imm - 2).isPowerOf2() || (Imm - 4).isPowerOf2() ||
6345 (Imm - 8).isPowerOf2() || (Imm - 16).isPowerOf2()))
6351 if (ConstNode->hasOneUse() && !(Imm.sge(-2048) && Imm.sle(4095))) {
6352 unsigned Shifts = Imm.countr_zero();
6358 APInt ImmPop = Imm.ashr(Shifts);
6359 if (ImmPop == 3 || ImmPop == 5 || ImmPop == 9 || ImmPop == 17)
6363 APInt ImmSmall = APInt(Imm.getBitWidth(), 1ULL << Shifts, true);
6364 if ((Imm - ImmSmall).isPowerOf2() || (Imm + ImmSmall).isPowerOf2() ||
6365 (ImmSmall - Imm).isPowerOf2())
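// Worked examples for the patterns accepted above (illustrative): 9 = 8 + 1,
// so x * 9 can become (x << 3) + x (a single alsl); the shifted forms are
// only tried for constants outside the signed 12-bit range, e.g.
// 20480 = 5 << 12, so x * 20480 can become ((x << 2) + x) << 12.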
6375 Type *Ty, unsigned AS,
6391 !(isShiftedInt<14, 2>(AM.BaseOffs) && Subtarget.hasUAL()))
6418 return isInt<12>(Imm);
6422 return isInt<12>(Imm);
6429 if (auto *LD = dyn_cast<LoadSDNode>(Val)) {
6430 EVT MemVT = LD->getMemoryVT();
6431 if ((MemVT == MVT::i8 || MemVT == MVT::i16) &&
6442 return Subtarget.is64Bit() && SrcVT == MVT::i32 && DstVT == MVT::i64;
6451 if (Y.getValueType().isVector())
6454 return !isa<ConstantSDNode>(Y);
6463 Type *Ty, bool IsSigned) const {
6484 Align &PrefAlign) const {
6485 if (!isa<MemIntrinsic>(CI))
6490 PrefAlign = Align(8);
6493 PrefAlign = Align(4);
unsigned const MachineRegisterInfo * MRI
static MCRegister MatchRegisterName(StringRef Name)
#define NODE_NAME_CASE(node)
static MCRegister MatchRegisterAltName(StringRef Name)
Maps from the set of all alternative registernames to a register number.
Function Alias Analysis Results
static uint64_t getConstant(const Value *IndexValue)
static SDValue getTargetNode(GlobalAddressSDNode *N, const SDLoc &DL, EVT Ty, SelectionDAG &DAG, unsigned Flags)
Analysis containing CSE Info
static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDValue Val, const CCValAssign &VA, const SDLoc &DL)
static SDValue unpackFromMemLoc(SelectionDAG &DAG, SDValue Chain, const CCValAssign &VA, const SDLoc &DL)
static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDValue Val, const CCValAssign &VA, const SDLoc &DL)
static SDValue performINTRINSIC_WO_CHAINCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const LoongArchSubtarget &Subtarget)
static SDValue lowerVECTOR_SHUFFLE_VREPLVEI(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE into VREPLVEI (if possible).
const MCPhysReg ArgFPR32s[]
static SDValue lowerVECTOR_SHUFFLE_VSHUF4I(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE into VSHUF4I (if possible).
static SDValue lowerVECTOR_SHUFFLE_VPICKEV(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE into VPICKEV (if possible).
static SDValue lowerVECTOR_SHUFFLE_XVPICKOD(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE into XVPICKOD (if possible).
static bool fitsRegularPattern(typename SmallVectorImpl< ValType >::const_iterator Begin, unsigned CheckStride, typename SmallVectorImpl< ValType >::const_iterator End, ValType ExpectedIndex, unsigned ExpectedIndexStride)
Determine whether a range fits a regular pattern of values.
static void canonicalizeShuffleVectorByLane(const SDLoc &DL, MutableArrayRef< int > Mask, MVT VT, SDValue &V1, SDValue &V2, SelectionDAG &DAG)
Shuffle vectors by lane to generate more optimized instructions.
static SDValue emitIntrinsicErrorMessage(SDValue Op, StringRef ErrorMsg, SelectionDAG &DAG)
static cl::opt< bool > ZeroDivCheck("loongarch-check-zero-division", cl::Hidden, cl::desc("Trap on integer division by zero."), cl::init(false))
static SDValue lower256BitShuffle(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Dispatching routine to lower various 256-bit LoongArch vector shuffles.
static int getEstimateRefinementSteps(EVT VT, const LoongArchSubtarget &Subtarget)
static void emitErrorAndReplaceIntrinsicResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG, StringRef ErrorMsg, bool WithChain=true)
static SDValue checkIntrinsicImmArg(SDValue Op, unsigned ImmOp, SelectionDAG &DAG, bool IsSigned=false)
static SDValue lowerVECTOR_SHUFFLE_XVSHUF4I(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE into XVSHUF4I (if possible).
static SDValue lowerVECTOR_SHUFFLE_VILVH(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE into VILVH (if possible).
static Align getPrefTypeAlign(EVT VT, SelectionDAG &DAG)
static bool CC_LoongArch(const DataLayout &DL, LoongArchABI::ABI ABI, unsigned ValNo, MVT ValVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed, bool IsRet, Type *OrigTy)
static SDValue performSRLCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const LoongArchSubtarget &Subtarget)
static SDValue lowerVectorBitSetImm(SDNode *Node, SelectionDAG &DAG)
static SDValue lowerVECTOR_SHUFFLE_XVPACKOD(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE into XVPACKOD (if possible).
#define CRC_CASE_EXT_BINARYOP(NAME, NODE)
static SDValue lowerVectorBitRevImm(SDNode *Node, SelectionDAG &DAG)
static SDValue truncateVecElts(SDNode *Node, SelectionDAG &DAG)
static MachineBasicBlock * insertDivByZeroTrap(MachineInstr &MI, MachineBasicBlock *MBB)
static SDValue customLegalizeToWOpWithSExt(SDNode *N, SelectionDAG &DAG)
static SDValue lowerVectorBitClear(SDNode *Node, SelectionDAG &DAG)
static bool CC_LoongArch_GHC(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
static SDValue lowerVECTOR_SHUFFLE_VPACKEV(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE into VPACKEV (if possible).
static void replaceVPICKVE2GRResults(SDNode *Node, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget, unsigned ResOp)
static SDValue legalizeIntrinsicImmArg(SDNode *Node, unsigned ImmOp, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget, bool IsSigned=false)
static SDValue emitIntrinsicWithChainErrorMessage(SDValue Op, StringRef ErrorMsg, SelectionDAG &DAG)
static bool CC_LoongArchAssign2GRLen(unsigned GRLen, CCState &State, CCValAssign VA1, ISD::ArgFlagsTy ArgFlags1, unsigned ValNo2, MVT ValVT2, MVT LocVT2, ISD::ArgFlagsTy ArgFlags2)
const MCPhysReg ArgFPR64s[]
static MachineBasicBlock * emitPseudoCTPOP(MachineInstr &MI, MachineBasicBlock *BB, const LoongArchSubtarget &Subtarget)
#define IOCSRWR_CASE(NAME, NODE)
#define CRC_CASE_EXT_UNARYOP(NAME, NODE)
static SDValue lowerVECTOR_SHUFFLE_VPACKOD(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE into VPACKOD (if possible).
static MachineBasicBlock * emitPseudoXVINSGR2VR(MachineInstr &MI, MachineBasicBlock *BB, const LoongArchSubtarget &Subtarget)
static SDValue lowerVectorSplatImm(SDNode *Node, unsigned ImmOp, SelectionDAG &DAG, bool IsSigned=false)
const MCPhysReg ArgGPRs[]
static SDValue lowerVECTOR_SHUFFLE_XVILVL(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE into XVILVL (if possible).
static SDValue customLegalizeToWOp(SDNode *N, SelectionDAG &DAG, int NumOp, unsigned ExtOpc=ISD::ANY_EXTEND)
static void replaceVecCondBranchResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget, unsigned ResOp)
#define ASRT_LE_GT_CASE(NAME)
static bool isConstantOrUndef(const SDValue Op)
static SDValue lowerVECTOR_SHUFFLE_XVPACKEV(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE into XVPACKEV (if possible).
static MachineBasicBlock * emitVecCondBranchPseudo(MachineInstr &MI, MachineBasicBlock *BB, const LoongArchSubtarget &Subtarget)
static SDValue lowerVECTOR_SHUFFLE_XVILVH(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE into XVILVH (if possible).
static SDValue lowerVECTOR_SHUFFLE_XVSHUF(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE into XVSHUF (if possible).
static SDValue performBITREV_WCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const LoongArchSubtarget &Subtarget)
#define IOCSRRD_CASE(NAME, NODE)
static SDValue lower128BitShuffle(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Dispatching routine to lower various 128-bit LoongArch vector shuffles.
static SDValue lowerVECTOR_SHUFFLE_XVPICKEV(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE into XVPICKEV (if possible).
static SDValue lowerVECTOR_SHUFFLE_VILVL(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE into VILVL (if possible).
static SDValue lowerVectorBitClearImm(SDNode *Node, SelectionDAG &DAG)
static bool isConstantOrUndefBUILD_VECTOR(const BuildVectorSDNode *Op)
static void replaceINTRINSIC_WO_CHAINResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG, const LoongArchSubtarget &Subtarget)
static SDValue lowerVECTOR_SHUFFLE_XVREPLVEI(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE into XVREPLVEI (if possible).
static SDValue lowerVECTOR_SHUFFLE_VPICKOD(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE into VPICKOD (if possible).
static Intrinsic::ID getIntrinsicForMaskedAtomicRMWBinOp(unsigned GRLen, AtomicRMWInst::BinOp BinOp)
static SDValue lowerVECTOR_SHUFFLE_VSHUF(const SDLoc &DL, ArrayRef< int > Mask, MVT VT, SDValue V1, SDValue V2, SelectionDAG &DAG)
Lower VECTOR_SHUFFLE into VSHUF.
static LoongArchISD::NodeType getLoongArchWOpcode(unsigned Opcode)
unsigned const TargetRegisterInfo * TRI
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Class for arbitrary precision integers.
bool isSubsetOf(const APInt &RHS) const
This operation checks that all bits set in this APInt are also set in RHS.
This class represents an incoming formal argument to a Function.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
An instruction that atomically checks whether a specified value is in a memory location,...
Value * getCompareOperand()
AtomicOrdering getFailureOrdering() const
Returns the failure ordering constraint of this cmpxchg instruction.
an instruction that atomically reads a memory location, combines it with another value,...
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
BinOp
This enumeration lists the possible modifications atomicrmw can make.
@ USubCond
Subtract only if no unsigned overflow.
@ Min
*p = old <signed v ? old : v
@ USubSat
*p = usub.sat(old, v) usub.sat matches the behavior of llvm.usub.sat.
@ UIncWrap
Increment one up to a maximum value.
@ Max
*p = old >signed v ? old : v
@ UMin
*p = old <unsigned v ? old : v
@ UMax
*p = old >unsigned v ? old : v
@ UDecWrap
Decrement one until a minimum value or zero.
Value * getPointerOperand()
bool isFloatingPointOperation() const
BinOp getOperation() const
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this rmw instruction.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this rmw instruction.
LLVM Basic Block Representation.
bool test(unsigned Idx) const
A "pseudo-class" with methods for operating on BUILD_VECTORs.
CCState - This class holds information needed while lowering arguments and return values.
unsigned getFirstUnallocated(ArrayRef< MCPhysReg > Regs) const
getFirstUnallocated - Return the index of the first unallocated register in the set,...
SmallVectorImpl< ISD::ArgFlagsTy > & getPendingArgFlags()
MCRegister AllocateReg(MCPhysReg Reg)
AllocateReg - Attempt to allocate one register.
int64_t AllocateStack(unsigned Size, Align Alignment)
AllocateStack - Allocate a chunk of stack space with the specified size and alignment.
void AnalyzeCallOperands(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
AnalyzeCallOperands - Analyze the outgoing arguments to a call, incorporating info about the passed v...
uint64_t getStackSize() const
Returns the size of the currently allocated portion of the stack.
SmallVectorImpl< CCValAssign > & getPendingLocs()
void AnalyzeFormalArguments(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)
AnalyzeFormalArguments - Analyze an array of argument values, incorporating info about the formals in...
void addLoc(const CCValAssign &V)
CCValAssign - Represent assignment of one arg/retval to a location.
static CCValAssign getPending(unsigned ValNo, MVT ValVT, MVT LocVT, LocInfo HTP, unsigned ExtraInfo=0)
Register getLocReg() const
LocInfo getLocInfo() const
static CCValAssign getReg(unsigned ValNo, MVT ValVT, MCRegister Reg, MVT LocVT, LocInfo HTP, bool IsCustom=false)
static CCValAssign getMem(unsigned ValNo, MVT ValVT, int64_t Offset, MVT LocVT, LocInfo HTP, bool IsCustom=false)
int64_t getLocMemOffset() const
unsigned getValNo() const
bool isMustTailCall() const
Tests if this call site must be tail call optimized.
This class represents a function call, abstracting a target machine's calling convention.
This is the shared class of boolean and integer constants.
bool isMinusOne() const
This function will return true iff every bit in this constant is set to true.
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
uint64_t getZExtValue() const
int64_t getSExtValue() const
This is an important base class in LLVM.
This class represents an Operation in the Expression.
uint64_t getNumOperands() const
A parsed version of the target data layout string in and methods for querying it.
unsigned getPointerSizeInBits(unsigned AS=0) const
Layout pointer size, in bits FIXME: The defaults need to be removed once all of the backends/clients ...
Align getPrefTypeAlign(Type *Ty) const
Returns the preferred stack/global alignment for the specified type.
FunctionType * getFunctionType() const
Returns the FunctionType for me.
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Argument * getArg(unsigned i) const
Common base class shared among various IRBuilders.
CallInst * CreateIntrinsic(Intrinsic::ID ID, ArrayRef< Type * > Types, ArrayRef< Value * > Args, Instruction *FMFSource=nullptr, const Twine &Name="")
Create a call to intrinsic ID with Args, mangled using Types.
Value * CreateSExt(Value *V, Type *DestTy, const Twine &Name="")
Value * CreateLShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
IntegerType * getInt32Ty()
Fetch the type representing a 32-bit integer.
IntegerType * getInt64Ty()
Fetch the type representing a 64-bit integer.
Value * CreateNot(Value *V, const Twine &Name="")
Value * CreateSub(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Value * CreateBitCast(Value *V, Type *DestTy, const Twine &Name="")
ConstantInt * getIntN(unsigned N, uint64_t C)
Get a constant N-bit value, zero extended or truncated from a 64-bit value.
Value * CreateShl(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Value * CreateZExt(Value *V, Type *DestTy, const Twine &Name="", bool IsNonNeg=false)
LLVMContext & getContext() const
Value * CreateAnd(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreatePtrToInt(Value *V, Type *DestTy, const Twine &Name="")
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args={}, const Twine &Name="", MDNode *FPMathTag=nullptr)
AtomicRMWInst * CreateAtomicRMW(AtomicRMWInst::BinOp Op, Value *Ptr, Value *Val, MaybeAlign Align, AtomicOrdering Ordering, SyncScope::ID SSID=SyncScope::System)
Value * CreateTrunc(Value *V, Type *DestTy, const Twine &Name="", bool IsNUW=false, bool IsNSW=false)
Value * CreateOr(Value *LHS, Value *RHS, const Twine &Name="")
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
const Module * getModule() const
Return the module owning the function this instruction belongs to or nullptr it the function does not...
InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
const DataLayout & getDataLayout() const
Get the data layout of the module this instruction belongs to.
Class to represent integer types.
This is an important class for using LLVM in a threaded context.
void emitError(const Instruction *I, const Twine &ErrorStr)
emitError - Emit an error message to the currently installed error handler with optional location inf...
This class is used to represent ISD::LOAD nodes.
ISD::LoadExtType getExtensionType() const
Return whether this is a plain node, or one of the varieties of value-extending loads.
LoongArchMachineFunctionInfo - This class is derived from MachineFunctionInfo and contains private Lo...
void addSExt32Register(Register Reg)
const LoongArchRegisterInfo * getRegisterInfo() const override
const LoongArchInstrInfo * getInstrInfo() const override
unsigned getMaxBytesForAlignment() const
Align getPrefFunctionAlignment() const
unsigned getGRLen() const
Align getPrefLoopAlignment() const
bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override
Return true if result of the specified node is used by a return node only.
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override
This method will be invoked for all target nodes and for any target-independent nodes that the target...
SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled, int &RefinementSteps, bool &UseOneConstNR, bool Reciprocal) const override
Hooks for building estimates in place of slower divisions and square roots.
bool isLegalICmpImmediate(int64_t Imm) const override
Return true if the specified immediate is legal icmp immediate, that is the target has icmp instructi...
TargetLowering::AtomicExpansionKind shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *CI) const override
Returns how the given atomic cmpxchg should be expanded by the IR-level AtomicExpand pass.
Value * emitMaskedAtomicCmpXchgIntrinsic(IRBuilderBase &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr, Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const override
Perform a masked cmpxchg using a target-specific intrinsic.
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const override
Return the ValueType of the result of SETCC operations.
bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, EVT VT) const override
Return true if an FMA operation is faster than a pair of fmul and fadd instructions.
SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower calls into the specified DAG.
bool decomposeMulByConstant(LLVMContext &Context, EVT VT, SDValue C) const override
Return true if it is profitable to transform an integer multiplication-by-constant into simpler opera...
bool isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const override
Return true if sign-extension from FromTy to ToTy is cheaper than zero-extension.
TargetLowering::AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override
Returns how the IR-level AtomicExpand pass should expand the given AtomicRMW, if at all.
bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *Fast=nullptr) const override
Determine if the target supports unaligned memory accesses.
bool isCheapToSpeculateCtlz(Type *Ty) const override
Return true if it is cheap to speculate a call to intrinsic ctlz.
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
This callback is invoked for operations that are unsupported by the target, which are registered to u...
bool shouldAlignPointerArgs(CallInst *CI, unsigned &MinSize, Align &PrefAlign) const override
Return true if the pointer arguments to CI should be aligned by aligning the object whose address is ...
Value * emitMaskedAtomicRMWIntrinsic(IRBuilderBase &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr, Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const override
Perform a masked atomicrmw using a target-specific intrinsic.
bool isZExtFree(SDValue Val, EVT VT2) const override
Return true if zero-extending the specific node Val to type VT2 is free (either because it's implicit...
Register getExceptionPointerRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception address on entry to an ...
bool signExtendConstant(const ConstantInt *CI) const override
Return true if this constant should be sign extended when promoting to a larger type.
const char * getTargetNodeName(unsigned Opcode) const override
This method returns the name of a target specific DAG node.
bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I, MachineFunction &MF, unsigned Intrinsic) const override
Given an intrinsic, checks if on the target the intrinsic will need to map to a MemIntrinsicNode (tou...
bool isLegalAddImmediate(int64_t Imm) const override
Return true if the specified immediate is legal add immediate, that is the target has add instruction...
bool isCheapToSpeculateCttz(Type *Ty) const override
Return true if it is cheap to speculate a call to intrinsic cttz.
bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I=nullptr) const override
Return true if the addressing mode represented by AM is legal for this target, for a load/store of th...
bool shouldSignExtendTypeInLibCall(Type *Ty, bool IsSigned) const override
Returns true if arguments should be sign-extended in lib calls.
bool isFPImmVLDILegal(const APFloat &Imm, EVT VT) const
bool shouldExtendTypeInLibCall(EVT Type) const override
Returns true if arguments should be extended in lib calls.
Register getRegisterByName(const char *RegName, LLT VT, const MachineFunction &MF) const override
Return the register ID of the name passed in.
bool hasAndNot(SDValue Y) const override
Return true if the target has a bitwise and-not operation: X = ~A & B This can be used to simplify se...
bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override
Return true if folding a constant offset with the given GlobalAddress is legal.
bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, LLVMContext &Context) const override
This hook should be implemented to check whether the return values described by the Outs array can fi...
Register getExceptionSelectorRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception typeid on entry to a la...
void ReplaceNodeResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const override
This callback is invoked when a node result type is illegal for the target, and the operation was reg...
void emitExpandAtomicRMW(AtomicRMWInst *AI) const override
Perform a atomicrmw expansion using a target-specific way.
ISD::NodeType getExtendForAtomicCmpSwapArg() const override
Returns how the platform's atomic compare and swap expects its comparison value to be extended (ZERO_...
LoongArchTargetLowering(const TargetMachine &TM, const LoongArchSubtarget &STI)
SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, const SmallVectorImpl< SDValue > &OutVals, const SDLoc &DL, SelectionDAG &DAG) const override
This hook must be implemented to lower outgoing return values, described by the Outs array,...
bool hasAndNotCompare(SDValue Y) const override
Return true if the target should transform: (X & Y) == Y —> (~X & Y) == 0 (X & Y) !...
SDValue getRecipEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled, int &RefinementSteps) const override
Return a reciprocal estimate value for the input operand.
SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg, const SmallVectorImpl< ISD::InputArg > &Ins, const SDLoc &DL, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower the incoming (formal) arguments, described by the Ins array,...
bool mayBeEmittedAsTailCall(const CallInst *CI) const override
Return true if the target may be able emit the call instruction as a tail call.
Wrapper class representing physical registers. Should be passed by value.
bool hasFeature(unsigned Feature) const
bool is128BitVector() const
Return true if this is a 128-bit vector type.
unsigned getVectorNumElements() const
bool isVector() const
Return true if this is a vector value type.
static auto fixedlen_vector_valuetypes()
bool is256BitVector() const
Return true if this is a 256-bit vector type.
bool isScalarInteger() const
Return true if this is an integer, not including vectors.
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
static MVT getIntegerVT(unsigned BitWidth)
MVT changeVectorElementTypeToInteger() const
Return a vector with the same number of elements as this vector, but with the element type converted ...
void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)
Transfers all the successors, as in transferSuccessors, and update PHI operands in the successor bloc...
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool IsImmutable, bool isAliased=false)
Create a new object at a fixed location on the stack.
int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it.
void setFrameAddressIsTaken(bool T)
void setHasTailCall(bool V=true)
void setReturnAddressIsTaken(bool s)
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
Register addLiveIn(MCRegister PReg, const TargetRegisterClass *RC)
addLiveIn - Add the specified physical register as a live-in value and create a corresponding virtual...
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *BB=nullptr, std::optional< UniqueBBID > BBID=std::nullopt)
CreateMachineBasicBlock - Allocate a new MachineBasicBlock.
void insert(iterator MBBI, MachineBasicBlock *MBB)
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
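A typical use of these builder methods when materializing instructions, e.g. inside a custom inserter (a sketch; the opcodes and operands are placeholders rather than LoongArch instructions from this file):

#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/TargetInstrInfo.h"

// Sketch: emit "DstReg = AddOpc SrcReg, Imm" followed by "BranchOpc Target".
static void emitExamplePair(llvm::MachineBasicBlock &MBB,
                            llvm::MachineBasicBlock::iterator I,
                            const llvm::DebugLoc &DL,
                            const llvm::TargetInstrInfo &TII, unsigned AddOpc,
                            unsigned BranchOpc, llvm::Register DstReg,
                            llvm::Register SrcReg, int64_t Imm,
                            llvm::MachineBasicBlock *Target) {
  using namespace llvm;
  BuildMI(MBB, I, DL, TII.get(AddOpc), DstReg)
      .addReg(SrcReg) // register use
      .addImm(Imm);   // immediate operand
  BuildMI(MBB, I, DL, TII.get(BranchOpc))
      .addMBB(Target); // branch target block
}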
Representation of each machine instruction.
const MachineOperand & getOperand(unsigned i) const
A description of a memory reference used in the backend.
Flags
Flags values. These may be or'd together.
@ MOVolatile
The memory access is volatile.
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MOInvariant
The memory access always returns the same value (or traps).
@ MOStore
The memory access writes data.
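These flags are or'd together when a MachineMemOperand is allocated; roughly as follows (a sketch under assumed types; the LLT include path varies between LLVM releases):

#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGenTypes/LowLevelType.h"
#include "llvm/Support/Alignment.h"

// Sketch: describe an 8-byte volatile load from a GOT entry.
static llvm::MachineMemOperand *makeExampleMMO(llvm::MachineFunction &MF) {
  using namespace llvm;
  return MF.getMachineMemOperand(
      MachinePointerInfo::getGOT(MF),
      MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile,
      LLT::scalar(64), Align(8));
}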
MachineOperand class - Representation of each machine instruction operand.
void setIsKill(bool Val=true)
void setIsUndef(bool Val=true)
Register getReg() const
getReg - Returns the register number.
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
void addLiveIn(MCRegister Reg, Register vreg=Register())
addLiveIn - Add the specified register as a live-in.
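The usual live-in pattern during formal-argument lowering combines these two calls (a sketch; the helper name and register class are placeholders):

#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"

// Sketch: make the physical argument register PhysReg usable as a vreg.
static llvm::Register markArgLiveIn(llvm::MachineFunction &MF,
                                    llvm::MCRegister PhysReg,
                                    const llvm::TargetRegisterClass *RC) {
  llvm::MachineRegisterInfo &MRI = MF.getRegInfo();
  llvm::Register VReg = MRI.createVirtualRegister(RC);
  MRI.addLiveIn(PhysReg, VReg); // record the live-in and its virtual copy
  return VReg;
}

MachineFunction::addLiveIn, documented above, performs both steps in a single call.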
EVT getMemoryVT() const
Return the type of the in-memory value.
MutableArrayRef - Represent a mutable reference to an array (0 or more elements consecutively in memo...
Class to represent pointers.
unsigned getAddressSpace() const
Return the address space of the Pointer type.
Wrapper class representing virtual and physical registers.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
size_t use_size() const
Return the number of uses of this node.
uint64_t getAsZExtVal() const
Helper method returns the zero-extended integer value of a ConstantSDNode.
const SDValue & getOperand(unsigned Num) const
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
const SDValue & getOperand(unsigned i) const
uint64_t getScalarValueSizeInBits() const
uint64_t getConstantOperandVal(unsigned i) const
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
unsigned getOpcode() const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, Register Reg, SDValue N)
SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s),...
SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool AlwaysInline, const CallInst *CI, std::optional< bool > OverrideTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo=AAMDNodes(), AAResults *AA=nullptr)
SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond, SDValue Chain=SDValue(), bool IsSignaling=false)
Helper function to make it easier to build SetCC's if you just have an ISD::CondCode instead of an SD...
SDValue getRegister(Register Reg, EVT VT)
SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
void addNoMergeSiteInfo(const SDNode *Node, bool NoMerge)
Set NoMergeSiteInfo to be associated with Node if NoMerge is true.
SDValue getNOT(const SDLoc &DL, SDValue Val, EVT VT)
Create a bitwise NOT operation as (XOR Val, -1).
const TargetLowering & getTargetLoweringInfo() const
SDValue getTargetJumpTable(int JTI, EVT VT, unsigned TargetFlags=0)
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which must always have a glue result (to ensure it's not CSE'd).
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
bool isSplatValue(SDValue V, const APInt &DemandedElts, APInt &UndefElts, unsigned Depth=0) const
Test whether V has a splatted value for all the demanded elements.
SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, Register Reg, EVT VT)
void setNodeMemRefs(MachineSDNode *N, ArrayRef< MachineMemOperand * > NewMemRefs)
Mutate the specified machine node's memory references to the provided list.
const DataLayout & getDataLayout() const
SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
SDValue getSignedTargetConstant(int64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
void ReplaceAllUsesWith(SDValue From, SDValue To)
Modify anything using 'From' to use 'To' instead.
SDValue getCommutedVectorShuffle(const ShuffleVectorSDNode &SV)
Returns an ISD::VECTOR_SHUFFLE node semantically equivalent to the shuffle node in input but with swa...
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
SDValue getSignedConstant(int64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)
Return a new CALLSEQ_START node, which starts a new call frame in which InSize bytes are set up inside ...
SDValue getExternalSymbol(const char *Sym, EVT VT)
const TargetMachine & getTarget() const
SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
SDValue getValueType(EVT)
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, unsigned TargetFlags=0)
MachineFunction & getMachineFunction() const
SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
KnownBits computeKnownBits(SDValue Op, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in Known.
SDValue getRegisterMask(const uint32_t *RegMask)
LLVMContext * getContext() const
SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)
SDValue CreateStackTemporary(TypeSize Bytes, Align Alignment)
Create a stack temporary based on the size in bytes and the alignment.
SDValue getTargetConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offset=0, unsigned TargetFlags=0)
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2, ArrayRef< int > Mask)
Return an ISD::VECTOR_SHUFFLE node.
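A rough illustration of how these SelectionDAG builders compose inside a custom lowering hook (a hypothetical node shape, not the lowering performed in this file):

#include "llvm/CodeGen/SelectionDAG.h"

// Sketch: lower Op to "X < 0 ? X + 1 : X" using generic ISD nodes.
static llvm::SDValue lowerExample(llvm::SDValue Op, llvm::SelectionDAG &DAG) {
  using namespace llvm;
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  SDValue X = Op.getOperand(0);

  SDValue One = DAG.getConstant(1, DL, VT);
  SDValue Zero = DAG.getConstant(0, DL, VT);
  SDValue Plus1 = DAG.getNode(ISD::ADD, DL, VT, X, One);

  // i1-typed comparison, then a scalar select between the two values.
  EVT CCVT = EVT::getIntegerVT(*DAG.getContext(), 1);
  SDValue IsNeg = DAG.getSetCC(DL, CCVT, X, Zero, ISD::SETLT);
  return DAG.getNode(ISD::SELECT, DL, VT, IsNeg, Plus1, X);
}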
This SDNode is used to implement the code generator support for the llvm IR shufflevector instruction...
ArrayRef< int > getMask() const
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
typename SuperClass::const_iterator const_iterator
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StackOffset holds a fixed and a scalable offset in bytes.
StringRef - Represent a constant reference to a string, i.e.
std::pair< StringRef, StringRef > split(char Separator) const
Split into two substrings around the first occurrence of a separator character.
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
constexpr size_t size() const
size - Get the string size.
A switch()-like statement whose cases are string literals.
StringSwitch & Case(StringLiteral S, T Value)
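StringRef and StringSwitch are commonly paired to parse textual names, for instance register names handed to llvm.read_register or inline asm; a generic sketch (the mapping below is illustrative):

#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"

// Sketch: map a register name, with or without a leading "$", to an id.
static int parseExampleRegName(llvm::StringRef Name) {
  if (Name.starts_with("$"))
    Name = Name.drop_front();
  return llvm::StringSwitch<int>(Name)
      .Case("zero", 0)
      .Case("ra", 1)
      .Case("sp", 3)
      .Default(-1);
}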
TargetInstrInfo - Interface to description of machine instruction set.
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider ty...
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
MachineBasicBlock * emitPatchPoint(MachineInstr &MI, MachineBasicBlock *MBB) const
Replace/modify any TargetFrameIndex operands with a target-dependent sequence of memory operands that...
virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
const TargetMachine & getTargetMachine() const
virtual bool isZExtFree(Type *FromTy, Type *ToTy) const
Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the...
void setMaxBytesForAlignment(unsigned MaxBytes)
void setPrefLoopAlignment(Align Alignment)
Set the target's preferred loop alignment.
void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)
Set the maximum atomic operation size supported by the backend.
void setMinFunctionAlignment(Align Alignment)
Set the target's minimum function alignment.
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a w...
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose.
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
void setPrefFunctionAlignment(Align Alignment)
Set the target's preferred function alignment.
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what ...
@ ZeroOrOneBooleanContent
@ ZeroOrNegativeOneBooleanContent
void setMinCmpXchgSizeInBits(unsigned SizeInBits)
Sets the minimum cmpxchg or ll/sc size supported by the backend.
void setStackPointerRegisterToSaveRestore(Register R)
If set to a physical register, this specifies the register that llvm.savestack/llvm....
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
void setCondCodeAction(ArrayRef< ISD::CondCode > CCs, MVT VT, LegalizeAction Action)
Indicate that the specified condition code is or isn't supported on the target and indicate what to d...
void setTargetDAGCombine(ArrayRef< ISD::NodeType > NTs)
Targets should invoke this method for each target independent node that they want to provide a custom...
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate wh...
LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const
Return how we should legalize values of this type, either it is already legal (return 'Legal') or we ...
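These hooks are normally invoked from a target's TargetLowering constructor to declare which types and operations are legal; a generic, hypothetical configuration might look like this (types, actions and the class name are illustrative, not this target's actual setup):

#include "llvm/CodeGen/TargetLowering.h"

namespace {
// Hypothetical subclass; only the configuration calls are of interest here.
class ExampleTargetLowering : public llvm::TargetLowering {
public:
  ExampleTargetLowering(const llvm::TargetMachine &TM,
                        const llvm::TargetRegisterClass *GPRRC,
                        const llvm::TargetRegisterInfo *TRI)
      : TargetLowering(TM) {
    using namespace llvm;
    // i64 values live in the general-purpose register class.
    addRegisterClass(MVT::i64, GPRRC);
    // Sign-extending i32 loads are native; extending i1 loads are promoted.
    setLoadExtAction(ISD::SEXTLOAD, MVT::i64, MVT::i32, Legal);
    setLoadExtAction(ISD::EXTLOAD, MVT::i64, MVT::i1, Promote);
    // No truncating store from i64 to i1.
    setTruncStoreAction(MVT::i64, MVT::i1, Expand);
    // Expand jump tables into compare-and-branch sequences.
    setOperationAction(ISD::BR_JT, MVT::Other, Expand);
    // Derive the remaining per-type properties from the registered classes.
    computeRegisterProperties(TRI);
  }
};
} // namespace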
std::vector< ArgListEntry > ArgListTy
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
std::pair< SDValue, SDValue > makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, EVT RetVT, ArrayRef< SDValue > Ops, MakeLibCallOptions CallOptions, const SDLoc &dl, SDValue Chain=SDValue()) const
Returns a pair of (return value, chain).
virtual InlineAsm::ConstraintCode getInlineAsmMemConstraint(StringRef ConstraintCode) const
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
bool verifyReturnAddressArgumentIsConstant(SDValue Op, SelectionDAG &DAG) const
virtual void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
Primary interface to the complete machine description for the target machine.
bool useTLSDESC() const
Returns true if this target uses TLS Descriptors.
bool useEmulatedTLS() const
Returns true if this target uses emulated TLS.
bool shouldAssumeDSOLocal(const GlobalValue *GV) const
CodeModel::Model getCodeModel() const
Returns the code model.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual const TargetInstrInfo * getInstrInfo() const
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
The instances of the Type class are immutable: once they are created, they are never changed.
unsigned getIntegerBitWidth() const
static IntegerType * getIntNTy(LLVMContext &C, unsigned N)
bool isIntegerTy() const
True if this is an instance of IntegerType.
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
This class is used to represent EVT's, which are used to parameterize some operations.
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
self_iterator getIterator()
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
@ GHC
Used by the Glasgow Haskell Compiler (GHC).
@ Fast
Attempts to make calls as fast as possible (e.g.
@ C
The default llvm calling convention, compatible with C.
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
@ STACKRESTORE
STACKRESTORE has two operands, an input chain and a pointer to restore to it returns an output chain.
@ STACKSAVE
STACKSAVE - STACKSAVE has one operand, an input chain.
@ STRICT_FSETCC
STRICT_FSETCC/STRICT_FSETCCS - Constrained versions of SETCC, used for floating-point operands only.
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
@ BSWAP
Byte Swap and Counting operators.
@ VAEND
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE.
@ ADD
Simple integer binary arithmetic operators.
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
@ CONCAT_VECTORS
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...
@ FADD
Simple binary floating point operators.
@ MEMBARRIER
MEMBARRIER - Compiler barrier only; generate a no-op.
@ ATOMIC_FENCE
OUTCHAIN = ATOMIC_FENCE(INCHAIN, ordering, scope) This corresponds to the fence instruction.
@ FP16_TO_FP
FP16_TO_FP, FP_TO_FP16 - These operators are used to perform promotions and truncation for half-preci...
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
@ BUILTIN_OP_END
BUILTIN_OP_END - This must be the last enum value in this list.
@ SIGN_EXTEND
Conversion operators.
@ FSINCOS
FSINCOS - Compute both fsin and fcos as a single operation.
@ FNEG
Perform various unary floating-point operations inspired by libm.
@ BR_CC
BR_CC - Conditional branch.
@ BR_JT
BR_JT - Jumptable branch.
@ IS_FPCLASS
Performs a check of floating point class property, defined by IEEE-754.
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ UNDEF
UNDEF - An undefined node.
@ VACOPY
VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, a source pointer,...
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
@ SHL
Shift and rotation operations.
@ VECTOR_SHUFFLE
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
@ FMINNUM_IEEE
FMINNUM_IEEE/FMAXNUM_IEEE - Perform floating-point minimumNumber or maximumNumber on two values,...
@ READ_REGISTER
READ_REGISTER, WRITE_REGISTER - This node represents llvm.register on the DAG, which implements the n...
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
@ CopyToReg
CopyToReg - This node has three operands: a chain, a register number to set to this value,...
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ DEBUGTRAP
DEBUGTRAP - Trap intended to get the attention of a debugger.
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
@ DYNAMIC_STACKALLOC
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary.
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
@ FP_EXTEND
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
@ EH_DWARF_CFA
EH_DWARF_CFA - This node represents the pointer to the DWARF Canonical Frame Address (CFA),...
@ FRAMEADDR
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG.
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ TRAP
TRAP - Trapping instruction.
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
@ VAARG
VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE, and the alignment.
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
@ AssertSext
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero...
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
Function * getOrInsertDeclaration(Module *M, ID id, ArrayRef< Type * > Tys={})
Look up the Function declaration of the intrinsic id in the Module M.
ABI getTargetABI(StringRef ABIName)
Libcall getSINTTOFP(EVT OpVT, EVT RetVT)
getSINTTOFP - Return the SINTTOFP_*_* value for the given types, or UNKNOWN_LIBCALL if there is none.
Libcall getUINTTOFP(EVT OpVT, EVT RetVT)
getUINTTOFP - Return the UINTTOFP_*_* value for the given types, or UNKNOWN_LIBCALL if there is none.
Libcall
RTLIB::Libcall enum - This enum defines all of the runtime library calls the backend can emit.
Libcall getFPTOSINT(EVT OpVT, EVT RetVT)
getFPTOSINT - Return the FPTOSINT_*_* value for the given types, or UNKNOWN_LIBCALL if there is none.
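These RTLIB helpers pick the runtime routine that makeLibCall (listed above) then emits; a hedged sketch of the soft-float pattern, assuming TLI is the current TargetLowering and Src an FP operand:

#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLowering.h"
#include <cassert>

// Sketch: convert an FP value to a signed integer via a library call.
static llvm::SDValue softenFPToSInt(const llvm::TargetLowering &TLI,
                                    llvm::SelectionDAG &DAG,
                                    llvm::SDValue Src, llvm::EVT RetVT,
                                    const llvm::SDLoc &DL) {
  using namespace llvm;
  RTLIB::Libcall LC = RTLIB::getFPTOSINT(Src.getValueType(), RetVT);
  assert(LC != RTLIB::UNKNOWN_LIBCALL && "no libcall for this conversion");
  TargetLowering::MakeLibCallOptions CallOptions;
  // Returns {result, chain}; only the result is needed here.
  return TLI.makeLibCall(DAG, LC, RetVT, Src, CallOptions, DL).first;
}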
@ SingleThread
Synchronized with respect to signal handlers executing in the same thread.
Reg
All possible values of the reg field in the ModR/M byte.
initializer< Ty > init(const Ty &Val)
This is an optimization pass for GlobalISel generic memory operations.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
constexpr bool isShiftedMask_64(uint64_t Value)
Return true if the argument contains a non-empty sequence of ones with the remainder zero (64 bit ver...
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
constexpr bool isUInt(uint64_t x)
Checks if an unsigned integer fits into the given bit width.
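These small predicates often gate immediate-form and splat-shuffle selection; two representative checks, written as a sketch rather than the exact tests in this file:

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/MathExtras.h"
#include <cstdint>

// Sketch: does Value fit an unsigned 12-bit field or a contiguous bit mask?
static bool fitsUImm12OrShiftedMask(uint64_t Value) {
  return llvm::isUInt<12>(Value) || llvm::isShiftedMask_64(Value);
}

// Sketch: is every defined shuffle index equal to Lane (i.e. a splat)?
static bool isSplatMask(llvm::ArrayRef<int> Mask, int Lane) {
  return llvm::all_of(Mask, [Lane](int M) { return M == -1 || M == Lane; });
}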
AtomicOrdering
Atomic ordering for LLVM's memory model.
unsigned getKillRegState(bool B)
DWARFExpression::Operation Op
constexpr unsigned BitWidth
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
This struct is a compact representation of a valid (non-zero power of two) alignment.
EVT changeVectorElementTypeToInteger() const
Return a vector with the same number of elements as this vector, but with the element type converted ...
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
uint64_t getScalarSizeInBits() const
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
bool is128BitVector() const
Return true if this is a 128-bit vector type.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
static EVT getFloatingPointVT(unsigned BitWidth)
Returns the EVT that represents a floating-point type with the given number of bits.
bool isVector() const
Return true if this is a vector value type.
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
bool is256BitVector() const
Return true if this is a 256-bit vector type.
Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
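A quick sketch of how these EVT queries typically guard vector-specific lowering paths (the thresholds are illustrative):

#include "llvm/CodeGen/ValueTypes.h"

// Sketch: accept simple 128-/256-bit vectors with at least 8-bit elements.
static bool isExampleSupportedVector(llvm::EVT VT) {
  if (!VT.isVector() || !VT.isSimple())
    return false;
  return (VT.is128BitVector() || VT.is256BitVector()) &&
         VT.getScalarSizeInBits() >= 8;
}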
Align getNonZeroOrigAlign() const
Register getFrameRegister(const MachineFunction &MF) const override
BitVector getReservedRegs(const MachineFunction &MF) const override
This class contains a discriminated union of information about pointers in memory operands,...
static MachinePointerInfo getGOT(MachineFunction &MF)
Return a MachinePointerInfo record that refers to a GOT entry.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg + ScalableOffset*...
This structure contains all information that is necessary for lowering calls.
SmallVector< ISD::InputArg, 32 > Ins
SmallVector< ISD::OutputArg, 32 > Outs
SmallVector< SDValue, 32 > OutVals
bool isBeforeLegalizeOps() const
This structure is used to pass arguments to makeLibCall function.
MakeLibCallOptions & setTypeListBeforeSoften(ArrayRef< EVT > OpsVT, EVT RetVT, bool Value=true)