#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Operator.h"

using namespace clang;
using namespace CodeGen;
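// AtomicInfo (per the members and accessors below) gathers everything needed
// to lower one atomic operation on an l-value: the atomic type and the
// underlying value type, their sizes and alignments, and whether the target
// forces this operation through the __atomic_* library calls.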
  uint64_t AtomicSizeInBits;
  uint64_t ValueSizeInBits;
  // ...

  AtomicInfo(CodeGenFunction &CGF, LValue &lvalue)
      : CGF(CGF), AtomicSizeInBits(0), ValueSizeInBits(0),
        // ...
    ASTContext &C = CGF.getContext();
    if (lvalue.isSimple()) {
      AtomicTy = lvalue.getType();
      if (auto *ATy = AtomicTy->getAs<AtomicType>())
        ValueTy = ATy->getValueType();
      else
        ValueTy = AtomicTy;
      // ...

      uint64_t ValueAlignInBits;
      uint64_t AtomicAlignInBits;
      TypeInfo ValueTI = C.getTypeInfo(ValueTy);
      ValueSizeInBits = ValueTI.Width;
      ValueAlignInBits = ValueTI.Align;

      TypeInfo AtomicTI = C.getTypeInfo(AtomicTy);
      AtomicSizeInBits = AtomicTI.Width;
      AtomicAlignInBits = AtomicTI.Align;

      assert(ValueSizeInBits <= AtomicSizeInBits);
      assert(ValueAlignInBits <= AtomicAlignInBits);

      AtomicAlign = C.toCharUnitsFromBits(AtomicAlignInBits);
      ValueAlign = C.toCharUnitsFromBits(ValueAlignInBits);
      // ...
    } else if (lvalue.isBitField()) {
      ValueTy = lvalue.getType();
      ValueSizeInBits = C.getTypeSize(ValueTy);
      auto &OrigBFI = lvalue.getBitFieldInfo();
      // ...
      // Widen the atomic access out to a whole number of bytes covering
      // the bit-field.
      AtomicSizeInBits = C.toBits(
          C.toCharUnitsFromBits(Offset + OrigBFI.Size + C.getCharWidth() - 1)
              .RoundUpToAlignment(lvalue.getAlignment()));
      auto VoidPtrAddr = CGF.EmitCastToVoidPtr(lvalue.getBitFieldAddr());
      auto OffsetInChars =
          (C.toCharUnitsFromBits(OrigBFI.Offset) / lvalue.getAlignment()) *
          lvalue.getAlignment();
      VoidPtrAddr = CGF.Builder.CreateConstGEP1_64(
          VoidPtrAddr, OffsetInChars.getQuantity());
      auto Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
          VoidPtrAddr,
          CGF.Builder.getIntNTy(AtomicSizeInBits)->getPointerTo(),
          "atomic_bitfield_base");
      // Rebase the bit-field info onto the widened storage unit.
      BFI = OrigBFI;
      // ...
      BFI.StorageSize = AtomicSizeInBits;
      BFI.StorageOffset += OffsetInChars;
      // ...
      AtomicTy = C.getIntTypeForBitwidth(AtomicSizeInBits, OrigBFI.IsSigned);
      if (AtomicTy.isNull()) {
        llvm::APInt Size(
            /*numBits=*/32,
            C.toCharUnitsFromBits(AtomicSizeInBits).getQuantity());
        // ...
      }
      AtomicAlign = ValueAlign = lvalue.getAlignment();
    } else if (lvalue.isVectorElt()) {
      // ...
      ValueSizeInBits = C.getTypeSize(ValueTy);
      AtomicTy = lvalue.getType();
      AtomicSizeInBits = C.getTypeSize(AtomicTy);
      // ...
    } else {
      assert(lvalue.isExtVectorElt());
      ValueTy = lvalue.getType();
      ValueSizeInBits = C.getTypeSize(ValueTy);
      AtomicTy = ValueTy = CGF.getContext().getExtVectorType(
          lvalue.getType(), lvalue.getExtVectorAddr()
                                ->getType()
                                ->getPointerElementType()
                                ->getVectorNumElements());
      AtomicSizeInBits = C.getTypeSize(AtomicTy);
      // ...
    }
    UseLibcall = !C.getTargetInfo().hasBuiltinAtomic(
        AtomicSizeInBits, C.toBits(lvalue.getAlignment()));
  }
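  // Example of the decision above: hasBuiltinAtomic() asks whether the
  // target supports lock-free atomics at this width and alignment, so a
  // 12-byte _Atomic struct typically fails the check and is lowered through
  // the __atomic_* libcalls, while an aligned 4-byte int gets native
  // instructions.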
  QualType getAtomicType() const { return AtomicTy; }
  QualType getValueType() const { return ValueTy; }
  CharUnits getAtomicAlignment() const { return AtomicAlign; }
  CharUnits getValueAlignment() const { return ValueAlign; }
  uint64_t getAtomicSizeInBits() const { return AtomicSizeInBits; }
  uint64_t getValueSizeInBits() const { return ValueSizeInBits; }
  // ...
  bool shouldUseLibcall() const { return UseLibcall; }
  const LValue &getAtomicLValue() const { return LVal; }
  llvm::Value *getAtomicAddress() const {
    if (LVal.isSimple())
      return LVal.getAddress();
    else if (LVal.isBitField())
      return LVal.getBitFieldAddr();
    else if (LVal.isVectorElt())
      return LVal.getVectorAddr();
    assert(LVal.isExtVectorElt());
    return LVal.getExtVectorAddr();
  }
  /// Is the atomic size larger than the underlying value type?
  bool hasPadding() const {
    return (ValueSizeInBits != AtomicSizeInBits);
  }

  // ...

  bool emitMemSetZeroIfNecessary() const;

  // ...

  /// Copies an r-value into this atomic's storage.
  void emitCopyIntoMemory(RValue rvalue) const;

  // ...

  /// Project an l-value down to the value field.
  LValue projectValue() const {
    assert(LVal.isSimple());
    llvm::Value *addr = getAtomicAddress();
    if (hasPadding())
      addr = CGF.Builder.CreateStructGEP(nullptr, addr, 0);
    // ...
  }
  // ...

  /// Emits an atomic load as either a libcall or a native instruction.
  RValue EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
                        bool AsValue, llvm::AtomicOrdering AO,
                        bool IsVolatile);

  // ...

  /// Emits an atomic compare-and-exchange; returns the old value and a flag
  /// indicating whether the exchange succeeded.
  std::pair<RValue, llvm::Value *> EmitAtomicCompareExchange(
      RValue Expected, RValue Desired,
      llvm::AtomicOrdering Success = llvm::SequentiallyConsistent,
      llvm::AtomicOrdering Failure = llvm::SequentiallyConsistent,
      bool IsWeak = false);

  // ...

  /// Emits an atomic read-modify-write, either via a functor that computes
  /// the new value or from a precomputed r-value.
  void EmitAtomicUpdate(llvm::AtomicOrdering AO,
                        const llvm::function_ref<RValue(RValue)> &UpdateOp,
                        bool IsVolatile);
  void EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
                        bool IsVolatile);

  // ...

  /// Translates an LLVM atomic ordering into the integer value the
  /// __atomic_* libcalls expect.
  // ...
  translateAtomicOrdering(const llvm::AtomicOrdering AO);

  // ...
  bool requiresMemSetZero(llvm::Type *type) const;

  // ...
  void EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,
                             llvm::AtomicOrdering AO, bool IsVolatile);
  llvm::Value *EmitAtomicLoadOp(llvm::AtomicOrdering AO, bool IsVolatile);

  llvm::Value *EmitAtomicCompareExchangeLibcall(
      llvm::Value *ExpectedAddr, llvm::Value *DesiredAddr,
      llvm::AtomicOrdering Success = llvm::SequentiallyConsistent,
      llvm::AtomicOrdering Failure = llvm::SequentiallyConsistent);
  std::pair<llvm::Value *, llvm::Value *> EmitAtomicCompareExchangeOp(
      llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
      llvm::AtomicOrdering Success = llvm::SequentiallyConsistent,
      llvm::AtomicOrdering Failure = llvm::SequentiallyConsistent,
      bool IsWeak = false);

  void EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
                               const llvm::function_ref<RValue(RValue)> &UpdateOp,
                               bool IsVolatile);
  void EmitAtomicUpdateOp(llvm::AtomicOrdering AO,
                          const llvm::function_ref<RValue(RValue)> &UpdateOp,
                          bool IsVolatile);
  void EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO, RValue UpdateRVal,
                               bool IsVolatile);
  void EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
                          bool IsVolatile);
};
// ...
AtomicInfo::translateAtomicOrdering(const llvm::AtomicOrdering AO) {
  switch (AO) {
  case llvm::Unordered:
  case llvm::NotAtomic:
  case llvm::Monotonic:
    // ... (the ABI value for memory_order_relaxed)
  case llvm::Acquire:
    // ...
  case llvm::Release:
    // ...
  case llvm::AcquireRelease:
    // ... (memory_order_acq_rel)
  case llvm::SequentiallyConsistent:
    // ... (memory_order_seq_cst)
  }
  llvm_unreachable("Unhandled AtomicOrdering");
}
llvm::Value *AtomicInfo::CreateTempAlloca() const {
  auto *TempAlloca = CGF.CreateMemTemp(
      (LVal.isBitField() && ValueSizeInBits > AtomicSizeInBits) ? ValueTy
                                                                : AtomicTy,
      "atomic-temp");
  TempAlloca->setAlignment(getAtomicAlignment().getQuantity());
  // Cast to pointer to value type; this is needed for non-simple lvalues.
  if (LVal.isBitField())
    return CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
        TempAlloca, getAtomicAddress()->getType());
  return TempAlloca;
}
/// Does a store of the given IR type modify the full expected width?
static bool isFullSizeType(CodeGenModule &CGM, llvm::Type *type,
                           uint64_t expectedSize) {
  return (CGM.getDataLayout().getTypeStoreSize(type) * 8 == expectedSize);
}
bool AtomicInfo::requiresMemSetZero(llvm::Type *type) const {
  // If the atomic type has size padding, we definitely need a memset.
  if (hasPadding()) return true;

  // Otherwise, do some simple heuristics to try to avoid it:
  switch (getEvaluationKind()) {
  // For scalars and complexes, check whether the store size of the type
  // uses the full size.
  case TEK_Scalar:
    return !isFullSizeType(CGF.CGM, type, AtomicSizeInBits);
  case TEK_Complex:
    return !isFullSizeType(CGF.CGM, type->getStructElementType(0),
                           AtomicSizeInBits / 2);
  // ...
  }
  llvm_unreachable("bad evaluation kind");
}
bool AtomicInfo::emitMemSetZeroIfNecessary() const {
  assert(LVal.isSimple());
  llvm::Value *addr = LVal.getAddress();
  if (!requiresMemSetZero(addr->getType()->getPointerElementType()))
    return false;

  CGF.Builder.CreateMemSet(
      addr, llvm::ConstantInt::get(CGF.Int8Ty, 0),
      CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits).getQuantity(),
      LVal.getAlignment().getQuantity());
  return true;
}
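// Illustrative case: _Atomic(struct { char c[3]; }) is padded to 4 bytes so
// it can be loaded and compared as an i32. Without the memset the padding
// byte would be undefined, and a bitwise cmpxchg against it could fail
// spuriously forever.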
static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak,
                              llvm::Value *Dest, llvm::Value *Ptr,
                              llvm::Value *Val1, llvm::Value *Val2,
                              uint64_t Size, unsigned Align,
                              llvm::AtomicOrdering SuccessOrder,
                              llvm::AtomicOrdering FailureOrder) {
  // Load the expected and desired operands from memory.
  llvm::LoadInst *Expected = CGF.Builder.CreateLoad(Val1);
  Expected->setAlignment(Align);
  llvm::LoadInst *Desired = CGF.Builder.CreateLoad(Val2);
  Desired->setAlignment(Align);

  llvm::AtomicCmpXchgInst *Pair = CGF.Builder.CreateAtomicCmpXchg(
      Ptr, Expected, Desired, SuccessOrder, FailureOrder);
  // ...
  Pair->setWeak(IsWeak);

  // Cmp holds the result of the compare-exchange operation: true on success,
  // false on failure.
  llvm::Value *Old = CGF.Builder.CreateExtractValue(Pair, 0);
  llvm::Value *Cmp = CGF.Builder.CreateExtractValue(Pair, 1);

  // This basic block holds the store to 'expected' if the operation failed.
  llvm::BasicBlock *StoreExpectedBB =
      CGF.createBasicBlock("cmpxchg.store_expected", CGF.CurFn);

  // This basic block is the exit point of the operation; we end up here
  // regardless of whether the operation succeeded.
  llvm::BasicBlock *ContinueBB =
      CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);

  // Update Expected if it isn't equal to Old; otherwise branch straight to
  // the exit point.
  CGF.Builder.CreateCondBr(Cmp, ContinueBB, StoreExpectedBB);

  CGF.Builder.SetInsertPoint(StoreExpectedBB);
  // Update the memory at Expected with Old's value.
  llvm::StoreInst *StoreExpected = CGF.Builder.CreateStore(Old, Val1);
  StoreExpected->setAlignment(Align);
  // Finally, branch to the exit point.
  CGF.Builder.CreateBr(ContinueBB);

  CGF.Builder.SetInsertPoint(ContinueBB);
  // ...
}
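// The IR produced above looks roughly like this for a 32-bit operand:
//   %pair = cmpxchg i32* %ptr, i32 %expected, i32 %desired succ_ord fail_ord
//   %old  = extractvalue { i32, i1 } %pair, 0
//   %ok   = extractvalue { i32, i1 } %pair, 1
//   br i1 %ok, label %cmpxchg.continue, label %cmpxchg.store_expected
// with the failure block writing %old back through the 'expected' pointer.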
static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
                                        bool IsWeak, llvm::Value *Dest,
                                        llvm::Value *Ptr, llvm::Value *Val1,
                                        llvm::Value *Val2,
                                        llvm::Value *FailureOrderVal,
                                        uint64_t Size, unsigned Align,
                                        llvm::AtomicOrdering SuccessOrder) {
  llvm::AtomicOrdering FailureOrder;
  if (llvm::ConstantInt *FO = dyn_cast<llvm::ConstantInt>(FailureOrderVal)) {
    switch (FO->getSExtValue()) {
    // ... (memory_order_relaxed)
      FailureOrder = llvm::Monotonic;
      break;
    // ... (memory_order_consume and memory_order_acquire)
      FailureOrder = llvm::Acquire;
      break;
    // ... (memory_order_seq_cst)
      FailureOrder = llvm::SequentiallyConsistent;
      break;
    }
    if (FailureOrder >= SuccessOrder) {
      // Don't assert on undefined behavior: the failure ordering must be no
      // stronger than the success ordering, so clamp it instead.
      FailureOrder =
          llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(SuccessOrder);
    }
    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, Align,
                      SuccessOrder, FailureOrder);
    return;
  }

  // The failure ordering is only known at run time: create the blocks we
  // may need and switch on it.
  llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
                   *SeqCstBB = nullptr;
  // ...
  if (SuccessOrder != llvm::Monotonic && SuccessOrder != llvm::Release)
    AcquireBB = CGF.createBasicBlock("acquire_fail", CGF.CurFn);
  if (SuccessOrder == llvm::SequentiallyConsistent)
    SeqCstBB = CGF.createBasicBlock("seqcst_fail", CGF.CurFn);
  // ...
  llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(FailureOrderVal, MonotonicBB);
  // ...

  // Emit all the different atomics.
  CGF.Builder.SetInsertPoint(MonotonicBB);
  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
                    Size, Align, SuccessOrder, llvm::Monotonic);
  // ...
  CGF.Builder.SetInsertPoint(AcquireBB);
  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
                    Size, Align, SuccessOrder, llvm::Acquire);
  // ...
  CGF.Builder.SetInsertPoint(SeqCstBB);
  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
                    Size, Align, SuccessOrder, llvm::SequentiallyConsistent);
  // ...
  CGF.Builder.SetInsertPoint(ContBB);
}
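// Example: for __c11_atomic_compare_exchange_strong(p, &e, d,
// memory_order_acq_rel, fail) where `fail` is only known at run time, this
// emits a switch over `fail` with one cmpxchg per reachable failure
// ordering: monotonic always, plus acquire and seq_cst when the success
// ordering allows them.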
static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest,
                         llvm::Value *Ptr, llvm::Value *Val1, llvm::Value *Val2,
                         llvm::Value *IsWeak, llvm::Value *FailureOrder,
                         uint64_t Size, unsigned Align,
                         llvm::AtomicOrdering Order) {
  llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
  llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("Already handled!");

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    emitAtomicCmpXchgFailureSet(CGF, E, /*IsWeak=*/false, Dest, Ptr, Val1, Val2,
                                FailureOrder, Size, Align, Order);
    return;
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
    emitAtomicCmpXchgFailureSet(CGF, E, /*IsWeak=*/true, Dest, Ptr, Val1, Val2,
                                FailureOrder, Size, Align, Order);
    return;
  case AtomicExpr::AO__atomic_compare_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n: {
    if (llvm::ConstantInt *IsWeakC = dyn_cast<llvm::ConstantInt>(IsWeak)) {
      emitAtomicCmpXchgFailureSet(CGF, E, IsWeakC->getZExtValue(), Dest, Ptr,
                                  Val1, Val2, FailureOrder, Size, Align, Order);
    } else {
      // Weakness is only known at runtime: emit both variants and switch.
      llvm::BasicBlock *StrongBB =
          CGF.createBasicBlock("cmpxchg.strong", CGF.CurFn);
      llvm::BasicBlock *WeakBB = CGF.createBasicBlock("cmpxchg.weak", CGF.CurFn);
      llvm::BasicBlock *ContBB =
          CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);

      llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(IsWeak, WeakBB);
      SI->addCase(CGF.Builder.getInt1(false), StrongBB);

      CGF.Builder.SetInsertPoint(StrongBB);
      emitAtomicCmpXchgFailureSet(CGF, E, /*IsWeak=*/false, Dest, Ptr, Val1, Val2,
                                  FailureOrder, Size, Align, Order);
      CGF.Builder.CreateBr(ContBB);

      CGF.Builder.SetInsertPoint(WeakBB);
      emitAtomicCmpXchgFailureSet(CGF, E, /*IsWeak=*/true, Dest, Ptr, Val1, Val2,
                                  FailureOrder, Size, Align, Order);
      CGF.Builder.CreateBr(ContBB);

      CGF.Builder.SetInsertPoint(ContBB);
    }
    return;
  }
  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__atomic_load: {
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
    Load->setAtomic(Order);
    Load->setAlignment(Size);
    // ...
    llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Load, Dest);
    StoreDest->setAlignment(Align);
    return;
  }

  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__atomic_store:
  case AtomicExpr::AO__atomic_store_n: {
    assert(!Dest && "Store does not return a value");
    llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
    LoadVal1->setAlignment(Align);
    llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr);
    Store->setAtomic(Order);
    Store->setAlignment(Size);
    // ...
    return;
  }

  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__atomic_exchange:
    Op = llvm::AtomicRMWInst::Xchg;
    break;

  case AtomicExpr::AO__atomic_add_fetch:
    PostOp = llvm::Instruction::Add;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_add:
    Op = llvm::AtomicRMWInst::Add;
    break;

  case AtomicExpr::AO__atomic_sub_fetch:
    PostOp = llvm::Instruction::Sub;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__atomic_fetch_sub:
    Op = llvm::AtomicRMWInst::Sub;
    break;

  case AtomicExpr::AO__atomic_and_fetch:
    PostOp = llvm::Instruction::And;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_and:
    Op = llvm::AtomicRMWInst::And;
    break;

  case AtomicExpr::AO__atomic_or_fetch:
    PostOp = llvm::Instruction::Or;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_or:
    Op = llvm::AtomicRMWInst::Or;
    break;

  case AtomicExpr::AO__atomic_xor_fetch:
    PostOp = llvm::Instruction::Xor;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_xor:
    Op = llvm::AtomicRMWInst::Xor;
    break;

  case AtomicExpr::AO__atomic_nand_fetch:
    PostOp = llvm::Instruction::And; // the NOT is special cased below
    // Fall through.
  case AtomicExpr::AO__atomic_fetch_nand:
    Op = llvm::AtomicRMWInst::Nand;
    break;
  }

  llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
  LoadVal1->setAlignment(Align);
  llvm::AtomicRMWInst *RMWI =
      CGF.Builder.CreateAtomicRMW(Op, Ptr, LoadVal1, Order);
  // ...

  // For __atomic_*_fetch operations, perform the operation again to
  // determine the value which was written.
  llvm::Value *Result = RMWI;
  if (PostOp)
    Result = CGF.Builder.CreateBinOp(PostOp, RMWI, LoadVal1);
  if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
    Result = CGF.Builder.CreateNot(Result);
  llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Result, Dest);
  StoreDest->setAlignment(Align);
}
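// Worked example: `int r = __atomic_add_fetch(p, 5, __ATOMIC_SEQ_CST);`
// selects Op = Add and PostOp = Instruction::Add above, so the emitted IR
// is roughly:
//   %old = atomicrmw add i32* %p, i32 5 seq_cst   ; yields the OLD value
//   %new = add i32 %old, 5                        ; PostOp recomputes NEW
//   store i32 %new, i32* %r.addr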
static void AddDirectArgument(CodeGenFunction &CGF, CallArgList &Args,
                              bool UseOptimizedLibcall, llvm::Value *Val,
                              QualType ValTy, SourceLocation Loc,
                              CharUnits SizeInChars) {
  if (UseOptimizedLibcall) {
    // Load the value and pass it to the libcall directly, as an integer of
    // the matching width.
    // ...
    llvm::Type *IPtrTy = llvm::IntegerType::get(CGF.getLLVMContext(),
                                                SizeInBits)->getPointerTo();
    // ...
  }
  // ...
}
RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
  QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
  QualType MemTy = AtomicTy;
  if (const AtomicType *AT = AtomicTy->getAs<AtomicType>())
    MemTy = AT->getValueType();
  // ...
  unsigned MaxInlineWidthInBits =
      getTarget().getMaxAtomicInlineWidth();
  bool UseLibcall = (Size != Align ||
                     getContext().toBits(sizeChars) > MaxInlineWidthInBits);

  llvm::Value *IsWeak = nullptr, *OrderFail = nullptr, *Val1 = nullptr,
              *Val2 = nullptr;
  // ...
  if (E->getOp() == AtomicExpr::AO__c11_atomic_init) {
    assert(!Dest && "Init does not return a value");
    // ...
  }
  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("Already handled!");

  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
    break;

  case AtomicExpr::AO__atomic_load:
    // ...
    break;

  case AtomicExpr::AO__atomic_store:
    // ...
    break;

  case AtomicExpr::AO__atomic_exchange:
    // ...
    break;

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__atomic_compare_exchange_n:
  case AtomicExpr::AO__atomic_compare_exchange:
    // ...
    if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
      // ...
    break;

  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__c11_atomic_fetch_sub:
    // ... (these may need pointer-arithmetic scaling; may fall through)

  case AtomicExpr::AO__atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_sub:
  case AtomicExpr::AO__atomic_add_fetch:
  case AtomicExpr::AO__atomic_sub_fetch:
  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__atomic_store_n:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_nand:
  case AtomicExpr::AO__atomic_and_fetch:
  case AtomicExpr::AO__atomic_or_fetch:
  case AtomicExpr::AO__atomic_xor_fetch:
  case AtomicExpr::AO__atomic_nand_fetch:
    Val1 = EmitValToTemp(*this, E->getVal1());
    break;
  }
  // If the expression must be lowered as a library call, decide between the
  // generic __atomic_* entry points and the size-suffixed optimized forms.
  if (UseLibcall) {
    bool UseOptimizedLibcall = false;
    switch (E->getOp()) {
    case AtomicExpr::AO__c11_atomic_fetch_add:
    case AtomicExpr::AO__atomic_fetch_add:
    case AtomicExpr::AO__c11_atomic_fetch_and:
    case AtomicExpr::AO__atomic_fetch_and:
    case AtomicExpr::AO__c11_atomic_fetch_or:
    case AtomicExpr::AO__atomic_fetch_or:
    case AtomicExpr::AO__c11_atomic_fetch_sub:
    case AtomicExpr::AO__atomic_fetch_sub:
    case AtomicExpr::AO__c11_atomic_fetch_xor:
    case AtomicExpr::AO__atomic_fetch_xor:
      // For these, only library calls for certain sizes exist.
      UseOptimizedLibcall = true;
      break;
    default:
      // Only use optimized library calls for sizes for which they exist.
      if (Size == 1 || Size == 2 || Size == 4 || Size == 8)
        UseOptimizedLibcall = true;
      break;
    }
    CallArgList Args;
    if (!UseOptimizedLibcall) {
      // For non-optimized library calls, the size is the first parameter.
      Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
               getContext().getSizeType());
    }
    // ... (the pointer to the atomic object comes next)

    std::string LibCallName;
    // ...
    bool HaveRetTy = false;
    switch (E->getOp()) {
    // There is only one libcall for compare-and-exchange, because there is
    // no optimization benefit possible from a libcall version of a weak
    // compare and exchange:
    //   bool __atomic_compare_exchange(size_t size, void *mem, void *expected,
    //                                  void *desired, int success, int failure)
    case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
    case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    case AtomicExpr::AO__atomic_compare_exchange:
    case AtomicExpr::AO__atomic_compare_exchange_n:
      LibCallName = "__atomic_compare_exchange";
      // ...
      break;
    // void __atomic_exchange(size_t size, void *mem, void *val, void *return,
    //                        int order)
    case AtomicExpr::AO__c11_atomic_exchange:
    case AtomicExpr::AO__atomic_exchange_n:
    case AtomicExpr::AO__atomic_exchange:
      LibCallName = "__atomic_exchange";
      // ...
      break;
    // void __atomic_store(size_t size, void *mem, void *val, int order)
    case AtomicExpr::AO__c11_atomic_store:
    case AtomicExpr::AO__atomic_store:
    case AtomicExpr::AO__atomic_store_n:
      LibCallName = "__atomic_store";
      // ...
      break;
    // void __atomic_load(size_t size, void *mem, void *return, int order)
    case AtomicExpr::AO__c11_atomic_load:
    case AtomicExpr::AO__atomic_load:
    case AtomicExpr::AO__atomic_load_n:
      LibCallName = "__atomic_load";
      break;
    // T __atomic_fetch_add_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_add:
    case AtomicExpr::AO__atomic_fetch_add:
      LibCallName = "__atomic_fetch_add";
      // ...
      break;
    // T __atomic_fetch_and_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_and:
    case AtomicExpr::AO__atomic_fetch_and:
      LibCallName = "__atomic_fetch_and";
      // ...
      break;
    // T __atomic_fetch_or_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_or:
    case AtomicExpr::AO__atomic_fetch_or:
      LibCallName = "__atomic_fetch_or";
      // ...
      break;
    // T __atomic_fetch_sub_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_sub:
    case AtomicExpr::AO__atomic_fetch_sub:
      LibCallName = "__atomic_fetch_sub";
      // ...
      break;
    // T __atomic_fetch_xor_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_xor:
    case AtomicExpr::AO__atomic_fetch_xor:
      LibCallName = "__atomic_fetch_xor";
      // ...
      break;
    }

    // Optimized functions carry the operand size in their name.
    if (UseOptimizedLibcall)
      LibCallName += "_" + llvm::utostr(Size);
    // ...
    if (UseOptimizedLibcall) {
      // Pass the value operand directly by value.
      // ...
    }
    // ... (add the ordering arguments and emit the call)
    if (UseOptimizedLibcall) {
      // The value is returned directly from the optimized libcall; store it
      // into the destination slot.
      // ...
      llvm::StoreInst *StoreDest = Builder.CreateStore(
          ResVal,
          Builder.CreateBitCast(GetDest(), ResVal->getType()->getPointerTo()));
      StoreDest->setAlignment(Align);
    }
    // ...
  }
  bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store_n;
  bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load_n;

  // Cast the pointers to the appropriately sized integer pointer type.
  // ...
  Ptr = Builder.CreateBitCast(
      Ptr, ITy->getPointerTo(Ptr->getType()->getPointerAddressSpace()));
  if (Val1) Val1 = Builder.CreateBitCast(Val1, ITy->getPointerTo());
  if (Val2) Val2 = Builder.CreateBitCast(Val2, ITy->getPointerTo());
  // ...
    Dest = Builder.CreateBitCast(Dest, ITy->getPointerTo());

  if (isa<llvm::ConstantInt>(Order)) {
    int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
    switch (ord) {
    // ... (memory_order_relaxed)
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                   Size, Align, llvm::Monotonic);
      break;
    // ... (memory_order_consume and memory_order_acquire; invalid for stores)
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                   Size, Align, llvm::Acquire);
      break;
    // ... (memory_order_release; invalid for loads)
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                   Size, Align, llvm::Release);
      break;
    // ... (memory_order_acq_rel; only valid for read-modify-write ops)
      if (IsLoad || IsStore)
        break; // Avoid crashing on code with undefined behavior.
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                   Size, Align, llvm::AcquireRelease);
      break;
    // ... (memory_order_seq_cst)
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                   Size, Align, llvm::SequentiallyConsistent);
      break;
    }
    // ...
  }
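  // When the ordering folds to a compile-time constant, exactly one of the
  // EmitAtomicOp calls above is emitted; e.g. __c11_atomic_load(p,
  // memory_order_acquire) becomes a single acquire load with no dispatch
  // code.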
  // Long case, when Order isn't obviously constant.

  // Create all the relevant basic blocks.
  llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
                   *ReleaseBB = nullptr, *AcqRelBB = nullptr,
                   *SeqCstBB = nullptr;
  // ...
  if (!IsLoad && !IsStore)
    AcqRelBB = createBasicBlock("acqrel", CurFn);
  // ...

  // Create the switch for the split. MonotonicBB is arbitrarily chosen as
  // the default case; in practice this only matters if the ordering fails
  // to fold to a constant.
  Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
  llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);

  // Emit all the different atomics.
  Builder.SetInsertPoint(MonotonicBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
               Size, Align, llvm::Monotonic);
  // ...
  Builder.SetInsertPoint(AcquireBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
               Size, Align, llvm::Acquire);
  // ...
  Builder.SetInsertPoint(ReleaseBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
               Size, Align, llvm::Release);
  // ...
  if (!IsLoad && !IsStore) {
    Builder.SetInsertPoint(AcqRelBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                 Size, Align, llvm::AcquireRelease);
    // ...
  }
  Builder.SetInsertPoint(SeqCstBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
               Size, Align, llvm::SequentiallyConsistent);
  // ...

  // Cleanup and return.
  Builder.SetInsertPoint(ContBB);
  // ...
}
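// With a genuinely dynamic ordering, e.g. atomic_load_explicit(p, ord)
// where `ord` is a function parameter, the result is a switch over `ord`
// into up to five blocks (monotonic as the default, then acquire, release,
// acq_rel and seq_cst as applicable), each holding the same operation at a
// different ordering, all rejoining at atomic.continue.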
llvm::Value *AtomicInfo::emitCastToAtomicIntPointer(llvm::Value *addr) const {
  unsigned addrspace =
      cast<llvm::PointerType>(addr->getType())->getAddressSpace();
  llvm::IntegerType *ty =
      llvm::IntegerType::get(CGF.getLLVMContext(), AtomicSizeInBits);
  return CGF.Builder.CreateBitCast(addr, ty->getPointerTo(addrspace));
}
RValue AtomicInfo::convertTempToRValue(llvm::Value *addr,
                                       AggValueSlot resultSlot,
                                       SourceLocation loc, bool AsValue) const {
  if (LVal.isSimple()) {
    // Drill into the padding structure if we have one.
    if (hasPadding())
      addr = CGF.Builder.CreateStructGEP(nullptr, addr, 0);
    // ...
  }
  if (!AsValue)
    // Get an RValue of the whole atomic representation for non-simple
    // lvalues.
    return RValue::get(
        CGF.Builder.CreateAlignedLoad(addr, AtomicAlign.getQuantity()));
  if (LVal.isBitField())
    return CGF.EmitLoadOfBitfieldLValue(LValue::MakeBitfield(
        addr, LVal.getBitFieldInfo(), LVal.getType(), LVal.getAlignment()));
  if (LVal.isVectorElt())
    return CGF.EmitLoadOfLValue(LValue::MakeVectorElt(addr, LVal.getVectorIdx(),
                                                      LVal.getType(),
                                                      LVal.getAlignment()),
                                loc);
  assert(LVal.isExtVectorElt());
  return CGF.EmitLoadOfExtVectorElementLValue(LValue::MakeExtVectorElt(
      addr, LVal.getExtVectorElts(), LVal.getType(), LVal.getAlignment()));
}
RValue AtomicInfo::ConvertIntToValueOrAtomic(llvm::Value *IntVal,
                                             AggValueSlot ResultSlot,
                                             SourceLocation Loc,
                                             bool AsValue) const {
  // Try not to go through memory in some easy cases.
  assert(IntVal->getType()->isIntegerTy() && "Expected integer value");
  if (getEvaluationKind() == TEK_Scalar &&
      (((!LVal.isBitField() ||
         LVal.getBitFieldInfo().Size == ValueSizeInBits) &&
        !hasPadding()) ||
       !AsValue)) {
    auto *ValTy = AsValue
                      ? CGF.ConvertTypeForMem(ValueTy)
                      : getAtomicAddress()->getType()->getPointerElementType();
    if (ValTy->isIntegerTy()) {
      assert(IntVal->getType() == ValTy && "Different integer types.");
      return RValue::get(CGF.EmitFromMemory(IntVal, ValueTy));
    } else if (ValTy->isPointerTy())
      return RValue::get(CGF.Builder.CreateIntToPtr(IntVal, ValTy));
    else if (llvm::CastInst::isBitCastable(IntVal->getType(), ValTy))
      return RValue::get(CGF.Builder.CreateBitCast(IntVal, ValTy));
  }

  // Create a temporary big enough to hold the atomic integer.
  llvm::Value *Temp;
  bool TempIsVolatile = false;
  CharUnits TempAlignment;
  if (AsValue && getEvaluationKind() == TEK_Aggregate) {
    assert(!ResultSlot.isIgnored());
    Temp = ResultSlot.getAddr();
    TempAlignment = getValueAlignment();
    // ...
  } else {
    Temp = CreateTempAlloca();
    TempAlignment = getAtomicAlignment();
  }

  // Slam the integer into the temporary.
  llvm::Value *CastTemp = emitCastToAtomicIntPointer(Temp);
  CGF.Builder.CreateAlignedStore(IntVal, CastTemp, TempAlignment.getQuantity())
      ->setVolatile(TempIsVolatile);

  return convertTempToRValue(Temp, ResultSlot, Loc, AsValue);
}
void AtomicInfo::EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,
                                       llvm::AtomicOrdering AO, bool) {
  CallArgList Args;
  // ...
  Args.add(RValue::get(llvm::ConstantInt::get(
               CGF.IntTy, translateAtomicOrdering(AO))),
           CGF.getContext().IntTy);
  emitAtomicLibcall(CGF, "__atomic_load", CGF.getContext().VoidTy, Args);
}
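// The generic, size-suffix-free libcall used here has this shape in the
// GCC/LLVM atomics ABI:
//   void __atomic_load(size_t size, void *src, void *dest, int memorder);
// so the loaded bytes land in AddForLoaded rather than in a return value.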
llvm::Value *AtomicInfo::EmitAtomicLoadOp(llvm::AtomicOrdering AO,
                                          bool IsVolatile) {
  // Okay, we're doing this natively.
  llvm::Value *Addr = emitCastToAtomicIntPointer(getAtomicAddress());
  llvm::LoadInst *Load = CGF.Builder.CreateLoad(Addr, "atomic-load");
  Load->setAtomic(AO);

  // Other decoration.
  Load->setAlignment(getAtomicAlignment().getQuantity());
  if (IsVolatile)
    Load->setVolatile(true);
  if (LVal.getTBAAInfo())
    // ...
  return Load;
}
bool CodeGenFunction::LValueIsSuitableForInlineAtomic(LValue LV) {
  AtomicInfo AI(*this, LV);
  // ...
  // An atomic is inline if we don't need to use a libcall.
  bool AtomicIsInline = !AI.shouldUseLibcall();
  // ...
}

bool CodeGenFunction::typeIsSuitableForInlineAtomic(QualType Ty,
                                                    bool IsVolatile) const {
  // ...
}

RValue CodeGenFunction::EmitAtomicLoad(LValue LV, SourceLocation SL,
                                       AggValueSlot Slot) {
  llvm::AtomicOrdering AO;
  // ... (loads of _Atomic-typed l-values default to seq_cst)
    AO = llvm::SequentiallyConsistent;
  // ...
}
RValue AtomicInfo::EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
                                  bool AsValue, llvm::AtomicOrdering AO,
                                  bool IsVolatile) {
  // Check whether we should use a library call.
  if (shouldUseLibcall()) {
    llvm::Value *TempAddr;
    if (LVal.isSimple() && !ResultSlot.isIgnored()) {
      // ...
      TempAddr = ResultSlot.getAddr();
    } else
      TempAddr = CreateTempAlloca();

    EmitAtomicLoadLibcall(TempAddr, AO, IsVolatile);

    // Okay, turn that back into the original value or whole atomic (for
    // non-simple lvalues) type.
    return convertTempToRValue(TempAddr, ResultSlot, Loc, AsValue);
  }

  // Okay, we're doing this natively.
  auto *Load = EmitAtomicLoadOp(AO, IsVolatile);
  // ...
  return ConvertIntToValueOrAtomic(Load, ResultSlot, Loc, AsValue);
}
RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
                                       llvm::AtomicOrdering AO, bool IsVolatile,
                                       AggValueSlot resultSlot) {
  AtomicInfo Atomics(*this, src);
  return Atomics.EmitAtomicLoad(resultSlot, loc, /*AsValue=*/true, AO,
                                IsVolatile);
}
/// Copy an r-value into memory as part of storing to an atomic type.
/// This needs to create a bit-pattern suitable for atomic operations.
void AtomicInfo::emitCopyIntoMemory(RValue rvalue) const {
  assert(LVal.isSimple());
  // If we have an aggregate r-value, it should be of the atomic type, which
  // means the caller is responsible for having zeroed any padding. Just do
  // an aggregate copy of that type.
  if (rvalue.isAggregate()) {
    CGF.EmitAggregateCopy(getAtomicAddress(), rvalue.getAggregateAddr(),
                          getAtomicType(),
                          (rvalue.isVolatileQualified()
                           || LVal.isVolatileQualified()),
                          LVal.getAlignment());
    return;
  }

  // Okay, otherwise we're copying stuff.

  // Zero out the buffer if necessary.
  emitMemSetZeroIfNecessary();

  // Drill past the padding if present.
  // ...
}

/// Materialize an r-value into memory for the purposes of storing it
/// to an atomic type.
llvm::Value *AtomicInfo::materializeRValue(RValue rvalue) const {
  // ...
  // Otherwise, make a temporary and materialize into it.
  LValue TempLV = CGF.MakeAddrLValue(CreateTempAlloca(), getAtomicType(),
                                     getAtomicAlignment());
  AtomicInfo Atomics(CGF, TempLV);
  Atomics.emitCopyIntoMemory(rvalue);
  return TempLV.getAddress();
}
llvm::Value *AtomicInfo::convertRValueToInt(RValue RVal) const {
  // If we've got a scalar value of the right size, try to avoid going
  // through memory.
  if (RVal.isScalar() && (!hasPadding() || !LVal.isSimple())) {
    llvm::Value *Value = RVal.getScalarVal();
    if (isa<llvm::IntegerType>(Value->getType()))
      return CGF.EmitToMemory(Value, ValueTy);
    else {
      llvm::IntegerType *InputIntTy = llvm::IntegerType::get(
          CGF.getLLVMContext(),
          LVal.isSimple() ? getValueSizeInBits() : getAtomicSizeInBits());
      if (isa<llvm::PointerType>(Value->getType()))
        return CGF.Builder.CreatePtrToInt(Value, InputIntTy);
      else if (llvm::BitCastInst::isBitCastable(Value->getType(), InputIntTy))
        return CGF.Builder.CreateBitCast(Value, InputIntTy);
    }
  }
  // Otherwise, we need to go through memory.
  // ...
  Addr = emitCastToAtomicIntPointer(Addr);
  return CGF.Builder.CreateAlignedLoad(Addr,
                                       getAtomicAlignment().getQuantity());
}
std::pair<llvm::Value *, llvm::Value *> AtomicInfo::EmitAtomicCompareExchangeOp(
    llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
    llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak) {
  // Do the atomic compare-exchange natively.
  auto *Addr = emitCastToAtomicIntPointer(getAtomicAddress());
  auto *Inst = CGF.Builder.CreateAtomicCmpXchg(Addr, ExpectedVal, DesiredVal,
                                               Success, Failure);
  // Other decoration.
  Inst->setVolatile(LVal.isVolatileQualified());
  Inst->setWeak(IsWeak);

  // Okay, turn that back into the original value type.
  auto *PreviousVal = CGF.Builder.CreateExtractValue(Inst, /*Idxs=*/0);
  auto *SuccessFailureVal = CGF.Builder.CreateExtractValue(Inst, /*Idxs=*/1);
  return std::make_pair(PreviousVal, SuccessFailureVal);
}
llvm::Value *
AtomicInfo::EmitAtomicCompareExchangeLibcall(llvm::Value *ExpectedAddr,
                                             llvm::Value *DesiredAddr,
                                             llvm::AtomicOrdering Success,
                                             llvm::AtomicOrdering Failure) {
  CallArgList Args;
  // ...
  Args.add(RValue::get(llvm::ConstantInt::get(
               CGF.IntTy, translateAtomicOrdering(Success))),
           CGF.getContext().IntTy);
  Args.add(RValue::get(llvm::ConstantInt::get(
               CGF.IntTy, translateAtomicOrdering(Failure))),
           CGF.getContext().IntTy);
  // ... (emit the call and return the success flag)
}
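// Per the same ABI, the call emitted above is:
//   bool __atomic_compare_exchange(size_t size, void *obj, void *expected,
//                                  void *desired, int success, int failure);
// On failure it writes the current value back through `expected` and
// returns false, which is exactly the (old value, success flag) pair shape
// the callers below rely on.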
std::pair<RValue, llvm::Value *> AtomicInfo::EmitAtomicCompareExchange(
    RValue Expected, RValue Desired, llvm::AtomicOrdering Success,
    llvm::AtomicOrdering Failure, bool IsWeak) {
  if (Failure >= Success)
    // Don't assert on undefined behavior: "the failure argument shall be no
    // stronger than the success argument".
    Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(Success);

  // Check whether we should use a library call.
  if (shouldUseLibcall()) {
    // Produce the source addresses.
    auto *ExpectedAddr = materializeRValue(Expected);
    auto *DesiredAddr = materializeRValue(Desired);
    auto *Res = EmitAtomicCompareExchangeLibcall(ExpectedAddr, DesiredAddr,
                                                 Success, Failure);
    return std::make_pair(
        // ... (old value read back from ExpectedAddr)
        Res);
  }

  // If we've got a scalar value of the right size, try to avoid going
  // through memory.
  auto *ExpectedVal = convertRValueToInt(Expected);
  auto *DesiredVal = convertRValueToInt(Desired);
  auto Res = EmitAtomicCompareExchangeOp(ExpectedVal, DesiredVal, Success,
                                         Failure, IsWeak);
  return std::make_pair(
      // ... (Res.first converted back to the value type)
      Res.second);
}
static void EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics,
                                  RValue OldRVal,
                                  const llvm::function_ref<RValue(RValue)> &UpdateOp,
                                  llvm::Value *DesiredAddr) {
  LValue AtomicLVal = Atomics.getAtomicLValue();
  // ...
    Ptr = Atomics.materializeRValue(OldRVal);
  // ...
  RValue NewRVal = UpdateOp(UpRVal);
  // ... (store NewRVal through DesiredAddr)
}
void AtomicInfo::EmitAtomicUpdateLibcall(
    llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
    bool IsVolatile) {
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
  // ...
  EmitAtomicLoadLibcall(ExpectedAddr, AO, IsVolatile);
  // ... (loop head: atomic_cont / atomic_exit blocks)
  auto *DesiredAddr = CreateTempAlloca();
  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
      requiresMemSetZero(
          getAtomicAddress()->getType()->getPointerElementType())) {
    auto *OldVal = CGF.Builder.CreateAlignedLoad(
        ExpectedAddr, getAtomicAlignment().getQuantity());
    CGF.Builder.CreateAlignedStore(OldVal, DesiredAddr,
                                   getAtomicAlignment().getQuantity());
  }
  // ... (apply UpdateOp into DesiredAddr)
  auto *Res =
      EmitAtomicCompareExchangeLibcall(ExpectedAddr, DesiredAddr, AO, Failure);
  CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
  // ...
}
void AtomicInfo::EmitAtomicUpdateOp(
    llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
    bool IsVolatile) {
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);

  // Do the atomic load.
  auto *OldVal = EmitAtomicLoadOp(AO, IsVolatile);
  // Then run a compare-and-swap loop.
  // ...
  auto *CurBB = CGF.Builder.GetInsertBlock();
  // ...
  llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
                                             /*NumReservedValues=*/2);
  PHI->addIncoming(OldVal, CurBB);
  auto *NewAtomicAddr = CreateTempAlloca();
  auto *NewAtomicIntAddr = emitCastToAtomicIntPointer(NewAtomicAddr);
  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
      requiresMemSetZero(
          getAtomicAddress()->getType()->getPointerElementType())) {
    CGF.Builder.CreateAlignedStore(PHI, NewAtomicIntAddr,
                                   getAtomicAlignment().getQuantity());
  }
  // ... (apply UpdateOp into NewAtomicAddr)
  auto *DesiredVal = CGF.Builder.CreateAlignedLoad(
      NewAtomicIntAddr, getAtomicAlignment().getQuantity());
  // Try to write the new value using a cmpxchg operation.
  auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
  PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
  CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
  // ...
}
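// The control flow built above is the classic CAS loop; in pseudo-IR:
//   atomic_cont:
//     %old = phi [ %initial, %entry ], [ %cas.old, %atomic_cont ]
//     ; apply UpdateOp to %old, producing %desired
//     %pair = cmpxchg %ptr, %old, %desired
//     br i1 %pair.success, label %atomic_exit, label %atomic_cont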
static void EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics,
                                  RValue UpdateRVal, llvm::Value *DesiredAddr) {
  LValue AtomicLVal = Atomics.getAtomicLValue();
  // ...
}
void AtomicInfo::EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
                                         RValue UpdateRVal, bool IsVolatile) {
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
  // ...
  EmitAtomicLoadLibcall(ExpectedAddr, AO, IsVolatile);
  // ... (loop head: atomic_cont / atomic_exit blocks)
  auto *DesiredAddr = CreateTempAlloca();
  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
      requiresMemSetZero(
          getAtomicAddress()->getType()->getPointerElementType())) {
    auto *OldVal = CGF.Builder.CreateAlignedLoad(
        ExpectedAddr, getAtomicAlignment().getQuantity());
    CGF.Builder.CreateAlignedStore(OldVal, DesiredAddr,
                                   getAtomicAlignment().getQuantity());
  }
  // ... (store UpdateRVal into DesiredAddr)
  auto *Res =
      EmitAtomicCompareExchangeLibcall(ExpectedAddr, DesiredAddr, AO, Failure);
  CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
  // ...
}
void AtomicInfo::EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
                                    bool IsVolatile) {
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);

  // Do the atomic load.
  auto *OldVal = EmitAtomicLoadOp(AO, IsVolatile);
  // Then run a compare-and-swap loop.
  // ...
  auto *CurBB = CGF.Builder.GetInsertBlock();
  // ...
  llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
                                             /*NumReservedValues=*/2);
  PHI->addIncoming(OldVal, CurBB);
  auto *NewAtomicAddr = CreateTempAlloca();
  auto *NewAtomicIntAddr = emitCastToAtomicIntPointer(NewAtomicAddr);
  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
      requiresMemSetZero(
          getAtomicAddress()->getType()->getPointerElementType())) {
    CGF.Builder.CreateAlignedStore(PHI, NewAtomicIntAddr,
                                   getAtomicAlignment().getQuantity());
  }
  // ... (store UpdateRVal into NewAtomicAddr)
  auto *DesiredVal = CGF.Builder.CreateAlignedLoad(
      NewAtomicIntAddr, getAtomicAlignment().getQuantity());
  // Try to write the new value using a cmpxchg operation.
  auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
  PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
  CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
  // ...
}
void AtomicInfo::EmitAtomicUpdate(
    llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
    bool IsVolatile) {
  if (shouldUseLibcall()) {
    EmitAtomicUpdateLibcall(AO, UpdateOp, IsVolatile);
  } else {
    EmitAtomicUpdateOp(AO, UpdateOp, IsVolatile);
  }
}

void AtomicInfo::EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
                                  bool IsVolatile) {
  if (shouldUseLibcall()) {
    EmitAtomicUpdateLibcall(AO, UpdateRVal, IsVolatile);
  } else {
    EmitAtomicUpdateOp(AO, UpdateRVal, IsVolatile);
  }
}
void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue lvalue,
                                      bool isInit) {
  // ...
  llvm::AtomicOrdering AO;
  if (lvalue.getType()->isAtomicType()) {
    AO = llvm::SequentiallyConsistent;
  } else {
    // ...
  }
  return EmitAtomicStore(rvalue, lvalue, AO, IsVolatile, isInit);
}

/// Emit a store to an l-value of atomic type.
void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest,
                                      llvm::AtomicOrdering AO, bool IsVolatile,
                                      bool isInit) {
  // If this is an aggregate r-value, it should agree in type except
  // maybe for address-space qualification.
  assert(!rvalue.isAggregate() ||
         rvalue.getAggregateAddr()->getType()->getPointerElementType()
             == dest.getAddress()->getType()->getPointerElementType());

  AtomicInfo atomics(*this, dest);
  LValue LVal = atomics.getAtomicLValue();

  if (LVal.isSimple()) {
    // If this is an initialization, just put the value there normally.
    if (isInit) {
      atomics.emitCopyIntoMemory(rvalue);
      return;
    }

    // Check whether we should use a library call.
    if (atomics.shouldUseLibcall()) {
      // Produce a source address.
      llvm::Value *srcAddr = atomics.materializeRValue(rvalue);
      // ... (assemble the __atomic_store argument list)
      args.add(RValue::get(llvm::ConstantInt::get(
                   IntTy, AtomicInfo::translateAtomicOrdering(AO))),
               getContext().IntTy);
      // ...
      return;
    }

    // Okay, we're doing this natively.
    llvm::Value *intValue = atomics.convertRValueToInt(rvalue);

    // Do the atomic store.
    llvm::Value *addr =
        atomics.emitCastToAtomicIntPointer(atomics.getAtomicAddress());
    intValue = Builder.CreateIntCast(
        intValue, addr->getType()->getPointerElementType(), /*isSigned=*/false);
    llvm::StoreInst *store = Builder.CreateStore(intValue, addr);

    // Initializations don't need to be atomic.
    if (!isInit)
      store->setAtomic(AO);

    // Other decoration.
    // ...
    if (IsVolatile)
      store->setVolatile(true);
    // ...
    return;
  }

  // Otherwise, emit a simple atomic update operation.
  atomics.EmitAtomicUpdate(AO, rvalue, IsVolatile);
}
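// For a seq_cst store of an int that fits the target's inline width, this
// path reduces to roughly:
//   store atomic i32 %v, i32* %addr seq_cst, align 4
// whereas an oversized or misaligned type takes the libcall branch and
// calls __atomic_store(size, dest, src, __ATOMIC_SEQ_CST).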
std::pair<RValue, llvm::Value *> CodeGenFunction::EmitAtomicCompareExchange(
    LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc,
    llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak,
    AggValueSlot Slot) {
  // If these are aggregate r-values, they should agree in type except
  // maybe for address-space qualification.
  assert(!Expected.isAggregate() ||
         Expected.getAggregateAddr()->getType()->getPointerElementType() ==
             Obj.getAddress()->getType()->getPointerElementType());
  assert(!Desired.isAggregate() ||
         Desired.getAggregateAddr()->getType()->getPointerElementType() ==
             Obj.getAddress()->getType()->getPointerElementType());
  AtomicInfo Atomics(*this, Obj);

  return Atomics.EmitAtomicCompareExchange(Expected, Desired, Success, Failure,
                                           IsWeak);
}
void CodeGenFunction::EmitAtomicUpdate(
    LValue LVal, llvm::AtomicOrdering AO,
    const llvm::function_ref<RValue(RValue)> &UpdateOp, bool IsVolatile) {
  AtomicInfo Atomics(*this, LVal);
  Atomics.EmitAtomicUpdate(AO, UpdateOp, IsVolatile);
}
void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) {
  AtomicInfo atomics(*this, dest);

  switch (atomics.getEvaluationKind()) {
  // ... (scalar and complex cases)
  case TEK_Aggregate: {
    // Fix up the destination if the initializer isn't an expression
    // of atomic type.
    bool Zeroed = false;
    // ...
      Zeroed = atomics.emitMemSetZeroIfNecessary();
      dest = atomics.projectValue();
    // ...
  }
  }
  llvm_unreachable("bad evaluation kind");
}