20 #include "llvm/ADT/StringExtras.h"
21 #include "llvm/IR/DataLayout.h"
22 #include "llvm/IR/Intrinsics.h"
23 #include "llvm/IR/Operator.h"
25 using namespace clang;
26 using namespace CodeGen;
33 uint64_t AtomicSizeInBits;
34 uint64_t ValueSizeInBits;
44 : CGF(CGF), AtomicSizeInBits(0), ValueSizeInBits(0),
51 ValueTy = ATy->getValueType();
56 uint64_t ValueAlignInBits;
57 uint64_t AtomicAlignInBits;
58 TypeInfo ValueTI = C.getTypeInfo(ValueTy);
59 ValueSizeInBits = ValueTI.
Width;
60 ValueAlignInBits = ValueTI.
Align;
62 TypeInfo AtomicTI = C.getTypeInfo(AtomicTy);
63 AtomicSizeInBits = AtomicTI.
Width;
64 AtomicAlignInBits = AtomicTI.
Align;
66 assert(ValueSizeInBits <= AtomicSizeInBits);
67 assert(ValueAlignInBits <= AtomicAlignInBits);
69 AtomicAlign = C.toCharUnitsFromBits(AtomicAlignInBits);
70 ValueAlign = C.toCharUnitsFromBits(ValueAlignInBits);
77 ValueSizeInBits = C.getTypeSize(ValueTy);
80 AtomicSizeInBits = C.toBits(
81 C.toCharUnitsFromBits(
Offset + OrigBFI.Size + C.getCharWidth() - 1)
85 (C.toCharUnitsFromBits(OrigBFI.Offset) / lvalue.
getAlignment()) *
87 VoidPtrAddr = CGF.
Builder.CreateConstGEP1_64(
88 VoidPtrAddr, OffsetInChars.getQuantity());
91 CGF.
Builder.getIntNTy(AtomicSizeInBits)->getPointerTo(),
92 "atomic_bitfield_base");
95 BFI.StorageSize = AtomicSizeInBits;
96 BFI.StorageOffset += OffsetInChars;
101 AtomicTy = C.getIntTypeForBitwidth(AtomicSizeInBits, OrigBFI.IsSigned);
102 if (AtomicTy.isNull()) {
105 C.toCharUnitsFromBits(AtomicSizeInBits).getQuantity());
112 ValueSizeInBits = C.getTypeSize(ValueTy);
114 AtomicSizeInBits = C.getTypeSize(AtomicTy);
120 ValueSizeInBits = C.getTypeSize(ValueTy);
124 AtomicSizeInBits = C.getTypeSize(AtomicTy);
128 UseLibcall = !C.getTargetInfo().hasBuiltinAtomic(
132 QualType getAtomicType()
const {
return AtomicTy; }
133 QualType getValueType()
const {
return ValueTy; }
134 CharUnits getAtomicAlignment()
const {
return AtomicAlign; }
135 CharUnits getValueAlignment()
const {
return ValueAlign; }
136 uint64_t getAtomicSizeInBits()
const {
return AtomicSizeInBits; }
137 uint64_t getValueSizeInBits()
const {
return ValueSizeInBits; }
139 bool shouldUseLibcall()
const {
return UseLibcall; }
140 const LValue &getAtomicLValue()
const {
return LVal; }
143 return LVal.getPointer();
144 else if (LVal.isBitField())
145 return LVal.getBitFieldPointer();
146 else if (LVal.isVectorElt())
147 return LVal.getVectorPointer();
148 assert(LVal.isExtVectorElt());
149 return LVal.getExtVectorPointer();
151 Address getAtomicAddress()
const {
152 return Address(getAtomicPointer(), getAtomicAlignment());
155 Address getAtomicAddressAsAtomicIntPointer()
const {
156 return emitCastToAtomicIntPointer(getAtomicAddress());
165 bool hasPadding()
const {
166 return (ValueSizeInBits != AtomicSizeInBits);
169 bool emitMemSetZeroIfNecessary()
const;
197 void emitCopyIntoMemory(
RValue rvalue)
const;
200 LValue projectValue()
const {
201 assert(LVal.isSimple());
202 Address addr = getAtomicAddress();
207 LVal.getAlignmentSource(), LVal.getTBAAInfo());
213 bool AsValue, llvm::AtomicOrdering AO,
224 std::pair<RValue, llvm::Value *> EmitAtomicCompareExchange(
226 llvm::AtomicOrdering Success = llvm::SequentiallyConsistent,
227 llvm::AtomicOrdering Failure = llvm::SequentiallyConsistent,
228 bool IsWeak =
false);
233 void EmitAtomicUpdate(llvm::AtomicOrdering AO,
238 void EmitAtomicUpdate(llvm::AtomicOrdering AO,
RValue UpdateRVal,
247 translateAtomicOrdering(
const llvm::AtomicOrdering AO);
250 Address CreateTempAlloca()
const;
256 void EmitAtomicLoadLibcall(
llvm::Value *AddForLoaded,
257 llvm::AtomicOrdering AO,
bool IsVolatile);
259 llvm::Value *EmitAtomicLoadOp(llvm::AtomicOrdering AO,
bool IsVolatile);
263 llvm::AtomicOrdering Success = llvm::SequentiallyConsistent,
264 llvm::AtomicOrdering Failure = llvm::SequentiallyConsistent);
266 std::pair<llvm::Value *, llvm::Value *> EmitAtomicCompareExchangeOp(
268 llvm::AtomicOrdering Success = llvm::SequentiallyConsistent,
269 llvm::AtomicOrdering Failure = llvm::SequentiallyConsistent,
270 bool IsWeak =
false);
273 EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
277 void EmitAtomicUpdateOp(llvm::AtomicOrdering AO,
281 void EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
RValue UpdateRVal,
284 void EmitAtomicUpdateOp(llvm::AtomicOrdering AO,
RValue UpdateRal,
290 AtomicInfo::translateAtomicOrdering(
const llvm::AtomicOrdering AO) {
292 case llvm::Unordered:
293 case llvm::NotAtomic:
294 case llvm::Monotonic:
300 case llvm::AcquireRelease:
302 case llvm::SequentiallyConsistent:
305 llvm_unreachable(
"Unhandled AtomicOrdering");
308 Address AtomicInfo::CreateTempAlloca()
const {
310 (LVal.isBitField() && ValueSizeInBits > AtomicSizeInBits) ? ValueTy
312 getAtomicAlignment(),
315 if (LVal.isBitField())
317 TempAlloca, getAtomicAddress().getType());
335 uint64_t expectedSize) {
336 return (CGM.
getDataLayout().getTypeStoreSize(type) * 8 == expectedSize);
344 if (hasPadding())
return true;
347 switch (getEvaluationKind()) {
354 AtomicSizeInBits / 2);
360 llvm_unreachable(
"bad evaluation kind");
363 bool AtomicInfo::emitMemSetZeroIfNecessary()
const {
364 assert(LVal.isSimple());
366 if (!requiresMemSetZero(addr->getType()->getPointerElementType()))
370 addr, llvm::ConstantInt::get(CGF.
Int8Ty, 0),
372 LVal.getAlignment().getQuantity());
380 llvm::AtomicOrdering SuccessOrder,
381 llvm::AtomicOrdering FailureOrder) {
386 llvm::AtomicCmpXchgInst *Pair = CGF.
Builder.CreateAtomicCmpXchg(
387 Ptr.
getPointer(), Expected, Desired, SuccessOrder, FailureOrder);
389 Pair->setWeak(IsWeak);
398 llvm::BasicBlock *StoreExpectedBB =
403 llvm::BasicBlock *ContinueBB =
408 CGF.
Builder.CreateCondBr(Cmp, ContinueBB, StoreExpectedBB);
410 CGF.
Builder.SetInsertPoint(StoreExpectedBB);
414 CGF.
Builder.CreateBr(ContinueBB);
416 CGF.
Builder.SetInsertPoint(ContinueBB);
430 llvm::AtomicOrdering SuccessOrder) {
431 llvm::AtomicOrdering FailureOrder;
432 if (llvm::ConstantInt *FO = dyn_cast<llvm::ConstantInt>(FailureOrderVal)) {
433 switch (FO->getSExtValue()) {
435 FailureOrder = llvm::Monotonic;
439 FailureOrder = llvm::Acquire;
442 FailureOrder = llvm::SequentiallyConsistent;
445 if (FailureOrder >= SuccessOrder) {
448 llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(SuccessOrder);
451 SuccessOrder, FailureOrder);
456 llvm::BasicBlock *MonotonicBB =
nullptr, *AcquireBB =
nullptr,
459 if (SuccessOrder != llvm::Monotonic && SuccessOrder != llvm::Release)
461 if (SuccessOrder == llvm::SequentiallyConsistent)
466 llvm::SwitchInst *SI = CGF.
Builder.CreateSwitch(FailureOrderVal, MonotonicBB);
473 CGF.
Builder.SetInsertPoint(MonotonicBB);
475 Size, SuccessOrder, llvm::Monotonic);
479 CGF.
Builder.SetInsertPoint(AcquireBB);
481 Size, SuccessOrder, llvm::Acquire);
489 CGF.
Builder.SetInsertPoint(SeqCstBB);
491 Size, SuccessOrder, llvm::SequentiallyConsistent);
497 CGF.
Builder.SetInsertPoint(ContBB);
503 uint64_t Size, llvm::AtomicOrdering Order) {
504 llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
505 llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;
507 switch (E->
getOp()) {
508 case AtomicExpr::AO__c11_atomic_init:
509 llvm_unreachable(
"Already handled!");
511 case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
513 FailureOrder, Size, Order);
515 case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
517 FailureOrder, Size, Order);
519 case AtomicExpr::AO__atomic_compare_exchange:
520 case AtomicExpr::AO__atomic_compare_exchange_n: {
521 if (llvm::ConstantInt *IsWeakC = dyn_cast<llvm::ConstantInt>(IsWeak)) {
523 Val1, Val2, FailureOrder, Size, Order);
526 llvm::BasicBlock *StrongBB =
529 llvm::BasicBlock *ContBB =
532 llvm::SwitchInst *SI = CGF.
Builder.CreateSwitch(IsWeak, WeakBB);
533 SI->addCase(CGF.
Builder.getInt1(
false), StrongBB);
535 CGF.
Builder.SetInsertPoint(StrongBB);
537 FailureOrder, Size, Order);
540 CGF.
Builder.SetInsertPoint(WeakBB);
542 FailureOrder, Size, Order);
545 CGF.
Builder.SetInsertPoint(ContBB);
549 case AtomicExpr::AO__c11_atomic_load:
550 case AtomicExpr::AO__atomic_load_n:
551 case AtomicExpr::AO__atomic_load: {
553 Load->setAtomic(Order);
559 case AtomicExpr::AO__c11_atomic_store:
560 case AtomicExpr::AO__atomic_store:
561 case AtomicExpr::AO__atomic_store_n: {
564 Store->setAtomic(Order);
569 case AtomicExpr::AO__c11_atomic_exchange:
570 case AtomicExpr::AO__atomic_exchange_n:
571 case AtomicExpr::AO__atomic_exchange:
572 Op = llvm::AtomicRMWInst::Xchg;
575 case AtomicExpr::AO__atomic_add_fetch:
576 PostOp = llvm::Instruction::Add;
578 case AtomicExpr::AO__c11_atomic_fetch_add:
579 case AtomicExpr::AO__atomic_fetch_add:
580 Op = llvm::AtomicRMWInst::Add;
583 case AtomicExpr::AO__atomic_sub_fetch:
584 PostOp = llvm::Instruction::Sub;
586 case AtomicExpr::AO__c11_atomic_fetch_sub:
587 case AtomicExpr::AO__atomic_fetch_sub:
588 Op = llvm::AtomicRMWInst::Sub;
591 case AtomicExpr::AO__atomic_and_fetch:
594 case AtomicExpr::AO__c11_atomic_fetch_and:
595 case AtomicExpr::AO__atomic_fetch_and:
599 case AtomicExpr::AO__atomic_or_fetch:
600 PostOp = llvm::Instruction::Or;
602 case AtomicExpr::AO__c11_atomic_fetch_or:
603 case AtomicExpr::AO__atomic_fetch_or:
604 Op = llvm::AtomicRMWInst::Or;
607 case AtomicExpr::AO__atomic_xor_fetch:
608 PostOp = llvm::Instruction::Xor;
610 case AtomicExpr::AO__c11_atomic_fetch_xor:
611 case AtomicExpr::AO__atomic_fetch_xor:
612 Op = llvm::AtomicRMWInst::Xor;
615 case AtomicExpr::AO__atomic_nand_fetch:
618 case AtomicExpr::AO__atomic_fetch_nand:
619 Op = llvm::AtomicRMWInst::Nand;
624 llvm::AtomicRMWInst *RMWI =
632 Result = CGF.
Builder.CreateBinOp(PostOp, RMWI, LoadVal1);
633 if (E->
getOp() == AtomicExpr::AO__atomic_nand_fetch)
634 Result = CGF.
Builder.CreateNot(Result);
652 if (UseOptimizedLibcall) {
659 SizeInBits)->getPointerTo();
677 MemTy = AT->getValueType();
682 bool UseLibcall = (sizeChars != alignChars ||
685 llvm::Value *IsWeak =
nullptr, *OrderFail =
nullptr;
692 if (E->
getOp() == AtomicExpr::AO__c11_atomic_init) {
700 switch (E->
getOp()) {
701 case AtomicExpr::AO__c11_atomic_init:
702 llvm_unreachable(
"Already handled above with EmitAtomicInit!");
704 case AtomicExpr::AO__c11_atomic_load:
705 case AtomicExpr::AO__atomic_load_n:
708 case AtomicExpr::AO__atomic_load:
712 case AtomicExpr::AO__atomic_store:
716 case AtomicExpr::AO__atomic_exchange:
721 case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
722 case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
723 case AtomicExpr::AO__atomic_compare_exchange_n:
724 case AtomicExpr::AO__atomic_compare_exchange:
726 if (E->
getOp() == AtomicExpr::AO__atomic_compare_exchange)
735 case AtomicExpr::AO__c11_atomic_fetch_add:
736 case AtomicExpr::AO__c11_atomic_fetch_sub:
753 case AtomicExpr::AO__atomic_fetch_add:
754 case AtomicExpr::AO__atomic_fetch_sub:
755 case AtomicExpr::AO__atomic_add_fetch:
756 case AtomicExpr::AO__atomic_sub_fetch:
757 case AtomicExpr::AO__c11_atomic_store:
758 case AtomicExpr::AO__c11_atomic_exchange:
759 case AtomicExpr::AO__atomic_store_n:
760 case AtomicExpr::AO__atomic_exchange_n:
761 case AtomicExpr::AO__c11_atomic_fetch_and:
762 case AtomicExpr::AO__c11_atomic_fetch_or:
763 case AtomicExpr::AO__c11_atomic_fetch_xor:
764 case AtomicExpr::AO__atomic_fetch_and:
765 case AtomicExpr::AO__atomic_fetch_or:
766 case AtomicExpr::AO__atomic_fetch_xor:
767 case AtomicExpr::AO__atomic_fetch_nand:
768 case AtomicExpr::AO__atomic_and_fetch:
769 case AtomicExpr::AO__atomic_or_fetch:
770 case AtomicExpr::AO__atomic_xor_fetch:
771 case AtomicExpr::AO__atomic_nand_fetch:
782 AtomicInfo Atomics(*
this, AtomicVal);
784 Ptr = Atomics.emitCastToAtomicIntPointer(Ptr);
785 if (Val1.
isValid()) Val1 = Atomics.convertToAtomicIntPointer(Val1);
786 if (Val2.
isValid()) Val2 = Atomics.convertToAtomicIntPointer(Val2);
788 Dest = Atomics.emitCastToAtomicIntPointer(Dest);
792 Dest = Atomics.emitCastToAtomicIntPointer(Atomics.CreateTempAlloca());
796 bool UseOptimizedLibcall =
false;
797 switch (E->
getOp()) {
798 case AtomicExpr::AO__c11_atomic_init:
799 llvm_unreachable(
"Already handled above with EmitAtomicInit!");
801 case AtomicExpr::AO__c11_atomic_fetch_add:
802 case AtomicExpr::AO__atomic_fetch_add:
803 case AtomicExpr::AO__c11_atomic_fetch_and:
804 case AtomicExpr::AO__atomic_fetch_and:
805 case AtomicExpr::AO__c11_atomic_fetch_or:
806 case AtomicExpr::AO__atomic_fetch_or:
807 case AtomicExpr::AO__atomic_fetch_nand:
808 case AtomicExpr::AO__c11_atomic_fetch_sub:
809 case AtomicExpr::AO__atomic_fetch_sub:
810 case AtomicExpr::AO__c11_atomic_fetch_xor:
811 case AtomicExpr::AO__atomic_fetch_xor:
812 case AtomicExpr::AO__atomic_add_fetch:
813 case AtomicExpr::AO__atomic_and_fetch:
814 case AtomicExpr::AO__atomic_nand_fetch:
815 case AtomicExpr::AO__atomic_or_fetch:
816 case AtomicExpr::AO__atomic_sub_fetch:
817 case AtomicExpr::AO__atomic_xor_fetch:
819 UseOptimizedLibcall =
true;
822 case AtomicExpr::AO__c11_atomic_load:
823 case AtomicExpr::AO__c11_atomic_store:
824 case AtomicExpr::AO__c11_atomic_exchange:
825 case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
826 case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
827 case AtomicExpr::AO__atomic_load_n:
828 case AtomicExpr::AO__atomic_load:
829 case AtomicExpr::AO__atomic_store_n:
830 case AtomicExpr::AO__atomic_store:
831 case AtomicExpr::AO__atomic_exchange_n:
832 case AtomicExpr::AO__atomic_exchange:
833 case AtomicExpr::AO__atomic_compare_exchange_n:
834 case AtomicExpr::AO__atomic_compare_exchange:
836 if (Size == 1 || Size == 2 || Size == 4 || Size == 8)
837 UseOptimizedLibcall =
true;
842 if (!UseOptimizedLibcall) {
851 std::string LibCallName;
855 bool HaveRetTy =
false;
856 llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;
857 switch (E->
getOp()) {
858 case AtomicExpr::AO__c11_atomic_init:
859 llvm_unreachable(
"Already handled!");
868 case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
869 case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
870 case AtomicExpr::AO__atomic_compare_exchange:
871 case AtomicExpr::AO__atomic_compare_exchange_n:
872 LibCallName =
"__atomic_compare_exchange";
885 case AtomicExpr::AO__c11_atomic_exchange:
886 case AtomicExpr::AO__atomic_exchange_n:
887 case AtomicExpr::AO__atomic_exchange:
888 LibCallName =
"__atomic_exchange";
894 case AtomicExpr::AO__c11_atomic_store:
895 case AtomicExpr::AO__atomic_store:
896 case AtomicExpr::AO__atomic_store_n:
897 LibCallName =
"__atomic_store";
905 case AtomicExpr::AO__c11_atomic_load:
906 case AtomicExpr::AO__atomic_load:
907 case AtomicExpr::AO__atomic_load_n:
908 LibCallName =
"__atomic_load";
912 case AtomicExpr::AO__atomic_add_fetch:
913 PostOp = llvm::Instruction::Add;
915 case AtomicExpr::AO__c11_atomic_fetch_add:
916 case AtomicExpr::AO__atomic_fetch_add:
917 LibCallName =
"__atomic_fetch_add";
923 case AtomicExpr::AO__atomic_and_fetch:
926 case AtomicExpr::AO__c11_atomic_fetch_and:
927 case AtomicExpr::AO__atomic_fetch_and:
928 LibCallName =
"__atomic_fetch_and";
934 case AtomicExpr::AO__atomic_or_fetch:
935 PostOp = llvm::Instruction::Or;
937 case AtomicExpr::AO__c11_atomic_fetch_or:
938 case AtomicExpr::AO__atomic_fetch_or:
939 LibCallName =
"__atomic_fetch_or";
945 case AtomicExpr::AO__atomic_sub_fetch:
946 PostOp = llvm::Instruction::Sub;
948 case AtomicExpr::AO__c11_atomic_fetch_sub:
949 case AtomicExpr::AO__atomic_fetch_sub:
950 LibCallName =
"__atomic_fetch_sub";
956 case AtomicExpr::AO__atomic_xor_fetch:
957 PostOp = llvm::Instruction::Xor;
959 case AtomicExpr::AO__c11_atomic_fetch_xor:
960 case AtomicExpr::AO__atomic_fetch_xor:
961 LibCallName =
"__atomic_fetch_xor";
967 case AtomicExpr::AO__atomic_nand_fetch:
970 case AtomicExpr::AO__atomic_fetch_nand:
971 LibCallName =
"__atomic_fetch_nand";
978 if (UseOptimizedLibcall)
979 LibCallName +=
"_" + llvm::utostr(Size);
982 if (UseOptimizedLibcall) {
1001 assert(UseOptimizedLibcall || !PostOp);
1013 llvm::Value *LoadVal1 = Args[1].RV.getScalarVal();
1014 ResVal =
Builder.CreateBinOp(PostOp, ResVal, LoadVal1);
1016 if (E->
getOp() == AtomicExpr::AO__atomic_nand_fetch)
1017 ResVal =
Builder.CreateNot(ResVal);
1032 bool IsStore = E->
getOp() == AtomicExpr::AO__c11_atomic_store ||
1033 E->
getOp() == AtomicExpr::AO__atomic_store ||
1034 E->
getOp() == AtomicExpr::AO__atomic_store_n;
1035 bool IsLoad = E->
getOp() == AtomicExpr::AO__c11_atomic_load ||
1036 E->
getOp() == AtomicExpr::AO__atomic_load ||
1037 E->
getOp() == AtomicExpr::AO__atomic_load_n;
1039 if (isa<llvm::ConstantInt>(Order)) {
1040 int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
1043 EmitAtomicOp(*
this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
1044 Size, llvm::Monotonic);
1050 EmitAtomicOp(*
this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
1051 Size, llvm::Acquire);
1056 EmitAtomicOp(*
this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
1057 Size, llvm::Release);
1060 if (IsLoad || IsStore)
1062 EmitAtomicOp(*
this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
1063 Size, llvm::AcquireRelease);
1066 EmitAtomicOp(*
this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
1067 Size, llvm::SequentiallyConsistent);
1085 llvm::BasicBlock *MonotonicBB =
nullptr, *AcquireBB =
nullptr,
1086 *ReleaseBB =
nullptr, *AcqRelBB =
nullptr,
1087 *SeqCstBB =
nullptr;
1093 if (!IsLoad && !IsStore)
1102 Order =
Builder.CreateIntCast(Order,
Builder.getInt32Ty(),
false);
1103 llvm::SwitchInst *SI =
Builder.CreateSwitch(Order, MonotonicBB);
1106 Builder.SetInsertPoint(MonotonicBB);
1107 EmitAtomicOp(*
this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
1108 Size, llvm::Monotonic);
1111 Builder.SetInsertPoint(AcquireBB);
1112 EmitAtomicOp(*
this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
1113 Size, llvm::Acquire);
1121 Builder.SetInsertPoint(ReleaseBB);
1122 EmitAtomicOp(*
this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
1123 Size, llvm::Release);
1128 if (!IsLoad && !IsStore) {
1129 Builder.SetInsertPoint(AcqRelBB);
1130 EmitAtomicOp(*
this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
1131 Size, llvm::AcquireRelease);
1136 Builder.SetInsertPoint(SeqCstBB);
1137 EmitAtomicOp(*
this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
1138 Size, llvm::SequentiallyConsistent);
1144 Builder.SetInsertPoint(ContBB);
1148 assert(Atomics.getValueSizeInBits() <= Atomics.getAtomicSizeInBits());
1154 Address AtomicInfo::emitCastToAtomicIntPointer(
Address addr)
const {
1155 unsigned addrspace =
1156 cast<llvm::PointerType>(addr.
getPointer()->getType())->getAddressSpace();
1157 llvm::IntegerType *ty =
1162 Address AtomicInfo::convertToAtomicIntPointer(
Address Addr)
const {
1165 if (SourceSizeInBits != AtomicSizeInBits) {
1166 Address Tmp = CreateTempAlloca();
1168 std::min(AtomicSizeInBits, SourceSizeInBits) / 8);
1172 return emitCastToAtomicIntPointer(Addr);
1178 bool asValue)
const {
1179 if (LVal.isSimple()) {
1194 if (LVal.isBitField())
1197 LVal.getAlignmentSource()));
1198 if (LVal.isVectorElt())
1201 LVal.getAlignmentSource()), loc);
1202 assert(LVal.isExtVectorElt());
1204 addr, LVal.getExtVectorElts(), LVal.
getType(),
1205 LVal.getAlignmentSource()));
1211 bool AsValue)
const {
1213 assert(IntVal->getType()->isIntegerTy() &&
"Expected integer value");
1215 (((!LVal.isBitField() ||
1216 LVal.getBitFieldInfo().Size == ValueSizeInBits) &&
1219 auto *ValTy = AsValue
1221 : getAtomicAddress().getType()->getPointerElementType();
1222 if (ValTy->isIntegerTy()) {
1223 assert(IntVal->getType() == ValTy &&
"Different integer types.");
1225 }
else if (ValTy->isPointerTy())
1227 else if (llvm::CastInst::isBitCastable(IntVal->getType(), ValTy))
1234 bool TempIsVolatile =
false;
1240 Temp = CreateTempAlloca();
1244 Address CastTemp = emitCastToAtomicIntPointer(Temp);
1246 ->setVolatile(TempIsVolatile);
1248 return convertAtomicTempToRValue(Temp, ResultSlot, Loc, AsValue);
1251 void AtomicInfo::EmitAtomicLoadLibcall(
llvm::Value *AddForLoaded,
1252 llvm::AtomicOrdering AO,
bool) {
1261 llvm::ConstantInt::get(CGF.
IntTy, translateAtomicOrdering(AO))),
1266 llvm::Value *AtomicInfo::EmitAtomicLoadOp(llvm::AtomicOrdering AO,
1269 Address Addr = getAtomicAddressAsAtomicIntPointer();
1271 Load->setAtomic(AO);
1275 Load->setVolatile(
true);
1276 if (LVal.getTBAAInfo())
1286 AtomicInfo AI(*
this, LV);
1289 bool AtomicIsInline = !AI.shouldUseLibcall();
1290 return IsVolatile && AtomicIsInline;
1297 bool IsVolatile)
const {
1306 llvm::AtomicOrdering AO;
1309 AO = llvm::SequentiallyConsistent;
1318 bool AsValue, llvm::AtomicOrdering AO,
1321 if (shouldUseLibcall()) {
1323 if (LVal.isSimple() && !ResultSlot.
isIgnored()) {
1327 TempAddr = CreateTempAlloca();
1329 EmitAtomicLoadLibcall(TempAddr.
getPointer(), AO, IsVolatile);
1333 return convertAtomicTempToRValue(TempAddr, ResultSlot, Loc, AsValue);
1337 auto *Load = EmitAtomicLoadOp(AO, IsVolatile);
1345 return ConvertIntToValueOrAtomic(Load, ResultSlot, Loc, AsValue);
1351 llvm::AtomicOrdering AO,
bool IsVolatile,
1353 AtomicInfo Atomics(*
this, src);
1354 return Atomics.EmitAtomicLoad(resultSlot, loc,
true, AO,
1360 void AtomicInfo::emitCopyIntoMemory(
RValue rvalue)
const {
1361 assert(LVal.isSimple());
1370 || LVal.isVolatileQualified()));
1377 emitMemSetZeroIfNecessary();
1380 LValue TempLVal = projectValue();
1393 Address AtomicInfo::materializeRValue(
RValue rvalue)
const {
1401 AtomicInfo Atomics(CGF, TempLV);
1402 Atomics.emitCopyIntoMemory(rvalue);
1409 if (RVal.
isScalar() && (!hasPadding() || !LVal.isSimple())) {
1411 if (isa<llvm::IntegerType>(Value->getType()))
1414 llvm::IntegerType *InputIntTy = llvm::IntegerType::get(
1416 LVal.isSimple() ? getValueSizeInBits() : getAtomicSizeInBits());
1417 if (isa<llvm::PointerType>(Value->getType()))
1418 return CGF.
Builder.CreatePtrToInt(Value, InputIntTy);
1419 else if (llvm::BitCastInst::isBitCastable(Value->getType(), InputIntTy))
1425 Address Addr = materializeRValue(RVal);
1428 Addr = emitCastToAtomicIntPointer(Addr);
1432 std::pair<llvm::Value *, llvm::Value *> AtomicInfo::EmitAtomicCompareExchangeOp(
1434 llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure,
bool IsWeak) {
1436 Address Addr = getAtomicAddressAsAtomicIntPointer();
1438 ExpectedVal, DesiredVal,
1441 Inst->setVolatile(LVal.isVolatileQualified());
1442 Inst->setWeak(IsWeak);
1445 auto *PreviousVal = CGF.
Builder.CreateExtractValue(Inst, 0);
1446 auto *SuccessFailureVal = CGF.
Builder.CreateExtractValue(Inst, 1);
1447 return std::make_pair(PreviousVal, SuccessFailureVal);
1451 AtomicInfo::EmitAtomicCompareExchangeLibcall(
llvm::Value *ExpectedAddr,
1453 llvm::AtomicOrdering Success,
1454 llvm::AtomicOrdering Failure) {
1466 CGF.
IntTy, translateAtomicOrdering(Success))),
1469 CGF.
IntTy, translateAtomicOrdering(Failure))),
1477 std::pair<RValue, llvm::Value *> AtomicInfo::EmitAtomicCompareExchange(
1478 RValue Expected,
RValue Desired, llvm::AtomicOrdering Success,
1479 llvm::AtomicOrdering Failure,
bool IsWeak) {
1480 if (Failure >= Success)
1482 Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(Success);
1485 if (shouldUseLibcall()) {
1487 Address ExpectedAddr = materializeRValue(Expected);
1488 Address DesiredAddr = materializeRValue(Desired);
1489 auto *Res = EmitAtomicCompareExchangeLibcall(ExpectedAddr.
getPointer(),
1492 return std::make_pair(
1500 auto *ExpectedVal = convertRValueToInt(Expected);
1501 auto *DesiredVal = convertRValueToInt(Desired);
1502 auto Res = EmitAtomicCompareExchangeOp(ExpectedVal, DesiredVal, Success,
1504 return std::make_pair(
1515 LValue AtomicLVal = Atomics.getAtomicLValue();
1522 Address Ptr = Atomics.materializeRValue(OldRVal);
1554 RValue NewRVal = UpdateOp(UpRVal);
1564 void AtomicInfo::EmitAtomicUpdateLibcall(
1565 llvm::AtomicOrdering AO,
const llvm::function_ref<
RValue(
RValue)> &UpdateOp,
1567 auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1569 Address ExpectedAddr = CreateTempAlloca();
1571 EmitAtomicLoadLibcall(ExpectedAddr.
getPointer(), AO, IsVolatile);
1575 Address DesiredAddr = CreateTempAlloca();
1576 if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
1577 requiresMemSetZero(getAtomicAddress().getElementType())) {
1581 auto OldRVal = convertAtomicTempToRValue(ExpectedAddr,
1586 EmitAtomicCompareExchangeLibcall(ExpectedAddr.
getPointer(),
1589 CGF.
Builder.CreateCondBr(Res, ExitBB, ContBB);
1593 void AtomicInfo::EmitAtomicUpdateOp(
1594 llvm::AtomicOrdering AO,
const llvm::function_ref<
RValue(
RValue)> &UpdateOp,
1596 auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1599 auto *OldVal = EmitAtomicLoadOp(AO, IsVolatile);
1603 auto *CurBB = CGF.
Builder.GetInsertBlock();
1605 llvm::PHINode *PHI = CGF.
Builder.CreatePHI(OldVal->getType(),
1607 PHI->addIncoming(OldVal, CurBB);
1608 Address NewAtomicAddr = CreateTempAlloca();
1609 Address NewAtomicIntAddr = emitCastToAtomicIntPointer(NewAtomicAddr);
1610 if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
1611 requiresMemSetZero(getAtomicAddress().getElementType())) {
1619 auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
1620 PHI->addIncoming(Res.first, CGF.
Builder.GetInsertBlock());
1621 CGF.
Builder.CreateCondBr(Res.second, ExitBB, ContBB);
1627 LValue AtomicLVal = Atomics.getAtomicLValue();
1652 void AtomicInfo::EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
1653 RValue UpdateRVal,
bool IsVolatile) {
1654 auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1656 Address ExpectedAddr = CreateTempAlloca();
1658 EmitAtomicLoadLibcall(ExpectedAddr.
getPointer(), AO, IsVolatile);
1662 Address DesiredAddr = CreateTempAlloca();
1663 if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
1664 requiresMemSetZero(getAtomicAddress().getElementType())) {
1670 EmitAtomicCompareExchangeLibcall(ExpectedAddr.
getPointer(),
1673 CGF.
Builder.CreateCondBr(Res, ExitBB, ContBB);
1677 void AtomicInfo::EmitAtomicUpdateOp(llvm::AtomicOrdering AO,
RValue UpdateRVal,
1679 auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1682 auto *OldVal = EmitAtomicLoadOp(AO, IsVolatile);
1686 auto *CurBB = CGF.
Builder.GetInsertBlock();
1688 llvm::PHINode *PHI = CGF.
Builder.CreatePHI(OldVal->getType(),
1690 PHI->addIncoming(OldVal, CurBB);
1691 Address NewAtomicAddr = CreateTempAlloca();
1692 Address NewAtomicIntAddr = emitCastToAtomicIntPointer(NewAtomicAddr);
1693 if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
1694 requiresMemSetZero(getAtomicAddress().getElementType())) {
1700 auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
1701 PHI->addIncoming(Res.first, CGF.
Builder.GetInsertBlock());
1702 CGF.
Builder.CreateCondBr(Res.second, ExitBB, ContBB);
1706 void AtomicInfo::EmitAtomicUpdate(
1707 llvm::AtomicOrdering AO,
const llvm::function_ref<
RValue(
RValue)> &UpdateOp,
1709 if (shouldUseLibcall()) {
1710 EmitAtomicUpdateLibcall(AO, UpdateOp, IsVolatile);
1712 EmitAtomicUpdateOp(AO, UpdateOp, IsVolatile);
1716 void AtomicInfo::EmitAtomicUpdate(llvm::AtomicOrdering AO,
RValue UpdateRVal,
1718 if (shouldUseLibcall()) {
1719 EmitAtomicUpdateLibcall(AO, UpdateRVal, IsVolatile);
1721 EmitAtomicUpdateOp(AO, UpdateRVal, IsVolatile);
1728 llvm::AtomicOrdering AO;
1730 AO = llvm::SequentiallyConsistent;
1744 llvm::AtomicOrdering AO,
bool IsVolatile,
1752 AtomicInfo atomics(*
this, dest);
1753 LValue LVal = atomics.getAtomicLValue();
1758 atomics.emitCopyIntoMemory(rvalue);
1763 if (atomics.shouldUseLibcall()) {
1765 Address srcAddr = atomics.materializeRValue(rvalue);
1776 IntTy, AtomicInfo::translateAtomicOrdering(AO))),
1783 llvm::Value *intValue = atomics.convertRValueToInt(rvalue);
1787 atomics.emitCastToAtomicIntPointer(atomics.getAtomicAddress());
1788 intValue =
Builder.CreateIntCast(
1794 store->setAtomic(AO);
1798 store->setVolatile(
true);
1805 atomics.EmitAtomicUpdate(AO, rvalue, IsVolatile);
1812 llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure,
bool IsWeak,
1822 AtomicInfo Atomics(*
this, Obj);
1824 return Atomics.EmitAtomicCompareExchange(Expected, Desired, Success, Failure,
1829 LValue LVal, llvm::AtomicOrdering AO,
1830 const llvm::function_ref<
RValue(
RValue)> &UpdateOp,
bool IsVolatile) {
1831 AtomicInfo Atomics(*
this, LVal);
1832 Atomics.EmitAtomicUpdate(AO, UpdateOp, IsVolatile);
1836 AtomicInfo atomics(*
this, dest);
1838 switch (atomics.getEvaluationKind()) {
1854 bool Zeroed =
false;
1856 Zeroed = atomics.emitMemSetZeroIfNecessary();
1857 dest = atomics.projectValue();
1872 llvm_unreachable(
"bad evaluation kind");
ReturnValueSlot - Contains the address where the return value of a function can be stored...
Defines the clang::ASTContext interface.
llvm::IntegerType * IntTy
int
A (possibly-)qualified type.
CodeGenTypes & getTypes()
llvm::Type * ConvertTypeForMem(QualType T)
static LValue MakeBitfield(Address Addr, const CGBitFieldInfo &Info, QualType type, AlignmentSource alignSource)
Create a new object to represent a bit-field access.
static AggValueSlot forLValue(const LValue &LV, IsDestructed_t isDestructed, NeedsGCBarriers_t needsGC, IsAliased_t isAliased, IsZeroed_t isZeroed=IsNotZeroed)
void setAlignment(CharUnits A)
const TargetInfo & getTarget() const
CanQualType getSizeType() const
Return the unique type for "size_t" (C99 7.17), defined in <stddef.h>.
std::pair< llvm::Value *, llvm::Value * > getComplexVal() const
getComplexVal - Return the real/imag components of this complex value.
QualType getIntTypeForBitwidth(unsigned DestWidth, unsigned Signed) const
getIntTypeForBitwidth - sets integer QualTy according to specified details: bitwidth, signed/unsigned.
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Address getAddress() const
void setTBAAInfo(llvm::MDNode *N)
const llvm::DataLayout & getDataLayout() const
const void * Store
Store - This opaque type encapsulates an immutable mapping from locations to values.
void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false)
EmitStoreThroughLValue - Store the specified rvalue into the specified lvalue, where both are guarant...
bool typeIsSuitableForInlineAtomic(QualType Ty, bool IsVolatile) const
An type is a candidate for having its loads and stores be made atomic if we are operating under /vola...
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
void EmitAtomicStore(RValue rvalue, LValue lvalue, bool isInit)
TypeEvaluationKind
The kind of evaluation to perform on values of a particular type.
llvm::Type * getElementType() const
Return the type of the values stored in this address.
static llvm::Value * getTypeSize(CodeGenFunction &CGF, QualType Ty)
bool isVolatileQualified() const
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
QualType getExtVectorType(QualType VectorType, unsigned NumElts) const
Return the unique reference to an extended vector type of the specified element type and size...
llvm::IntegerType * SizeTy
void EmitAtomicUpdate(LValue LVal, llvm::AtomicOrdering AO, const llvm::function_ref< RValue(RValue)> &UpdateOp, bool IsVolatile)
RValue EmitCall(const CGFunctionInfo &FnInfo, llvm::Value *Callee, ReturnValueSlot ReturnValue, const CallArgList &Args, CGCalleeInfo CalleeInfo=CGCalleeInfo(), llvm::Instruction **callOrInvoke=nullptr)
EmitCall - Generate a call of the given function, expecting the given result type, and using the given argument list which specifies both the LLVM arguments and the types they were derived from.
RValue EmitLoadOfExtVectorElementLValue(LValue V)
CharUnits getAlignment() const
const TargetInfo & getTargetInfo() const
CharUnits - This is an opaque type for sizes expressed in character units.
QualType getIntPtrType() const
Return a type compatible with "intptr_t" (C99 7.18.1.4), as defined by the target.
llvm::CallInst * CreateMemCpy(Address Dest, Address Src, llvm::Value *Size, bool IsVolatile=false)
virtual bool hasBuiltinAtomic(uint64_t AtomicSizeInBits, uint64_t AlignmentInBits) const
Returns true if the given target supports lock-free atomic operations at the specified width and alig...
static void EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics, RValue OldRVal, const llvm::function_ref< RValue(RValue)> &UpdateOp, Address DesiredAddr)
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
void EmitAtomicInit(Expr *E, LValue lvalue)
unsigned Offset
The offset within a contiguous run of bitfields that are represented as a single "field" within the L...
RValue EmitAtomicExpr(AtomicExpr *E)
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
bool isExtVectorElt() const
RValue convertTempToRValue(Address addr, QualType type, SourceLocation Loc)
Given the address of a temporary variable, produce an r-value of its type.
static LValue MakeExtVectorElt(Address vecAddress, llvm::Constant *Elts, QualType type, AlignmentSource alignSource)
std::pair< CharUnits, CharUnits > getTypeInfoInChars(const Type *T) const
static bool isFullSizeType(CodeGenModule &CGM, llvm::Type *type, uint64_t expectedSize)
Does a store of the given IR type modify the full expected width?
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource AlignSource=AlignmentSource::Type)
std::pair< llvm::Value *, llvm::Value * > ComplexPairTy
void EmitAnyExprToMem(const Expr *E, Address Location, Qualifiers Quals, bool IsInitializer)
EmitAnyExprToMem - Emits the code necessary to evaluate an arbitrary expression into the given memory...
RValue - This trivial value class is used to represent the result of an expression that is evaluated...
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee...
static unsigned getNumSubExprs(AtomicOp Op)
Determine the number of arguments the specified atomic builtin should have.
void DecorateInstructionWithTBAA(llvm::Instruction *Inst, llvm::MDNode *TBAAInfo, bool ConvertTypeToTag=true)
Decorate the instruction with a TBAA tag.
static TypeEvaluationKind getEvaluationKind(QualType T)
hasAggregateLLVMType - Return true if the specified AST type will map into an aggregate LLVM type or ...
llvm::Value * getPointer() const
Expr - This represents one expression.
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource AlignSource=AlignmentSource::Type, llvm::MDNode *TBAAInfo=nullptr, bool isInit=false, QualType TBAABaseTy=QualType(), uint64_t TBAAOffset=0, bool isNontemporal=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
bool isAtomicType() const
static LValue MakeVectorElt(Address vecAddress, llvm::Value *Idx, QualType type, AlignmentSource alignSource)
llvm::Value * EmitToMemory(llvm::Value *Value, QualType Ty)
EmitToMemory - Change a scalar value from its value representation to its in-memory representation...
static LValue MakeAddr(Address address, QualType type, ASTContext &Context, AlignmentSource alignSource, llvm::MDNode *TBAAInfo=nullptr)
void add(RValue rvalue, QualType type, bool needscopy=false)
llvm::LLVMContext & getLLVMContext()
CharUnits toCharUnitsFromBits(int64_t BitSize) const
Convert a size in bits to a size in characters.
Address EmitPointerWithAlignment(const Expr *Addr, AlignmentSource *Source=nullptr)
EmitPointerWithAlignment - Given an expression with a pointer type, emit the value and compute our be...
llvm::CallInst * CreateMemSet(Address Dest, llvm::Value *Value, llvm::Value *Size, bool IsVolatile=false)
Represents a GCC generic vector type.
llvm::Value * EmitCastToVoidPtr(llvm::Value *value)
Emit a cast to void* in the appropriate address space.
static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest, Address Ptr, Address Val1, Address Val2, llvm::Value *IsWeak, llvm::Value *FailureOrder, uint64_t Size, llvm::AtomicOrdering Order)
static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak, Address Dest, Address Ptr, Address Val1, Address Val2, llvm::Value *FailureOrderVal, uint64_t Size, llvm::AtomicOrdering SuccessOrder)
Given an ordering required on success, emit all possible cmpxchg instructions to cope with the provid...
The result type of a method or function.
RValue EmitAtomicLoad(LValue LV, SourceLocation SL, AggValueSlot Slot=AggValueSlot::ignored())
The l-value was considered opaque, so the alignment was determined from a type.
Address CreateBitCast(Address Addr, llvm::Type *Ty, const llvm::Twine &Name="")
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
llvm::Constant * CreateRuntimeFunction(llvm::FunctionType *Ty, StringRef Name, llvm::AttributeSet ExtraAttrs=llvm::AttributeSet())
Create a new runtime function with the specified type and name.
llvm::Value * getBitFieldPointer() const
ASTContext & getContext() const
Encodes a location in the source.
bool LValueIsSuitableForInlineAtomic(LValue Src)
An LValue is a candidate for having its loads and stores be made atomic if we are operating under /vo...
std::pair< RValue, llvm::Value * > EmitAtomicCompareExchange(LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc, llvm::AtomicOrdering Success=llvm::SequentiallyConsistent, llvm::AtomicOrdering Failure=llvm::SequentiallyConsistent, bool IsWeak=false, AggValueSlot Slot=AggValueSlot::ignored())
Emit a compare-and-exchange op for atomic type.
llvm::Value * EmitFromMemory(llvm::Value *Value, QualType Ty)
EmitFromMemory - Change a scalar value from its memory representation to its value representation...
const CGBitFieldInfo & getBitFieldInfo() const
llvm::Value * EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty, SourceLocation Loc, AlignmentSource AlignSource=AlignmentSource::Type, llvm::MDNode *TBAAInfo=nullptr, QualType TBAABaseTy=QualType(), uint64_t TBAAOffset=0, bool isNontemporal=false)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
llvm::MDNode * getTBAAInfo() const
AtomicExpr - Variadic atomic builtins: __atomic_exchange, __atomic_fetch_*, __atomic_load, __atomic_store, and __atomic_compare_exchange_*, for the similarly-named C++11 instructions, and __c11 variants for <stdatomic.h>.
const CodeGenOptions & getCodeGenOpts() const
Address getExtVectorAddress() const
AlignmentSource getAlignmentSource() const
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
llvm::Value * EmitScalarExpr(const Expr *E, bool IgnoreResultAssign=false)
EmitScalarExpr - Emit the computation of the specified expression of LLVM scalar type, returning the result.
CGFunctionInfo - Class to encapsulate the information about a function definition.
This class organizes the cross-function state that is used while generating LLVM code.
static const Type * getElementType(const Expr *BaseExpr)
static RValue getComplex(llvm::Value *V1, llvm::Value *V2)
bool isZero() const
isZero - Test whether the quantity equals zero.
Address CreateMemTemp(QualType T, const Twine &Name="tmp")
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment...
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
llvm::Value * getScalarVal() const
getScalarVal() - Return the Value* of this scalar value.
static AggValueSlot ignored()
ignored - Returns an aggregate value slot indicating that the aggregate value is being ignored...
Address CreateStructGEP(Address Addr, unsigned Index, CharUnits Offset, const llvm::Twine &Name="")
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
QualType getPointerType(QualType T) const
Return the uniqued reference to the type for a pointer to the specified type.
detail::InMemoryDirectory::const_iterator E
void EmitAggregateCopy(Address DestPtr, Address SrcPtr, QualType EltTy, bool isVolatile=false, bool isAssignment=false)
EmitAggregateCopy - Emit an aggregate copy.
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
unsigned getMaxAtomicInlineWidth() const
Return the maximum width lock-free atomic operation which can be inlined given the supported features...
void EmitAggExpr(const Expr *E, AggValueSlot AS)
EmitAggExpr - Emit the computation of the specified expression of aggregate type. ...
llvm::Constant * getExtVectorElts() const
llvm::PointerType * getType() const
Return the type of the pointer value.
static void AddDirectArgument(CodeGenFunction &CGF, CallArgList &Args, bool UseOptimizedLibcall, llvm::Value *Val, QualType ValTy, SourceLocation Loc, CharUnits SizeInChars)
const T * getAs() const
Member-template getAs<specific type>.
void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit)
EmitStoreOfComplex - Store a complex number into the specified l-value.
Address getAddress() const
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
ComplexPairTy EmitComplexExpr(const Expr *E, bool IgnoreReal=false, bool IgnoreImag=false)
EmitComplexExpr - Emit the computation of the specified expression of complex type, returning the result.
llvm::ConstantInt * getSize(CharUnits numChars)
Emit the given number of characters as a value of type size_t.
llvm::Value * getVectorIdx() const
QualType getUnqualifiedType() const
Retrieve the unqualified variant of the given type, removing as little sugar as possible.
Address getAggregateAddress() const
getAggregateAddr() - Return the Value* of the address of the aggregate.
RValue EmitLoadOfLValue(LValue V, SourceLocation Loc)
EmitLoadOfLValue - Given an expression that represents a value lvalue, this method emits the address ...
RValue EmitLoadOfBitfieldLValue(LValue LV)
bool hasVolatileMember(QualType T)
hasVolatileMember - returns true if aggregate type has a volatile member.
static RValue get(llvm::Value *V)
static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak, Address Dest, Address Ptr, Address Val1, Address Val2, uint64_t Size, llvm::AtomicOrdering SuccessOrder, llvm::AtomicOrdering FailureOrder)
bool isVolatileQualified() const
const CGFunctionInfo & arrangeFreeFunctionCall(const CallArgList &Args, const FunctionType *Ty, bool ChainCall)
Figure out the rules for calling a function with the given formal type using the given arguments...
static RValue emitAtomicLibcall(CodeGenFunction &CGF, StringRef fnName, QualType resultType, CallArgList &args)
static RValue getAggregate(Address addr, bool isVolatile=false)
int64_t toBits(CharUnits CharSize) const
Convert a size in characters to a size in bits.
LValue - This represents an lvalue reference.
Address CreatePointerBitCastOrAddrSpaceCast(Address Addr, llvm::Type *Ty, const llvm::Twine &Name="")
CallArgList - Type for representing both the value and type of arguments in a call.
static Address EmitValToTemp(CodeGenFunction &CGF, Expr *E)
Expr * getOrderFail() const
A class which abstracts out some details necessary for making a call.
Structure with information about how a bitfield should be accessed.
Qualifiers getQualifiers() const
Retrieve the set of qualifiers applied to this type.
bool isPointerType() const
llvm::FunctionType * GetFunctionType(const CGFunctionInfo &Info)
GetFunctionType - Get the LLVM function type for.