CGAtomic.cpp (clang 3.7.0)
1 //===--- CGAtomic.cpp - Emit LLVM IR for atomic operations ----------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file contains the code for emitting atomic operations.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "CodeGenFunction.h"
15 #include "CGCall.h"
16 #include "CGRecordLayout.h"
17 #include "CodeGenModule.h"
18 #include "clang/AST/ASTContext.h"
20 #include "llvm/ADT/StringExtras.h"
21 #include "llvm/IR/DataLayout.h"
22 #include "llvm/IR/Intrinsics.h"
23 #include "llvm/IR/Operator.h"
24 
25 using namespace clang;
26 using namespace CodeGen;
27 
28 namespace {
29  class AtomicInfo {
30  CodeGenFunction &CGF;
31  QualType AtomicTy;
32  QualType ValueTy;
33  uint64_t AtomicSizeInBits;
34  uint64_t ValueSizeInBits;
35  CharUnits AtomicAlign;
36  CharUnits ValueAlign;
37  CharUnits LValueAlign;
38  TypeEvaluationKind EvaluationKind;
39  bool UseLibcall;
40  LValue LVal;
41  CGBitFieldInfo BFI;
42  public:
43  AtomicInfo(CodeGenFunction &CGF, LValue &lvalue)
44  : CGF(CGF), AtomicSizeInBits(0), ValueSizeInBits(0),
45  EvaluationKind(TEK_Scalar), UseLibcall(true) {
46  assert(!lvalue.isGlobalReg());
47  ASTContext &C = CGF.getContext();
48  if (lvalue.isSimple()) {
49  AtomicTy = lvalue.getType();
50  if (auto *ATy = AtomicTy->getAs<AtomicType>())
51  ValueTy = ATy->getValueType();
52  else
53  ValueTy = AtomicTy;
54  EvaluationKind = CGF.getEvaluationKind(ValueTy);
55 
56  uint64_t ValueAlignInBits;
57  uint64_t AtomicAlignInBits;
58  TypeInfo ValueTI = C.getTypeInfo(ValueTy);
59  ValueSizeInBits = ValueTI.Width;
60  ValueAlignInBits = ValueTI.Align;
61 
62  TypeInfo AtomicTI = C.getTypeInfo(AtomicTy);
63  AtomicSizeInBits = AtomicTI.Width;
64  AtomicAlignInBits = AtomicTI.Align;
65 
66  assert(ValueSizeInBits <= AtomicSizeInBits);
67  assert(ValueAlignInBits <= AtomicAlignInBits);
68 
69  AtomicAlign = C.toCharUnitsFromBits(AtomicAlignInBits);
70  ValueAlign = C.toCharUnitsFromBits(ValueAlignInBits);
71  if (lvalue.getAlignment().isZero())
72  lvalue.setAlignment(AtomicAlign);
73 
74  LVal = lvalue;
75  } else if (lvalue.isBitField()) {
76  ValueTy = lvalue.getType();
77  ValueSizeInBits = C.getTypeSize(ValueTy);
78  auto &OrigBFI = lvalue.getBitFieldInfo();
79  auto Offset = OrigBFI.Offset % C.toBits(lvalue.getAlignment());
80  AtomicSizeInBits = C.toBits(
81  C.toCharUnitsFromBits(Offset + OrigBFI.Size + C.getCharWidth() - 1)
82  .RoundUpToAlignment(lvalue.getAlignment()));
83  auto VoidPtrAddr = CGF.EmitCastToVoidPtr(lvalue.getBitFieldAddr());
84  auto OffsetInChars =
85  (C.toCharUnitsFromBits(OrigBFI.Offset) / lvalue.getAlignment()) *
86  lvalue.getAlignment();
87  VoidPtrAddr = CGF.Builder.CreateConstGEP1_64(
88  VoidPtrAddr, OffsetInChars.getQuantity());
89  auto Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
90  VoidPtrAddr,
91  CGF.Builder.getIntNTy(AtomicSizeInBits)->getPointerTo(),
92  "atomic_bitfield_base");
93  BFI = OrigBFI;
94  BFI.Offset = Offset;
95  BFI.StorageSize = AtomicSizeInBits;
96  BFI.StorageOffset += OffsetInChars;
97  LVal = LValue::MakeBitfield(Addr, BFI, lvalue.getType(),
98  lvalue.getAlignment());
99  LVal.setTBAAInfo(lvalue.getTBAAInfo());
100  AtomicTy = C.getIntTypeForBitwidth(AtomicSizeInBits, OrigBFI.IsSigned);
101  if (AtomicTy.isNull()) {
102  llvm::APInt Size(
103  /*numBits=*/32,
104  C.toCharUnitsFromBits(AtomicSizeInBits).getQuantity());
105  AtomicTy = C.getConstantArrayType(C.CharTy, Size, ArrayType::Normal,
106  /*IndexTypeQuals=*/0);
107  }
108  AtomicAlign = ValueAlign = lvalue.getAlignment();
109  } else if (lvalue.isVectorElt()) {
110  ValueTy = lvalue.getType()->getAs<VectorType>()->getElementType();
111  ValueSizeInBits = C.getTypeSize(ValueTy);
112  AtomicTy = lvalue.getType();
113  AtomicSizeInBits = C.getTypeSize(AtomicTy);
114  AtomicAlign = ValueAlign = lvalue.getAlignment();
115  LVal = lvalue;
116  } else {
117  assert(lvalue.isExtVectorElt());
118  ValueTy = lvalue.getType();
119  ValueSizeInBits = C.getTypeSize(ValueTy);
120  AtomicTy = ValueTy = CGF.getContext().getExtVectorType(
121  lvalue.getType(), lvalue.getExtVectorAddr()
122  ->getType()
123  ->getPointerElementType()
124  ->getVectorNumElements());
125  AtomicSizeInBits = C.getTypeSize(AtomicTy);
126  AtomicAlign = ValueAlign = lvalue.getAlignment();
127  LVal = lvalue;
128  }
129  UseLibcall = !C.getTargetInfo().hasBuiltinAtomic(
130  AtomicSizeInBits, C.toBits(lvalue.getAlignment()));
131  }
132 
133  QualType getAtomicType() const { return AtomicTy; }
134  QualType getValueType() const { return ValueTy; }
135  CharUnits getAtomicAlignment() const { return AtomicAlign; }
136  CharUnits getValueAlignment() const { return ValueAlign; }
137  uint64_t getAtomicSizeInBits() const { return AtomicSizeInBits; }
138  uint64_t getValueSizeInBits() const { return ValueSizeInBits; }
139  TypeEvaluationKind getEvaluationKind() const { return EvaluationKind; }
140  bool shouldUseLibcall() const { return UseLibcall; }
141  const LValue &getAtomicLValue() const { return LVal; }
142  llvm::Value *getAtomicAddress() const {
143  if (LVal.isSimple())
144  return LVal.getAddress();
145  else if (LVal.isBitField())
146  return LVal.getBitFieldAddr();
147  else if (LVal.isVectorElt())
148  return LVal.getVectorAddr();
149  assert(LVal.isExtVectorElt());
150  return LVal.getExtVectorAddr();
151  }
152 
153  /// Is the atomic size larger than the underlying value type?
154  ///
155  /// Note that the absence of padding does not mean that atomic
156  /// objects are completely interchangeable with non-atomic
157  /// objects: we might have promoted the alignment of a type
158  /// without making it bigger.
159  bool hasPadding() const {
160  return (ValueSizeInBits != AtomicSizeInBits);
161  }
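// Illustrative example (annotation, not part of the original file): given
//   struct S { char c[3]; };
//   _Atomic(struct S) s;
// a typical target rounds the atomic's storage up to 4 bytes so that a 32-bit
// cmpxchg can operate on it; ValueSizeInBits (24) is then smaller than
// AtomicSizeInBits (32) and hasPadding() returns true.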
162 
163  bool emitMemSetZeroIfNecessary() const;
164 
165  llvm::Value *getAtomicSizeValue() const {
166  CharUnits size = CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits);
167  return CGF.CGM.getSize(size);
168  }
169 
170  /// Cast the given pointer to an integer pointer suitable for
171  /// atomic operations.
172  llvm::Value *emitCastToAtomicIntPointer(llvm::Value *addr) const;
173 
174  /// Turn an atomic-layout object into an r-value.
175  RValue convertTempToRValue(llvm::Value *addr, AggValueSlot resultSlot,
176  SourceLocation loc, bool AsValue) const;
177 
178  /// \brief Converts an r-value to an integer value.
179  llvm::Value *convertRValueToInt(RValue RVal) const;
180 
181  RValue ConvertIntToValueOrAtomic(llvm::Value *IntVal,
182  AggValueSlot ResultSlot,
183  SourceLocation Loc, bool AsValue) const;
184 
185  /// Copy an atomic r-value into atomic-layout memory.
186  void emitCopyIntoMemory(RValue rvalue) const;
187 
188  /// Project an l-value down to the value field.
189  LValue projectValue() const {
190  assert(LVal.isSimple());
191  llvm::Value *addr = getAtomicAddress();
192  if (hasPadding())
193  addr = CGF.Builder.CreateStructGEP(nullptr, addr, 0);
194 
195  return LValue::MakeAddr(addr, getValueType(), LVal.getAlignment(),
196  CGF.getContext(), LVal.getTBAAInfo());
197  }
198 
199  /// \brief Emits atomic load.
200  /// \returns Loaded value.
201  RValue EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
202  bool AsValue, llvm::AtomicOrdering AO,
203  bool IsVolatile);
204 
205  /// \brief Emits atomic compare-and-exchange sequence.
206  /// \param Expected Expected value.
207  /// \param Desired Desired value.
208  /// \param Success Atomic ordering for success operation.
209  /// \param Failure Atomic ordering for failed operation.
210  /// \param IsWeak true if atomic operation is weak, false otherwise.
211  /// \returns Pair of values: previous value from storage (value type) and
212  /// boolean flag (i1 type) with true if success and false otherwise.
213  std::pair<RValue, llvm::Value *> EmitAtomicCompareExchange(
214  RValue Expected, RValue Desired,
215  llvm::AtomicOrdering Success = llvm::SequentiallyConsistent,
216  llvm::AtomicOrdering Failure = llvm::SequentiallyConsistent,
217  bool IsWeak = false);
218 
219  /// \brief Emits atomic update.
220  /// \param AO Atomic ordering.
221  /// \param UpdateOp Update operation for the current lvalue.
222  void EmitAtomicUpdate(llvm::AtomicOrdering AO,
223  const llvm::function_ref<RValue(RValue)> &UpdateOp,
224  bool IsVolatile);
225  /// \brief Emits atomic update.
226  /// \param AO Atomic ordering.
227  void EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
228  bool IsVolatile);
229 
230  /// Materialize an atomic r-value in atomic-layout memory.
231  llvm::Value *materializeRValue(RValue rvalue) const;
232 
233  /// \brief Translates LLVM atomic ordering to GNU atomic ordering for
234  /// libcalls.
235  static AtomicExpr::AtomicOrderingKind
236  translateAtomicOrdering(const llvm::AtomicOrdering AO);
237 
238  private:
239  bool requiresMemSetZero(llvm::Type *type) const;
240 
241  /// \brief Creates temp alloca for intermediate operations on atomic value.
242  llvm::Value *CreateTempAlloca() const;
243 
244  /// \brief Emits atomic load as a libcall.
245  void EmitAtomicLoadLibcall(llvm::Value *AddrForLoaded,
246  llvm::AtomicOrdering AO, bool IsVolatile);
247  /// \brief Emits atomic load as LLVM instruction.
248  llvm::Value *EmitAtomicLoadOp(llvm::AtomicOrdering AO, bool IsVolatile);
249  /// \brief Emits atomic compare-and-exchange op as a libcall.
250  llvm::Value *EmitAtomicCompareExchangeLibcall(
251  llvm::Value *ExpectedAddr, llvm::Value *DesiredAddr,
252  llvm::AtomicOrdering Success = llvm::SequentiallyConsistent,
253  llvm::AtomicOrdering Failure = llvm::SequentiallyConsistent);
254  /// \brief Emits atomic compare-and-exchange op as LLVM instruction.
255  std::pair<llvm::Value *, llvm::Value *> EmitAtomicCompareExchangeOp(
256  llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
257  llvm::AtomicOrdering Success = llvm::SequentiallyConsistent,
258  llvm::AtomicOrdering Failure = llvm::SequentiallyConsistent,
259  bool IsWeak = false);
260  /// \brief Emit atomic update as libcalls.
261  void
262  EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
263  const llvm::function_ref<RValue(RValue)> &UpdateOp,
264  bool IsVolatile);
265  /// \brief Emit atomic update as LLVM instructions.
266  void EmitAtomicUpdateOp(llvm::AtomicOrdering AO,
267  const llvm::function_ref<RValue(RValue)> &UpdateOp,
268  bool IsVolatile);
269  /// \brief Emit atomic update as libcalls.
270  void EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO, RValue UpdateRVal,
271  bool IsVolatile);
272  /// \brief Emit atomic update as LLVM instructions.
273  void EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
274  bool IsVolatile);
275  };
276 }
277 
278 AtomicExpr::AtomicOrderingKind
279 AtomicInfo::translateAtomicOrdering(const llvm::AtomicOrdering AO) {
280  switch (AO) {
281  case llvm::Unordered:
282  case llvm::NotAtomic:
283  case llvm::Monotonic:
284  return AtomicExpr::AO_ABI_memory_order_relaxed;
285  case llvm::Acquire:
286  return AtomicExpr::AO_ABI_memory_order_acquire;
287  case llvm::Release:
288  return AtomicExpr::AO_ABI_memory_order_release;
289  case llvm::AcquireRelease:
290  return AtomicExpr::AO_ABI_memory_order_acq_rel;
291  case llvm::SequentiallyConsistent:
292  return AtomicExpr::AO_ABI_memory_order_seq_cst;
293  }
294  llvm_unreachable("Unhandled AtomicOrdering");
295 }
296 
297 llvm::Value *AtomicInfo::CreateTempAlloca() const {
298  auto *TempAlloca = CGF.CreateMemTemp(
299  (LVal.isBitField() && ValueSizeInBits > AtomicSizeInBits) ? ValueTy
300  : AtomicTy,
301  "atomic-temp");
302  TempAlloca->setAlignment(getAtomicAlignment().getQuantity());
303  // Cast to pointer to value type for bitfields.
304  if (LVal.isBitField())
305  return CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
306  TempAlloca, getAtomicAddress()->getType());
307  return TempAlloca;
308 }
309 
310 static RValue emitAtomicLibcall(CodeGenFunction &CGF,
311  StringRef fnName,
312  QualType resultType,
313  CallArgList &args) {
314  const CGFunctionInfo &fnInfo =
315  CGF.CGM.getTypes().arrangeFreeFunctionCall(resultType, args,
316  FunctionType::ExtInfo(), RequiredArgs::All);
317  llvm::FunctionType *fnTy = CGF.CGM.getTypes().GetFunctionType(fnInfo);
318  llvm::Constant *fn = CGF.CGM.CreateRuntimeFunction(fnTy, fnName);
319  return CGF.EmitCall(fnInfo, fn, ReturnValueSlot(), args);
320 }
321 
322 /// Does a store of the given IR type modify the full expected width?
323 static bool isFullSizeType(CodeGenModule &CGM, llvm::Type *type,
324  uint64_t expectedSize) {
325  return (CGM.getDataLayout().getTypeStoreSize(type) * 8 == expectedSize);
326 }
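// Illustrative example (annotation, not part of the original file): on
// x86-64, 'long double' lowers to x86_fp80 with a store size of 10 bytes,
// while _Atomic(long double) occupies 16 bytes; 10 * 8 != 128, so
// isFullSizeType() returns false and the padding must be zeroed before the
// object can be used atomically.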
327 
328 /// Does the atomic type require memsetting to zero before initialization?
329 ///
330 /// The IR type is provided as a way of making certain queries faster.
331 bool AtomicInfo::requiresMemSetZero(llvm::Type *type) const {
332  // If the atomic type has size padding, we definitely need a memset.
333  if (hasPadding()) return true;
334 
335  // Otherwise, do some simple heuristics to try to avoid it:
336  switch (getEvaluationKind()) {
337  // For scalars and complexes, check whether the store size of the
338  // type uses the full size.
339  case TEK_Scalar:
340  return !isFullSizeType(CGF.CGM, type, AtomicSizeInBits);
341  case TEK_Complex:
342  return !isFullSizeType(CGF.CGM, type->getStructElementType(0),
343  AtomicSizeInBits / 2);
344 
345  // Padding in structs has an undefined bit pattern. User beware.
346  case TEK_Aggregate:
347  return false;
348  }
349  llvm_unreachable("bad evaluation kind");
350 }
351 
352 bool AtomicInfo::emitMemSetZeroIfNecessary() const {
353  assert(LVal.isSimple());
354  llvm::Value *addr = LVal.getAddress();
355  if (!requiresMemSetZero(addr->getType()->getPointerElementType()))
356  return false;
357 
358  CGF.Builder.CreateMemSet(
359  addr, llvm::ConstantInt::get(CGF.Int8Ty, 0),
360  CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits).getQuantity(),
361  LVal.getAlignment().getQuantity());
362  return true;
363 }
364 
365 static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak,
366  llvm::Value *Dest, llvm::Value *Ptr,
367  llvm::Value *Val1, llvm::Value *Val2,
368  uint64_t Size, unsigned Align,
369  llvm::AtomicOrdering SuccessOrder,
370  llvm::AtomicOrdering FailureOrder) {
371  // Load the expected and desired operands; whether the cmpxchg is weak is
372  // set on the instruction below via setWeak().
372  llvm::LoadInst *Expected = CGF.Builder.CreateLoad(Val1);
373  Expected->setAlignment(Align);
374  llvm::LoadInst *Desired = CGF.Builder.CreateLoad(Val2);
375  Desired->setAlignment(Align);
376 
377  llvm::AtomicCmpXchgInst *Pair = CGF.Builder.CreateAtomicCmpXchg(
378  Ptr, Expected, Desired, SuccessOrder, FailureOrder);
379  Pair->setVolatile(E->isVolatile());
380  Pair->setWeak(IsWeak);
381 
382  // Cmp holds the result of the compare-exchange operation: true on success,
383  // false on failure.
384  llvm::Value *Old = CGF.Builder.CreateExtractValue(Pair, 0);
385  llvm::Value *Cmp = CGF.Builder.CreateExtractValue(Pair, 1);
386 
387  // This basic block is used to hold the store instruction if the operation
388  // failed.
389  llvm::BasicBlock *StoreExpectedBB =
390  CGF.createBasicBlock("cmpxchg.store_expected", CGF.CurFn);
391 
392  // This basic block is the exit point of the operation, we should end up
393  // here regardless of whether or not the operation succeeded.
394  llvm::BasicBlock *ContinueBB =
395  CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);
396 
397  // Update Expected if Expected isn't equal to Old, otherwise branch to the
398  // exit point.
399  CGF.Builder.CreateCondBr(Cmp, ContinueBB, StoreExpectedBB);
400 
401  CGF.Builder.SetInsertPoint(StoreExpectedBB);
402  // Update the memory at Expected with Old's value.
403  llvm::StoreInst *StoreExpected = CGF.Builder.CreateStore(Old, Val1);
404  StoreExpected->setAlignment(Align);
405  // Finally, branch to the exit point.
406  CGF.Builder.CreateBr(ContinueBB);
407 
408  CGF.Builder.SetInsertPoint(ContinueBB);
409  // Update the memory at Dest with Cmp's value.
410  CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType()));
411  return;
412 }
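// Sketch of the IR emitted above for a 32-bit operand (illustrative
// annotation, not part of the original file):
//   %pair = cmpxchg i32* %ptr, i32 %expected, i32 %desired seq_cst seq_cst
//   %old  = extractvalue { i32, i1 } %pair, 0
//   %ok   = extractvalue { i32, i1 } %pair, 1
//   br i1 %ok, label %cmpxchg.continue, label %cmpxchg.store_expected
// where cmpxchg.store_expected writes %old back through Val1 before falling
// through to cmpxchg.continue.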
413 
414 /// Given an ordering required on success, emit all possible cmpxchg
415 /// instructions to cope with the provided (but possibly only dynamically known)
416 /// FailureOrder.
417 static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
418  bool IsWeak, llvm::Value *Dest,
419  llvm::Value *Ptr, llvm::Value *Val1,
420  llvm::Value *Val2,
421  llvm::Value *FailureOrderVal,
422  uint64_t Size, unsigned Align,
423  llvm::AtomicOrdering SuccessOrder) {
424  llvm::AtomicOrdering FailureOrder;
425  if (llvm::ConstantInt *FO = dyn_cast<llvm::ConstantInt>(FailureOrderVal)) {
426  switch (FO->getSExtValue()) {
427  default:
428  FailureOrder = llvm::Monotonic;
429  break;
430  case AtomicExpr::AO_ABI_memory_order_consume:
431  case AtomicExpr::AO_ABI_memory_order_acquire:
432  FailureOrder = llvm::Acquire;
433  break;
434  case AtomicExpr::AO_ABI_memory_order_seq_cst:
435  FailureOrder = llvm::SequentiallyConsistent;
436  break;
437  }
438  if (FailureOrder >= SuccessOrder) {
439  // Don't assert on undefined behaviour.
440  FailureOrder =
441  llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(SuccessOrder);
442  }
443  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, Align,
444  SuccessOrder, FailureOrder);
445  return;
446  }
447 
448  // Create all the relevant BB's
449  llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
450  *SeqCstBB = nullptr;
451  MonotonicBB = CGF.createBasicBlock("monotonic_fail", CGF.CurFn);
452  if (SuccessOrder != llvm::Monotonic && SuccessOrder != llvm::Release)
453  AcquireBB = CGF.createBasicBlock("acquire_fail", CGF.CurFn);
454  if (SuccessOrder == llvm::SequentiallyConsistent)
455  SeqCstBB = CGF.createBasicBlock("seqcst_fail", CGF.CurFn);
456 
457  llvm::BasicBlock *ContBB = CGF.createBasicBlock("atomic.continue", CGF.CurFn);
458 
459  llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(FailureOrderVal, MonotonicBB);
460 
461  // Emit all the different atomics
462 
463  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
464  // doesn't matter unless someone is crazy enough to use something that
465  // doesn't fold to a constant for the ordering.
466  CGF.Builder.SetInsertPoint(MonotonicBB);
467  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
468  Size, Align, SuccessOrder, llvm::Monotonic);
469  CGF.Builder.CreateBr(ContBB);
470 
471  if (AcquireBB) {
472  CGF.Builder.SetInsertPoint(AcquireBB);
473  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
474  Size, Align, SuccessOrder, llvm::Acquire);
475  CGF.Builder.CreateBr(ContBB);
476  SI->addCase(CGF.Builder.getInt32(AtomicExpr::AO_ABI_memory_order_consume),
477  AcquireBB);
478  SI->addCase(CGF.Builder.getInt32(AtomicExpr::AO_ABI_memory_order_acquire),
479  AcquireBB);
480  }
481  if (SeqCstBB) {
482  CGF.Builder.SetInsertPoint(SeqCstBB);
483  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
484  Size, Align, SuccessOrder, llvm::SequentiallyConsistent);
485  CGF.Builder.CreateBr(ContBB);
486  SI->addCase(CGF.Builder.getInt32(AtomicExpr::AO_ABI_memory_order_seq_cst),
487  SeqCstBB);
488  }
489 
490  CGF.Builder.SetInsertPoint(ContBB);
491 }
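// Illustrative example (annotation, not part of the original file): for
//   atomic_compare_exchange_weak_explicit(&a, &e, d, memory_order_acquire,
//                                         memory_order_seq_cst);
// the failure order is a constant stronger than the success order (undefined
// behaviour), so it is clamped via getStrongestFailureOrdering() and a single
// cmpxchg is emitted; only a non-constant failure order produces the switch
// over the monotonic/acquire/seq_cst failure blocks above.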
492 
493 static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest,
494  llvm::Value *Ptr, llvm::Value *Val1, llvm::Value *Val2,
495  llvm::Value *IsWeak, llvm::Value *FailureOrder,
496  uint64_t Size, unsigned Align,
497  llvm::AtomicOrdering Order) {
498  llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
499  llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;
500 
501  switch (E->getOp()) {
502  case AtomicExpr::AO__c11_atomic_init:
503  llvm_unreachable("Already handled!");
504 
505  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
506  emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
507  FailureOrder, Size, Align, Order);
508  return;
509  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
510  emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
511  FailureOrder, Size, Align, Order);
512  return;
513  case AtomicExpr::AO__atomic_compare_exchange:
514  case AtomicExpr::AO__atomic_compare_exchange_n: {
515  if (llvm::ConstantInt *IsWeakC = dyn_cast<llvm::ConstantInt>(IsWeak)) {
516  emitAtomicCmpXchgFailureSet(CGF, E, IsWeakC->getZExtValue(), Dest, Ptr,
517  Val1, Val2, FailureOrder, Size, Align, Order);
518  } else {
519  // Create all the relevant BB's
520  llvm::BasicBlock *StrongBB =
521  CGF.createBasicBlock("cmpxchg.strong", CGF.CurFn);
522  llvm::BasicBlock *WeakBB = CGF.createBasicBlock("cmpxchg.weak", CGF.CurFn);
523  llvm::BasicBlock *ContBB =
524  CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);
525 
526  llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(IsWeak, WeakBB);
527  SI->addCase(CGF.Builder.getInt1(false), StrongBB);
528 
529  CGF.Builder.SetInsertPoint(StrongBB);
530  emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
531  FailureOrder, Size, Align, Order);
532  CGF.Builder.CreateBr(ContBB);
533 
534  CGF.Builder.SetInsertPoint(WeakBB);
535  emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
536  FailureOrder, Size, Align, Order);
537  CGF.Builder.CreateBr(ContBB);
538 
539  CGF.Builder.SetInsertPoint(ContBB);
540  }
541  return;
542  }
543  case AtomicExpr::AO__c11_atomic_load:
544  case AtomicExpr::AO__atomic_load_n:
545  case AtomicExpr::AO__atomic_load: {
546  llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
547  Load->setAtomic(Order);
548  Load->setAlignment(Size);
549  Load->setVolatile(E->isVolatile());
550  llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Load, Dest);
551  StoreDest->setAlignment(Align);
552  return;
553  }
554 
555  case AtomicExpr::AO__c11_atomic_store:
556  case AtomicExpr::AO__atomic_store:
557  case AtomicExpr::AO__atomic_store_n: {
558  assert(!Dest && "Store does not return a value");
559  llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
560  LoadVal1->setAlignment(Align);
561  llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr);
562  Store->setAtomic(Order);
563  Store->setAlignment(Size);
564  Store->setVolatile(E->isVolatile());
565  return;
566  }
567 
568  case AtomicExpr::AO__c11_atomic_exchange:
569  case AtomicExpr::AO__atomic_exchange_n:
570  case AtomicExpr::AO__atomic_exchange:
571  Op = llvm::AtomicRMWInst::Xchg;
572  break;
573 
574  case AtomicExpr::AO__atomic_add_fetch:
575  PostOp = llvm::Instruction::Add;
576  // Fall through.
577  case AtomicExpr::AO__c11_atomic_fetch_add:
578  case AtomicExpr::AO__atomic_fetch_add:
579  Op = llvm::AtomicRMWInst::Add;
580  break;
581 
582  case AtomicExpr::AO__atomic_sub_fetch:
583  PostOp = llvm::Instruction::Sub;
584  // Fall through.
585  case AtomicExpr::AO__c11_atomic_fetch_sub:
586  case AtomicExpr::AO__atomic_fetch_sub:
587  Op = llvm::AtomicRMWInst::Sub;
588  break;
589 
590  case AtomicExpr::AO__atomic_and_fetch:
591  PostOp = llvm::Instruction::And;
592  // Fall through.
593  case AtomicExpr::AO__c11_atomic_fetch_and:
594  case AtomicExpr::AO__atomic_fetch_and:
595  Op = llvm::AtomicRMWInst::And;
596  break;
597 
598  case AtomicExpr::AO__atomic_or_fetch:
599  PostOp = llvm::Instruction::Or;
600  // Fall through.
601  case AtomicExpr::AO__c11_atomic_fetch_or:
602  case AtomicExpr::AO__atomic_fetch_or:
603  Op = llvm::AtomicRMWInst::Or;
604  break;
605 
606  case AtomicExpr::AO__atomic_xor_fetch:
607  PostOp = llvm::Instruction::Xor;
608  // Fall through.
609  case AtomicExpr::AO__c11_atomic_fetch_xor:
610  case AtomicExpr::AO__atomic_fetch_xor:
611  Op = llvm::AtomicRMWInst::Xor;
612  break;
613 
614  case AtomicExpr::AO__atomic_nand_fetch:
615  PostOp = llvm::Instruction::And;
616  // Fall through.
617  case AtomicExpr::AO__atomic_fetch_nand:
618  Op = llvm::AtomicRMWInst::Nand;
619  break;
620  }
621 
622  llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
623  LoadVal1->setAlignment(Align);
624  llvm::AtomicRMWInst *RMWI =
625  CGF.Builder.CreateAtomicRMW(Op, Ptr, LoadVal1, Order);
626  RMWI->setVolatile(E->isVolatile());
627 
628  // For __atomic_*_fetch operations, perform the operation again to
629  // determine the value which was written.
630  llvm::Value *Result = RMWI;
631  if (PostOp)
632  Result = CGF.Builder.CreateBinOp(PostOp, RMWI, LoadVal1);
633  if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
634  Result = CGF.Builder.CreateNot(Result);
635  llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Result, Dest);
636  StoreDest->setAlignment(Align);
637 }
638 
639 // This function emits any expression (scalar, complex, or aggregate)
640 // into a temporary alloca.
641 static llvm::Value *
642 EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
643  llvm::Value *DeclPtr = CGF.CreateMemTemp(E->getType(), ".atomictmp");
644  CGF.EmitAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(),
645  /*Init*/ true);
646  return DeclPtr;
647 }
648 
649 static void
650 AddDirectArgument(CodeGenFunction &CGF, CallArgList &Args,
651  bool UseOptimizedLibcall, llvm::Value *Val, QualType ValTy,
652  SourceLocation Loc, CharUnits SizeInChars) {
653  if (UseOptimizedLibcall) {
654  // Load value and pass it to the function directly.
655  unsigned Align = CGF.getContext().getTypeAlignInChars(ValTy).getQuantity();
656  int64_t SizeInBits = CGF.getContext().toBits(SizeInChars);
657  ValTy =
658  CGF.getContext().getIntTypeForBitwidth(SizeInBits, /*Signed=*/false);
659  llvm::Type *IPtrTy = llvm::IntegerType::get(CGF.getLLVMContext(),
660  SizeInBits)->getPointerTo();
661  Val = CGF.EmitLoadOfScalar(CGF.Builder.CreateBitCast(Val, IPtrTy), false,
662  Align, CGF.getContext().getPointerType(ValTy),
663  Loc);
664  // Coerce the value into an appropriately sized integer type.
665  Args.add(RValue::get(Val), ValTy);
666  } else {
667  // Non-optimized functions always take a reference.
668  Args.add(RValue::get(CGF.EmitCastToVoidPtr(Val)),
669  CGF.getContext().VoidPtrTy);
670  }
671 }
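// Illustrative contrast between the two conventions (annotation with assumed
// libatomic-style prototypes, not part of the original file): an optimized
// call receives the value directly, e.g.
//   i32 @__atomic_fetch_add_4(i8* %obj, i32 %val, i32 %order)
// while the generic form takes everything by pointer, e.g.
//   void @__atomic_exchange(i64 %size, i8* %obj, i8* %val, i8* %ret, i32 %order)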
672 
673 RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
674  QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
675  QualType MemTy = AtomicTy;
676  if (const AtomicType *AT = AtomicTy->getAs<AtomicType>())
677  MemTy = AT->getValueType();
678  CharUnits sizeChars = getContext().getTypeSizeInChars(AtomicTy);
679  uint64_t Size = sizeChars.getQuantity();
680  CharUnits alignChars = getContext().getTypeAlignInChars(AtomicTy);
681  unsigned Align = alignChars.getQuantity();
682  unsigned MaxInlineWidthInBits =
683  getTarget().getMaxAtomicInlineWidth();
684  bool UseLibcall = (Size != Align ||
685  getContext().toBits(sizeChars) > MaxInlineWidthInBits);
686 
687  llvm::Value *IsWeak = nullptr, *OrderFail = nullptr, *Val1 = nullptr,
688  *Val2 = nullptr;
689  llvm::Value *Ptr = EmitScalarExpr(E->getPtr());
690 
691  if (E->getOp() == AtomicExpr::AO__c11_atomic_init) {
692  assert(!Dest && "Init does not return a value");
693  LValue lvalue = LValue::MakeAddr(Ptr, AtomicTy, alignChars, getContext());
694  EmitAtomicInit(E->getVal1(), lvalue);
695  return RValue::get(nullptr);
696  }
697 
698  llvm::Value *Order = EmitScalarExpr(E->getOrder());
699 
700  switch (E->getOp()) {
701  case AtomicExpr::AO__c11_atomic_init:
702  llvm_unreachable("Already handled!");
703 
704  case AtomicExpr::AO__c11_atomic_load:
705  case AtomicExpr::AO__atomic_load_n:
706  break;
707 
708  case AtomicExpr::AO__atomic_load:
709  Dest = EmitScalarExpr(E->getVal1());
710  break;
711 
712  case AtomicExpr::AO__atomic_store:
713  Val1 = EmitScalarExpr(E->getVal1());
714  break;
715 
716  case AtomicExpr::AO__atomic_exchange:
717  Val1 = EmitScalarExpr(E->getVal1());
718  Dest = EmitScalarExpr(E->getVal2());
719  break;
720 
721  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
722  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
723  case AtomicExpr::AO__atomic_compare_exchange_n:
724  case AtomicExpr::AO__atomic_compare_exchange:
725  Val1 = EmitScalarExpr(E->getVal1());
726  if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
727  Val2 = EmitScalarExpr(E->getVal2());
728  else
729  Val2 = EmitValToTemp(*this, E->getVal2());
730  OrderFail = EmitScalarExpr(E->getOrderFail());
731  if (E->getNumSubExprs() == 6)
732  IsWeak = EmitScalarExpr(E->getWeak());
733  break;
734 
735  case AtomicExpr::AO__c11_atomic_fetch_add:
736  case AtomicExpr::AO__c11_atomic_fetch_sub:
737  if (MemTy->isPointerType()) {
738  // For pointer arithmetic, we're required to do a bit of math:
739  // adding 1 to an int* is not the same as adding 1 to a uintptr_t.
740  // ... but only for the C11 builtins. The GNU builtins expect the
741  // user to multiply by sizeof(T).
742  QualType Val1Ty = E->getVal1()->getType();
743  llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
744  CharUnits PointeeIncAmt =
745  getContext().getTypeSizeInChars(MemTy->castAs<PointerType>()->getPointeeType());
746  Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
747  Val1 = CreateMemTemp(Val1Ty, ".atomictmp");
748  EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Val1, Val1Ty));
749  break;
750  }
751  // Fall through.
752  case AtomicExpr::AO__atomic_fetch_add:
753  case AtomicExpr::AO__atomic_fetch_sub:
754  case AtomicExpr::AO__atomic_add_fetch:
755  case AtomicExpr::AO__atomic_sub_fetch:
756  case AtomicExpr::AO__c11_atomic_store:
757  case AtomicExpr::AO__c11_atomic_exchange:
758  case AtomicExpr::AO__atomic_store_n:
759  case AtomicExpr::AO__atomic_exchange_n:
760  case AtomicExpr::AO__c11_atomic_fetch_and:
761  case AtomicExpr::AO__c11_atomic_fetch_or:
762  case AtomicExpr::AO__c11_atomic_fetch_xor:
763  case AtomicExpr::AO__atomic_fetch_and:
764  case AtomicExpr::AO__atomic_fetch_or:
765  case AtomicExpr::AO__atomic_fetch_xor:
766  case AtomicExpr::AO__atomic_fetch_nand:
767  case AtomicExpr::AO__atomic_and_fetch:
768  case AtomicExpr::AO__atomic_or_fetch:
769  case AtomicExpr::AO__atomic_xor_fetch:
770  case AtomicExpr::AO__atomic_nand_fetch:
771  Val1 = EmitValToTemp(*this, E->getVal1());
772  break;
773  }
774 
775  QualType RValTy = E->getType().getUnqualifiedType();
776 
777  auto GetDest = [&] {
778  if (!RValTy->isVoidType() && !Dest) {
779  Dest = CreateMemTemp(RValTy, ".atomicdst");
780  }
781  return Dest;
782  };
783 
784  // Use a library call. See: http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
785  if (UseLibcall) {
786  bool UseOptimizedLibcall = false;
787  switch (E->getOp()) {
788  case AtomicExpr::AO__c11_atomic_fetch_add:
789  case AtomicExpr::AO__atomic_fetch_add:
790  case AtomicExpr::AO__c11_atomic_fetch_and:
791  case AtomicExpr::AO__atomic_fetch_and:
792  case AtomicExpr::AO__c11_atomic_fetch_or:
793  case AtomicExpr::AO__atomic_fetch_or:
794  case AtomicExpr::AO__c11_atomic_fetch_sub:
795  case AtomicExpr::AO__atomic_fetch_sub:
796  case AtomicExpr::AO__c11_atomic_fetch_xor:
797  case AtomicExpr::AO__atomic_fetch_xor:
798  // For these, only library calls for certain sizes exist.
799  UseOptimizedLibcall = true;
800  break;
801  default:
802  // Only use optimized library calls for sizes for which they exist.
803  if (Size == 1 || Size == 2 || Size == 4 || Size == 8)
804  UseOptimizedLibcall = true;
805  break;
806  }
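   // Illustrative outcomes of the switch above (annotation, not part of the
   // original file): an 8-byte __c11_atomic_fetch_add always takes the sized
   // path and becomes __atomic_fetch_add_8, while an odd-sized (say 3-byte)
   // atomic load falls back to the generic, memory-indirect __atomic_load.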
807 
808  CallArgList Args;
809  if (!UseOptimizedLibcall) {
810  // For non-optimized library calls, the size is the first parameter
811  Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
812  getContext().getSizeType());
813  }
814  // Atomic address is the first or second parameter
815  Args.add(RValue::get(EmitCastToVoidPtr(Ptr)), getContext().VoidPtrTy);
816 
817  std::string LibCallName;
818  QualType LoweredMemTy =
819  MemTy->isPointerType() ? getContext().getIntPtrType() : MemTy;
820  QualType RetTy;
821  bool HaveRetTy = false;
822  switch (E->getOp()) {
823  // There is only one libcall for compare and exchange, because there is no
824  // optimisation benefit possible from a libcall version of a weak compare
825  // and exchange.
826  // bool __atomic_compare_exchange(size_t size, void *mem, void *expected,
827  // void *desired, int success, int failure)
828  // bool __atomic_compare_exchange_N(T *mem, T *expected, T desired,
829  // int success, int failure)
830  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
831  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
832  case AtomicExpr::AO__atomic_compare_exchange:
833  case AtomicExpr::AO__atomic_compare_exchange_n:
834  LibCallName = "__atomic_compare_exchange";
835  RetTy = getContext().BoolTy;
836  HaveRetTy = true;
837  Args.add(RValue::get(EmitCastToVoidPtr(Val1)), getContext().VoidPtrTy);
838  AddDirectArgument(*this, Args, UseOptimizedLibcall, Val2, MemTy,
839  E->getExprLoc(), sizeChars);
840  Args.add(RValue::get(Order), getContext().IntTy);
841  Order = OrderFail;
842  break;
843  // void __atomic_exchange(size_t size, void *mem, void *val, void *return,
844  // int order)
845  // T __atomic_exchange_N(T *mem, T val, int order)
846  case AtomicExpr::AO__c11_atomic_exchange:
847  case AtomicExpr::AO__atomic_exchange_n:
848  case AtomicExpr::AO__atomic_exchange:
849  LibCallName = "__atomic_exchange";
850  AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
851  E->getExprLoc(), sizeChars);
852  break;
853  // void __atomic_store(size_t size, void *mem, void *val, int order)
854  // void __atomic_store_N(T *mem, T val, int order)
855  case AtomicExpr::AO__c11_atomic_store:
856  case AtomicExpr::AO__atomic_store:
857  case AtomicExpr::AO__atomic_store_n:
858  LibCallName = "__atomic_store";
859  RetTy = getContext().VoidTy;
860  HaveRetTy = true;
861  AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
862  E->getExprLoc(), sizeChars);
863  break;
864  // void __atomic_load(size_t size, void *mem, void *return, int order)
865  // T __atomic_load_N(T *mem, int order)
866  case AtomicExpr::AO__c11_atomic_load:
867  case AtomicExpr::AO__atomic_load:
868  case AtomicExpr::AO__atomic_load_n:
869  LibCallName = "__atomic_load";
870  break;
871  // T __atomic_fetch_add_N(T *mem, T val, int order)
872  case AtomicExpr::AO__c11_atomic_fetch_add:
873  case AtomicExpr::AO__atomic_fetch_add:
874  LibCallName = "__atomic_fetch_add";
875  AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, LoweredMemTy,
876  E->getExprLoc(), sizeChars);
877  break;
878  // T __atomic_fetch_and_N(T *mem, T val, int order)
879  case AtomicExpr::AO__c11_atomic_fetch_and:
880  case AtomicExpr::AO__atomic_fetch_and:
881  LibCallName = "__atomic_fetch_and";
882  AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
883  E->getExprLoc(), sizeChars);
884  break;
885  // T __atomic_fetch_or_N(T *mem, T val, int order)
886  case AtomicExpr::AO__c11_atomic_fetch_or:
887  case AtomicExpr::AO__atomic_fetch_or:
888  LibCallName = "__atomic_fetch_or";
889  AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
890  E->getExprLoc(), sizeChars);
891  break;
892  // T __atomic_fetch_sub_N(T *mem, T val, int order)
893  case AtomicExpr::AO__c11_atomic_fetch_sub:
894  case AtomicExpr::AO__atomic_fetch_sub:
895  LibCallName = "__atomic_fetch_sub";
896  AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, LoweredMemTy,
897  E->getExprLoc(), sizeChars);
898  break;
899  // T __atomic_fetch_xor_N(T *mem, T val, int order)
900  case AtomicExpr::AO__c11_atomic_fetch_xor:
901  case AtomicExpr::AO__atomic_fetch_xor:
902  LibCallName = "__atomic_fetch_xor";
903  AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
904  E->getExprLoc(), sizeChars);
905  break;
906  default: return EmitUnsupportedRValue(E, "atomic library call");
907  }
908 
909  // Optimized functions have the size in their name.
910  if (UseOptimizedLibcall)
911  LibCallName += "_" + llvm::utostr(Size);
912  // By default, assume we return a value of the atomic type.
913  if (!HaveRetTy) {
914  if (UseOptimizedLibcall) {
915  // Value is returned directly.
916  // The function returns an appropriately sized integer type.
917  RetTy = getContext().getIntTypeForBitwidth(
918  getContext().toBits(sizeChars), /*Signed=*/false);
919  } else {
920  // Value is returned through parameter before the order.
921  RetTy = getContext().VoidTy;
922  Args.add(RValue::get(EmitCastToVoidPtr(GetDest())), getContext().VoidPtrTy);
923  }
924  }
925  // order is always the last parameter
926  Args.add(RValue::get(Order),
927  getContext().IntTy);
928 
929  RValue Res = emitAtomicLibcall(*this, LibCallName, RetTy, Args);
930  // The value is returned directly from the libcall.
931  if (HaveRetTy && !RetTy->isVoidType())
932  return Res;
933  // If the expression produces no value (e.g. an atomic store), we are done.
934  if (RValTy->isVoidType())
935  return RValue::get(nullptr);
936  // The value is returned directly for optimized libcalls, but the caller
937  // expects an out-param.
938  if (UseOptimizedLibcall) {
939  llvm::Value *ResVal = Res.getScalarVal();
940  llvm::StoreInst *StoreDest = Builder.CreateStore(
941  ResVal,
942  Builder.CreateBitCast(GetDest(), ResVal->getType()->getPointerTo()));
943  StoreDest->setAlignment(Align);
944  }
945  return convertTempToRValue(Dest, RValTy, E->getExprLoc());
946  }
947 
948  bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
949  E->getOp() == AtomicExpr::AO__atomic_store ||
950  E->getOp() == AtomicExpr::AO__atomic_store_n;
951  bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
952  E->getOp() == AtomicExpr::AO__atomic_load ||
953  E->getOp() == AtomicExpr::AO__atomic_load_n;
954 
955  llvm::Type *ITy =
956  llvm::IntegerType::get(getLLVMContext(), Size * 8);
957  llvm::Value *OrigDest = GetDest();
958  Ptr = Builder.CreateBitCast(
959  Ptr, ITy->getPointerTo(Ptr->getType()->getPointerAddressSpace()));
960  if (Val1) Val1 = Builder.CreateBitCast(Val1, ITy->getPointerTo());
961  if (Val2) Val2 = Builder.CreateBitCast(Val2, ITy->getPointerTo());
962  if (Dest && !E->isCmpXChg())
963  Dest = Builder.CreateBitCast(Dest, ITy->getPointerTo());
964 
965  if (isa<llvm::ConstantInt>(Order)) {
966  int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
967  switch (ord) {
968  case AtomicExpr::AO_ABI_memory_order_relaxed:
969  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
970  Size, Align, llvm::Monotonic);
971  break;
972  case AtomicExpr::AO_ABI_memory_order_consume:
973  case AtomicExpr::AO_ABI_memory_order_acquire:
974  if (IsStore)
975  break; // Avoid crashing on code with undefined behavior
976  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
977  Size, Align, llvm::Acquire);
978  break;
979  case AtomicExpr::AO_ABI_memory_order_release:
980  if (IsLoad)
981  break; // Avoid crashing on code with undefined behavior
982  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
983  Size, Align, llvm::Release);
984  break;
985  case AtomicExpr::AO_ABI_memory_order_acq_rel:
986  if (IsLoad || IsStore)
987  break; // Avoid crashing on code with undefined behavior
988  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
989  Size, Align, llvm::AcquireRelease);
990  break;
991  case AtomicExpr::AO_ABI_memory_order_seq_cst:
992  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
993  Size, Align, llvm::SequentiallyConsistent);
994  break;
995  default: // invalid order
996  // We should not ever get here normally, but it's hard to
997  // enforce that in general.
998  break;
999  }
1000  if (RValTy->isVoidType())
1001  return RValue::get(nullptr);
1002  return convertTempToRValue(OrigDest, RValTy, E->getExprLoc());
1003  }
1004 
1005  // Long case, when Order isn't obviously constant.
1006 
1007  // Create all the relevant BB's
1008  llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
1009  *ReleaseBB = nullptr, *AcqRelBB = nullptr,
1010  *SeqCstBB = nullptr;
1011  MonotonicBB = createBasicBlock("monotonic", CurFn);
1012  if (!IsStore)
1013  AcquireBB = createBasicBlock("acquire", CurFn);
1014  if (!IsLoad)
1015  ReleaseBB = createBasicBlock("release", CurFn);
1016  if (!IsLoad && !IsStore)
1017  AcqRelBB = createBasicBlock("acqrel", CurFn);
1018  SeqCstBB = createBasicBlock("seqcst", CurFn);
1019  llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
1020 
1021  // Create the switch for the split
1022  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
1023  // doesn't matter unless someone is crazy enough to use something that
1024  // doesn't fold to a constant for the ordering.
1025  Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
1026  llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);
1027 
1028  // Emit all the different atomics
1029  Builder.SetInsertPoint(MonotonicBB);
1030  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
1031  Size, Align, llvm::Monotonic);
1032  Builder.CreateBr(ContBB);
1033  if (!IsStore) {
1034  Builder.SetInsertPoint(AcquireBB);
1035  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
1036  Size, Align, llvm::Acquire);
1037  Builder.CreateBr(ContBB);
1038  SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_consume),
1039  AcquireBB);
1040  SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_acquire),
1041  AcquireBB);
1042  }
1043  if (!IsLoad) {
1044  Builder.SetInsertPoint(ReleaseBB);
1045  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
1046  Size, Align, llvm::Release);
1047  Builder.CreateBr(ContBB);
1048  SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_release),
1049  ReleaseBB);
1050  }
1051  if (!IsLoad && !IsStore) {
1052  Builder.SetInsertPoint(AcqRelBB);
1053  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
1054  Size, Align, llvm::AcquireRelease);
1055  Builder.CreateBr(ContBB);
1056  SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_acq_rel),
1057  AcqRelBB);
1058  }
1059  Builder.SetInsertPoint(SeqCstBB);
1060  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
1061  Size, Align, llvm::SequentiallyConsistent);
1062  Builder.CreateBr(ContBB);
1063  SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_seq_cst),
1064  SeqCstBB);
1065 
1066  // Cleanup and return
1067  Builder.SetInsertPoint(ContBB);
1068  if (RValTy->isVoidType())
1069  return RValue::get(nullptr);
1070  return convertTempToRValue(OrigDest, RValTy, E->getExprLoc());
1071 }
1072 
1073 llvm::Value *AtomicInfo::emitCastToAtomicIntPointer(llvm::Value *addr) const {
1074  unsigned addrspace =
1075  cast<llvm::PointerType>(addr->getType())->getAddressSpace();
1076  llvm::IntegerType *ty =
1077  llvm::IntegerType::get(CGF.getLLVMContext(), AtomicSizeInBits);
1078  return CGF.Builder.CreateBitCast(addr, ty->getPointerTo(addrspace));
1079 }
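// Illustrative example (annotation, not part of the original file): for a
// 32-bit atomic accessed through 'float addrspace(1)* %p', this returns
// 'i32 addrspace(1)*', preserving the address space so the subsequent atomic
// instruction operates on an integer of exactly AtomicSizeInBits.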
1080 
1081 RValue AtomicInfo::convertTempToRValue(llvm::Value *addr,
1082  AggValueSlot resultSlot,
1083  SourceLocation loc, bool AsValue) const {
1084  if (LVal.isSimple()) {
1085  if (EvaluationKind == TEK_Aggregate)
1086  return resultSlot.asRValue();
1087 
1088  // Drill into the padding structure if we have one.
1089  if (hasPadding())
1090  addr = CGF.Builder.CreateStructGEP(nullptr, addr, 0);
1091 
1092  // Otherwise, just convert the temporary to an r-value using the
1093  // normal conversion routine.
1094  return CGF.convertTempToRValue(addr, getValueType(), loc);
1095  }
1096  if (!AsValue)
1097  // Get RValue from temp memory as atomic for non-simple lvalues
1098  return RValue::get(
1099  CGF.Builder.CreateAlignedLoad(addr, AtomicAlign.getQuantity()));
1100  if (LVal.isBitField())
1101  return CGF.EmitLoadOfBitfieldLValue(LValue::MakeBitfield(
1102  addr, LVal.getBitFieldInfo(), LVal.getType(), LVal.getAlignment()));
1103  if (LVal.isVectorElt())
1104  return CGF.EmitLoadOfLValue(LValue::MakeVectorElt(addr, LVal.getVectorIdx(),
1105  LVal.getType(),
1106  LVal.getAlignment()),
1107  loc);
1108  assert(LVal.isExtVectorElt());
1109  return CGF.EmitLoadOfExtVectorElementLValue(LValue::MakeExtVectorElt(
1110  addr, LVal.getExtVectorElts(), LVal.getType(), LVal.getAlignment()));
1111 }
1112 
1113 RValue AtomicInfo::ConvertIntToValueOrAtomic(llvm::Value *IntVal,
1114  AggValueSlot ResultSlot,
1115  SourceLocation Loc,
1116  bool AsValue) const {
1117  // Try to avoid going through memory in some easy cases.
1118  assert(IntVal->getType()->isIntegerTy() && "Expected integer value");
1119  if (getEvaluationKind() == TEK_Scalar &&
1120  (((!LVal.isBitField() ||
1121  LVal.getBitFieldInfo().Size == ValueSizeInBits) &&
1122  !hasPadding()) ||
1123  !AsValue)) {
1124  auto *ValTy = AsValue
1125  ? CGF.ConvertTypeForMem(ValueTy)
1126  : getAtomicAddress()->getType()->getPointerElementType();
1127  if (ValTy->isIntegerTy()) {
1128  assert(IntVal->getType() == ValTy && "Different integer types.");
1129  return RValue::get(CGF.EmitFromMemory(IntVal, ValueTy));
1130  } else if (ValTy->isPointerTy())
1131  return RValue::get(CGF.Builder.CreateIntToPtr(IntVal, ValTy));
1132  else if (llvm::CastInst::isBitCastable(IntVal->getType(), ValTy))
1133  return RValue::get(CGF.Builder.CreateBitCast(IntVal, ValTy));
1134  }
1135 
1136  // Create a temporary. This needs to be big enough to hold the
1137  // atomic integer.
1138  llvm::Value *Temp;
1139  bool TempIsVolatile = false;
1140  CharUnits TempAlignment;
1141  if (AsValue && getEvaluationKind() == TEK_Aggregate) {
1142  assert(!ResultSlot.isIgnored());
1143  Temp = ResultSlot.getAddr();
1144  TempAlignment = getValueAlignment();
1145  TempIsVolatile = ResultSlot.isVolatile();
1146  } else {
1147  Temp = CreateTempAlloca();
1148  TempAlignment = getAtomicAlignment();
1149  }
1150 
1151  // Slam the integer into the temporary.
1152  llvm::Value *CastTemp = emitCastToAtomicIntPointer(Temp);
1153  CGF.Builder.CreateAlignedStore(IntVal, CastTemp, TempAlignment.getQuantity())
1154  ->setVolatile(TempIsVolatile);
1155 
1156  return convertTempToRValue(Temp, ResultSlot, Loc, AsValue);
1157 }
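// Illustrative paths through the function above (annotation, not part of the
// original file): a native load of _Atomic(float) yields an i32 that is
// bitcast directly to float; an aggregate result instead takes the slow path,
// storing the integer into a temporary and reloading it as an aggregate
// r-value.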
1158 
1159 void AtomicInfo::EmitAtomicLoadLibcall(llvm::Value *AddrForLoaded,
1160  llvm::AtomicOrdering AO, bool) {
1161  // void __atomic_load(size_t size, void *mem, void *return, int order);
1162  CallArgList Args;
1163  Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType());
1164  Args.add(RValue::get(CGF.EmitCastToVoidPtr(getAtomicAddress())),
1165  CGF.getContext().VoidPtrTy);
1166  Args.add(RValue::get(CGF.EmitCastToVoidPtr(AddrForLoaded)),
1167  CGF.getContext().VoidPtrTy);
1168  Args.add(RValue::get(
1169  llvm::ConstantInt::get(CGF.IntTy, translateAtomicOrdering(AO))),
1170  CGF.getContext().IntTy);
1171  emitAtomicLibcall(CGF, "__atomic_load", CGF.getContext().VoidTy, Args);
1172 }
1173 
1174 llvm::Value *AtomicInfo::EmitAtomicLoadOp(llvm::AtomicOrdering AO,
1175  bool IsVolatile) {
1176  // Okay, we're doing this natively.
1177  llvm::Value *Addr = emitCastToAtomicIntPointer(getAtomicAddress());
1178  llvm::LoadInst *Load = CGF.Builder.CreateLoad(Addr, "atomic-load");
1179  Load->setAtomic(AO);
1180 
1181  // Other decoration.
1182  Load->setAlignment(getAtomicAlignment().getQuantity());
1183  if (IsVolatile)
1184  Load->setVolatile(true);
1185  if (LVal.getTBAAInfo())
1186  CGF.CGM.DecorateInstruction(Load, LVal.getTBAAInfo());
1187  return Load;
1188 }
1189 
1190 /// An LValue is a candidate for having its loads and stores be made atomic if
1191 /// we are operating under /volatile:ms *and* the LValue itself is volatile and
1192 /// such an operation can be performed without a libcall.
1193 bool CodeGenFunction::LValueIsSuitableForInlineAtomic(LValue LV) {
1194  AtomicInfo AI(*this, LV);
1195  bool IsVolatile = LV.isVolatile() || hasVolatileMember(LV.getType());
1196  // An atomic is inline if we don't need to use a libcall.
1197  bool AtomicIsInline = !AI.shouldUseLibcall();
1198  return CGM.getCodeGenOpts().MSVolatile && IsVolatile && AtomicIsInline;
1199 }
1200 
1201 /// A type is a candidate for having its loads and stores be made atomic if
1202 /// we are operating under /volatile:ms *and* we know the access is volatile and
1203 /// such an operation can be performed without a libcall.
1204 bool CodeGenFunction::typeIsSuitableForInlineAtomic(QualType Ty,
1205  bool IsVolatile) const {
1206  // An atomic is inline if we don't need to use a libcall (e.g. it is builtin).
1207  bool AtomicIsInline = getContext().getTargetInfo().hasBuiltinAtomic(
1208  getContext().getTypeSize(Ty), getContext().getTypeAlign(Ty));
1209  return CGM.getCodeGenOpts().MSVolatile && IsVolatile && AtomicIsInline;
1210 }
1211 
1212 RValue CodeGenFunction::EmitAtomicLoad(LValue LV, SourceLocation SL,
1213  AggValueSlot Slot) {
1214  llvm::AtomicOrdering AO;
1215  bool IsVolatile = LV.isVolatileQualified();
1216  if (LV.getType()->isAtomicType()) {
1217  AO = llvm::SequentiallyConsistent;
1218  } else {
1219  AO = llvm::Acquire;
1220  IsVolatile = true;
1221  }
1222  return EmitAtomicLoad(LV, SL, AO, IsVolatile, Slot);
1223 }
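// Illustrative effect (annotation assuming /volatile:ms, not part of the
// original file): a load of a plain 'volatile int' reaches this overload with
// a non-atomic type and is emitted as an acquire atomic load, while a genuine
// _Atomic load keeps seq_cst ordering.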
1224 
1225 RValue AtomicInfo::EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
1226  bool AsValue, llvm::AtomicOrdering AO,
1227  bool IsVolatile) {
1228  // Check whether we should use a library call.
1229  if (shouldUseLibcall()) {
1230  llvm::Value *TempAddr;
1231  if (LVal.isSimple() && !ResultSlot.isIgnored()) {
1232  assert(getEvaluationKind() == TEK_Aggregate);
1233  TempAddr = ResultSlot.getAddr();
1234  } else
1235  TempAddr = CreateTempAlloca();
1236 
1237  EmitAtomicLoadLibcall(TempAddr, AO, IsVolatile);
1238 
1239  // Okay, turn that back into the original value or whole atomic (for
1240  // non-simple lvalues) type.
1241  return convertTempToRValue(TempAddr, ResultSlot, Loc, AsValue);
1242  }
1243 
1244  // Okay, we're doing this natively.
1245  auto *Load = EmitAtomicLoadOp(AO, IsVolatile);
1246 
1247  // If we're ignoring an aggregate return, don't do anything.
1248  if (getEvaluationKind() == TEK_Aggregate && ResultSlot.isIgnored())
1249  return RValue::getAggregate(nullptr, false);
1250 
1251  // Okay, turn that back into the original value or atomic (for non-simple
1252  // lvalues) type.
1253  return ConvertIntToValueOrAtomic(Load, ResultSlot, Loc, AsValue);
1254 }
1255 
1256 /// Emit a load from an l-value of atomic type. Note that the r-value
1257 /// we produce is an r-value of the atomic *value* type.
1258 RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
1259  llvm::AtomicOrdering AO, bool IsVolatile,
1260  AggValueSlot resultSlot) {
1261  AtomicInfo Atomics(*this, src);
1262  return Atomics.EmitAtomicLoad(resultSlot, loc, /*AsValue=*/true, AO,
1263  IsVolatile);
1264 }
1265 
1266 /// Copy an r-value into memory as part of storing to an atomic type.
1267 /// This needs to create a bit-pattern suitable for atomic operations.
1268 void AtomicInfo::emitCopyIntoMemory(RValue rvalue) const {
1269  assert(LVal.isSimple());
1270  // If we have an r-value, the rvalue should be of the atomic type,
1271  // which means that the caller is responsible for having zeroed
1272  // any padding. Just do an aggregate copy of that type.
1273  if (rvalue.isAggregate()) {
1274  CGF.EmitAggregateCopy(getAtomicAddress(),
1275  rvalue.getAggregateAddr(),
1276  getAtomicType(),
1277  (rvalue.isVolatileQualified()
1278  || LVal.isVolatileQualified()),
1279  LVal.getAlignment());
1280  return;
1281  }
1282 
1283  // Okay, otherwise we're copying stuff.
1284 
1285  // Zero out the buffer if necessary.
1286  emitMemSetZeroIfNecessary();
1287 
1288  // Drill past the padding if present.
1289  LValue TempLVal = projectValue();
1290 
1291  // Okay, store the rvalue in.
1292  if (rvalue.isScalar()) {
1293  CGF.EmitStoreOfScalar(rvalue.getScalarVal(), TempLVal, /*init*/ true);
1294  } else {
1295  CGF.EmitStoreOfComplex(rvalue.getComplexVal(), TempLVal, /*init*/ true);
1296  }
1297 }
1298 
1299 
1300 /// Materialize an r-value into memory for the purposes of storing it
1301 /// to an atomic type.
1302 llvm::Value *AtomicInfo::materializeRValue(RValue rvalue) const {
1303  // Aggregate r-values are already in memory, and EmitAtomicStore
1304  // requires them to be values of the atomic type.
1305  if (rvalue.isAggregate())
1306  return rvalue.getAggregateAddr();
1307 
1308  // Otherwise, make a temporary and materialize into it.
1309  LValue TempLV = CGF.MakeAddrLValue(CreateTempAlloca(), getAtomicType(),
1310  getAtomicAlignment());
1311  AtomicInfo Atomics(CGF, TempLV);
1312  Atomics.emitCopyIntoMemory(rvalue);
1313  return TempLV.getAddress();
1314 }
1315 
1316 llvm::Value *AtomicInfo::convertRValueToInt(RValue RVal) const {
1317  // If we've got a scalar value of the right size, try to avoid going
1318  // through memory.
1319  if (RVal.isScalar() && (!hasPadding() || !LVal.isSimple())) {
1320  llvm::Value *Value = RVal.getScalarVal();
1321  if (isa<llvm::IntegerType>(Value->getType()))
1322  return CGF.EmitToMemory(Value, ValueTy);
1323  else {
1324  llvm::IntegerType *InputIntTy = llvm::IntegerType::get(
1325  CGF.getLLVMContext(),
1326  LVal.isSimple() ? getValueSizeInBits() : getAtomicSizeInBits());
1327  if (isa<llvm::PointerType>(Value->getType()))
1328  return CGF.Builder.CreatePtrToInt(Value, InputIntTy);
1329  else if (llvm::BitCastInst::isBitCastable(Value->getType(), InputIntTy))
1330  return CGF.Builder.CreateBitCast(Value, InputIntTy);
1331  }
1332  }
1333  // Otherwise, we need to go through memory.
1334  // Put the r-value in memory.
1335  llvm::Value *Addr = materializeRValue(RVal);
1336 
1337  // Cast the temporary to the atomic int type and pull a value out.
1338  Addr = emitCastToAtomicIntPointer(Addr);
1339  return CGF.Builder.CreateAlignedLoad(Addr,
1340  getAtomicAlignment().getQuantity());
1341 }
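// Illustrative example (annotation, not part of the original file): a scalar
// float r-value of full width is bitcast straight to i32 above, and a pointer
// goes through ptrtoint; a padded or aggregate r-value is first materialized
// in memory and then reloaded through the atomic int pointer.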
1342 
1343 std::pair<llvm::Value *, llvm::Value *> AtomicInfo::EmitAtomicCompareExchangeOp(
1344  llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
1345  llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak) {
1346  // Do the atomic store.
1347  auto *Addr = emitCastToAtomicIntPointer(getAtomicAddress());
1348  auto *Inst = CGF.Builder.CreateAtomicCmpXchg(Addr, ExpectedVal, DesiredVal,
1349  Success, Failure);
1350  // Other decoration.
1351  Inst->setVolatile(LVal.isVolatileQualified());
1352  Inst->setWeak(IsWeak);
1353 
1354  // Okay, turn that back into the original value type.
1355  auto *PreviousVal = CGF.Builder.CreateExtractValue(Inst, /*Idxs=*/0);
1356  auto *SuccessFailureVal = CGF.Builder.CreateExtractValue(Inst, /*Idxs=*/1);
1357  return std::make_pair(PreviousVal, SuccessFailureVal);
1358 }
1359 
1360 llvm::Value *
1361 AtomicInfo::EmitAtomicCompareExchangeLibcall(llvm::Value *ExpectedAddr,
1362  llvm::Value *DesiredAddr,
1363  llvm::AtomicOrdering Success,
1364  llvm::AtomicOrdering Failure) {
1365  // bool __atomic_compare_exchange(size_t size, void *obj, void *expected,
1366  // void *desired, int success, int failure);
1367  CallArgList Args;
1368  Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType());
1369  Args.add(RValue::get(CGF.EmitCastToVoidPtr(getAtomicAddress())),
1370  CGF.getContext().VoidPtrTy);
1371  Args.add(RValue::get(CGF.EmitCastToVoidPtr(ExpectedAddr)),
1372  CGF.getContext().VoidPtrTy);
1373  Args.add(RValue::get(CGF.EmitCastToVoidPtr(DesiredAddr)),
1374  CGF.getContext().VoidPtrTy);
1375  Args.add(RValue::get(llvm::ConstantInt::get(
1376  CGF.IntTy, translateAtomicOrdering(Success))),
1377  CGF.getContext().IntTy);
1378  Args.add(RValue::get(llvm::ConstantInt::get(
1379  CGF.IntTy, translateAtomicOrdering(Failure))),
1380  CGF.getContext().IntTy);
1381  auto SuccessFailureRVal = emitAtomicLibcall(CGF, "__atomic_compare_exchange",
1382  CGF.getContext().BoolTy, Args);
1383 
1384  return SuccessFailureRVal.getScalarVal();
1385 }
1386 
1387 std::pair<RValue, llvm::Value *> AtomicInfo::EmitAtomicCompareExchange(
1388  RValue Expected, RValue Desired, llvm::AtomicOrdering Success,
1389  llvm::AtomicOrdering Failure, bool IsWeak) {
1390  if (Failure >= Success)
1391  // Don't assert on undefined behavior.
1392  Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(Success);
1393 
1394  // Check whether we should use a library call.
1395  if (shouldUseLibcall()) {
1396  // Produce a source address.
1397  auto *ExpectedAddr = materializeRValue(Expected);
1398  auto *DesiredAddr = materializeRValue(Desired);
1399  auto *Res = EmitAtomicCompareExchangeLibcall(ExpectedAddr, DesiredAddr,
1400  Success, Failure);
1401  return std::make_pair(
1402  convertTempToRValue(ExpectedAddr, AggValueSlot::ignored(),
1403  SourceLocation(), /*AsValue=*/false),
1404  Res);
1405  }
1406 
1407  // If we've got a scalar value of the right size, try to avoid going
1408  // through memory.
1409  auto *ExpectedVal = convertRValueToInt(Expected);
1410  auto *DesiredVal = convertRValueToInt(Desired);
1411  auto Res = EmitAtomicCompareExchangeOp(ExpectedVal, DesiredVal, Success,
1412  Failure, IsWeak);
1413  return std::make_pair(
1414  ConvertIntToValueOrAtomic(Res.first, AggValueSlot::ignored(),
1415  SourceLocation(), /*AsValue=*/false),
1416  Res.second);
1417 }
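// Illustrative call (annotation, not part of the original file):
//   auto Res = Atomics.EmitAtomicCompareExchange(Expected, Desired,
//                                                llvm::SequentiallyConsistent,
//                                                llvm::SequentiallyConsistent,
//                                                /*IsWeak=*/false);
// Res.first is the previous value as an RValue and Res.second the i1 success
// flag; the libcall vs. native cmpxchg decision is made internally.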
1418 
1419 static void
1420 EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics, RValue OldRVal,
1421  const llvm::function_ref<RValue(RValue)> &UpdateOp,
1422  llvm::Value *DesiredAddr) {
1423  llvm::Value *Ptr = nullptr;
1424  LValue UpdateLVal;
1425  RValue UpRVal;
1426  LValue AtomicLVal = Atomics.getAtomicLValue();
1427  LValue DesiredLVal;
1428  if (AtomicLVal.isSimple()) {
1429  UpRVal = OldRVal;
1430  DesiredLVal =
1431  LValue::MakeAddr(DesiredAddr, AtomicLVal.getType(),
1432  AtomicLVal.getAlignment(), CGF.CGM.getContext());
1433  } else {
1434  // Build new lvalue for temp address
1435  Ptr = Atomics.materializeRValue(OldRVal);
1436  if (AtomicLVal.isBitField()) {
1437  UpdateLVal =
1438  LValue::MakeBitfield(Ptr, AtomicLVal.getBitFieldInfo(),
1439  AtomicLVal.getType(), AtomicLVal.getAlignment());
1440  DesiredLVal =
1441  LValue::MakeBitfield(DesiredAddr, AtomicLVal.getBitFieldInfo(),
1442  AtomicLVal.getType(), AtomicLVal.getAlignment());
1443  } else if (AtomicLVal.isVectorElt()) {
1444  UpdateLVal = LValue::MakeVectorElt(Ptr, AtomicLVal.getVectorIdx(),
1445  AtomicLVal.getType(),
1446  AtomicLVal.getAlignment());
1447  DesiredLVal = LValue::MakeVectorElt(
1448  DesiredAddr, AtomicLVal.getVectorIdx(), AtomicLVal.getType(),
1449  AtomicLVal.getAlignment());
1450  } else {
1451  assert(AtomicLVal.isExtVectorElt());
1452  UpdateLVal = LValue::MakeExtVectorElt(Ptr, AtomicLVal.getExtVectorElts(),
1453  AtomicLVal.getType(),
1454  AtomicLVal.getAlignment());
1455  DesiredLVal = LValue::MakeExtVectorElt(
1456  DesiredAddr, AtomicLVal.getExtVectorElts(), AtomicLVal.getType(),
1457  AtomicLVal.getAlignment());
1458  }
1459  UpdateLVal.setTBAAInfo(AtomicLVal.getTBAAInfo());
1460  DesiredLVal.setTBAAInfo(AtomicLVal.getTBAAInfo());
1461  UpRVal = CGF.EmitLoadOfLValue(UpdateLVal, SourceLocation());
1462  }
1463  // Store new value in the corresponding memory area
1464  RValue NewRVal = UpdateOp(UpRVal);
1465  if (NewRVal.isScalar()) {
1466  CGF.EmitStoreThroughLValue(NewRVal, DesiredLVal);
1467  } else {
1468  assert(NewRVal.isComplex());
1469  CGF.EmitStoreOfComplex(NewRVal.getComplexVal(), DesiredLVal,
1470  /*isInit=*/false);
1471  }
1472 }
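// Sketch (an assumption, not from this file): the bit-field and vector
// element branches above serve atomic updates of non-simple l-values, e.g.
// via OpenMP's atomic construct, which lowers through EmitAtomicUpdate.
// These forms assume -fopenmp; their exact validity is an assumption.
//
//   struct BF { int a : 3; };
//   typedef int v4i __attribute__((vector_size(16)));
//   void update_sketch(BF &s, v4i &v) {
//   #pragma omp atomic update
//     s.a += 1;    // AtomicLVal.isBitField()
//   #pragma omp atomic update
//     v[1] += 1;   // AtomicLVal.isVectorElt()
//   }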
1473 
1474 void AtomicInfo::EmitAtomicUpdateLibcall(
1475  llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
1476  bool IsVolatile) {
1477  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1478 
1479  llvm::Value *ExpectedAddr = CreateTempAlloca();
1480 
1481  EmitAtomicLoadLibcall(ExpectedAddr, AO, IsVolatile);
1482  auto *ContBB = CGF.createBasicBlock("atomic_cont");
1483  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
1484  CGF.EmitBlock(ContBB);
1485  auto *DesiredAddr = CreateTempAlloca();
1486  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
1487  requiresMemSetZero(
1488  getAtomicAddress()->getType()->getPointerElementType())) {
1489  auto *OldVal = CGF.Builder.CreateAlignedLoad(
1490  ExpectedAddr, getAtomicAlignment().getQuantity());
1491  CGF.Builder.CreateAlignedStore(OldVal, DesiredAddr,
1492  getAtomicAlignment().getQuantity());
1493  }
1494  auto OldRVal = convertTempToRValue(ExpectedAddr, AggValueSlot::ignored(),
1495  SourceLocation(), /*AsValue=*/false);
1496  EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, DesiredAddr);
1497  auto *Res =
1498  EmitAtomicCompareExchangeLibcall(ExpectedAddr, DesiredAddr, AO, Failure);
1499  CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
1500  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
1501 }
1502 
1503 void AtomicInfo::EmitAtomicUpdateOp(
1504  llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
1505  bool IsVolatile) {
1506  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1507 
1508  // Do the atomic load.
1509  auto *OldVal = EmitAtomicLoadOp(AO, IsVolatile);
1510  // For non-simple lvalues perform compare-and-swap procedure.
1511  auto *ContBB = CGF.createBasicBlock("atomic_cont");
1512  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
1513  auto *CurBB = CGF.Builder.GetInsertBlock();
1514  CGF.EmitBlock(ContBB);
1515  llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
1516  /*NumReservedValues=*/2);
1517  PHI->addIncoming(OldVal, CurBB);
1518  auto *NewAtomicAddr = CreateTempAlloca();
1519  auto *NewAtomicIntAddr = emitCastToAtomicIntPointer(NewAtomicAddr);
1520  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
1521  requiresMemSetZero(
1522  getAtomicAddress()->getType()->getPointerElementType())) {
1523  CGF.Builder.CreateAlignedStore(PHI, NewAtomicIntAddr,
1524  getAtomicAlignment().getQuantity());
1525  }
1526  auto OldRVal = ConvertIntToValueOrAtomic(PHI, AggValueSlot::ignored(),
1527  SourceLocation(), /*AsValue=*/false);
1528  EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, NewAtomicAddr);
1529  auto *DesiredVal = CGF.Builder.CreateAlignedLoad(
1530  NewAtomicIntAddr, getAtomicAlignment().getQuantity());
1531  // Try to write new value using cmpxchg operation
1532  auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
1533  PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
1534  CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
1535  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
1536 }
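// In portable terms the compare-and-swap loop above behaves like this
// sketch (assumed equivalence; the emitted IR keeps the loaded value in the
// PHI so the cmpxchg's result feeds the next iteration without reloading):
//
//   #include <atomic>
//   template <class T, class F>
//   void update_op_sketch(std::atomic<T> &obj, F updateOp,
//                         std::memory_order ao) {
//     T expected = obj.load(std::memory_order_relaxed); // real code uses AO
//     while (!obj.compare_exchange_strong(expected, updateOp(expected), ao))
//       ;  // on failure 'expected' was refreshed; retry (atomic_cont)
//   }      // success falls through (atomic_exit)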
1537 
1538 static void EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics,
1539  RValue UpdateRVal, llvm::Value *DesiredAddr) {
1540  LValue AtomicLVal = Atomics.getAtomicLValue();
1541  LValue DesiredLVal;
1542  // Build new lvalue for temp address
1543  if (AtomicLVal.isBitField()) {
1544  DesiredLVal =
1545  LValue::MakeBitfield(DesiredAddr, AtomicLVal.getBitFieldInfo(),
1546  AtomicLVal.getType(), AtomicLVal.getAlignment());
1547  } else if (AtomicLVal.isVectorElt()) {
1548  DesiredLVal =
1549  LValue::MakeVectorElt(DesiredAddr, AtomicLVal.getVectorIdx(),
1550  AtomicLVal.getType(), AtomicLVal.getAlignment());
1551  } else {
1552  assert(AtomicLVal.isExtVectorElt());
1553  DesiredLVal = LValue::MakeExtVectorElt(
1554  DesiredAddr, AtomicLVal.getExtVectorElts(), AtomicLVal.getType(),
1555  AtomicLVal.getAlignment());
1556  }
1557  DesiredLVal.setTBAAInfo(AtomicLVal.getTBAAInfo());
1558  // Store new value in the corresponding memory area
1559  assert(UpdateRVal.isScalar());
1560  CGF.EmitStoreThroughLValue(UpdateRVal, DesiredLVal);
1561 }
1562 
1563 void AtomicInfo::EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
1564  RValue UpdateRVal, bool IsVolatile) {
1565  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1566 
1567  llvm::Value *ExpectedAddr = CreateTempAlloca();
1568 
1569  EmitAtomicLoadLibcall(ExpectedAddr, AO, IsVolatile);
1570  auto *ContBB = CGF.createBasicBlock("atomic_cont");
1571  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
1572  CGF.EmitBlock(ContBB);
1573  auto *DesiredAddr = CreateTempAlloca();
1574  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
1575  requiresMemSetZero(
1576  getAtomicAddress()->getType()->getPointerElementType())) {
1577  auto *OldVal = CGF.Builder.CreateAlignedLoad(
1578  ExpectedAddr, getAtomicAlignment().getQuantity());
1579  CGF.Builder.CreateAlignedStore(OldVal, DesiredAddr,
1580  getAtomicAlignment().getQuantity());
1581  }
1582  EmitAtomicUpdateValue(CGF, *this, UpdateRVal, DesiredAddr);
1583  auto *Res =
1584  EmitAtomicCompareExchangeLibcall(ExpectedAddr, DesiredAddr, AO, Failure);
1585  CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
1586  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
1587 }
1588 
1589 void AtomicInfo::EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
1590  bool IsVolatile) {
1591  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1592 
1593  // Do the atomic load.
1594  auto *OldVal = EmitAtomicLoadOp(AO, IsVolatile);
1595  // For non-simple lvalues perform compare-and-swap procedure.
1596  auto *ContBB = CGF.createBasicBlock("atomic_cont");
1597  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
1598  auto *CurBB = CGF.Builder.GetInsertBlock();
1599  CGF.EmitBlock(ContBB);
1600  llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
1601  /*NumReservedValues=*/2);
1602  PHI->addIncoming(OldVal, CurBB);
1603  auto *NewAtomicAddr = CreateTempAlloca();
1604  auto *NewAtomicIntAddr = emitCastToAtomicIntPointer(NewAtomicAddr);
1605  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
1606  requiresMemSetZero(
1607  getAtomicAddress()->getType()->getPointerElementType())) {
1608  CGF.Builder.CreateAlignedStore(PHI, NewAtomicIntAddr,
1609  getAtomicAlignment().getQuantity());
1610  }
1611  EmitAtomicUpdateValue(CGF, *this, UpdateRVal, NewAtomicAddr);
1612  auto *DesiredVal = CGF.Builder.CreateAlignedLoad(
1613  NewAtomicIntAddr, getAtomicAlignment().getQuantity());
1614  // Try to write new value using cmpxchg operation
1615  auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
1616  PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
1617  CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
1618  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
1619 }
1620 
1621 void AtomicInfo::EmitAtomicUpdate(
1622  llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
1623  bool IsVolatile) {
1624  if (shouldUseLibcall()) {
1625  EmitAtomicUpdateLibcall(AO, UpdateOp, IsVolatile);
1626  } else {
1627  EmitAtomicUpdateOp(AO, UpdateOp, IsVolatile);
1628  }
1629 }
1630 
1631 void AtomicInfo::EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
1632  bool IsVolatile) {
1633  if (shouldUseLibcall()) {
1634  EmitAtomicUpdateLibcall(AO, UpdateRVal, IsVolatile);
1635  } else {
1636  EmitAtomicUpdateOp(AO, UpdateRVal, IsVolatile);
1637  }
1638 }
1639 
1640 void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue lvalue,
1641  bool isInit) {
1642  bool IsVolatile = lvalue.isVolatileQualified();
1643  llvm::AtomicOrdering AO;
1644  if (lvalue.getType()->isAtomicType()) {
1645  AO = llvm::SequentiallyConsistent;
1646  } else {
1647  AO = llvm::Release;
1648  IsVolatile = true;
1649  }
1650  return EmitAtomicStore(rvalue, lvalue, AO, IsVolatile, isInit);
1651 }
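// Sketch of the defaults chosen above: a store to a genuine _Atomic l-value
// is sequentially consistent, while the other branch pairs release ordering
// with a forced volatile store (consistent with MSVC-style volatile
// semantics; that reading of the callers is an assumption).
//
//   _Atomic(int) g;               // Clang accepts _Atomic in C++ as an
//                                 // extension
//   void set_sketch() { g = 1; }  // lowered here as a seq_cst atomic store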
1652 
1653 /// Emit a store to an l-value of atomic type.
1654 ///
1655 /// Note that the r-value is expected to be an r-value *of the atomic
1656 /// type*; this means that for aggregate r-values, it should include
1657 /// storage for any padding that was necessary.
1658 void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest,
1659  llvm::AtomicOrdering AO, bool IsVolatile,
1660  bool isInit) {
1661  // If this is an aggregate r-value, it should agree in type except
1662  // maybe for address-space qualification.
1663  assert(!rvalue.isAggregate() ||
1664  rvalue.getAggregateAddr()->getType()->getPointerElementType()
1665  == dest.getAddress()->getType()->getPointerElementType());
1666 
1667  AtomicInfo atomics(*this, dest);
1668  LValue LVal = atomics.getAtomicLValue();
1669 
1670  // If this is an initialization, just put the value there normally.
1671  if (LVal.isSimple()) {
1672  if (isInit) {
1673  atomics.emitCopyIntoMemory(rvalue);
1674  return;
1675  }
1676 
1677  // Check whether we should use a library call.
1678  if (atomics.shouldUseLibcall()) {
1679  // Produce a source address.
1680  llvm::Value *srcAddr = atomics.materializeRValue(rvalue);
1681 
1682  // void __atomic_store(size_t size, void *mem, void *val, int order)
1683  CallArgList args;
1684  args.add(RValue::get(atomics.getAtomicSizeValue()),
1685  getContext().getSizeType());
1686  args.add(RValue::get(EmitCastToVoidPtr(atomics.getAtomicAddress())),
1687  getContext().VoidPtrTy);
1688  args.add(RValue::get(EmitCastToVoidPtr(srcAddr)), getContext().VoidPtrTy);
1689  args.add(RValue::get(llvm::ConstantInt::get(
1690  IntTy, AtomicInfo::translateAtomicOrdering(AO))),
1691  getContext().IntTy);
1692  emitAtomicLibcall(*this, "__atomic_store", getContext().VoidTy, args);
1693  return;
1694  }
1695 
1696  // Okay, we're doing this natively.
1697  llvm::Value *intValue = atomics.convertRValueToInt(rvalue);
1698 
1699  // Do the atomic store.
1700  llvm::Value *addr =
1701  atomics.emitCastToAtomicIntPointer(atomics.getAtomicAddress());
1702  intValue = Builder.CreateIntCast(
1703  intValue, addr->getType()->getPointerElementType(), /*isSigned=*/false);
1704  llvm::StoreInst *store = Builder.CreateStore(intValue, addr);
1705 
1706  // Initializations don't need to be atomic.
1707  if (!isInit)
1708  store->setAtomic(AO);
1709 
1710  // Other decoration.
1711  store->setAlignment(dest.getAlignment().getQuantity());
1712  if (IsVolatile)
1713  store->setVolatile(true);
1714  if (dest.getTBAAInfo())
1715  CGM.DecorateInstruction(store, dest.getTBAAInfo());
1716  return;
1717  }
1718 
1719  // Emit simple atomic update operation.
1720  atomics.EmitAtomicUpdate(AO, rvalue, IsVolatile);
1721 }
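// Spelled out as a declaration, the libcall named in the comment above is
// (parameter names taken from that comment):
//
//   void __atomic_store(size_t size, void *mem, void *val, int order);
//
// 'mem' is the atomic object, 'val' points at the materialized r-value
// (including any padding), and 'order' comes from translateAtomicOrdering.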
1722 
1723 /// Emit a compare-and-exchange op for atomic type.
1724 ///
1725 std::pair<RValue, llvm::Value *> CodeGenFunction::EmitAtomicCompareExchange(
1726  LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc,
1727  llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak,
1728  AggValueSlot Slot) {
1729  // If this is an aggregate r-value, it should agree in type except
1730  // maybe for address-space qualification.
1731  assert(!Expected.isAggregate() ||
1732  Expected.getAggregateAddr()->getType()->getPointerElementType() ==
1733  Obj.getAddress()->getType()->getPointerElementType());
1734  assert(!Desired.isAggregate() ||
1735  Desired.getAggregateAddr()->getType()->getPointerElementType() ==
1736  Obj.getAddress()->getType()->getPointerElementType());
1737  AtomicInfo Atomics(*this, Obj);
1738 
1739  return Atomics.EmitAtomicCompareExchange(Expected, Desired, Success, Failure,
1740  IsWeak);
1741 }
1742 
1743 void CodeGenFunction::EmitAtomicUpdate(
1744  LValue LVal, llvm::AtomicOrdering AO,
1745  const llvm::function_ref<RValue(RValue)> &UpdateOp, bool IsVolatile) {
1746  AtomicInfo Atomics(*this, LVal);
1747  Atomics.EmitAtomicUpdate(AO, UpdateOp, IsVolatile);
1748 }
1749 
1750 void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) {
1751  AtomicInfo atomics(*this, dest);
1752 
1753  switch (atomics.getEvaluationKind()) {
1754  case TEK_Scalar: {
1755  llvm::Value *value = EmitScalarExpr(init);
1756  atomics.emitCopyIntoMemory(RValue::get(value));
1757  return;
1758  }
1759 
1760  case TEK_Complex: {
1761  ComplexPairTy value = EmitComplexExpr(init);
1762  atomics.emitCopyIntoMemory(RValue::getComplex(value));
1763  return;
1764  }
1765 
1766  case TEK_Aggregate: {
1767  // Fix up the destination if the initializer isn't an expression
1768  // of atomic type.
1769  bool Zeroed = false;
1770  if (!init->getType()->isAtomicType()) {
1771  Zeroed = atomics.emitMemSetZeroIfNecessary();
1772  dest = atomics.projectValue();
1773  }
1774 
1775  // Evaluate the expression directly into the destination.
1776  AggValueSlot slot = AggValueSlot::forLValue(dest,
1777  AggValueSlot::IsNotDestructed,
1778  AggValueSlot::DoesNotNeedGCBarriers,
1779  AggValueSlot::IsNotAliased,
1780  Zeroed ? AggValueSlot::IsZeroed :
1781  AggValueSlot::IsNotZeroed);
1782 
1783  EmitAggExpr(init, slot);
1784  return;
1785  }
1786  }
1787  llvm_unreachable("bad evaluation kind");
1788 }
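// One initializer per evaluation kind, as a sketch (C11 _Atomic syntax,
// which Clang also accepts in C++; the aggregate form is an assumption):
//
//   _Atomic(int) si = 42;               // TEK_Scalar
//   _Atomic(_Complex double) sc = 1.0;  // TEK_Complex
//   struct P { char c; long l; };
//   _Atomic(struct P) sp = {'x', 1L};   // TEK_Aggregate: any padding the
//                                       // atomic form needs is zeroed
//                                       // before the fields are stored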