#include "CGCall.h"
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"

using namespace clang;
using namespace CodeGen;
namespace {
class AtomicInfo {
  CodeGenFunction &CGF;
  QualType AtomicTy;
  QualType ValueTy;
  uint64_t AtomicSizeInBits;
  uint64_t ValueSizeInBits;
  CharUnits AtomicAlign;
  CharUnits ValueAlign;
  TypeEvaluationKind EvaluationKind;
  bool UseLibcall;
  LValue LVal;
  CGBitFieldInfo BFI;

public:
  AtomicInfo(CodeGenFunction &CGF, LValue &lvalue)
      : CGF(CGF), AtomicSizeInBits(0), ValueSizeInBits(0),
        EvaluationKind(TEK_Scalar), UseLibcall(true) {
    assert(!lvalue.isGlobalReg());
    ASTContext &C = CGF.getContext();
    if (lvalue.isSimple()) {
      AtomicTy = lvalue.getType();
      if (auto *ATy = AtomicTy->getAs<AtomicType>())
        ValueTy = ATy->getValueType();
      else
        ValueTy = AtomicTy;
      EvaluationKind = CGF.getEvaluationKind(ValueTy);

      uint64_t ValueAlignInBits;
      uint64_t AtomicAlignInBits;
      TypeInfo ValueTI = C.getTypeInfo(ValueTy);
      ValueSizeInBits = ValueTI.Width;
      ValueAlignInBits = ValueTI.Align;

      TypeInfo AtomicTI = C.getTypeInfo(AtomicTy);
      AtomicSizeInBits = AtomicTI.Width;
      AtomicAlignInBits = AtomicTI.Align;

      assert(ValueSizeInBits <= AtomicSizeInBits);
      assert(ValueAlignInBits <= AtomicAlignInBits);

      AtomicAlign = C.toCharUnitsFromBits(AtomicAlignInBits);
      ValueAlign = C.toCharUnitsFromBits(ValueAlignInBits);
      if (lvalue.getAlignment().isZero())
        lvalue.setAlignment(AtomicAlign);

      LVal = lvalue;
    } else if (lvalue.isBitField()) {
      ValueTy = lvalue.getType();
      ValueSizeInBits = C.getTypeSize(ValueTy);
      auto &OrigBFI = lvalue.getBitFieldInfo();
      auto Offset = OrigBFI.Offset % C.toBits(lvalue.getAlignment());
      AtomicSizeInBits = C.toBits(
          C.toCharUnitsFromBits(Offset + OrigBFI.Size + C.getCharWidth() - 1)
              .alignTo(lvalue.getAlignment()));
      llvm::Value *BitFieldPtr = lvalue.getRawBitFieldPointer(CGF);
      auto OffsetInChars =
          (C.toCharUnitsFromBits(OrigBFI.Offset) / lvalue.getAlignment()) *
          lvalue.getAlignment();
      llvm::Value *StoragePtr = CGF.Builder.CreateConstGEP1_64(
          CGF.Int8Ty, BitFieldPtr, OffsetInChars.getQuantity());
      StoragePtr = CGF.Builder.CreateAddrSpaceCast(StoragePtr, CGF.UnqualPtrTy,
                                                   "atomic_bitfield_base");
      BFI = OrigBFI;
      BFI.Offset = Offset;
      BFI.StorageSize = AtomicSizeInBits;
      BFI.StorageOffset += OffsetInChars;
      llvm::Type *StorageTy = CGF.Builder.getIntNTy(AtomicSizeInBits);
      LVal = LValue::MakeBitfield(
          Address(StoragePtr, StorageTy, lvalue.getAlignment()), BFI,
          lvalue.getType(), lvalue.getBaseInfo(), lvalue.getTBAAInfo());
      AtomicTy = C.getIntTypeForBitwidth(AtomicSizeInBits, OrigBFI.IsSigned);
      if (AtomicTy.isNull()) {
        llvm::APInt Size(
            /*numBits=*/32,
            C.toCharUnitsFromBits(AtomicSizeInBits).getQuantity());
        AtomicTy = C.getConstantArrayType(C.CharTy, Size, nullptr,
                                          ArraySizeModifier::Normal,
                                          /*IndexTypeQuals=*/0);
      }
      AtomicAlign = ValueAlign = lvalue.getAlignment();
    } else if (lvalue.isVectorElt()) {
      ValueTy = lvalue.getType()->castAs<VectorType>()->getElementType();
      ValueSizeInBits = C.getTypeSize(ValueTy);
      AtomicTy = lvalue.getType();
      AtomicSizeInBits = C.getTypeSize(AtomicTy);
      AtomicAlign = ValueAlign = lvalue.getAlignment();
      LVal = lvalue;
    } else {
      assert(lvalue.isExtVectorElt());
      ValueTy = lvalue.getType();
      ValueSizeInBits = C.getTypeSize(ValueTy);
      AtomicTy = ValueTy = CGF.getContext().getExtVectorType(
          lvalue.getType(), cast<llvm::FixedVectorType>(
                                lvalue.getExtVectorAddress().getElementType())
                                ->getNumElements());
      AtomicSizeInBits = C.getTypeSize(AtomicTy);
      AtomicAlign = ValueAlign = lvalue.getAlignment();
      LVal = lvalue;
    }
    UseLibcall = !C.getTargetInfo().hasBuiltinAtomic(
        AtomicSizeInBits, C.toBits(lvalue.getAlignment()));
  }
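
  // Illustrative example (not part of the original source): for
  //   struct S { char c[3]; };
  //   _Atomic struct S s;   // 3-byte value, padded by the ABI to 4 bytes
  // the constructor above computes ValueSizeInBits == 24 and
  // AtomicSizeInBits == 32, so hasPadding() is true and stores must zero the
  // trailing byte. UseLibcall becomes true whenever the target reports no
  // inline atomic support for the padded width/alignment via
  // TargetInfo::hasBuiltinAtomic.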
  QualType getAtomicType() const { return AtomicTy; }
  QualType getValueType() const { return ValueTy; }
  CharUnits getAtomicAlignment() const { return AtomicAlign; }
  uint64_t getAtomicSizeInBits() const { return AtomicSizeInBits; }
  uint64_t getValueSizeInBits() const { return ValueSizeInBits; }
  TypeEvaluationKind getEvaluationKind() const { return EvaluationKind; }
  bool shouldUseLibcall() const { return UseLibcall; }
  const LValue &getAtomicLValue() const { return LVal; }
  llvm::Value *getAtomicPointer() const {
    if (LVal.isSimple())
      return LVal.emitRawPointer(CGF);
    else if (LVal.isBitField())
      return LVal.getRawBitFieldPointer(CGF);
    else if (LVal.isVectorElt())
      return LVal.getRawVectorPointer(CGF);
    assert(LVal.isExtVectorElt());
    return LVal.getRawExtVectorPointer(CGF);
  }
  Address getAtomicAddress() const {
    llvm::Type *ElTy;
    if (LVal.isSimple())
      ElTy = LVal.getAddress().getElementType();
    else if (LVal.isBitField())
      ElTy = LVal.getBitFieldAddress().getElementType();
    else if (LVal.isVectorElt())
      ElTy = LVal.getVectorAddress().getElementType();
    else
      ElTy = LVal.getExtVectorAddress().getElementType();
    return Address(getAtomicPointer(), ElTy, getAtomicAlignment());
  }

  Address getAtomicAddressAsAtomicIntPointer() const {
    return castToAtomicIntPointer(getAtomicAddress());
  }
  /// Is the atomic size larger than the underlying value type?
  bool hasPadding() const {
    return (ValueSizeInBits != AtomicSizeInBits);
  }

  bool emitMemSetZeroIfNecessary() const;

  llvm::Value *getAtomicSizeValue() const {
    CharUnits size = CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits);
    return CGF.CGM.getSize(size);
  }

  /// Cast the given pointer to an integer pointer suitable for atomic
  /// operations on the atomic type.
  Address castToAtomicIntPointer(Address Addr) const;

  /// If Addr is compatible with the iN that will be used for an atomic
  /// operation, bitcast it. Otherwise, create a temporary that is suitable
  /// and copy the value across.
  Address convertToAtomicIntPointer(Address Addr) const;

  /// Turn an atomic-layout object into an r-value.
  RValue convertAtomicTempToRValue(Address addr, AggValueSlot resultSlot,
                                   SourceLocation loc, bool AsValue) const;

  llvm::Value *getScalarRValValueOrNull(RValue RVal) const;

  /// Converts an rvalue to integer value if needed.
  llvm::Value *convertRValueToInt(RValue RVal, bool CmpXchg = false) const;

  RValue ConvertToValueOrAtomic(llvm::Value *IntVal, AggValueSlot ResultSlot,
                                SourceLocation Loc, bool AsValue,
                                bool CmpXchg = false) const;
  /// Copy an rvalue into this object's memory as part of storing to an
  /// atomic type.
  void emitCopyIntoMemory(RValue rvalue) const;

  /// Project an l-value down to the value field.
  LValue projectValue() const {
    assert(LVal.isSimple());
    Address addr = getAtomicAddress();
    if (hasPadding())
      addr = CGF.Builder.CreateStructGEP(addr, 0);

    return LValue::MakeAddr(addr, getValueType(), CGF.getContext(),
                            LVal.getBaseInfo(), LVal.getTBAAInfo());
  }
  /// Emits atomic load.
  /// \returns Loaded value.
  RValue EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
                        bool AsValue, llvm::AtomicOrdering AO, bool IsVolatile);

  /// Emits atomic compare-and-exchange sequence.
  std::pair<RValue, llvm::Value *>
  EmitAtomicCompareExchange(RValue Expected, RValue Desired,
                            llvm::AtomicOrdering Success =
                                llvm::AtomicOrdering::SequentiallyConsistent,
                            llvm::AtomicOrdering Failure =
                                llvm::AtomicOrdering::SequentiallyConsistent,
                            bool IsWeak = false);

  /// Emits atomic update.
  void EmitAtomicUpdate(llvm::AtomicOrdering AO,
                        const llvm::function_ref<RValue(RValue)> &UpdateOp,
                        bool IsVolatile);
  /// Emits atomic update.
  void EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
                        bool IsVolatile);

  /// Materialize an atomic r-value in atomic-layout memory.
  Address materializeRValue(RValue rvalue) const;

  /// Creates temp alloca for intermediate operations on atomic value.
  Address CreateTempAlloca() const;

private:
  bool requiresMemSetZero(llvm::Type *type) const;

  /// Emits atomic load as a libcall.
  void EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,
                             llvm::AtomicOrdering AO, bool IsVolatile);
  /// Emits atomic load as LLVM instruction.
  llvm::Value *EmitAtomicLoadOp(llvm::AtomicOrdering AO, bool IsVolatile,
                                bool CmpXchg = false);
  /// Emits atomic compare-and-exchange op as a libcall.
  llvm::Value *EmitAtomicCompareExchangeLibcall(
      llvm::Value *ExpectedAddr, llvm::Value *DesiredAddr,
      llvm::AtomicOrdering Success =
          llvm::AtomicOrdering::SequentiallyConsistent,
      llvm::AtomicOrdering Failure =
          llvm::AtomicOrdering::SequentiallyConsistent);
  /// Emits atomic compare-and-exchange op as LLVM instruction.
  std::pair<llvm::Value *, llvm::Value *> EmitAtomicCompareExchangeOp(
      llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
      llvm::AtomicOrdering Success =
          llvm::AtomicOrdering::SequentiallyConsistent,
      llvm::AtomicOrdering Failure =
          llvm::AtomicOrdering::SequentiallyConsistent,
      bool IsWeak = false);
  /// Emit atomic update as libcalls.
  void
  EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
                          const llvm::function_ref<RValue(RValue)> &UpdateOp,
                          bool IsVolatile);
  /// Emit atomic update as LLVM instructions.
  void EmitAtomicUpdateOp(llvm::AtomicOrdering AO,
                          const llvm::function_ref<RValue(RValue)> &UpdateOp,
                          bool IsVolatile);
  /// Emit atomic update as libcalls.
  void EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO, RValue UpdateRVal,
                               bool IsVolatile);
  /// Emit atomic update as LLVM instructions.
  void EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
                          bool IsVolatile);
};
} // end anonymous namespace
Address AtomicInfo::CreateTempAlloca() const {
  Address TempAlloca = CGF.CreateMemTemp(
      (LVal.isBitField() && ValueSizeInBits > AtomicSizeInBits) ? ValueTy
                                                                : AtomicTy,
      getAtomicAlignment(), "atomic-temp");
  // Cast to pointer to value type for bitfields.
  if (LVal.isBitField())
    return CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
        TempAlloca, getAtomicAddress().getType(),
        getAtomicAddress().getElementType());
  return TempAlloca;
}
static RValue emitAtomicLibcall(CodeGenFunction &CGF, StringRef fnName,
                                QualType resultType, CallArgList &args) {
  const CGFunctionInfo &fnInfo =
      CGF.CGM.getTypes().arrangeBuiltinFunctionCall(resultType, args);
  llvm::FunctionType *fnTy = CGF.CGM.getTypes().GetFunctionType(fnInfo);
  llvm::AttrBuilder fnAttrB(CGF.getLLVMContext());
  fnAttrB.addAttribute(llvm::Attribute::NoUnwind);
  fnAttrB.addAttribute(llvm::Attribute::WillReturn);
  llvm::AttributeList fnAttrs = llvm::AttributeList::get(
      CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex, fnAttrB);

  llvm::FunctionCallee fn =
      CGF.CGM.CreateRuntimeFunction(fnTy, fnName, fnAttrs);
  auto callee = CGCallee::forDirect(fn);
  return CGF.EmitCall(fnInfo, callee, ReturnValueSlot(), args);
}

/// Does a store of the given IR type modify the full expected width?
static bool isFullSizeType(CodeGenModule &CGM, llvm::Type *type,
                           uint64_t expectedSize) {
  return (CGM.getDataLayout().getTypeStoreSize(type) * 8 == expectedSize);
}
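
// Illustrative example (not part of the original source): a seq_cst load of
// a 32-byte _Atomic struct goes through this helper roughly as
//   call void @__atomic_load(i64 32, ptr %obj, ptr %tmp, i32 5)
// where 5 is the C ABI encoding of memory_order_seq_cst; the declared callee
// carries the nounwind/willreturn attributes built above.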
bool AtomicInfo::requiresMemSetZero(llvm::Type *type) const {
  // If the atomic type has size padding, we definitely need a memset.
  if (hasPadding())
    return true;

  // Otherwise, do some simple heuristics to try to avoid it:
  switch (getEvaluationKind()) {
  // For scalars and complexes, check whether the store size of the
  // type uses the full size.
  case TEK_Scalar:
    return !isFullSizeType(CGF.CGM, type, AtomicSizeInBits);
  case TEK_Complex:
    return !isFullSizeType(CGF.CGM, type->getStructElementType(0),
                           AtomicSizeInBits / 2);

  // Padding in structs has an undefined bit pattern.  User beware.
  case TEK_Aggregate:
    return false;
  }
  llvm_unreachable("bad evaluation kind");
}
bool AtomicInfo::emitMemSetZeroIfNecessary() const {
  assert(LVal.isSimple());
  Address addr = LVal.getAddress();
  if (!requiresMemSetZero(addr.getElementType()))
    return false;

  CGF.Builder.CreateMemSet(
      addr.emitRawPointer(CGF), llvm::ConstantInt::get(CGF.Int8Ty, 0),
      CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits).getQuantity(),
      LVal.getAlignment().getAsAlign());
  return true;
}
static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak,
                              Address Dest, Address Ptr, Address Val1,
                              Address Val2, uint64_t Size,
                              llvm::AtomicOrdering SuccessOrder,
                              llvm::AtomicOrdering FailureOrder,
                              llvm::SyncScope::ID Scope) {
  llvm::Value *Expected = CGF.Builder.CreateLoad(Val1);
  llvm::Value *Desired = CGF.Builder.CreateLoad(Val2);

  llvm::AtomicCmpXchgInst *Pair = CGF.Builder.CreateAtomicCmpXchg(
      Ptr, Expected, Desired, SuccessOrder, FailureOrder, Scope);
  Pair->setVolatile(E->isVolatile());
  Pair->setWeak(IsWeak);
  CGF.getTargetHooks().setTargetAtomicMetadata(CGF, *Pair, E);

  // Cmp holds the result of the compare-exchange operation: true on success,
  // false on failure.
  llvm::Value *Old = CGF.Builder.CreateExtractValue(Pair, 0);
  llvm::Value *Cmp = CGF.Builder.CreateExtractValue(Pair, 1);

  // This basic block is used to hold the store instruction if the operation
  // failed.
  llvm::BasicBlock *StoreExpectedBB =
      CGF.createBasicBlock("cmpxchg.store_expected", CGF.CurFn);

  // This basic block is the exit point of the operation; we should end up
  // here regardless of whether or not the operation succeeded.
  llvm::BasicBlock *ContinueBB =
      CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);

  // Update Expected if Expected isn't equal to Old, otherwise branch to the
  // exit point.
  CGF.Builder.CreateCondBr(Cmp, ContinueBB, StoreExpectedBB);

  CGF.Builder.SetInsertPoint(StoreExpectedBB);
  // Update the memory at Expected with Old's value.
  CGF.Builder.CreateStore(Old, Val1);
  // Finally, branch to the exit point.
  CGF.Builder.CreateBr(ContinueBB);

  CGF.Builder.SetInsertPoint(ContinueBB);
  // Update the memory at Dest with Cmp's value.
  CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType()));
}
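
// Sketch of the IR this produces (illustrative, not from the original file)
// for __c11_atomic_compare_exchange_strong(p, &expected, desired, ...):
//   %pair = cmpxchg ptr %p, i32 %exp, i32 %des seq_cst seq_cst
//   %old  = extractvalue { i32, i1 } %pair, 0
//   %ok   = extractvalue { i32, i1 } %pair, 1
//   br i1 %ok, label %cmpxchg.continue, label %cmpxchg.store_expected
// On failure, %old is stored back into 'expected', matching the C11 rule
// that a failed exchange updates *expected.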
/// Given an ordering required on success, emit all possible cmpxchg
/// instructions to cope with the provided (but possibly only dynamically
/// known) FailureOrder.
static void emitAtomicCmpXchgFailureSet(
    CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak, Address Dest, Address Ptr,
    Address Val1, Address Val2, llvm::Value *FailureOrderVal, uint64_t Size,
    llvm::AtomicOrdering SuccessOrder, llvm::SyncScope::ID Scope) {
  llvm::AtomicOrdering FailureOrder;
  if (llvm::ConstantInt *FO = dyn_cast<llvm::ConstantInt>(FailureOrderVal)) {
    auto FOS = FO->getSExtValue();
    if (!llvm::isValidAtomicOrderingCABI(FOS))
      FailureOrder = llvm::AtomicOrdering::Monotonic;
    else
      switch ((llvm::AtomicOrderingCABI)FOS) {
      case llvm::AtomicOrderingCABI::relaxed:
      // The failure argument shall not be memory_order_release nor
      // memory_order_acq_rel; fall back to monotonic.
      case llvm::AtomicOrderingCABI::release:
      case llvm::AtomicOrderingCABI::acq_rel:
        FailureOrder = llvm::AtomicOrdering::Monotonic;
        break;
      case llvm::AtomicOrderingCABI::consume:
      case llvm::AtomicOrderingCABI::acquire:
        FailureOrder = llvm::AtomicOrdering::Acquire;
        break;
      case llvm::AtomicOrderingCABI::seq_cst:
        FailureOrder = llvm::AtomicOrdering::SequentiallyConsistent;
        break;
      }
    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
                      FailureOrder, Scope);
    return;
  }

  // Create all the relevant BB's.
  auto *MonotonicBB = CGF.createBasicBlock("monotonic_fail", CGF.CurFn);
  auto *AcquireBB = CGF.createBasicBlock("acquire_fail", CGF.CurFn);
  auto *SeqCstBB = CGF.createBasicBlock("seqcst_fail", CGF.CurFn);
  auto *ContBB = CGF.createBasicBlock("atomic.continue", CGF.CurFn);

  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
  // doesn't matter unless someone is crazy enough to use something that
  // doesn't fold to a constant for the ordering.
  llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(FailureOrderVal, MonotonicBB);
  // Implemented as acquire, since it's the closest in LLVM.
  SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
              AcquireBB);
  SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::acquire),
              AcquireBB);
  SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),
              SeqCstBB);

  // Emit all the different atomics.
  CGF.Builder.SetInsertPoint(MonotonicBB);
  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
                    Size, SuccessOrder, llvm::AtomicOrdering::Monotonic, Scope);
  CGF.Builder.CreateBr(ContBB);

  CGF.Builder.SetInsertPoint(AcquireBB);
  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
                    llvm::AtomicOrdering::Acquire, Scope);
  CGF.Builder.CreateBr(ContBB);

  CGF.Builder.SetInsertPoint(SeqCstBB);
  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
                    llvm::AtomicOrdering::SequentiallyConsistent, Scope);
  CGF.Builder.CreateBr(ContBB);

  CGF.Builder.SetInsertPoint(ContBB);
}
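
// Example (illustrative): a constant but invalid failure order such as
//   __c11_atomic_compare_exchange_strong(p, &e, d,
//                                        __ATOMIC_SEQ_CST, __ATOMIC_RELEASE)
// is clamped to monotonic by the constant-folding path above, since a
// cmpxchg failure ordering may be neither release nor acq_rel.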
/// Duplicate the atomic min/max operation in conventional IR for the builtin
/// variants that return the new rather than the original value.
static llvm::Value *EmitPostAtomicMinMax(CGBuilderTy &Builder,
                                         AtomicExpr::AtomicOp Op,
                                         bool IsSigned, llvm::Value *OldVal,
                                         llvm::Value *RHS) {
  llvm::CmpInst::Predicate Pred;
  switch (Op) {
  default:
    llvm_unreachable("Unexpected min/max operation");
  case AtomicExpr::AO__atomic_max_fetch:
  case AtomicExpr::AO__scoped_atomic_max_fetch:
    Pred = IsSigned ? llvm::CmpInst::ICMP_SGT : llvm::CmpInst::ICMP_UGT;
    break;
  case AtomicExpr::AO__atomic_min_fetch:
  case AtomicExpr::AO__scoped_atomic_min_fetch:
    Pred = IsSigned ? llvm::CmpInst::ICMP_SLT : llvm::CmpInst::ICMP_ULT;
    break;
  }
  llvm::Value *Cmp = Builder.CreateICmp(Pred, OldVal, RHS, "tst");
  return Builder.CreateSelect(Cmp, OldVal, RHS, "newval");
}
static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
                         Address Ptr, Address Val1, Address Val2,
                         llvm::Value *IsWeak, llvm::Value *FailureOrder,
                         uint64_t Size, llvm::AtomicOrdering Order,
                         llvm::SyncScope::ID Scope) {
  llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
  bool PostOpMinMax = false;
  unsigned PostOp = 0;

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
  case AtomicExpr::AO__opencl_atomic_init:
    llvm_unreachable("Already handled!");

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
    emitAtomicCmpXchgFailureSet(CGF, E, /*IsWeak=*/false, Dest, Ptr, Val1, Val2,
                                FailureOrder, Size, Order, Scope);
    return;
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
  case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
    emitAtomicCmpXchgFailureSet(CGF, E, /*IsWeak=*/true, Dest, Ptr, Val1, Val2,
                                FailureOrder, Size, Order, Scope);
    return;
  case AtomicExpr::AO__atomic_compare_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n:
  case AtomicExpr::AO__scoped_atomic_compare_exchange:
  case AtomicExpr::AO__scoped_atomic_compare_exchange_n: {
    if (llvm::ConstantInt *IsWeakC = dyn_cast<llvm::ConstantInt>(IsWeak)) {
      emitAtomicCmpXchgFailureSet(CGF, E, IsWeakC->getZExtValue(), Dest, Ptr,
                                  Val1, Val2, FailureOrder, Size, Order, Scope);
    } else {
      // Create all the relevant BB's.
      llvm::BasicBlock *StrongBB =
          CGF.createBasicBlock("cmpxchg.strong", CGF.CurFn);
      llvm::BasicBlock *WeakBB = CGF.createBasicBlock("cmxchg.weak", CGF.CurFn);
      llvm::BasicBlock *ContBB =
          CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);

      llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(IsWeak, WeakBB);
      SI->addCase(CGF.Builder.getInt1(false), StrongBB);

      CGF.Builder.SetInsertPoint(StrongBB);
      emitAtomicCmpXchgFailureSet(CGF, E, /*IsWeak=*/false, Dest, Ptr, Val1,
                                  Val2, FailureOrder, Size, Order, Scope);
      CGF.Builder.CreateBr(ContBB);

      CGF.Builder.SetInsertPoint(WeakBB);
      emitAtomicCmpXchgFailureSet(CGF, E, /*IsWeak=*/true, Dest, Ptr, Val1,
                                  Val2, FailureOrder, Size, Order, Scope);
      CGF.Builder.CreateBr(ContBB);

      CGF.Builder.SetInsertPoint(ContBB);
    }
    return;
  }
  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__opencl_atomic_load:
  case AtomicExpr::AO__hip_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__atomic_load:
  case AtomicExpr::AO__scoped_atomic_load_n:
  case AtomicExpr::AO__scoped_atomic_load: {
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
    Load->setAtomic(Order, Scope);
    Load->setVolatile(E->isVolatile());
    CGF.maybeAttachRangeForLoad(Load, E->getValueType(), E->getExprLoc());
    CGF.Builder.CreateStore(Load, Dest);
    return;
  }

  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__opencl_atomic_store:
  case AtomicExpr::AO__hip_atomic_store:
  case AtomicExpr::AO__atomic_store:
  case AtomicExpr::AO__atomic_store_n:
  case AtomicExpr::AO__scoped_atomic_store:
  case AtomicExpr::AO__scoped_atomic_store_n: {
    llvm::Value *LoadVal1 = CGF.Builder.CreateLoad(Val1);
    llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr);
    Store->setAtomic(Order, Scope);
    Store->setVolatile(E->isVolatile());
    return;
  }

  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__hip_atomic_exchange:
  case AtomicExpr::AO__opencl_atomic_exchange:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__atomic_exchange:
  case AtomicExpr::AO__scoped_atomic_exchange_n:
  case AtomicExpr::AO__scoped_atomic_exchange:
    Op = llvm::AtomicRMWInst::Xchg;
    break;

  case AtomicExpr::AO__atomic_add_fetch:
  case AtomicExpr::AO__scoped_atomic_add_fetch:
    PostOp = E->getValueType()->isFloatingType() ? llvm::Instruction::FAdd
                                                 : llvm::Instruction::Add;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__hip_atomic_fetch_add:
  case AtomicExpr::AO__opencl_atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_add:
  case AtomicExpr::AO__scoped_atomic_fetch_add:
    Op = E->getValueType()->isFloatingType() ? llvm::AtomicRMWInst::FAdd
                                             : llvm::AtomicRMWInst::Add;
    break;

  case AtomicExpr::AO__atomic_sub_fetch:
  case AtomicExpr::AO__scoped_atomic_sub_fetch:
    PostOp = E->getValueType()->isFloatingType() ? llvm::Instruction::FSub
                                                 : llvm::Instruction::Sub;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__hip_atomic_fetch_sub:
  case AtomicExpr::AO__opencl_atomic_fetch_sub:
  case AtomicExpr::AO__atomic_fetch_sub:
  case AtomicExpr::AO__scoped_atomic_fetch_sub:
    Op = E->getValueType()->isFloatingType() ? llvm::AtomicRMWInst::FSub
                                             : llvm::AtomicRMWInst::Sub;
    break;

  case AtomicExpr::AO__atomic_min_fetch:
  case AtomicExpr::AO__scoped_atomic_min_fetch:
    PostOpMinMax = true;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_min:
  case AtomicExpr::AO__hip_atomic_fetch_min:
  case AtomicExpr::AO__opencl_atomic_fetch_min:
  case AtomicExpr::AO__atomic_fetch_min:
  case AtomicExpr::AO__scoped_atomic_fetch_min:
    Op = E->getValueType()->isFloatingType()
             ? llvm::AtomicRMWInst::FMin
             : (E->getValueType()->isSignedIntegerType()
                    ? llvm::AtomicRMWInst::Min
                    : llvm::AtomicRMWInst::UMin);
    break;

  case AtomicExpr::AO__atomic_max_fetch:
  case AtomicExpr::AO__scoped_atomic_max_fetch:
    PostOpMinMax = true;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_max:
  case AtomicExpr::AO__hip_atomic_fetch_max:
  case AtomicExpr::AO__opencl_atomic_fetch_max:
  case AtomicExpr::AO__atomic_fetch_max:
  case AtomicExpr::AO__scoped_atomic_fetch_max:
    Op = E->getValueType()->isFloatingType()
             ? llvm::AtomicRMWInst::FMax
             : (E->getValueType()->isSignedIntegerType()
                    ? llvm::AtomicRMWInst::Max
                    : llvm::AtomicRMWInst::UMax);
    break;

  case AtomicExpr::AO__atomic_and_fetch:
  case AtomicExpr::AO__scoped_atomic_and_fetch:
    PostOp = llvm::Instruction::And;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__hip_atomic_fetch_and:
  case AtomicExpr::AO__opencl_atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_and:
  case AtomicExpr::AO__scoped_atomic_fetch_and:
    Op = llvm::AtomicRMWInst::And;
    break;

  case AtomicExpr::AO__atomic_or_fetch:
  case AtomicExpr::AO__scoped_atomic_or_fetch:
    PostOp = llvm::Instruction::Or;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__hip_atomic_fetch_or:
  case AtomicExpr::AO__opencl_atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_or:
  case AtomicExpr::AO__scoped_atomic_fetch_or:
    Op = llvm::AtomicRMWInst::Or;
    break;

  case AtomicExpr::AO__atomic_xor_fetch:
  case AtomicExpr::AO__scoped_atomic_xor_fetch:
    PostOp = llvm::Instruction::Xor;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__hip_atomic_fetch_xor:
  case AtomicExpr::AO__opencl_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_xor:
  case AtomicExpr::AO__scoped_atomic_fetch_xor:
    Op = llvm::AtomicRMWInst::Xor;
    break;

  case AtomicExpr::AO__atomic_nand_fetch:
  case AtomicExpr::AO__scoped_atomic_nand_fetch:
    PostOp = llvm::Instruction::And; // the NOT is special cased below
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_nand:
  case AtomicExpr::AO__atomic_fetch_nand:
  case AtomicExpr::AO__scoped_atomic_fetch_nand:
    Op = llvm::AtomicRMWInst::Nand;
    break;

  case AtomicExpr::AO__atomic_test_and_set: {
    llvm::AtomicRMWInst *RMWI =
        CGF.emitAtomicRMWInst(llvm::AtomicRMWInst::Xchg, Ptr,
                              CGF.Builder.getInt8(1), Order, Scope, E);
    RMWI->setVolatile(E->isVolatile());
    llvm::Value *Result =
        CGF.Builder.CreateICmpNE(RMWI, CGF.Builder.getInt8(0), "tobool");
    CGF.Builder.CreateStore(Result, Dest);
    return;
  }

  case AtomicExpr::AO__atomic_clear: {
    llvm::StoreInst *Store =
        CGF.Builder.CreateStore(CGF.Builder.getInt8(0), Ptr);
    Store->setAtomic(Order, Scope);
    Store->setVolatile(E->isVolatile());
    return;
  }
  }

  llvm::Value *LoadVal1 = CGF.Builder.CreateLoad(Val1);
  llvm::AtomicRMWInst *RMWI =
      CGF.emitAtomicRMWInst(Op, Ptr, LoadVal1, Order, Scope, E);
  RMWI->setVolatile(E->isVolatile());

  // For __atomic_*_fetch operations, perform the operation again to
  // determine the value which was written.
  llvm::Value *Result = RMWI;
  if (PostOpMinMax)
    Result = EmitPostAtomicMinMax(CGF.Builder, E->getOp(),
                                  E->getValueType()->isSignedIntegerType(),
                                  RMWI, LoadVal1);
  else if (PostOp)
    Result = CGF.Builder.CreateBinOp((llvm::Instruction::BinaryOps)PostOp, RMWI,
                                     LoadVal1);
  if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch ||
      E->getOp() == AtomicExpr::AO__scoped_atomic_nand_fetch)
    Result = CGF.Builder.CreateNot(Result);
  CGF.Builder.CreateStore(Result, Dest);
}
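
// Example (illustrative): __atomic_nand_fetch(p, v, order) lowers to
//   %old = atomicrmw nand ptr %p, i32 %v <order>
//   %and = and i32 %old, %v
//   %res = xor i32 %and, -1                       ; CreateNot
// so the value produced is ~(old & v), the value the RMW actually stored.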
static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *Expr, Address Dest,
                         Address Ptr, Address Val1, Address Val2,
                         llvm::Value *IsWeak, llvm::Value *FailureOrder,
                         uint64_t Size, llvm::AtomicOrdering Order,
                         llvm::Value *Scope) {
  auto ScopeModel = Expr->getScopeModel();

  // LLVM atomic instructions always have a sync scope. If the clang atomic
  // expression has no scope operand, create the default sync scope.
  if (!ScopeModel) {
    llvm::SyncScope::ID SS;
    if (CGF.getLangOpts().OpenCL)
      // OpenCL approach is: "The functions that do not have memory_scope
      // argument have the same semantics as the corresponding functions with
      // the memory_scope argument set to memory_scope_device."
      SS = CGF.getTargetHooks().getLLVMSyncScopeID(CGF.getLangOpts(),
                                                   SyncScope::OpenCLDevice,
                                                   Order, CGF.getLLVMContext());
    else
      SS = llvm::SyncScope::System;
    EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
                 Order, SS);
    return;
  }

  // Handle constant scope.
  if (auto SC = dyn_cast<llvm::ConstantInt>(Scope)) {
    auto SCID = CGF.getTargetHooks().getLLVMSyncScopeID(
        CGF.CGM.getLangOpts(), ScopeModel->map(SC->getZExtValue()), Order,
        CGF.getLLVMContext());
    EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
                 Order, SCID);
    return;
  }

  // Handle non-constant scope.
  auto &Builder = CGF.Builder;
  auto Scopes = ScopeModel->getRuntimeValues();
  llvm::DenseMap<unsigned, llvm::BasicBlock *> BB;
  for (auto S : Scopes)
    BB[S] = CGF.createBasicBlock(getAsString(ScopeModel->map(S)), CGF.CurFn);

  llvm::BasicBlock *ContBB =
      CGF.createBasicBlock("atomic.scope.continue", CGF.CurFn);

  auto *SC = Builder.CreateIntCast(Scope, Builder.getInt32Ty(), false);
  // If an unsupported sync scope is encountered at run time, assume a
  // fallback sync scope value.
  auto FallBack = ScopeModel->getFallBackValue();
  llvm::SwitchInst *SI = Builder.CreateSwitch(SC, BB[FallBack]);
  for (auto S : Scopes) {
    auto *B = BB[S];
    if (S != FallBack)
      SI->addCase(Builder.getInt32(S), B);

    Builder.SetInsertPoint(B);
    EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
                 Order,
                 CGF.getTargetHooks().getLLVMSyncScopeID(CGF.CGM.getLangOpts(),
                                                         ScopeModel->map(S),
                                                         Order,
                                                         CGF.getLLVMContext()));
    Builder.CreateBr(ContBB);
  }

  Builder.SetInsertPoint(ContBB);
}
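
// Note (illustrative): for a scoped builtin such as
//   __opencl_atomic_load(p, order, memory_scope_work_group)
// a constant scope folds directly to the target's sync-scope ID, while a
// runtime scope produces the switch above, one block per value in
// ScopeModel->getRuntimeValues(), defaulting to the model's fallback scope.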
static Address EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
  Address DeclPtr = CGF.CreateMemTemp(E->getType(), ".atomictmp");
  CGF.EmitAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(),
                       /*Init*/ true);
  return DeclPtr;
}

RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
  QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
  QualType MemTy = AtomicTy;
  if (const AtomicType *AT = AtomicTy->getAs<AtomicType>())
    MemTy = AT->getValueType();
  llvm::Value *IsWeak = nullptr, *OrderFail = nullptr;

  Address Val1 = Address::invalid();
  Address Val2 = Address::invalid();
  Address Dest = Address::invalid();
  Address Ptr = EmitPointerWithAlignment(E->getPtr());

  if (E->getOp() == AtomicExpr::AO__c11_atomic_init ||
      E->getOp() == AtomicExpr::AO__opencl_atomic_init) {
    LValue lvalue = MakeAddrLValue(Ptr, AtomicTy);
    EmitAtomicInit(E->getVal1(), lvalue);
    return RValue::get(nullptr);
  }

  auto TInfo = getContext().getTypeInfoInChars(AtomicTy);
  uint64_t Size = TInfo.Width.getQuantity();
  unsigned MaxInlineWidthInBits = getTarget().getMaxAtomicInlineWidth();

  CharUnits MaxInlineWidth =
      getContext().toCharUnitsFromBits(MaxInlineWidthInBits);
  DiagnosticsEngine &Diags = CGM.getDiags();
  bool Misaligned = (Ptr.getAlignment() % TInfo.Width) != 0;
  bool Oversized = getContext().toBits(TInfo.Width) > MaxInlineWidthInBits;
  if (Misaligned) {
    Diags.Report(E->getBeginLoc(), diag::warn_atomic_op_misaligned)
        << (int)TInfo.Width.getQuantity()
        << (int)Ptr.getAlignment().getQuantity();
  }
  if (Oversized) {
    Diags.Report(E->getBeginLoc(), diag::warn_atomic_op_oversized)
        << (int)TInfo.Width.getQuantity() << (int)MaxInlineWidth.getQuantity();
  }

  llvm::Value *Order = EmitScalarExpr(E->getOrder());
  llvm::Value *Scope =
      E->getScopeModel() ? EmitScalarExpr(E->getScope()) : nullptr;
  bool ShouldCastToIntPtrTy = true;

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
  case AtomicExpr::AO__opencl_atomic_init:
    llvm_unreachable("Already handled above with EmitAtomicInit!");

  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__scoped_atomic_load_n:
  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__opencl_atomic_load:
  case AtomicExpr::AO__hip_atomic_load:
  case AtomicExpr::AO__atomic_test_and_set:
  case AtomicExpr::AO__atomic_clear:
    break;

  case AtomicExpr::AO__atomic_load:
  case AtomicExpr::AO__scoped_atomic_load:
    Dest = EmitPointerWithAlignment(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_store:
  case AtomicExpr::AO__scoped_atomic_store:
    Val1 = EmitPointerWithAlignment(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_exchange:
  case AtomicExpr::AO__scoped_atomic_exchange:
    Val1 = EmitPointerWithAlignment(E->getVal1());
    Dest = EmitPointerWithAlignment(E->getVal2());
    break;

  case AtomicExpr::AO__atomic_compare_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
  case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
  case AtomicExpr::AO__scoped_atomic_compare_exchange:
  case AtomicExpr::AO__scoped_atomic_compare_exchange_n:
    Val1 = EmitPointerWithAlignment(E->getVal1());
    if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange ||
        E->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange)
      Val2 = EmitPointerWithAlignment(E->getVal2());
    else
      Val2 = EmitValToTemp(*this, E->getVal2());
    OrderFail = EmitScalarExpr(E->getOrderFail());
    if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange_n ||
        E->getOp() == AtomicExpr::AO__atomic_compare_exchange ||
        E->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange_n ||
        E->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange)
      IsWeak = EmitScalarExpr(E->getWeak());
    break;

  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__hip_atomic_fetch_add:
  case AtomicExpr::AO__hip_atomic_fetch_sub:
  case AtomicExpr::AO__opencl_atomic_fetch_add:
  case AtomicExpr::AO__opencl_atomic_fetch_sub:
    if (MemTy->isPointerType()) {
      // For pointer arithmetic, we're required to do a bit of math:
      // adding 1 to an int* is not the same as adding 1 to a uintptr_t.
      // ... but only for the C11 builtins. The GNU builtins expect the
      // user to multiply by sizeof(T).
      QualType Val1Ty = E->getVal1()->getType();
      llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
      CharUnits PointeeIncAmt =
          getContext().getTypeSizeInChars(MemTy->getPointeeType());
      Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
      auto Temp = CreateMemTemp(Val1Ty, ".atomictmp");
      Val1 = Temp;
      EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Temp, Val1Ty));
      break;
    }
    [[fallthrough]];
  case AtomicExpr::AO__atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_max:
  case AtomicExpr::AO__atomic_fetch_min:
  case AtomicExpr::AO__atomic_fetch_sub:
  case AtomicExpr::AO__atomic_add_fetch:
  case AtomicExpr::AO__atomic_max_fetch:
  case AtomicExpr::AO__atomic_min_fetch:
  case AtomicExpr::AO__atomic_sub_fetch:
  case AtomicExpr::AO__c11_atomic_fetch_max:
  case AtomicExpr::AO__c11_atomic_fetch_min:
  case AtomicExpr::AO__opencl_atomic_fetch_max:
  case AtomicExpr::AO__opencl_atomic_fetch_min:
  case AtomicExpr::AO__hip_atomic_fetch_max:
  case AtomicExpr::AO__hip_atomic_fetch_min:
  case AtomicExpr::AO__scoped_atomic_fetch_add:
  case AtomicExpr::AO__scoped_atomic_fetch_max:
  case AtomicExpr::AO__scoped_atomic_fetch_min:
  case AtomicExpr::AO__scoped_atomic_fetch_sub:
  case AtomicExpr::AO__scoped_atomic_add_fetch:
  case AtomicExpr::AO__scoped_atomic_max_fetch:
  case AtomicExpr::AO__scoped_atomic_min_fetch:
  case AtomicExpr::AO__scoped_atomic_sub_fetch:
    ShouldCastToIntPtrTy = !MemTy->isFloatingType();
    [[fallthrough]];

  case AtomicExpr::AO__atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_nand:
  case AtomicExpr::AO__atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_xor:
  case AtomicExpr::AO__atomic_and_fetch:
  case AtomicExpr::AO__atomic_nand_fetch:
  case AtomicExpr::AO__atomic_or_fetch:
  case AtomicExpr::AO__atomic_xor_fetch:
  case AtomicExpr::AO__atomic_store_n:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__c11_atomic_fetch_nand:
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__hip_atomic_fetch_and:
  case AtomicExpr::AO__hip_atomic_fetch_or:
  case AtomicExpr::AO__hip_atomic_fetch_xor:
  case AtomicExpr::AO__hip_atomic_store:
  case AtomicExpr::AO__hip_atomic_exchange:
  case AtomicExpr::AO__opencl_atomic_fetch_and:
  case AtomicExpr::AO__opencl_atomic_fetch_or:
  case AtomicExpr::AO__opencl_atomic_fetch_xor:
  case AtomicExpr::AO__opencl_atomic_store:
  case AtomicExpr::AO__opencl_atomic_exchange:
  case AtomicExpr::AO__scoped_atomic_fetch_and:
  case AtomicExpr::AO__scoped_atomic_fetch_nand:
  case AtomicExpr::AO__scoped_atomic_fetch_or:
  case AtomicExpr::AO__scoped_atomic_fetch_xor:
  case AtomicExpr::AO__scoped_atomic_and_fetch:
  case AtomicExpr::AO__scoped_atomic_nand_fetch:
  case AtomicExpr::AO__scoped_atomic_or_fetch:
  case AtomicExpr::AO__scoped_atomic_xor_fetch:
  case AtomicExpr::AO__scoped_atomic_store_n:
  case AtomicExpr::AO__scoped_atomic_exchange_n:
    Val1 = EmitValToTemp(*this, E->getVal1());
    break;
  }

  QualType RValTy = E->getType().getUnqualifiedType();

  // The inlined atomics only function on iN types, where N is a power of 2.
  // We need to make sure (via temporaries if necessary) that all incoming
  // values are compatible.
  LValue AtomicVal = MakeAddrLValue(Ptr, AtomicTy);
  AtomicInfo Atomics(*this, AtomicVal);

  if (ShouldCastToIntPtrTy) {
    Ptr = Atomics.castToAtomicIntPointer(Ptr);
    if (Val1.isValid())
      Val1 = Atomics.convertToAtomicIntPointer(Val1);
    if (Val2.isValid())
      Val2 = Atomics.convertToAtomicIntPointer(Val2);
  }
  if (Dest.isValid()) {
    if (ShouldCastToIntPtrTy)
      Dest = Atomics.castToAtomicIntPointer(Dest);
  } else if (E->isCmpXChg())
    Dest = CreateMemTemp(RValTy, "cmpxchg.bool");
  else if (!RValTy->isVoidType()) {
    Dest = Atomics.CreateTempAlloca();
    if (ShouldCastToIntPtrTy)
      Dest = Atomics.castToAtomicIntPointer(Dest);
  }

  bool PowerOf2Size = (Size & (Size - 1)) == 0;
  bool UseLibcall = !PowerOf2Size || (Size > 16);
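
  // Illustrative consequences of the two flags above (not from the original
  // source): Size 8 or 16 with power-of-2 layout stays inline (the backend
  // may still emit a call), while Size 24 (not a power of 2) or Size 32
  // (> 16 bytes) is lowered to a generic __atomic_* libcall right here in
  // the frontend.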
  // For atomics larger than 16 bytes, or with a non-power-of-2 size, emit a
  // libcall from the frontend instead of relying on the backend lowering.
  // See: https://llvm.org/docs/Atomics.html#libcalls-atomic
  if (UseLibcall) {
    CallArgList Args;
    // For non-optimized library calls, the size is the first parameter.
    Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
             getContext().getSizeType());

    // The atomic address is the second parameter.
    // The OpenCL atomic library functions only accept pointer arguments to
    // generic address space.
    auto CastToGenericAddrSpace = [&](llvm::Value *V, QualType PT) {
      if (!E->isOpenCL())
        return V;
      auto AS = PT->castAs<PointerType>()->getPointeeType().getAddressSpace();
      if (AS == LangAS::opencl_generic)
        return V;
      auto DestAS = getContext().getTargetAddressSpace(LangAS::opencl_generic);
      auto *DestType = llvm::PointerType::get(getLLVMContext(), DestAS);

      return getTargetHooks().performAddrSpaceCast(*this, V, AS, DestType,
                                                   false);
    };

    Args.add(RValue::get(CastToGenericAddrSpace(Ptr.emitRawPointer(*this),
                                                E->getPtr()->getType())),
             getContext().VoidPtrTy);
    std::string LibCallName;
    QualType RetTy;
    bool HaveRetTy = false;
    switch (E->getOp()) {
    case AtomicExpr::AO__c11_atomic_init:
    case AtomicExpr::AO__opencl_atomic_init:
      llvm_unreachable("Already handled!");

    // There is only one libcall for compare and exchange, because there is no
    // optimisation benefit possible from a libcall version of a weak compare
    // and exchange.
    // bool __atomic_compare_exchange(size_t size, void *mem, void *expected,
    //                                void *desired, int success, int failure)
    case AtomicExpr::AO__atomic_compare_exchange:
    case AtomicExpr::AO__atomic_compare_exchange_n:
    case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
    case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
    case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
    case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
    case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
    case AtomicExpr::AO__scoped_atomic_compare_exchange:
    case AtomicExpr::AO__scoped_atomic_compare_exchange_n:
      LibCallName = "__atomic_compare_exchange";
      RetTy = getContext().BoolTy;
      HaveRetTy = true;
      Args.add(RValue::get(CastToGenericAddrSpace(Val1.emitRawPointer(*this),
                                                  E->getVal1()->getType())),
               getContext().VoidPtrTy);
      Args.add(RValue::get(CastToGenericAddrSpace(Val2.emitRawPointer(*this),
                                                  E->getVal2()->getType())),
               getContext().VoidPtrTy);
      Args.add(RValue::get(Order), getContext().IntTy);
      Order = OrderFail;
      break;
    // void __atomic_exchange(size_t size, void *mem, void *val, void *return,
    //                        int order)
    case AtomicExpr::AO__atomic_exchange:
    case AtomicExpr::AO__atomic_exchange_n:
    case AtomicExpr::AO__c11_atomic_exchange:
    case AtomicExpr::AO__hip_atomic_exchange:
    case AtomicExpr::AO__opencl_atomic_exchange:
    case AtomicExpr::AO__scoped_atomic_exchange:
    case AtomicExpr::AO__scoped_atomic_exchange_n:
      LibCallName = "__atomic_exchange";
      Args.add(RValue::get(CastToGenericAddrSpace(Val1.emitRawPointer(*this),
                                                  E->getVal1()->getType())),
               getContext().VoidPtrTy);
      break;
    // void __atomic_store(size_t size, void *mem, void *val, int order)
    case AtomicExpr::AO__atomic_store:
    case AtomicExpr::AO__atomic_store_n:
    case AtomicExpr::AO__c11_atomic_store:
    case AtomicExpr::AO__hip_atomic_store:
    case AtomicExpr::AO__opencl_atomic_store:
    case AtomicExpr::AO__scoped_atomic_store:
    case AtomicExpr::AO__scoped_atomic_store_n:
      LibCallName = "__atomic_store";
      RetTy = getContext().VoidTy;
      HaveRetTy = true;
      Args.add(RValue::get(CastToGenericAddrSpace(Val1.emitRawPointer(*this),
                                                  E->getVal1()->getType())),
               getContext().VoidPtrTy);
      break;
    // void __atomic_load(size_t size, void *mem, void *return, int order)
    case AtomicExpr::AO__atomic_load:
    case AtomicExpr::AO__atomic_load_n:
    case AtomicExpr::AO__c11_atomic_load:
    case AtomicExpr::AO__hip_atomic_load:
    case AtomicExpr::AO__opencl_atomic_load:
    case AtomicExpr::AO__scoped_atomic_load:
    case AtomicExpr::AO__scoped_atomic_load_n:
      LibCallName = "__atomic_load";
      break;
    case AtomicExpr::AO__atomic_add_fetch:
    case AtomicExpr::AO__scoped_atomic_add_fetch:
    case AtomicExpr::AO__atomic_fetch_add:
    case AtomicExpr::AO__c11_atomic_fetch_add:
    case AtomicExpr::AO__hip_atomic_fetch_add:
    case AtomicExpr::AO__opencl_atomic_fetch_add:
    case AtomicExpr::AO__scoped_atomic_fetch_add:
    case AtomicExpr::AO__atomic_and_fetch:
    case AtomicExpr::AO__scoped_atomic_and_fetch:
    case AtomicExpr::AO__atomic_fetch_and:
    case AtomicExpr::AO__c11_atomic_fetch_and:
    case AtomicExpr::AO__hip_atomic_fetch_and:
    case AtomicExpr::AO__opencl_atomic_fetch_and:
    case AtomicExpr::AO__scoped_atomic_fetch_and:
    case AtomicExpr::AO__atomic_or_fetch:
    case AtomicExpr::AO__scoped_atomic_or_fetch:
    case AtomicExpr::AO__atomic_fetch_or:
    case AtomicExpr::AO__c11_atomic_fetch_or:
    case AtomicExpr::AO__hip_atomic_fetch_or:
    case AtomicExpr::AO__opencl_atomic_fetch_or:
    case AtomicExpr::AO__scoped_atomic_fetch_or:
    case AtomicExpr::AO__atomic_sub_fetch:
    case AtomicExpr::AO__scoped_atomic_sub_fetch:
    case AtomicExpr::AO__atomic_fetch_sub:
    case AtomicExpr::AO__c11_atomic_fetch_sub:
    case AtomicExpr::AO__hip_atomic_fetch_sub:
    case AtomicExpr::AO__opencl_atomic_fetch_sub:
    case AtomicExpr::AO__scoped_atomic_fetch_sub:
    case AtomicExpr::AO__atomic_xor_fetch:
    case AtomicExpr::AO__scoped_atomic_xor_fetch:
    case AtomicExpr::AO__atomic_fetch_xor:
    case AtomicExpr::AO__c11_atomic_fetch_xor:
    case AtomicExpr::AO__hip_atomic_fetch_xor:
    case AtomicExpr::AO__opencl_atomic_fetch_xor:
    case AtomicExpr::AO__scoped_atomic_fetch_xor:
    case AtomicExpr::AO__atomic_nand_fetch:
    case AtomicExpr::AO__atomic_fetch_nand:
    case AtomicExpr::AO__c11_atomic_fetch_nand:
    case AtomicExpr::AO__scoped_atomic_fetch_nand:
    case AtomicExpr::AO__scoped_atomic_nand_fetch:
    case AtomicExpr::AO__atomic_min_fetch:
    case AtomicExpr::AO__atomic_fetch_min:
    case AtomicExpr::AO__c11_atomic_fetch_min:
    case AtomicExpr::AO__hip_atomic_fetch_min:
    case AtomicExpr::AO__opencl_atomic_fetch_min:
    case AtomicExpr::AO__scoped_atomic_fetch_min:
    case AtomicExpr::AO__scoped_atomic_min_fetch:
    case AtomicExpr::AO__atomic_max_fetch:
    case AtomicExpr::AO__atomic_fetch_max:
    case AtomicExpr::AO__c11_atomic_fetch_max:
    case AtomicExpr::AO__hip_atomic_fetch_max:
    case AtomicExpr::AO__opencl_atomic_fetch_max:
    case AtomicExpr::AO__scoped_atomic_fetch_max:
    case AtomicExpr::AO__scoped_atomic_max_fetch:
    case AtomicExpr::AO__atomic_test_and_set:
    case AtomicExpr::AO__atomic_clear:
      llvm_unreachable("Integral atomic operations always become atomicrmw!");
    }

    if (E->isOpenCL()) {
      LibCallName =
          std::string("__opencl") + StringRef(LibCallName).drop_front(1).str();
    }
    // By default, assume we return a value of the atomic type.
    if (!HaveRetTy) {
      // Value is returned through parameter before the order.
      RetTy = getContext().VoidTy;
      Args.add(RValue::get(CastToGenericAddrSpace(Dest.emitRawPointer(*this),
                                                  E->getType())),
               getContext().VoidPtrTy);
    }
    // Order is always the last parameter.
    Args.add(RValue::get(Order), getContext().IntTy);
    if (E->isOpenCL())
      Args.add(RValue::get(Scope), getContext().IntTy);

    RValue Res = emitAtomicLibcall(*this, LibCallName, RetTy, Args);
    // The value is returned directly from the libcall.
    if (E->isCmpXChg())
      return Res;

    if (RValTy->isVoidType())
      return RValue::get(nullptr);

    return convertTempToRValue(Dest.withElementType(ConvertTypeForMem(RValTy)),
                               RValTy, E->getExprLoc());
  }

  bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
                 E->getOp() == AtomicExpr::AO__opencl_atomic_store ||
                 E->getOp() == AtomicExpr::AO__hip_atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store_n ||
                 E->getOp() == AtomicExpr::AO__scoped_atomic_store ||
                 E->getOp() == AtomicExpr::AO__scoped_atomic_store_n ||
                 E->getOp() == AtomicExpr::AO__atomic_clear;
  bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
                E->getOp() == AtomicExpr::AO__opencl_atomic_load ||
                E->getOp() == AtomicExpr::AO__hip_atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load_n ||
                E->getOp() == AtomicExpr::AO__scoped_atomic_load ||
                E->getOp() == AtomicExpr::AO__scoped_atomic_load_n;
  if (isa<llvm::ConstantInt>(Order)) {
    auto ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
    // We should not ever get to a case where the ordering isn't a valid CABI
    // value, but it's hard to enforce that in general.
    if (llvm::isValidAtomicOrderingCABI(ord))
      switch ((llvm::AtomicOrderingCABI)ord) {
      case llvm::AtomicOrderingCABI::relaxed:
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                     llvm::AtomicOrdering::Monotonic, Scope);
        break;
      case llvm::AtomicOrderingCABI::consume:
      case llvm::AtomicOrderingCABI::acquire:
        if (IsStore)
          break; // Avoid crashing on code with undefined behavior.
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                     llvm::AtomicOrdering::Acquire, Scope);
        break;
      case llvm::AtomicOrderingCABI::release:
        if (IsLoad)
          break; // Avoid crashing on code with undefined behavior.
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                     llvm::AtomicOrdering::Release, Scope);
        break;
      case llvm::AtomicOrderingCABI::acq_rel:
        if (IsLoad || IsStore)
          break; // Avoid crashing on code with undefined behavior.
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                     llvm::AtomicOrdering::AcquireRelease, Scope);
        break;
      case llvm::AtomicOrderingCABI::seq_cst:
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                     llvm::AtomicOrdering::SequentiallyConsistent, Scope);
        break;
      }
    if (RValTy->isVoidType())
      return RValue::get(nullptr);

    return convertTempToRValue(Dest.withElementType(ConvertTypeForMem(RValTy)),
                               RValTy, E->getExprLoc());
  }
  // Long case, when Order isn't obviously constant.

  // Create all the relevant BB's.
  llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
                   *ReleaseBB = nullptr, *AcqRelBB = nullptr,
                   *SeqCstBB = nullptr;
  MonotonicBB = createBasicBlock("monotonic", CurFn);
  if (!IsStore)
    AcquireBB = createBasicBlock("acquire", CurFn);
  if (!IsLoad)
    ReleaseBB = createBasicBlock("release", CurFn);
  if (!IsLoad && !IsStore)
    AcqRelBB = createBasicBlock("acqrel", CurFn);
  SeqCstBB = createBasicBlock("seqcst", CurFn);
  llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);

  // Create the switch for the split.
  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
  // doesn't matter unless someone is crazy enough to use something that
  // doesn't fold to a constant for the ordering.
  Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
  llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);

  // Emit all the different atomics.
  Builder.SetInsertPoint(MonotonicBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
               llvm::AtomicOrdering::Monotonic, Scope);
  Builder.CreateBr(ContBB);
  if (!IsStore) {
    Builder.SetInsertPoint(AcquireBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                 llvm::AtomicOrdering::Acquire, Scope);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
                AcquireBB);
    SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acquire),
                AcquireBB);
  }
  if (!IsLoad) {
    Builder.SetInsertPoint(ReleaseBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                 llvm::AtomicOrdering::Release, Scope);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::release),
                ReleaseBB);
  }
  if (!IsLoad && !IsStore) {
    Builder.SetInsertPoint(AcqRelBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                 llvm::AtomicOrdering::AcquireRelease, Scope);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acq_rel),
                AcqRelBB);
  }
  Builder.SetInsertPoint(SeqCstBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
               llvm::AtomicOrdering::SequentiallyConsistent, Scope);
  Builder.CreateBr(ContBB);
  SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),
              SeqCstBB);

  // Cleanup and return.
  Builder.SetInsertPoint(ContBB);
  if (RValTy->isVoidType())
    return RValue::get(nullptr);
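
  // Sketch of the emitted control flow when the ordering is dynamic
  // (illustrative): a switch on the i32 order value,
  //   switch i32 %order, label %monotonic [ i32 1, label %acquire
  //                                         i32 2, label %acquire
  //                                         i32 3, label %release ... ]
  // where each destination re-emits the operation with a fixed ordering and
  // branches to %atomic.continue.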
  assert(Atomics.getValueSizeInBits() <= Atomics.getAtomicSizeInBits());
  return convertTempToRValue(Dest.withElementType(ConvertTypeForMem(RValTy)),
                             RValTy, E->getExprLoc());
}

Address AtomicInfo::castToAtomicIntPointer(Address Addr) const {
  llvm::IntegerType *ty =
      llvm::IntegerType::get(CGF.getLLVMContext(), AtomicSizeInBits);
  return Addr.withElementType(ty);
}

Address AtomicInfo::convertToAtomicIntPointer(Address Addr) const {
  llvm::Type *Ty = Addr.getElementType();
  uint64_t SourceSizeInBits = CGF.CGM.getDataLayout().getTypeSizeInBits(Ty);
  if (SourceSizeInBits != AtomicSizeInBits) {
    Address Tmp = CreateTempAlloca();
    CGF.Builder.CreateMemCpy(Tmp, Addr,
                             std::min(AtomicSizeInBits, SourceSizeInBits) / 8);
    Addr = Tmp;
  }

  return castToAtomicIntPointer(Addr);
}
RValue AtomicInfo::convertAtomicTempToRValue(Address addr,
                                             AggValueSlot resultSlot,
                                             SourceLocation loc,
                                             bool asValue) const {
  if (LVal.isSimple()) {
    // Drill into the padding structure if we have one.
    if (hasPadding())
      addr = CGF.Builder.CreateStructGEP(addr, 0);

    // Otherwise, just convert the temporary to an r-value using the
    // normal conversion routine.
    return CGF.convertTempToRValue(addr, getValueType(), loc);
  }
  if (!asValue)
    // Get RValue from temp memory as atomic for non-simple lvalues.
    return RValue::get(CGF.Builder.CreateLoad(addr));
  if (LVal.isBitField())
    return CGF.EmitLoadOfBitfieldLValue(
        LValue::MakeBitfield(addr, LVal.getBitFieldInfo(), LVal.getType(),
                             LVal.getBaseInfo(), TBAAAccessInfo()),
        loc);
  if (LVal.isVectorElt())
    return CGF.EmitLoadOfLValue(
        LValue::MakeVectorElt(addr, LVal.getVectorIdx(), LVal.getType(),
                              LVal.getBaseInfo(), TBAAAccessInfo()),
        loc);
  assert(LVal.isExtVectorElt());
  return CGF.EmitLoadOfExtVectorElementLValue(LValue::MakeExtVectorElt(
      addr, LVal.getExtVectorElts(), LVal.getType(), LVal.getBaseInfo(),
      TBAAAccessInfo()));
}
/// Return true if \p ValTy is a type that should be casted to integer
/// around the atomic memory operation. If \p CmpXchg is true, then the
/// cast is used inside a cmpxchg expression.
static bool shouldCastToInt(llvm::Type *ValTy, bool CmpXchg) {
  if (ValTy->isFloatingPointTy())
    return ValTy->isX86_FP80Ty() || CmpXchg;
  return !ValTy->isIntegerTy() && !ValTy->isPointerTy();
}
RValue AtomicInfo::ConvertToValueOrAtomic(llvm::Value *Val,
                                          AggValueSlot ResultSlot,
                                          SourceLocation Loc, bool AsValue,
                                          bool CmpXchg) const {
  // Try not to in some easy cases.
  assert((Val->getType()->isIntegerTy() || Val->getType()->isPointerTy() ||
          Val->getType()->isIEEELikeFPTy()) &&
         "Expected integer, pointer or floating point value when converting "
         "result.");
  if (getEvaluationKind() == TEK_Scalar &&
      (((!LVal.isBitField() ||
         LVal.getBitFieldInfo().Size == ValueSizeInBits) &&
        !hasPadding()) ||
       !AsValue)) {
    auto *ValTy = AsValue
                      ? CGF.ConvertTypeForMem(ValueTy)
                      : getAtomicAddress().getElementType();
    if (!shouldCastToInt(ValTy, CmpXchg)) {
      assert((!ValTy->isIntegerTy() || Val->getType() == ValTy) &&
             "Different integer types.");
      return RValue::get(CGF.EmitFromMemory(Val, ValueTy));
    }
    if (llvm::CastInst::isBitCastable(Val->getType(), ValTy))
      return RValue::get(CGF.Builder.CreateBitCast(Val, ValTy));
  }

  // Create a temporary.  This needs to be big enough to hold the
  // atomic integer.
  Address Temp = Address::invalid();
  bool TempIsVolatile = false;
  if (AsValue && getEvaluationKind() == TEK_Aggregate) {
    assert(!ResultSlot.isIgnored());
    Temp = ResultSlot.getAddress();
    TempIsVolatile = ResultSlot.isVolatile();
  } else {
    Temp = CreateTempAlloca();
  }

  // Slam the integer into the temporary.
  Address CastTemp = castToAtomicIntPointer(Temp);
  CGF.Builder.CreateStore(Val, CastTemp)->setVolatile(TempIsVolatile);

  return convertAtomicTempToRValue(Temp, ResultSlot, Loc, AsValue);
}
void AtomicInfo::EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,
                                       llvm::AtomicOrdering AO, bool) {
  // void __atomic_load(size_t size, void *mem, void *return, int order);
  CallArgList Args;
  Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType());
  Args.add(RValue::get(getAtomicPointer()), CGF.getContext().VoidPtrTy);
  Args.add(RValue::get(AddForLoaded), CGF.getContext().VoidPtrTy);
  Args.add(
      RValue::get(llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(AO))),
      CGF.getContext().IntTy);
  emitAtomicLibcall(CGF, "__atomic_load", CGF.getContext().VoidTy, Args);
}
llvm::Value *AtomicInfo::EmitAtomicLoadOp(llvm::AtomicOrdering AO,
                                          bool IsVolatile, bool CmpXchg) {
  // Okay, we're doing this natively.
  Address Addr = getAtomicAddress();
  if (shouldCastToInt(Addr.getElementType(), CmpXchg))
    Addr = castToAtomicIntPointer(Addr);
  llvm::LoadInst *Load = CGF.Builder.CreateLoad(Addr, "atomic-load");
  Load->setAtomic(AO);

  // Other decoration.
  if (IsVolatile)
    Load->setVolatile(true);
  CGF.CGM.DecorateInstructionWithTBAA(Load, LVal.getTBAAInfo());
  return Load;
}
/// An LValue is a candidate for having its loads and stores be made atomic if
/// we are operating under /volatile:ms *and* the LValue itself is volatile and
/// performing such an operation can be performed without a libcall.
bool CodeGenFunction::LValueIsSuitableForInlineAtomic(LValue LV) {
  if (!CGM.getLangOpts().MSVolatile)
    return false;
  AtomicInfo AI(*this, LV);
  bool IsVolatile = LV.isVolatile() || hasVolatileMember(LV.getType());
  // An atomic is inline if we don't need to use a libcall.
  bool AtomicIsInline = !AI.shouldUseLibcall();
  // MSVC doesn't seem to do this for types wider than a pointer.
  if (getContext().getTypeSize(LV.getType()) >
      getContext().getTypeSize(getContext().getIntPtrType()))
    return false;
  return IsVolatile && AtomicIsInline;
}

RValue CodeGenFunction::EmitAtomicLoad(LValue LV, SourceLocation SL,
                                       AggValueSlot Slot) {
  llvm::AtomicOrdering AO;
  bool IsVolatile = LV.isVolatileQualified();
  if (LV.getType()->isAtomicType()) {
    AO = llvm::AtomicOrdering::SequentiallyConsistent;
  } else {
    AO = llvm::AtomicOrdering::Acquire;
    IsVolatile = true;
  }
  return EmitAtomicLoad(LV, SL, AO, IsVolatile, Slot);
}
RValue AtomicInfo::EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
                                  bool AsValue, llvm::AtomicOrdering AO,
                                  bool IsVolatile) {
  // Check whether we should use a library call.
  if (shouldUseLibcall()) {
    Address TempAddr = Address::invalid();
    if (LVal.isSimple() && !ResultSlot.isIgnored()) {
      assert(getEvaluationKind() == TEK_Aggregate);
      TempAddr = ResultSlot.getAddress();
    } else
      TempAddr = CreateTempAlloca();

    EmitAtomicLoadLibcall(TempAddr.emitRawPointer(CGF), AO, IsVolatile);

    // Okay, turn that back into the original value or whole atomic (for
    // non-simple lvalues) type.
    return convertAtomicTempToRValue(TempAddr, ResultSlot, Loc, AsValue);
  }

  // Okay, we're doing this natively.
  auto *Load = EmitAtomicLoadOp(AO, IsVolatile);

  // If we're ignoring an aggregate return, don't do anything.
  if (getEvaluationKind() == TEK_Aggregate && ResultSlot.isIgnored())
    return RValue::getAggregate(Address::invalid(), false);

  // Okay, turn that back into the original value or atomic (for non-simple
  // lvalues) type.
  return ConvertToValueOrAtomic(Load, ResultSlot, Loc, AsValue);
}
/// Emit a load from an l-value of atomic type.  Note that the r-value
/// we produce is an r-value of the atomic *value* type.
RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
                                       llvm::AtomicOrdering AO, bool IsVolatile,
                                       AggValueSlot resultSlot) {
  AtomicInfo Atomics(*this, src);
  return Atomics.EmitAtomicLoad(resultSlot, loc, /*AsValue=*/true, AO,
                                IsVolatile);
}
/// Copy an r-value into memory as part of storing to an atomic type.
/// This needs to create a bit-pattern suitable for atomic operations.
void AtomicInfo::emitCopyIntoMemory(RValue rvalue) const {
  assert(LVal.isSimple());

  // If we have an r-value, the rvalue should be of the atomic type,
  // which means that the caller is responsible for having zeroed
  // any padding.  Just do an aggregate copy of that type.
  if (rvalue.isAggregate()) {
    LValue Dest = CGF.MakeAddrLValue(getAtomicAddress(), getAtomicType());
    LValue Src = CGF.MakeAddrLValue(rvalue.getAggregateAddress(),
                                    getAtomicType());
    bool IsVolatile = rvalue.isVolatileQualified() ||
                      LVal.isVolatileQualified();
    CGF.EmitAggregateCopy(Dest, Src, getAtomicType(),
                          AggValueSlot::DoesNotOverlap, IsVolatile);
    return;
  }

  // Okay, otherwise we're copying stuff.

  // Zero out the buffer if necessary.
  emitMemSetZeroIfNecessary();

  // Drill past the padding if present.
  LValue TempLVal = projectValue();

  // Okay, store the rvalue in.
  if (rvalue.isScalar()) {
    CGF.EmitStoreOfScalar(rvalue.getScalarVal(), TempLVal, /*isInit=*/true);
  } else {
    CGF.EmitStoreOfComplex(rvalue.getComplexVal(), TempLVal, /*isInit=*/true);
  }
}
/// Materialize an r-value into memory for the purposes of storing it
/// to an atomic type.
Address AtomicInfo::materializeRValue(RValue rvalue) const {
  // Aggregate r-values are already in memory, and EmitAtomicStore
  // requires them to be values of the atomic type.
  if (rvalue.isAggregate())
    return rvalue.getAggregateAddress();

  // Otherwise, make a temporary and materialize into it.
  LValue TempLV = CGF.MakeAddrLValue(CreateTempAlloca(), getAtomicType());
  AtomicInfo Atomics(CGF, TempLV);
  Atomics.emitCopyIntoMemory(rvalue);
  return TempLV.getAddress();
}
llvm::Value *AtomicInfo::getScalarRValValueOrNull(RValue RVal) const {
  if (RVal.isScalar() && (!hasPadding() || !LVal.isSimple()))
    return RVal.getScalarVal();
  return nullptr;
}

llvm::Value *AtomicInfo::convertRValueToInt(RValue RVal, bool CmpXchg) const {
  // If we've got a scalar value of the right size, try to avoid going
  // through memory.
  if (llvm::Value *Value = getScalarRValValueOrNull(RVal)) {
    if (!shouldCastToInt(Value->getType(), CmpXchg))
      return CGF.EmitToMemory(Value, ValueTy);
    else {
      llvm::IntegerType *InputIntTy = llvm::IntegerType::get(
          CGF.getLLVMContext(),
          LVal.isSimple() ? getValueSizeInBits() : getAtomicSizeInBits());
      if (llvm::BitCastInst::isBitCastable(Value->getType(), InputIntTy))
        return CGF.Builder.CreateBitCast(Value, InputIntTy);
    }
  }

  // Otherwise, we need to go through memory.
  // Put the r-value in memory.
  Address Addr = materializeRValue(RVal);

  // Cast the temporary to the atomic int type and pull a value out.
  Addr = castToAtomicIntPointer(Addr);
  return CGF.Builder.CreateLoad(Addr);
}
std::pair<llvm::Value *, llvm::Value *> AtomicInfo::EmitAtomicCompareExchangeOp(
    llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
    llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak) {
  // Do the atomic store.
  Address Addr = getAtomicAddressAsAtomicIntPointer();
  auto *Inst = CGF.Builder.CreateAtomicCmpXchg(Addr, ExpectedVal, DesiredVal,
                                               Success, Failure);
  // Other decoration.
  Inst->setVolatile(LVal.isVolatileQualified());
  Inst->setWeak(IsWeak);

  // Okay, turn that back into the original value type.
  auto *PreviousVal = CGF.Builder.CreateExtractValue(Inst, /*Idxs=*/0);
  auto *SuccessFailureVal = CGF.Builder.CreateExtractValue(Inst, /*Idxs=*/1);
  return std::make_pair(PreviousVal, SuccessFailureVal);
}
llvm::Value *
AtomicInfo::EmitAtomicCompareExchangeLibcall(llvm::Value *ExpectedAddr,
                                             llvm::Value *DesiredAddr,
                                             llvm::AtomicOrdering Success,
                                             llvm::AtomicOrdering Failure) {
  // bool __atomic_compare_exchange(size_t size, void *obj, void *expected,
  //                                void *desired, int success, int failure);
  CallArgList Args;
  Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType());
  Args.add(RValue::get(getAtomicPointer()), CGF.getContext().VoidPtrTy);
  Args.add(RValue::get(ExpectedAddr), CGF.getContext().VoidPtrTy);
  Args.add(RValue::get(DesiredAddr), CGF.getContext().VoidPtrTy);
  Args.add(RValue::get(llvm::ConstantInt::get(
               CGF.IntTy, (int)llvm::toCABI(Success))),
           CGF.getContext().IntTy);
  Args.add(RValue::get(llvm::ConstantInt::get(
               CGF.IntTy, (int)llvm::toCABI(Failure))),
           CGF.getContext().IntTy);
  auto SuccessFailureRVal = emitAtomicLibcall(CGF, "__atomic_compare_exchange",
                                              CGF.getContext().BoolTy, Args);

  return SuccessFailureRVal.getScalarVal();
}
std::pair<RValue, llvm::Value *> AtomicInfo::EmitAtomicCompareExchange(
    RValue Expected, RValue Desired, llvm::AtomicOrdering Success,
    llvm::AtomicOrdering Failure, bool IsWeak) {
  // Check whether we should use a library call.
  if (shouldUseLibcall()) {
    // Produce a source address.
    Address ExpectedAddr = materializeRValue(Expected);
    llvm::Value *ExpectedPtr = ExpectedAddr.emitRawPointer(CGF);
    llvm::Value *DesiredPtr = materializeRValue(Desired).emitRawPointer(CGF);
    auto *Res = EmitAtomicCompareExchangeLibcall(ExpectedPtr, DesiredPtr,
                                                 Success, Failure);
    return std::make_pair(
        convertAtomicTempToRValue(ExpectedAddr, AggValueSlot::ignored(),
                                  SourceLocation(), /*AsValue=*/false),
        Res);
  }

  // If we've got a scalar value of the right size, try to avoid going
  // through memory.
  auto *ExpectedVal = convertRValueToInt(Expected, /*CmpXchg=*/true);
  auto *DesiredVal = convertRValueToInt(Desired, /*CmpXchg=*/true);
  auto Res = EmitAtomicCompareExchangeOp(ExpectedVal, DesiredVal, Success,
                                         Failure, IsWeak);
  return std::make_pair(
      ConvertToValueOrAtomic(Res.first, AggValueSlot::ignored(),
                             SourceLocation(), /*AsValue=*/false,
                             /*CmpXchg=*/true),
      Res.second);
}
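
// Usage note (illustrative): callers receive the loaded previous value plus
// an i1 success flag; for atomic_compare_exchange_{weak,strong} the flag is
// the boolean result and the previous value is written back to 'expected'
// on failure.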
static void
EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics, RValue OldRVal,
                      const llvm::function_ref<RValue(RValue)> &UpdateOp,
                      Address DesiredAddr) {
  RValue UpRVal;
  LValue AtomicLVal = Atomics.getAtomicLValue();
  LValue DesiredLVal;
  if (AtomicLVal.isSimple()) {
    UpRVal = OldRVal;
    DesiredLVal = CGF.MakeAddrLValue(DesiredAddr, AtomicLVal.getType());
  } else {
    // Build new lvalue for temp address.
    Address Ptr = Atomics.materializeRValue(OldRVal);
    LValue UpdateLVal;
    if (AtomicLVal.isBitField()) {
      UpdateLVal =
          LValue::MakeBitfield(Ptr, AtomicLVal.getBitFieldInfo(),
                               AtomicLVal.getType(),
                               AtomicLVal.getBaseInfo(),
                               AtomicLVal.getTBAAInfo());
      DesiredLVal =
          LValue::MakeBitfield(DesiredAddr, AtomicLVal.getBitFieldInfo(),
                               AtomicLVal.getType(), AtomicLVal.getBaseInfo(),
                               AtomicLVal.getTBAAInfo());
    } else if (AtomicLVal.isVectorElt()) {
      UpdateLVal = LValue::MakeVectorElt(Ptr, AtomicLVal.getVectorIdx(),
                                         AtomicLVal.getType(),
                                         AtomicLVal.getBaseInfo(),
                                         AtomicLVal.getTBAAInfo());
      DesiredLVal = LValue::MakeVectorElt(
          DesiredAddr, AtomicLVal.getVectorIdx(), AtomicLVal.getType(),
          AtomicLVal.getBaseInfo(), AtomicLVal.getTBAAInfo());
    } else {
      assert(AtomicLVal.isExtVectorElt());
      UpdateLVal = LValue::MakeExtVectorElt(Ptr, AtomicLVal.getExtVectorElts(),
                                            AtomicLVal.getType(),
                                            AtomicLVal.getBaseInfo(),
                                            AtomicLVal.getTBAAInfo());
      DesiredLVal = LValue::MakeExtVectorElt(
          DesiredAddr, AtomicLVal.getExtVectorElts(), AtomicLVal.getType(),
          AtomicLVal.getBaseInfo(), AtomicLVal.getTBAAInfo());
    }
    UpRVal = CGF.EmitLoadOfLValue(UpdateLVal, SourceLocation());
  }
  // Store new value in the corresponding memory area.
  RValue NewRVal = UpdateOp(UpRVal);
  if (NewRVal.isScalar()) {
    CGF.EmitStoreThroughLValue(NewRVal, DesiredLVal);
  } else {
    assert(NewRVal.isComplex());
    CGF.EmitStoreOfComplex(NewRVal.getComplexVal(), DesiredLVal,
                           /*isInit=*/false);
  }
}
void AtomicInfo::EmitAtomicUpdateLibcall(
    llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
    bool IsVolatile) {
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);

  Address ExpectedAddr = CreateTempAlloca();

  EmitAtomicLoadLibcall(ExpectedAddr.emitRawPointer(CGF), AO, IsVolatile);
  auto *ContBB = CGF.createBasicBlock("atomic_cont");
  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
  CGF.EmitBlock(ContBB);
  Address DesiredAddr = CreateTempAlloca();
  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
      requiresMemSetZero(getAtomicAddress().getElementType())) {
    auto *OldVal = CGF.Builder.CreateLoad(ExpectedAddr);
    CGF.Builder.CreateStore(OldVal, DesiredAddr);
  }
  auto OldRVal = convertAtomicTempToRValue(ExpectedAddr,
                                           AggValueSlot::ignored(),
                                           SourceLocation(), /*AsValue=*/false);
  EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, DesiredAddr);
  llvm::Value *ExpectedPtr = ExpectedAddr.emitRawPointer(CGF);
  llvm::Value *DesiredPtr = DesiredAddr.emitRawPointer(CGF);
  auto *Res =
      EmitAtomicCompareExchangeLibcall(ExpectedPtr, DesiredPtr, AO, Failure);
  CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}
void AtomicInfo::EmitAtomicUpdateOp(
    llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
    bool IsVolatile) {
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);

  // Do the atomic load.
  auto *OldVal = EmitAtomicLoadOp(Failure, IsVolatile, /*CmpXchg=*/true);
  // For non-simple lvalues perform compare-and-swap procedure.
  auto *ContBB = CGF.createBasicBlock("atomic_cont");
  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
  auto *CurBB = CGF.Builder.GetInsertBlock();
  CGF.EmitBlock(ContBB);
  llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
                                             /*NumReservedValues=*/2);
  PHI->addIncoming(OldVal, CurBB);
  Address NewAtomicAddr = CreateTempAlloca();
  Address NewAtomicIntAddr =
      shouldCastToInt(NewAtomicAddr.getElementType(), /*CmpXchg=*/true)
          ? castToAtomicIntPointer(NewAtomicAddr)
          : NewAtomicAddr;

  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
      requiresMemSetZero(getAtomicAddress().getElementType())) {
    CGF.Builder.CreateStore(PHI, NewAtomicIntAddr);
  }
  auto OldRVal = ConvertToValueOrAtomic(PHI, AggValueSlot::ignored(),
                                        SourceLocation(), /*AsValue=*/false,
                                        /*CmpXchg=*/true);
  EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, NewAtomicAddr);
  auto *DesiredVal = CGF.Builder.CreateLoad(NewAtomicIntAddr);
  // Try to write new value using cmpxchg operation.
  auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
  PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
  CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}
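
// The two EmitAtomicUpdate* paths above implement a classic CAS loop; in
// pseudo-IR (illustrative):
//   %old = load atomic %obj
// cont:
//   %cur = phi [ %old, %entry ], [ %prev, %cont ]
//   ... UpdateOp computes %desired from %cur ...
//   %pair = cmpxchg ptr %obj, %cur, %desired
//   br i1 %success, label %exit, label %cont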
static void EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics,
                                  RValue UpdateRVal, Address DesiredAddr) {
  LValue AtomicLVal = Atomics.getAtomicLValue();
  LValue DesiredLVal;
  // Build new lvalue for temp address.
  if (AtomicLVal.isBitField()) {
    DesiredLVal =
        LValue::MakeBitfield(DesiredAddr, AtomicLVal.getBitFieldInfo(),
                             AtomicLVal.getType(), AtomicLVal.getBaseInfo(),
                             AtomicLVal.getTBAAInfo());
  } else if (AtomicLVal.isVectorElt()) {
    DesiredLVal =
        LValue::MakeVectorElt(DesiredAddr, AtomicLVal.getVectorIdx(),
                              AtomicLVal.getType(), AtomicLVal.getBaseInfo(),
                              AtomicLVal.getTBAAInfo());
  } else {
    assert(AtomicLVal.isExtVectorElt());
    DesiredLVal = LValue::MakeExtVectorElt(
        DesiredAddr, AtomicLVal.getExtVectorElts(), AtomicLVal.getType(),
        AtomicLVal.getBaseInfo(), AtomicLVal.getTBAAInfo());
  }
  // Store new value in the corresponding memory area.
  assert(UpdateRVal.isScalar());
  CGF.EmitStoreThroughLValue(UpdateRVal, DesiredLVal);
}
void AtomicInfo::EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
                                         RValue UpdateRVal, bool IsVolatile) {
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);

  Address ExpectedAddr = CreateTempAlloca();

  EmitAtomicLoadLibcall(ExpectedAddr.emitRawPointer(CGF), AO, IsVolatile);
  auto *ContBB = CGF.createBasicBlock("atomic_cont");
  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
  CGF.EmitBlock(ContBB);
  Address DesiredAddr = CreateTempAlloca();
  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
      requiresMemSetZero(getAtomicAddress().getElementType())) {
    auto *OldVal = CGF.Builder.CreateLoad(ExpectedAddr);
    CGF.Builder.CreateStore(OldVal, DesiredAddr);
  }
  EmitAtomicUpdateValue(CGF, *this, UpdateRVal, DesiredAddr);
  llvm::Value *ExpectedPtr = ExpectedAddr.emitRawPointer(CGF);
  llvm::Value *DesiredPtr = DesiredAddr.emitRawPointer(CGF);
  auto *Res =
      EmitAtomicCompareExchangeLibcall(ExpectedPtr, DesiredPtr, AO, Failure);
  CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}
void AtomicInfo::EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
                                    bool IsVolatile) {
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);

  // Do the atomic load.
  auto *OldVal = EmitAtomicLoadOp(Failure, IsVolatile, /*CmpXchg=*/true);
  // For non-simple lvalues perform compare-and-swap procedure.
  auto *ContBB = CGF.createBasicBlock("atomic_cont");
  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
  auto *CurBB = CGF.Builder.GetInsertBlock();
  CGF.EmitBlock(ContBB);
  llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
                                             /*NumReservedValues=*/2);
  PHI->addIncoming(OldVal, CurBB);
  Address NewAtomicAddr = CreateTempAlloca();
  Address NewAtomicIntAddr = castToAtomicIntPointer(NewAtomicAddr);
  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
      requiresMemSetZero(getAtomicAddress().getElementType())) {
    CGF.Builder.CreateStore(PHI, NewAtomicIntAddr);
  }
  EmitAtomicUpdateValue(CGF, *this, UpdateRVal, NewAtomicAddr);
  auto *DesiredVal = CGF.Builder.CreateLoad(NewAtomicIntAddr);
  // Try to write new value using cmpxchg operation.
  auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
  PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
  CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}
void AtomicInfo::EmitAtomicUpdate(
    llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
    bool IsVolatile) {
  if (shouldUseLibcall()) {
    EmitAtomicUpdateLibcall(AO, UpdateOp, IsVolatile);
  } else {
    EmitAtomicUpdateOp(AO, UpdateOp, IsVolatile);
  }
}

void AtomicInfo::EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
                                  bool IsVolatile) {
  if (shouldUseLibcall()) {
    EmitAtomicUpdateLibcall(AO, UpdateRVal, IsVolatile);
  } else {
    EmitAtomicUpdateOp(AO, UpdateRVal, IsVolatile);
  }
}
void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue lvalue,
                                      bool isInit) {
  bool IsVolatile = lvalue.isVolatileQualified();
  llvm::AtomicOrdering AO;
  if (lvalue.getType()->isAtomicType()) {
    AO = llvm::AtomicOrdering::SequentiallyConsistent;
  } else {
    AO = llvm::AtomicOrdering::Release;
    IsVolatile = true;
  }
  return EmitAtomicStore(rvalue, lvalue, AO, IsVolatile, isInit);
}
/// Emit a store to an l-value of atomic type.
///
/// Note that the r-value is expected to be an r-value *of the atomic type*;
/// this means that for aggregate r-values, it should include storage for any
/// padding that was necessary.
void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest,
                                      llvm::AtomicOrdering AO, bool IsVolatile,
                                      bool isInit) {
  // If this is an aggregate r-value, it should agree in type except
  // maybe for address-space qualification.
  assert(!rvalue.isAggregate() ||
         rvalue.getAggregateAddress().getElementType() ==
             dest.getAddress().getElementType());

  AtomicInfo atomics(*this, dest);
  LValue LVal = atomics.getAtomicLValue();

  if (LVal.isSimple()) {
    // If this is an initialization, just put the value there normally.
    if (isInit) {
      atomics.emitCopyIntoMemory(rvalue);
      return;
    }

    // Check whether we should use a library call.
    if (atomics.shouldUseLibcall()) {
      // Produce a source address.
      Address srcAddr = atomics.materializeRValue(rvalue);

      // void __atomic_store(size_t size, void *mem, void *val, int order)
      CallArgList args;
      args.add(RValue::get(atomics.getAtomicSizeValue()),
               getContext().getSizeType());
      args.add(RValue::get(atomics.getAtomicPointer()),
               getContext().VoidPtrTy);
      args.add(RValue::get(srcAddr.emitRawPointer(*this)),
               getContext().VoidPtrTy);
      args.add(
          RValue::get(llvm::ConstantInt::get(IntTy, (int)llvm::toCABI(AO))),
          getContext().IntTy);
      emitAtomicLibcall(*this, "__atomic_store", getContext().VoidTy, args);
      return;
    }

    // Okay, we're doing this natively.
    llvm::Value *ValToStore = atomics.convertRValueToInt(rvalue);

    // Do the atomic store.
    Address Addr = atomics.getAtomicAddress();
    if (llvm::Value *Value = atomics.getScalarRValValueOrNull(rvalue))
      if (shouldCastToInt(Value->getType(), /*CmpXchg=*/false)) {
        Addr = atomics.castToAtomicIntPointer(Addr);
        ValToStore = Builder.CreateIntCast(ValToStore, Addr.getElementType(),
                                           /*isSigned=*/false);
      }
    llvm::StoreInst *store = Builder.CreateStore(ValToStore, Addr);

    // A store instruction may not carry an acquire ordering; degrade it.
    if (AO == llvm::AtomicOrdering::Acquire)
      AO = llvm::AtomicOrdering::Monotonic;
    else if (AO == llvm::AtomicOrdering::AcquireRelease)
      AO = llvm::AtomicOrdering::Release;
    // Initializations don't need to be atomic.
    if (!isInit)
      store->setAtomic(AO);

    // Other decoration.
    if (IsVolatile)
      store->setVolatile(true);
    CGM.DecorateInstructionWithTBAA(store, dest.getTBAAInfo());
    return;
  }

  // Emit simple atomic update operation.
  atomics.EmitAtomicUpdate(AO, rvalue, IsVolatile);
}
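
// Note (illustrative): LLVM store instructions cannot carry acquire
// orderings, hence the downgrade above; e.g.
//   __atomic_store_n(p, v, __ATOMIC_ACQ_REL)
// is emitted as a release store.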
/// Emit a compare-and-exchange op for atomic type.
std::pair<RValue, llvm::Value *> CodeGenFunction::EmitAtomicCompareExchange(
    LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc,
    llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak,
    AggValueSlot Slot) {
  // If this is an aggregate r-value, it should agree in type except
  // maybe for address-space qualification.
  assert(!Expected.isAggregate() ||
         Expected.getAggregateAddress().getElementType() ==
             Obj.getAddress().getElementType());
  assert(!Desired.isAggregate() ||
         Desired.getAggregateAddress().getElementType() ==
             Obj.getAddress().getElementType());
  AtomicInfo Atomics(*this, Obj);

  return Atomics.EmitAtomicCompareExchange(Expected, Desired, Success, Failure,
                                           IsWeak);
}
llvm::AtomicRMWInst *
CodeGenFunction::emitAtomicRMWInst(llvm::AtomicRMWInst::BinOp Op, Address Addr,
                                   llvm::Value *Val, llvm::AtomicOrdering Order,
                                   llvm::SyncScope::ID SSID,
                                   const AtomicExpr *AE) {
  llvm::AtomicRMWInst *RMW =
      Builder.CreateAtomicRMW(Op, Addr, Val, Order, SSID);
  getTargetHooks().setTargetAtomicMetadata(*this, *RMW, AE);
  return RMW;
}
void CodeGenFunction::EmitAtomicUpdate(
    LValue LVal, llvm::AtomicOrdering AO,
    const llvm::function_ref<RValue(RValue)> &UpdateOp, bool IsVolatile) {
  AtomicInfo Atomics(*this, LVal);
  Atomics.EmitAtomicUpdate(AO, UpdateOp, IsVolatile);
}
void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) {
  AtomicInfo atomics(*this, dest);

  switch (atomics.getEvaluationKind()) {
  case TEK_Scalar: {
    llvm::Value *value = EmitScalarExpr(init);
    atomics.emitCopyIntoMemory(RValue::get(value));
    return;
  }

  case TEK_Complex: {
    ComplexPairTy value = EmitComplexExpr(init);
    atomics.emitCopyIntoMemory(RValue::getComplex(value));
    return;
  }

  case TEK_Aggregate: {
    // Fix up the destination if the initializer isn't an expression
    // of atomic type.
    bool Zeroed = false;
    if (!init->getType()->isAtomicType()) {
      Zeroed = atomics.emitMemSetZeroIfNecessary();
      dest = atomics.projectValue();
    }

    // Evaluate the expression directly into the destination.
    AggValueSlot slot = AggValueSlot::forLValue(
        dest, AggValueSlot::IsNotDestructed,
        AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased,
        AggValueSlot::DoesNotOverlap,
        Zeroed ? AggValueSlot::IsZeroed : AggValueSlot::IsNotZeroed);

    EmitAggExpr(init, slot);
    return;
  }
  }
  llvm_unreachable("bad evaluation kind");
}