30#include "llvm/ADT/ArrayRef.h"
31#include "llvm/ADT/SmallVector.h"
32#include "llvm/ADT/StringExtras.h"
33#include "llvm/Bitcode/BitcodeReader.h"
34#include "llvm/IR/Constants.h"
35#include "llvm/IR/DerivedTypes.h"
36#include "llvm/IR/GlobalValue.h"
37#include "llvm/IR/InstrTypes.h"
38#include "llvm/IR/Value.h"
39#include "llvm/Support/AtomicOrdering.h"
40#include "llvm/Support/raw_ostream.h"
48using namespace llvm::omp;
 // NOTE(review): this chunk is an elided extraction (interior source lines
 // are missing); the comments below describe only what the visible tokens
 // show and hedge everything else.
 // Kinds of OpenMP code-generation regions (fragment: only one enumerator
 // of the kind enum is visible here).
 55 enum CGOpenMPRegionKind {
 58 ParallelOutlinedRegion,
 // Ctor variant that wraps a CapturedStmt: forwards CS plus CR_OpenMP to
 // the CGCapturedStmtInfo base and records the region kind, the codegen
 // callback, the directive kind and the cancellation flag.
 68 CGOpenMPRegionInfo(
const CapturedStmt &CS,
 69 const CGOpenMPRegionKind RegionKind,
 72 : CGCapturedStmtInfo(CS,
CR_OpenMP), RegionKind(RegionKind),
 73 CodeGen(CodeGen), Kind(Kind), HasCancel(HasCancel) {}
 // Ctor variant without a CapturedStmt (base gets only CR_OpenMP) —
 // presumably used for inlined regions; confirm against derived classes.
 75 CGOpenMPRegionInfo(
const CGOpenMPRegionKind RegionKind,
 78 : CGCapturedStmtInfo(
CR_OpenMP), RegionKind(RegionKind), CodeGen(CodeGen),
 79 Kind(Kind), HasCancel(HasCancel) {}
 // Variable holding the OpenMP thread id for this region; pure virtual —
 // each concrete region decides whether such a variable exists.
 83 virtual const VarDecl *getThreadIDVariable()
const = 0;
 // Emits the captured statement body for this region.
 86 void EmitBody(CodeGenFunction &CGF,
const Stmt *S)
override;
 // LValue form of the thread-id variable (overridable by derived regions).
 90 virtual LValue getThreadIDVariableLValue(CodeGenFunction &CGF);
 // Default untied-task switch emission is a no-op; task regions override.
 92 virtual void emitUntiedSwitch(CodeGenFunction & ) {}
 94 CGOpenMPRegionKind getRegionKind()
const {
return RegionKind; }
 98 bool hasCancel()
const {
return HasCancel; }
 // LLVM-style RTTI hook (body elided in this extraction).
 100 static bool classof(
const CGCapturedStmtInfo *Info) {
 104 ~CGOpenMPRegionInfo()
override =
default;
 // Stored region state (fragment: Kind/HasCancel member declarations are
 // elided here but are referenced by the ctors above).
 107 CGOpenMPRegionKind RegionKind;
 108 RegionCodeGenTy CodeGen;
// Region info for a parallel-outlined OpenMP region: carries the thread-id
// variable and the name of the emitted helper function. (Elided fragment —
// interior lines missing.)
114class CGOpenMPOutlinedRegionInfo final :
public CGOpenMPRegionInfo {
 // Requires a non-null ThreadIDVar (asserted below); tags the base with
 // ParallelOutlinedRegion.
 116 CGOpenMPOutlinedRegionInfo(
const CapturedStmt &CS,
const VarDecl *ThreadIDVar,
 117 const RegionCodeGenTy &CodeGen,
 119 StringRef HelperName)
 120 : CGOpenMPRegionInfo(CS, ParallelOutlinedRegion, CodeGen,
Kind,
 122 ThreadIDVar(ThreadIDVar), HelperName(HelperName) {
 123 assert(ThreadIDVar !=
nullptr &&
"No ThreadID in OpenMP region.");
 128 const VarDecl *getThreadIDVariable()
const override {
return ThreadIDVar; }
 131 StringRef getHelperName()
const override {
return HelperName; }
 // RTTI: an Info is this class iff it is a CGOpenMPRegionInfo of kind
 // ParallelOutlinedRegion (comparison's left operand elided).
 133 static bool classof(
const CGCapturedStmtInfo *Info) {
 134 return CGOpenMPRegionInfo::classof(Info) &&
 136 ParallelOutlinedRegion;
 142 const VarDecl *ThreadIDVar;
 143 StringRef HelperName;
// Region info for an outlined OpenMP task region. (Elided fragment.)
147class CGOpenMPTaskOutlinedRegionInfo final :
public CGOpenMPRegionInfo {
 // Pre/post action for untied tasks: on Enter it appears to build a
 // SwitchInst over the task part id so an untied task can resume at the
 // right part — interior lines elided, TODO confirm against full source.
 149 class UntiedTaskActionTy final :
public PrePostActionTy {
 151 const VarDecl *PartIDVar;
 152 const RegionCodeGenTy UntiedCodeGen;
 153 llvm::SwitchInst *UntiedSwitch =
nullptr;
 // Note: Untied is stored as the negation of the Tied argument.
 156 UntiedTaskActionTy(
bool Tied,
const VarDecl *PartIDVar,
 157 const RegionCodeGenTy &UntiedCodeGen)
 158 : Untied(!Tied), PartIDVar(PartIDVar), UntiedCodeGen(UntiedCodeGen) {}
 159 void Enter(CodeGenFunction &CGF)
override {
 164 PartIDVar->
getType()->castAs<PointerType>());
 168 UntiedSwitch = CGF.
Builder.CreateSwitch(Res, DoneBB);
 172 UntiedSwitch->addCase(CGF.
Builder.getInt32(0),
 174 emitUntiedSwitch(CGF);
 // Adds a new resume case (numbered by the current case count) to the
 // untied-task switch.
 177 void emitUntiedSwitch(CodeGenFunction &CGF)
const {
 181 PartIDVar->
getType()->castAs<PointerType>());
 185 CodeGenFunction::JumpDest CurPoint =
 189 UntiedSwitch->addCase(CGF.
Builder.getInt32(UntiedSwitch->getNumCases()),
 // Number of task parts == number of switch cases emitted so far.
 195 unsigned getNumberOfParts()
const {
return UntiedSwitch->getNumCases(); }
 // Requires a non-null ThreadIDVar (asserted); tags the base with
 // TaskOutlinedRegion and keeps a reference to the untied action.
 197 CGOpenMPTaskOutlinedRegionInfo(
const CapturedStmt &CS,
 198 const VarDecl *ThreadIDVar,
 199 const RegionCodeGenTy &CodeGen,
 201 const UntiedTaskActionTy &Action)
 202 : CGOpenMPRegionInfo(CS, TaskOutlinedRegion, CodeGen,
Kind, HasCancel),
 203 ThreadIDVar(ThreadIDVar), Action(Action) {
 204 assert(ThreadIDVar !=
nullptr &&
"No ThreadID in OpenMP region.");
 209 const VarDecl *getThreadIDVariable()
const override {
return ThreadIDVar; }
 212 LValue getThreadIDVariableLValue(CodeGenFunction &CGF)
override;
 // Task helpers use a fixed name rather than a caller-provided one.
 215 StringRef getHelperName()
const override {
return ".omp_outlined."; }
 // Delegates untied-switch emission to the stored action.
 217 void emitUntiedSwitch(CodeGenFunction &CGF)
override {
 218 Action.emitUntiedSwitch(CGF);
 221 static bool classof(
const CGCapturedStmtInfo *Info) {
 222 return CGOpenMPRegionInfo::classof(Info) &&
 230 const VarDecl *ThreadIDVar;
 232 const UntiedTaskActionTy &Action;
// Region info for an OpenMP construct emitted inline into the enclosing
// function: every query delegates to the outer region info when one
// exists, otherwise hits llvm_unreachable. (Elided fragment.)
237class CGOpenMPInlinedRegionInfo :
public CGOpenMPRegionInfo {
 // OldCSI is the captured-stmt info being shadowed; OuterRegionInfo is
 // its downcast to CGOpenMPRegionInfo, or null if it is some other kind.
 239 CGOpenMPInlinedRegionInfo(CodeGenFunction::CGCapturedStmtInfo *OldCSI,
 240 const RegionCodeGenTy &CodeGen,
 242 : CGOpenMPRegionInfo(InlinedRegion, CodeGen,
Kind, HasCancel),
 244 OuterRegionInfo(dyn_cast_or_null<CGOpenMPRegionInfo>(OldCSI)) {}
 // Delegation pattern: forward to outer info; unreachable without one
 // (the guarding if-statements are elided in this extraction).
 247 llvm::Value *getContextValue()
const override {
 249 return OuterRegionInfo->getContextValue();
 250 llvm_unreachable(
"No context value for inlined OpenMP region");
 253 void setContextValue(llvm::Value *
V)
override {
 254 if (OuterRegionInfo) {
 255 OuterRegionInfo->setContextValue(
V);
 258 llvm_unreachable(
"No context value for inlined OpenMP region");
 // Variable-to-field lookup also defers to the outer region.
 262 const FieldDecl *lookup(
const VarDecl *VD)
const override {
 264 return OuterRegionInfo->lookup(VD);
 270 FieldDecl *getThisFieldDecl()
const override {
 272 return OuterRegionInfo->getThisFieldDecl();
 278 const VarDecl *getThreadIDVariable()
const override {
 280 return OuterRegionInfo->getThreadIDVariable();
 285 LValue getThreadIDVariableLValue(CodeGenFunction &CGF)
override {
 287 return OuterRegionInfo->getThreadIDVariableLValue(CGF);
 288 llvm_unreachable(
"No LValue for inlined OpenMP construct");
 // Helper name comes from the shadowed info (note: queries getOldCSI(),
 // not the downcast member, so any CSI kind can answer).
 292 StringRef getHelperName()
const override {
 293 if (
auto *OuterRegionInfo = getOldCSI())
 294 return OuterRegionInfo->getHelperName();
 295 llvm_unreachable(
"No helper name for inlined OpenMP construct");
 298 void emitUntiedSwitch(CodeGenFunction &CGF)
override {
 300 OuterRegionInfo->emitUntiedSwitch(CGF);
 // Accessor used by the RAII helper below to restore the shadowed info.
 303 CodeGenFunction::CGCapturedStmtInfo *getOldCSI()
const {
return OldCSI; }
 305 static bool classof(
const CGCapturedStmtInfo *Info) {
 306 return CGOpenMPRegionInfo::classof(Info) &&
 310 ~CGOpenMPInlinedRegionInfo()
override =
default;
 314 CodeGenFunction::CGCapturedStmtInfo *OldCSI;
 315 CGOpenMPRegionInfo *OuterRegionInfo;
// Region info for an OpenMP 'target' region: hard-codes OMPD_target and
// has no thread-id variable. (Elided fragment.)
323class CGOpenMPTargetRegionInfo final :
public CGOpenMPRegionInfo {
 325 CGOpenMPTargetRegionInfo(
const CapturedStmt &CS,
 326 const RegionCodeGenTy &CodeGen, StringRef HelperName)
 327 : CGOpenMPRegionInfo(CS, TargetRegion, CodeGen, OMPD_target,
 329 HelperName(HelperName) {}
 // Target regions expose no thread-id variable.
 333 const VarDecl *getThreadIDVariable()
const override {
return nullptr; }
 336 StringRef getHelperName()
const override {
return HelperName; }
 338 static bool classof(
const CGCapturedStmtInfo *Info) {
 339 return CGOpenMPRegionInfo::classof(Info) &&
 344 StringRef HelperName;
348 llvm_unreachable(
"No codegen for expressions");
// Region info used while emitting expressions inside an OpenMP region:
// privatizes the captured variables and forbids body/thread-id/helper-name
// queries via llvm_unreachable. (Elided fragment.)
352class CGOpenMPInnerExprInfo final :
public CGOpenMPInlinedRegionInfo {
 354 CGOpenMPInnerExprInfo(CodeGenFunction &CGF,
const CapturedStmt &CS)
 355 : CGOpenMPInlinedRegionInfo(CGF.CapturedStmtInfo, EmptyCodeGen,
 // Loop over captures (header elided): skip non-variable captures and
 // locals/params, then register a private copy for each remaining var.
 363 if (!C.capturesVariable() && !C.capturesVariableByCopy())
 366 const VarDecl *VD = C.getCapturedVar();
 367 if (VD->isLocalVarDeclOrParm())
 370 DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(VD),
 372 VD->getType().getNonReferenceType(), VK_LValue,
 374 PrivScope.addPrivate(VD, CGF.EmitLValue(&DRE).getAddress());
 // Activate the privatizations; result intentionally discarded.
 376 (
void)PrivScope.Privatize();
 380 const FieldDecl *lookup(
const VarDecl *VD)
const override {
 381 if (
const FieldDecl *FD = CGOpenMPInlinedRegionInfo::lookup(VD))
 // Expression-only context: no statement body may be emitted here.
 387 void EmitBody(CodeGenFunction &CGF,
const Stmt *S)
override {
 388 llvm_unreachable(
"No body for expressions");
 393 const VarDecl *getThreadIDVariable()
const override {
 394 llvm_unreachable(
"No thread id for expressions");
 398 StringRef getHelperName()
const override {
 399 llvm_unreachable(
"No helper name for expressions");
 // Never participates in CSI RTTI matching.
 402 static bool classof(
const CGCapturedStmtInfo *Info) {
return false; }
 406 CodeGenFunction::OMPPrivateScope PrivScope;
// RAII guard: installs a CGOpenMPInlinedRegionInfo as the current
// CapturedStmtInfo and stashes the function's lambda/block capture state,
// restoring everything in the destructor. (Elided fragment.)
410class InlinedOpenMPRegionRAII {
 411 CodeGenFunction &CGF;
 // Saved copies of CGF state swapped out for the region's lifetime.
 412 llvm::DenseMap<const ValueDecl *, FieldDecl *> LambdaCaptureFields;
 413 FieldDecl *LambdaThisCaptureField =
nullptr;
 414 const CodeGen::CGBlockInfo *BlockInfo =
nullptr;
 415 bool NoInheritance =
false;
 // NoInheritance defaults to true: lambda/block capture state is hidden
 // from the inlined region unless the caller opts in.
 422 InlinedOpenMPRegionRAII(CodeGenFunction &CGF,
const RegionCodeGenTy &CodeGen,
 424 bool NoInheritance =
true)
 425 : CGF(CGF), NoInheritance(NoInheritance) {
 // Owning raw new; the matching delete is in the destructor below.
 427 CGF.CapturedStmtInfo =
new CGOpenMPInlinedRegionInfo(
 428 CGF.CapturedStmtInfo, CodeGen, Kind, HasCancel);
 430 std::swap(CGF.LambdaCaptureFields, LambdaCaptureFields);
 431 LambdaThisCaptureField = CGF.LambdaThisCaptureField;
 432 CGF.LambdaThisCaptureField =
nullptr;
 433 BlockInfo = CGF.BlockInfo;
 434 CGF.BlockInfo =
nullptr;
 // Destructor undoes the constructor, in reverse: delete the installed
 // info, restore the shadowed CSI, and swap the capture state back.
 438 ~InlinedOpenMPRegionRAII() {
 442 delete CGF.CapturedStmtInfo;
 443 CGF.CapturedStmtInfo = OldCSI;
 445 std::swap(CGF.LambdaCaptureFields, LambdaCaptureFields);
 446 CGF.LambdaThisCaptureField = LambdaThisCaptureField;
 447 CGF.BlockInfo = BlockInfo;
// Flags for the 'flags' field of ident_t; values mirror the KMP_IDENT_*
// flags in the LLVM OpenMP runtime's kmp.h. (Fragment: per-enumerator
// comment lines and the bitmask-enum marker are elided.)
455enum OpenMPLocationFlags :
unsigned {
 // Use trampoline for internal microtask.
 457 OMP_IDENT_IMD = 0x01,
 // Use C-style ident structure.
 459 OMP_IDENT_KMPC = 0x02,
 // Atomic reduction option for kmpc_reduce.
 461 OMP_ATOMIC_REDUCE = 0x10,
 // Explicit 'barrier' directive.
 463 OMP_IDENT_BARRIER_EXPL = 0x20,
 // Implicit barrier in code.
 465 OMP_IDENT_BARRIER_IMPL = 0x40,
 // Implicit barrier in a 'for' directive (same bit as the generic
 // implicit barrier).
 467 OMP_IDENT_BARRIER_IMPL_FOR = 0x40,
 // Implicit barrier in a 'sections' directive.
 469 OMP_IDENT_BARRIER_IMPL_SECTIONS = 0xC0,
 // Implicit barrier in a 'single' directive.
 471 OMP_IDENT_BARRIER_IMPL_SINGLE = 0x140,
 // Call of __kmp_for_static_init for a static loop.
 473 OMP_IDENT_WORK_LOOP = 0x200,
 // Call of __kmp_for_static_init for sections.
 475 OMP_IDENT_WORK_SECTIONS = 0x400,
 // Call of __kmp_for_static_init for distribute.
 477 OMP_IDENT_WORK_DISTRIBUTE = 0x800,
// Field indices of the ident_t source-location struct passed to the
// OpenMP runtime. (Fragment: the flags/psource field enumerators and the
// closing brace are elided.)
507enum IdentFieldIndex {
 // Reserved fields of ident_t; the runtime may use them internally.
 509 IdentField_Reserved_1,
 513 IdentField_Reserved_2,
 515 IdentField_Reserved_3,
// Schedule kinds understood by __kmpc_for_static_init / dispatch; numeric
// values mirror the sched_type enumeration in the LLVM OpenMP runtime's
// kmp.h (ordered variants = unordered + 32). (Fragment: several
// enumerators, comments and the closing brace are elided.)
524enum OpenMPSchedType {
 527 OMP_sch_static_chunked = 33,
 529 OMP_sch_dynamic_chunked = 35,
 530 OMP_sch_guided_chunked = 36,
 531 OMP_sch_runtime = 37,
 // Static with chunk adjustment (e.g. simd) — balanced variant.
 534 OMP_sch_static_balanced_chunked = 45,
 // Ordered counterparts of the schedules above.
 537 OMP_ord_static_chunked = 65,
 539 OMP_ord_dynamic_chunked = 67,
 540 OMP_ord_guided_chunked = 68,
 541 OMP_ord_runtime = 69,
 543 OMP_sch_default = OMP_sch_static,
 // distribute-directive static schedules.
 545 OMP_dist_sch_static_chunked = 91,
 546 OMP_dist_sch_static = 92,
 // Modifier bits OR-ed onto a schedule value (monotonic/nonmonotonic).
 549 OMP_sch_modifier_monotonic = (1 << 29),
 551 OMP_sch_modifier_nonmonotonic = (1 << 30),
// EH-scope-stack cleanup wrapping a PrePostActionTy; its Emit (body
// elided here) presumably invokes the action's exit hook — confirm
// against the full source.
556class CleanupTy final :
public EHScopeStack::Cleanup {
 // Non-owning pointer to the action to run at cleanup time.
 557 PrePostActionTy *Action;
 560 explicit CleanupTy(PrePostActionTy *Action) : Action(Action) {}
 561 void Emit(CodeGenFunction &CGF, Flags )
override {
574 Callback(CodeGen, CGF, *PrePostAction);
577 Callback(CodeGen, CGF, Action);
585 if (
const auto *CE = dyn_cast<CallExpr>(ReductionOp))
586 if (
const auto *OVE = dyn_cast<OpaqueValueExpr>(CE->getCallee()))
587 if (
const auto *DRE =
588 dyn_cast<DeclRefExpr>(OVE->getSourceExpr()->IgnoreImpCasts()))
589 if (
const auto *DRD = dyn_cast<OMPDeclareReductionDecl>(DRE->getDecl()))
600 std::pair<llvm::Function *, llvm::Function *>
Reduction =
620 auto *GV =
new llvm::GlobalVariable(
622 llvm::GlobalValue::PrivateLinkage,
Init, Name);
663 llvm::Value *NumElements = CGF.
emitArrayLength(ArrayTy, ElementTy, DestAddr);
667 llvm::Value *SrcBegin =
nullptr;
669 SrcBegin = SrcAddr.emitRawPointer(CGF);
672 llvm::Value *DestEnd =
677 llvm::Value *IsEmpty =
678 CGF.
Builder.CreateICmpEQ(DestBegin, DestEnd,
"omp.arrayinit.isempty");
679 CGF.
Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);
682 llvm::BasicBlock *EntryBB = CGF.
Builder.GetInsertBlock();
687 llvm::PHINode *SrcElementPHI =
nullptr;
690 SrcElementPHI = CGF.
Builder.CreatePHI(SrcBegin->getType(), 2,
691 "omp.arraycpy.srcElementPast");
692 SrcElementPHI->addIncoming(SrcBegin, EntryBB);
694 Address(SrcElementPHI, SrcAddr.getElementType(),
695 SrcAddr.getAlignment().alignmentOfArrayElement(ElementSize));
697 llvm::PHINode *DestElementPHI = CGF.
Builder.CreatePHI(
698 DestBegin->getType(), 2,
"omp.arraycpy.destElementPast");
699 DestElementPHI->addIncoming(DestBegin, EntryBB);
707 if (EmitDeclareReductionInit) {
709 SrcElementCurrent, ElementTy);
717 llvm::Value *SrcElementNext = CGF.
Builder.CreateConstGEP1_32(
718 SrcAddr.getElementType(), SrcElementPHI, 1,
719 "omp.arraycpy.dest.element");
720 SrcElementPHI->addIncoming(SrcElementNext, CGF.
Builder.GetInsertBlock());
724 llvm::Value *DestElementNext = CGF.
Builder.CreateConstGEP1_32(
726 "omp.arraycpy.dest.element");
729 CGF.
Builder.CreateICmpEQ(DestElementNext, DestEnd,
"omp.arraycpy.done");
730 CGF.
Builder.CreateCondBr(Done, DoneBB, BodyBB);
731 DestElementPHI->addIncoming(DestElementNext, CGF.
Builder.GetInsertBlock());
743 if (
const auto *OASE = dyn_cast<ArraySectionExpr>(E))
748void ReductionCodeGen::emitAggregateInitialization(
750 const OMPDeclareReductionDecl *DRD) {
754 const auto *PrivateVD =
756 bool EmitDeclareReductionInit =
759 EmitDeclareReductionInit,
760 EmitDeclareReductionInit ? ClausesData[N].ReductionOp
761 : PrivateVD->getInit(),
769 ClausesData.reserve(Shareds.size());
770 SharedAddresses.reserve(Shareds.size());
771 Sizes.reserve(Shareds.size());
772 BaseDecls.reserve(Shareds.size());
773 const auto *IOrig = Origs.begin();
774 const auto *IPriv = Privates.begin();
775 const auto *IRed = ReductionOps.begin();
776 for (
const Expr *Ref : Shareds) {
777 ClausesData.emplace_back(Ref, *IOrig, *IPriv, *IRed);
778 std::advance(IOrig, 1);
779 std::advance(IPriv, 1);
780 std::advance(IRed, 1);
785 assert(SharedAddresses.size() == N && OrigAddresses.size() == N &&
786 "Number of generated lvalues must be exactly N.");
787 LValue First = emitSharedLValue(CGF, ClausesData[N].Shared);
788 LValue Second = emitSharedLValueUB(CGF, ClausesData[N].Shared);
789 SharedAddresses.emplace_back(
First, Second);
790 if (ClausesData[N].Shared == ClausesData[N].Ref) {
791 OrigAddresses.emplace_back(
First, Second);
793 LValue First = emitSharedLValue(CGF, ClausesData[N].Ref);
794 LValue Second = emitSharedLValueUB(CGF, ClausesData[N].Ref);
795 OrigAddresses.emplace_back(
First, Second);
804 CGF.
getTypeSize(OrigAddresses[N].first.getType().getNonReferenceType()),
809 llvm::Value *SizeInChars;
810 auto *ElemType = OrigAddresses[N].first.getAddress().getElementType();
811 auto *ElemSizeOf = llvm::ConstantExpr::getSizeOf(ElemType);
812 if (AsArraySection) {
813 Size = CGF.
Builder.CreatePtrDiff(ElemType,
814 OrigAddresses[N].second.getPointer(CGF),
815 OrigAddresses[N].first.getPointer(CGF));
816 Size = CGF.
Builder.CreateNUWAdd(
817 Size, llvm::ConstantInt::get(Size->getType(), 1));
818 SizeInChars = CGF.
Builder.CreateNUWMul(Size, ElemSizeOf);
821 CGF.
getTypeSize(OrigAddresses[N].first.getType().getNonReferenceType());
822 Size = CGF.
Builder.CreateExactUDiv(SizeInChars, ElemSizeOf);
824 Sizes.emplace_back(SizeInChars, Size);
837 assert(!Size && !Sizes[N].second &&
838 "Size should be nullptr for non-variably modified reduction "
853 assert(SharedAddresses.size() > N &&
"No variable was generated");
854 const auto *PrivateVD =
860 (void)DefaultInit(CGF);
861 emitAggregateInitialization(CGF, N, PrivateAddr, SharedAddr, DRD);
862 }
else if (DRD && (DRD->
getInitializer() || !PrivateVD->hasInit())) {
863 (void)DefaultInit(CGF);
864 QualType SharedType = SharedAddresses[N].first.getType();
866 PrivateAddr, SharedAddr, SharedType);
867 }
else if (!DefaultInit(CGF) && PrivateVD->hasInit() &&
870 PrivateVD->
getType().getQualifiers(),
888 CGF.
pushDestroy(DTorKind, PrivateAddr, PrivateType);
907 BaseLV.getType(), BaseLV.getBaseInfo(),
941 const VarDecl *OrigVD =
nullptr;
942 if (
const auto *OASE = dyn_cast<ArraySectionExpr>(Ref)) {
943 const Expr *
Base = OASE->getBase()->IgnoreParenImpCasts();
944 while (
const auto *TempOASE = dyn_cast<ArraySectionExpr>(
Base))
945 Base = TempOASE->getBase()->IgnoreParenImpCasts();
946 while (
const auto *TempASE = dyn_cast<ArraySubscriptExpr>(
Base))
947 Base = TempASE->getBase()->IgnoreParenImpCasts();
950 }
else if (
const auto *ASE = dyn_cast<ArraySubscriptExpr>(Ref)) {
951 const Expr *
Base = ASE->getBase()->IgnoreParenImpCasts();
952 while (
const auto *TempASE = dyn_cast<ArraySubscriptExpr>(
Base))
953 Base = TempASE->getBase()->IgnoreParenImpCasts();
964 BaseDecls.emplace_back(OrigVD);
967 loadToBegin(CGF, OrigVD->getType(), SharedAddresses[N].first.getType(),
969 Address SharedAddr = SharedAddresses[N].first.getAddress();
970 llvm::Value *Adjustment = CGF.
Builder.CreatePtrDiff(
973 llvm::Value *PrivatePointer =
979 SharedAddresses[N].first.getType(),
982 BaseDecls.emplace_back(
996 getThreadIDVariable()->
getType()->castAs<PointerType>());
1014LValue CGOpenMPTaskOutlinedRegionInfo::getThreadIDVariableLValue(
1017 getThreadIDVariable()->
getType(),
1035 llvm::OpenMPIRBuilderConfig Config(
1036 CGM.getLangOpts().OpenMPIsTargetDevice,
isGPU(),
1037 CGM.getLangOpts().OpenMPOffloadMandatory,
1040 Config.setDefaultTargetAS(
1046 CGM.getLangOpts().OpenMPIsTargetDevice
1047 ?
CGM.getLangOpts().OMPHostIRFile
1052 if (
CGM.getLangOpts().OpenMPForceUSM) {
1054 OMPBuilder.Config.setHasRequiresUnifiedSharedMemory(
true);
1062 if (!
Data.getValue().pointsToAliveValue())
1064 auto *GV = dyn_cast<llvm::GlobalVariable>(
Data.getValue());
1067 if (!GV->isDeclaration() || GV->getNumUses() > 0)
1069 GV->eraseFromParent();
1074 return OMPBuilder.createPlatformSpecificName(Parts);
1077static llvm::Function *
1079 const Expr *CombinerInitializer,
const VarDecl *In,
1080 const VarDecl *Out,
bool IsCombiner) {
1083 QualType PtrTy =
C.getPointerType(Ty).withRestrict();
1089 Args.push_back(&OmpOutParm);
1090 Args.push_back(&OmpInParm);
1095 {IsCombiner ?
"omp_combiner" :
"omp_initializer",
""});
1096 auto *Fn = llvm::Function::Create(FnTy, llvm::GlobalValue::InternalLinkage,
1100 Fn->removeFnAttr(llvm::Attribute::NoInline);
1101 Fn->removeFnAttr(llvm::Attribute::OptimizeNone);
1102 Fn->addFnAttr(llvm::Attribute::AlwaysInline);
1108 Out->getLocation());
1118 (void)
Scope.Privatize();
1119 if (!IsCombiner && Out->hasInit() &&
1122 Out->getType().getQualifiers(),
1125 if (CombinerInitializer)
1127 Scope.ForceCleanup();
1156std::pair<llvm::Function *, llvm::Function *>
// RAII guard: pushes a finalization callback (region kind + cancel flag)
// onto the OpenMPIRBuilder's stack in the constructor and pops it in the
// destructor. (Elided fragment: the guards that make this conditional on
// a non-null builder are missing from this view.)
1168struct PushAndPopStackRAII {
 1169 PushAndPopStackRAII(llvm::OpenMPIRBuilder *OMPBuilder,
CodeGenFunction &CGF,
 1170 bool HasCancel, llvm::omp::Directive Kind)
 1171 : OMPBuilder(OMPBuilder) {
 // Finalization callback run by the IR builder when the region ends;
 // asserts it is handed a non-terminated block, then (lines elided)
 // emits the finalization code under an insert-point guard.
 1187 auto FiniCB = [&CGF](llvm::OpenMPIRBuilder::InsertPointTy IP) {
 1188 assert(IP.getBlock()->end() == IP.getPoint() &&
 1189 "Clang CG should cause non-terminated block!");
 1190 CGBuilderTy::InsertPointGuard IPG(CGF.
Builder);
 1195 return llvm::Error::success();
 1200 llvm::OpenMPIRBuilder::FinalizationInfo FI({FiniCB, Kind, HasCancel});
 1201 OMPBuilder->pushFinalizationCB(std::move(FI));
 // Pop must mirror the push above exactly once.
 1203 ~PushAndPopStackRAII() {
 1205 OMPBuilder->popFinalizationCB();
 1207 llvm::OpenMPIRBuilder *OMPBuilder;
1216 "thread id variable must be of type kmp_int32 *");
1218 bool HasCancel =
false;
1219 if (
const auto *OPD = dyn_cast<OMPParallelDirective>(&D))
1220 HasCancel = OPD->hasCancel();
1221 else if (
const auto *OPD = dyn_cast<OMPTargetParallelDirective>(&D))
1222 HasCancel = OPD->hasCancel();
1223 else if (
const auto *OPSD = dyn_cast<OMPParallelSectionsDirective>(&D))
1224 HasCancel = OPSD->hasCancel();
1225 else if (
const auto *OPFD = dyn_cast<OMPParallelForDirective>(&D))
1226 HasCancel = OPFD->hasCancel();
1227 else if (
const auto *OPFD = dyn_cast<OMPTargetParallelForDirective>(&D))
1228 HasCancel = OPFD->hasCancel();
1229 else if (
const auto *OPFD = dyn_cast<OMPDistributeParallelForDirective>(&D))
1230 HasCancel = OPFD->hasCancel();
1231 else if (
const auto *OPFD =
1232 dyn_cast<OMPTeamsDistributeParallelForDirective>(&D))
1233 HasCancel = OPFD->hasCancel();
1234 else if (
const auto *OPFD =
1235 dyn_cast<OMPTargetTeamsDistributeParallelForDirective>(&D))
1236 HasCancel = OPFD->hasCancel();
1241 PushAndPopStackRAII PSR(&OMPBuilder, CGF, HasCancel, InnermostKind);
1242 CGOpenMPOutlinedRegionInfo CGInfo(*CS, ThreadIDVar,
CodeGen, InnermostKind,
1243 HasCancel, OutlinedHelperName);
1249 std::string Suffix =
getName({
"omp_outlined"});
1250 return (Name + Suffix).str();
1258 std::string Suffix =
getName({
"omp",
"reduction",
"reduction_func"});
1259 return (Name + Suffix).str();
1266 const CapturedStmt *CS = D.getCapturedStmt(OMPD_parallel);
1276 const CapturedStmt *CS = D.getCapturedStmt(OMPD_teams);
1286 bool Tied,
unsigned &NumberOfParts) {
1289 llvm::Value *ThreadID =
getThreadID(CGF, D.getBeginLoc());
1291 llvm::Value *TaskArgs[] = {
1293 CGF.EmitLoadOfPointerLValue(CGF.GetAddrOfLocalVar(TaskTVar),
1296 CGF.EmitRuntimeCall(
OMPBuilder.getOrCreateRuntimeFunction(
1297 CGM.getModule(), OMPRTL___kmpc_omp_task),
1300 CGOpenMPTaskOutlinedRegionInfo::UntiedTaskActionTy Action(Tied, PartIDVar,
1304 "thread id variable must be of type kmp_int32 for tasks");
1309 bool HasCancel =
false;
1310 if (
const auto *TD = dyn_cast<OMPTaskDirective>(&D))
1311 HasCancel = TD->hasCancel();
1312 else if (
const auto *TD = dyn_cast<OMPTaskLoopDirective>(&D))
1313 HasCancel = TD->hasCancel();
1314 else if (
const auto *TD = dyn_cast<OMPMasterTaskLoopDirective>(&D))
1315 HasCancel = TD->hasCancel();
1316 else if (
const auto *TD = dyn_cast<OMPParallelMasterTaskLoopDirective>(&D))
1317 HasCancel = TD->hasCancel();
1320 CGOpenMPTaskOutlinedRegionInfo CGInfo(*CS, ThreadIDVar,
CodeGen,
1321 InnermostKind, HasCancel, Action);
1323 llvm::Function *Res = CGF.GenerateCapturedStmtFunction(*CS);
1325 NumberOfParts = Action.getNumberOfParts();
1330 bool AtCurrentPoint) {
1332 assert(!Elem.ServiceInsertPt &&
"Insert point is set already.");
1334 llvm::Value *Undef = llvm::UndefValue::get(CGF.
Int32Ty);
1335 if (AtCurrentPoint) {
1336 Elem.ServiceInsertPt =
new llvm::BitCastInst(Undef, CGF.
Int32Ty,
"svcpt",
1337 CGF.
Builder.GetInsertBlock());
1339 Elem.ServiceInsertPt =
new llvm::BitCastInst(Undef, CGF.
Int32Ty,
"svcpt");
1340 Elem.ServiceInsertPt->insertAfter(CGF.
AllocaInsertPt->getIterator());
1346 if (Elem.ServiceInsertPt) {
1347 llvm::Instruction *Ptr = Elem.ServiceInsertPt;
1348 Elem.ServiceInsertPt =
nullptr;
1349 Ptr->eraseFromParent();
1356 llvm::raw_svector_ostream OS(Buffer);
1365 if (
const auto *FD = dyn_cast_or_null<FunctionDecl>(CGF.
CurFuncDecl))
1366 OS << FD->getQualifiedNameAsString();
1373 unsigned Flags,
bool EmitLoc) {
1374 uint32_t SrcLocStrSize;
1375 llvm::Constant *SrcLocStr;
1376 if ((!EmitLoc &&
CGM.getCodeGenOpts().getDebugInfo() ==
1377 llvm::codegenoptions::NoDebugInfo) ||
1379 SrcLocStr =
OMPBuilder.getOrCreateDefaultSrcLocStr(SrcLocStrSize);
1381 std::string FunctionName;
1383 if (
const auto *FD = dyn_cast_or_null<FunctionDecl>(CGF.
CurFuncDecl))
1384 FunctionName = FD->getQualifiedNameAsString();
1397 SrcLocStr, SrcLocStrSize, llvm::omp::IdentFlag(Flags), Reserved2Flags);
1402 assert(CGF.
CurFn &&
"No function in current CodeGenFunction.");
1405 if (
CGM.getLangOpts().OpenMPIRBuilder) {
1408 uint32_t SrcLocStrSize;
1409 auto *SrcLocStr =
OMPBuilder.getOrCreateSrcLocStr(
1412 OMPBuilder.getOrCreateIdent(SrcLocStr, SrcLocStrSize));
1415 llvm::Value *ThreadID =
nullptr;
1420 ThreadID = I->second.ThreadID;
1421 if (ThreadID !=
nullptr)
1425 if (
auto *OMPRegionInfo =
1427 if (OMPRegionInfo->getThreadIDVariable()) {
1429 LValue LVal = OMPRegionInfo->getThreadIDVariableLValue(CGF);
1433 CGF.
Builder.GetInsertBlock() == TopBlock ||
1438 CGF.
Builder.GetInsertBlock()) {
1442 if (CGF.
Builder.GetInsertBlock() == TopBlock)
1454 if (!Elem.ServiceInsertPt)
1456 CGBuilderTy::InsertPointGuard IPG(CGF.
Builder);
1457 CGF.
Builder.SetInsertPoint(Elem.ServiceInsertPt);
1461 OMPRTL___kmpc_global_thread_num),
1464 Elem.ThreadID =
Call;
1469 assert(CGF.
CurFn &&
"No function in current CodeGenFunction.");
1475 for (
const auto *D : I->second)
1480 for (
const auto *D : I->second)
1492static llvm::OffloadEntriesInfoManager::OMPTargetDeviceClauseKind
1494 std::optional<OMPDeclareTargetDeclAttr::DevTypeTy> DevTy =
1495 OMPDeclareTargetDeclAttr::getDeviceType(VD);
1497 return llvm::OffloadEntriesInfoManager::OMPTargetDeviceClauseNone;
1499 switch ((
int)*DevTy) {
1500 case OMPDeclareTargetDeclAttr::DT_Host:
1501 return llvm::OffloadEntriesInfoManager::OMPTargetDeviceClauseHost;
1503 case OMPDeclareTargetDeclAttr::DT_NoHost:
1504 return llvm::OffloadEntriesInfoManager::OMPTargetDeviceClauseNoHost;
1506 case OMPDeclareTargetDeclAttr::DT_Any:
1507 return llvm::OffloadEntriesInfoManager::OMPTargetDeviceClauseAny;
1510 return llvm::OffloadEntriesInfoManager::OMPTargetDeviceClauseNone;
1515static llvm::OffloadEntriesInfoManager::OMPTargetGlobalVarEntryKind
1517 std::optional<OMPDeclareTargetDeclAttr::MapTypeTy> MapType =
1518 OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
1520 return llvm::OffloadEntriesInfoManager::OMPTargetGlobalVarEntryNone;
1521 switch ((
int)*MapType) {
1522 case OMPDeclareTargetDeclAttr::MapTypeTy::MT_To:
1523 return llvm::OffloadEntriesInfoManager::OMPTargetGlobalVarEntryTo;
1525 case OMPDeclareTargetDeclAttr::MapTypeTy::MT_Enter:
1526 return llvm::OffloadEntriesInfoManager::OMPTargetGlobalVarEntryEnter;
1528 case OMPDeclareTargetDeclAttr::MapTypeTy::MT_Link:
1529 return llvm::OffloadEntriesInfoManager::OMPTargetGlobalVarEntryLink;
1532 return llvm::OffloadEntriesInfoManager::OMPTargetGlobalVarEntryNone;
1541 auto FileInfoCallBack = [&]() {
1546 PLoc =
SM.getPresumedLoc(BeginLoc,
false);
1551 return OMPBuilder.getTargetEntryUniqueInfo(FileInfoCallBack,
1556 auto AddrOfGlobal = [&VD,
this]() {
return CGM.GetAddrOfGlobal(VD); };
1558 auto LinkageForVariable = [&VD,
this]() {
1559 return CGM.getLLVMLinkageVarDefinition(VD);
1562 std::vector<llvm::GlobalVariable *> GeneratedRefs;
1564 llvm::Type *LlvmPtrTy =
CGM.getTypes().ConvertTypeForMem(
1565 CGM.getContext().getPointerType(VD->
getType()));
1566 llvm::Constant *addr =
OMPBuilder.getAddrOfDeclareTargetVar(
1572 CGM.getMangledName(VD), GeneratedRefs,
CGM.getLangOpts().OpenMPSimd,
1573 CGM.getLangOpts().OMPTargetTriples, LlvmPtrTy, AddrOfGlobal,
1574 LinkageForVariable);
1583 assert(!
CGM.getLangOpts().OpenMPUseTLS ||
1584 !
CGM.getContext().getTargetInfo().isTLSSupported());
1586 std::string Suffix =
getName({
"cache",
""});
1587 return OMPBuilder.getOrCreateInternalVariable(
1588 CGM.Int8PtrPtrTy, Twine(
CGM.getMangledName(VD)).concat(Suffix).str());
1595 if (
CGM.getLangOpts().OpenMPUseTLS &&
1596 CGM.getContext().getTargetInfo().isTLSSupported())
1600 llvm::Value *Args[] = {
1603 CGM.getSize(
CGM.GetTargetTypeStoreSize(VarTy)),
1608 CGM.getModule(), OMPRTL___kmpc_threadprivate_cached),
1620 CGM.getModule(), OMPRTL___kmpc_global_thread_num),
1624 llvm::Value *Args[] = {
1627 Ctor, CopyCtor, Dtor};
1630 CGM.getModule(), OMPRTL___kmpc_threadprivate_register),
1637 if (
CGM.getLangOpts().OpenMPUseTLS &&
1638 CGM.getContext().getTargetInfo().isTLSSupported())
1645 llvm::Value *Ctor =
nullptr, *CopyCtor =
nullptr, *Dtor =
nullptr;
1647 if (
CGM.getLangOpts().CPlusPlus && PerformInit) {
1653 nullptr,
CGM.getContext().VoidPtrTy,
1655 Args.push_back(&Dst);
1657 const auto &FI =
CGM.getTypes().arrangeBuiltinFunctionDeclaration(
1658 CGM.getContext().VoidPtrTy, Args);
1659 llvm::FunctionType *FTy =
CGM.getTypes().GetFunctionType(FI);
1660 std::string Name =
getName({
"__kmpc_global_ctor_",
""});
1661 llvm::Function *Fn =
1662 CGM.CreateGlobalInitOrCleanUpFunction(FTy, Name, FI, Loc);
1685 nullptr,
CGM.getContext().VoidPtrTy,
1687 Args.push_back(&Dst);
1689 const auto &FI =
CGM.getTypes().arrangeBuiltinFunctionDeclaration(
1690 CGM.getContext().VoidTy, Args);
1691 llvm::FunctionType *FTy =
CGM.getTypes().GetFunctionType(FI);
1692 std::string Name =
getName({
"__kmpc_global_dtor_",
""});
1693 llvm::Function *Fn =
1694 CGM.CreateGlobalInitOrCleanUpFunction(FTy, Name, FI, Loc);
1717 CopyCtor = llvm::Constant::getNullValue(
CGM.UnqualPtrTy);
1718 if (Ctor ==
nullptr) {
1719 Ctor = llvm::Constant::getNullValue(
CGM.UnqualPtrTy);
1721 if (Dtor ==
nullptr) {
1722 Dtor = llvm::Constant::getNullValue(
CGM.UnqualPtrTy);
1725 auto *InitFunctionTy =
1726 llvm::FunctionType::get(
CGM.VoidTy,
false);
1727 std::string Name =
getName({
"__omp_threadprivate_init_",
""});
1728 llvm::Function *InitFunction =
CGM.CreateGlobalInitOrCleanUpFunction(
1729 InitFunctionTy, Name,
CGM.getTypes().arrangeNullaryFunction());
1733 CGM.getTypes().arrangeNullaryFunction(), ArgList,
1737 return InitFunction;
1745 llvm::GlobalValue *GV) {
1746 std::optional<OMPDeclareTargetDeclAttr *> ActiveAttr =
1747 OMPDeclareTargetDeclAttr::getActiveAttr(FD);
1750 if (!ActiveAttr || !(*ActiveAttr)->getIndirect())
1757 OMPBuilder.OffloadInfoManager.getTargetRegionEntryFnName(Name, EntryInfo);
1763 llvm::GlobalValue *
Addr = GV;
1764 if (
CGM.getLangOpts().OpenMPIsTargetDevice) {
1765 Addr =
new llvm::GlobalVariable(
1766 CGM.getModule(),
CGM.VoidPtrTy,
1767 true, llvm::GlobalValue::ExternalLinkage, GV, Name,
1768 nullptr, llvm::GlobalValue::NotThreadLocal,
1769 CGM.getModule().getDataLayout().getDefaultGlobalsAddressSpace());
1770 Addr->setVisibility(llvm::GlobalValue::ProtectedVisibility);
1773 OMPBuilder.OffloadInfoManager.registerDeviceGlobalVarEntryInfo(
1774 Name,
Addr,
CGM.GetTargetTypeStoreSize(
CGM.VoidPtrTy).getQuantity(),
1775 llvm::OffloadEntriesInfoManager::OMPTargetGlobalVarEntryIndirect,
1776 llvm::GlobalValue::WeakODRLinkage);
1782 std::string Suffix =
getName({
"artificial",
""});
1784 llvm::GlobalVariable *GAddr =
OMPBuilder.getOrCreateInternalVariable(
1785 VarLVType, Twine(Name).concat(Suffix).str());
1786 if (
CGM.getLangOpts().OpenMP &&
CGM.getLangOpts().OpenMPUseTLS &&
1787 CGM.getTarget().isTLSSupported()) {
1788 GAddr->setThreadLocal(
true);
1789 return Address(GAddr, GAddr->getValueType(),
1790 CGM.getContext().getTypeAlignInChars(VarType));
1792 std::string CacheSuffix =
getName({
"cache",
""});
1793 llvm::Value *Args[] = {
1801 Twine(Name).concat(Suffix).concat(CacheSuffix).str())};
1806 CGM.getModule(), OMPRTL___kmpc_threadprivate_cached),
1809 VarLVType,
CGM.getContext().getTypeAlignInChars(VarType));
1859 auto &M =
CGM.getModule();
1860 auto &&ThenGen = [&M, OutlinedFn, CapturedVars, RTLoc,
1863 llvm::Value *Args[] = {
1865 CGF.
Builder.getInt32(CapturedVars.size()),
1868 RealArgs.append(std::begin(Args), std::end(Args));
1869 RealArgs.append(CapturedVars.begin(), CapturedVars.end());
1871 llvm::FunctionCallee RTLFn =
1872 OMPBuilder.getOrCreateRuntimeFunction(M, OMPRTL___kmpc_fork_call);
1875 auto &&ElseGen = [&M, OutlinedFn, CapturedVars, RTLoc, Loc,
1881 llvm::Value *Args[] = {RTLoc, ThreadID};
1883 M, OMPRTL___kmpc_serialized_parallel),
1890 ".bound.zero.addr");
1895 OutlinedFnArgs.push_back(ZeroAddrBound.
getPointer());
1896 OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end());
1904 OutlinedFn->removeFnAttr(llvm::Attribute::AlwaysInline);
1905 OutlinedFn->addFnAttr(llvm::Attribute::NoInline);
1911 M, OMPRTL___kmpc_end_serialized_parallel),
1930 if (
auto *OMPRegionInfo =
1932 if (OMPRegionInfo->getThreadIDVariable())
1933 return OMPRegionInfo->getThreadIDVariableLValue(CGF).getAddress();
1942 return ThreadIDTemp;
1946 std::string Prefix = Twine(
"gomp_critical_user_", CriticalName).str();
1947 std::string Name =
getName({Prefix,
"var"});
1954 llvm::FunctionCallee EnterCallee;
1956 llvm::FunctionCallee ExitCallee;
1959 llvm::BasicBlock *ContBlock =
nullptr;
1962 CommonActionTy(llvm::FunctionCallee EnterCallee,
1964 llvm::FunctionCallee ExitCallee,
1966 : EnterCallee(EnterCallee), EnterArgs(EnterArgs), ExitCallee(ExitCallee),
1971 llvm::Value *CallBool = CGF.
Builder.CreateIsNotNull(EnterRes);
1975 CGF.
Builder.CreateCondBr(CallBool, ThenBlock, ContBlock);
1979 void Done(CodeGenFunction &CGF) {
1984 void Exit(CodeGenFunction &CGF)
override {
1991 StringRef CriticalName,
2005 EnterArgs.push_back(CGF.
Builder.CreateIntCast(
2008 CommonActionTy Action(
2011 Hint ? OMPRTL___kmpc_critical_with_hint : OMPRTL___kmpc_critical),
2014 OMPRTL___kmpc_end_critical),
2031 CommonActionTy Action(
OMPBuilder.getOrCreateRuntimeFunction(
2032 CGM.getModule(), OMPRTL___kmpc_master),
2035 CGM.getModule(), OMPRTL___kmpc_end_master),
2053 llvm::Value *FilterVal = Filter
2055 : llvm::ConstantInt::get(
CGM.Int32Ty, 0);
2060 CommonActionTy Action(
OMPBuilder.getOrCreateRuntimeFunction(
2061 CGM.getModule(), OMPRTL___kmpc_masked),
2064 CGM.getModule(), OMPRTL___kmpc_end_masked),
2080 llvm::Value *Args[] = {
2082 llvm::ConstantInt::get(
CGM.IntTy, 0,
true)};
2084 CGM.getModule(), OMPRTL___kmpc_omp_taskyield),
2088 if (
auto *Region = dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.
CapturedStmtInfo))
2089 Region->emitUntiedSwitch(CGF);
2102 CommonActionTy Action(
OMPBuilder.getOrCreateRuntimeFunction(
2103 CGM.getModule(), OMPRTL___kmpc_taskgroup),
2106 CGM.getModule(), OMPRTL___kmpc_end_taskgroup),
2115 unsigned Index,
const VarDecl *Var) {
2136 Args.push_back(&LHSArg);
2137 Args.push_back(&RHSArg);
2143 llvm::GlobalValue::InternalLinkage, Name,
2146 Fn->setDoesNotRecurse();
2163 for (
unsigned I = 0, E = AssignmentOps.size(); I < E; ++I) {
2164 const auto *DestVar =
2168 const auto *SrcVar =
2174 CGF.
EmitOMPCopy(
Type, DestAddr, SrcAddr, DestVar, SrcVar, AssignmentOps[I]);
2189 assert(CopyprivateVars.size() == SrcExprs.size() &&
2190 CopyprivateVars.size() == DstExprs.size() &&
2191 CopyprivateVars.size() == AssignmentOps.size());
2203 if (!CopyprivateVars.empty()) {
2206 C.getIntTypeForBitwidth(32, 1);
2207 DidIt = CGF.
CreateMemTemp(KmpInt32Ty,
".omp.copyprivate.did_it");
2212 CommonActionTy Action(
OMPBuilder.getOrCreateRuntimeFunction(
2213 CGM.getModule(), OMPRTL___kmpc_single),
2216 CGM.getModule(), OMPRTL___kmpc_end_single),
2229 llvm::APInt ArraySize(32, CopyprivateVars.size());
2230 QualType CopyprivateArrayTy =
C.getConstantArrayType(
2235 CGF.
CreateMemTemp(CopyprivateArrayTy,
".omp.copyprivate.cpr_list");
2236 for (
unsigned I = 0, E = CopyprivateVars.size(); I < E; ++I) {
2248 SrcExprs, DstExprs, AssignmentOps, Loc);
2249 llvm::Value *BufSize = CGF.
getTypeSize(CopyprivateArrayTy);
2253 llvm::Value *Args[] = {
2257 CL.emitRawPointer(CGF),
2262 CGM.getModule(), OMPRTL___kmpc_copyprivate),
2278 CommonActionTy Action(
OMPBuilder.getOrCreateRuntimeFunction(
2279 CGM.getModule(), OMPRTL___kmpc_ordered),
2282 CGM.getModule(), OMPRTL___kmpc_end_ordered),
2293 if (Kind == OMPD_for)
2294 Flags = OMP_IDENT_BARRIER_IMPL_FOR;
2295 else if (Kind == OMPD_sections)
2296 Flags = OMP_IDENT_BARRIER_IMPL_SECTIONS;
2297 else if (Kind == OMPD_single)
2298 Flags = OMP_IDENT_BARRIER_IMPL_SINGLE;
2299 else if (Kind == OMPD_barrier)
2300 Flags = OMP_IDENT_BARRIER_EXPL;
2302 Flags = OMP_IDENT_BARRIER_IMPL;
2314 ScheduleKind = OMPC_SCHEDULE_static;
2316 llvm::APInt ChunkSize(32, 1);
2326 bool ForceSimpleCall) {
2328 auto *OMPRegionInfo =
2331 llvm::OpenMPIRBuilder::InsertPointTy AfterIP =
2334 CGF.
Builder.restoreIP(AfterIP);
2347 if (OMPRegionInfo) {
2348 if (!ForceSimpleCall && OMPRegionInfo->hasCancel()) {
2351 OMPRTL___kmpc_cancel_barrier),
2360 CGF.
Builder.CreateCondBr(Cmp, ExitBB, ContBB);
2372 CGM.getModule(), OMPRTL___kmpc_barrier),
2377 Expr *ME,
bool IsFatal) {
2379 : llvm::ConstantPointerNull::get(CGF.
VoidPtrTy);
2382 llvm::Value *Args[] = {
2384 llvm::ConstantInt::get(
CGM.Int32Ty, IsFatal ? 2 : 1),
2385 CGF.
Builder.CreatePointerCast(MVL,
CGM.Int8PtrTy)};
2387 CGM.getModule(), OMPRTL___kmpc_error),
2393 bool Chunked,
bool Ordered) {
2394 switch (ScheduleKind) {
2395 case OMPC_SCHEDULE_static:
2396 return Chunked ? (Ordered ? OMP_ord_static_chunked : OMP_sch_static_chunked)
2397 : (Ordered ? OMP_ord_static : OMP_sch_static);
2398 case OMPC_SCHEDULE_dynamic:
2399 return Ordered ? OMP_ord_dynamic_chunked : OMP_sch_dynamic_chunked;
2400 case OMPC_SCHEDULE_guided:
2401 return Ordered ? OMP_ord_guided_chunked : OMP_sch_guided_chunked;
2402 case OMPC_SCHEDULE_runtime:
2403 return Ordered ? OMP_ord_runtime : OMP_sch_runtime;
2404 case OMPC_SCHEDULE_auto:
2405 return Ordered ? OMP_ord_auto : OMP_sch_auto;
2407 assert(!Chunked &&
"chunk was specified but schedule kind not known");
2408 return Ordered ? OMP_ord_static : OMP_sch_static;
2410 llvm_unreachable(
"Unexpected runtime schedule");
2414static OpenMPSchedType
2417 return Chunked ? OMP_dist_sch_static_chunked : OMP_dist_sch_static;
2421 bool Chunked)
const {
2422 OpenMPSchedType Schedule =
2424 return Schedule == OMP_sch_static;
2430 return Schedule == OMP_dist_sch_static;
2434 bool Chunked)
const {
2435 OpenMPSchedType Schedule =
2437 return Schedule == OMP_sch_static_chunked;
2443 return Schedule == OMP_dist_sch_static_chunked;
2447 OpenMPSchedType Schedule =
2449 assert(Schedule != OMP_sch_static_chunked &&
"cannot be chunked here");
2450 return Schedule != OMP_sch_static;
2458 case OMPC_SCHEDULE_MODIFIER_monotonic:
2459 Modifier = OMP_sch_modifier_monotonic;
2461 case OMPC_SCHEDULE_MODIFIER_nonmonotonic:
2462 Modifier = OMP_sch_modifier_nonmonotonic;
2464 case OMPC_SCHEDULE_MODIFIER_simd:
2465 if (Schedule == OMP_sch_static_chunked)
2466 Schedule = OMP_sch_static_balanced_chunked;
2473 case OMPC_SCHEDULE_MODIFIER_monotonic:
2474 Modifier = OMP_sch_modifier_monotonic;
2476 case OMPC_SCHEDULE_MODIFIER_nonmonotonic:
2477 Modifier = OMP_sch_modifier_nonmonotonic;
2479 case OMPC_SCHEDULE_MODIFIER_simd:
2480 if (Schedule == OMP_sch_static_chunked)
2481 Schedule = OMP_sch_static_balanced_chunked;
2493 if (CGM.
getLangOpts().OpenMP >= 50 && Modifier == 0) {
2494 if (!(Schedule == OMP_sch_static_chunked || Schedule == OMP_sch_static ||
2495 Schedule == OMP_sch_static_balanced_chunked ||
2496 Schedule == OMP_ord_static_chunked || Schedule == OMP_ord_static ||
2497 Schedule == OMP_dist_sch_static_chunked ||
2498 Schedule == OMP_dist_sch_static))
2499 Modifier = OMP_sch_modifier_nonmonotonic;
2501 return Schedule | Modifier;
2511 ScheduleKind.
Schedule, DispatchValues.
Chunk !=
nullptr, Ordered);
2513 (Schedule != OMP_sch_static && Schedule != OMP_sch_static_chunked &&
2514 Schedule != OMP_ord_static && Schedule != OMP_ord_static_chunked &&
2515 Schedule != OMP_sch_static_balanced_chunked));
2522 llvm::Value *Chunk = DispatchValues.
Chunk ? DispatchValues.
Chunk
2523 : CGF.
Builder.getIntN(IVSize, 1);
2524 llvm::Value *Args[] = {
2528 CGM, Schedule, ScheduleKind.
M1, ScheduleKind.
M2)),
2531 CGF.
Builder.getIntN(IVSize, 1),
2548 CodeGenFunction &CGF, llvm::Value *UpdateLocation, llvm::Value *ThreadId,
2549 llvm::FunctionCallee ForStaticInitFunction, OpenMPSchedType Schedule,
2556 assert(Schedule == OMP_sch_static || Schedule == OMP_sch_static_chunked ||
2557 Schedule == OMP_sch_static_balanced_chunked ||
2558 Schedule == OMP_ord_static || Schedule == OMP_ord_static_chunked ||
2559 Schedule == OMP_dist_sch_static ||
2560 Schedule == OMP_dist_sch_static_chunked);
2567 llvm::Value *Chunk = Values.
Chunk;
2568 if (Chunk ==
nullptr) {
2569 assert((Schedule == OMP_sch_static || Schedule == OMP_ord_static ||
2570 Schedule == OMP_dist_sch_static) &&
2571 "expected static non-chunked schedule");
2575 assert((Schedule == OMP_sch_static_chunked ||
2576 Schedule == OMP_sch_static_balanced_chunked ||
2577 Schedule == OMP_ord_static_chunked ||
2578 Schedule == OMP_dist_sch_static_chunked) &&
2579 "expected static chunked schedule");
2581 llvm::Value *Args[] = {
2604 "Expected loop-based or sections-based directive.");
2607 ? OMP_IDENT_WORK_LOOP
2608 : OMP_IDENT_WORK_SECTIONS);
2610 llvm::FunctionCallee StaticInitFunction =
2615 ScheduleNum, ScheduleKind.
M1, ScheduleKind.
M2, Values);
2622 OpenMPSchedType ScheduleNum =
2624 llvm::Value *UpdatedLocation =
2627 llvm::FunctionCallee StaticInitFunction;
2628 bool isGPUDistribute =
2629 CGM.getLangOpts().OpenMPIsTargetDevice &&
CGM.getTriple().isGPU();
2630 StaticInitFunction =
OMPBuilder.createForStaticInitFunction(
2641 assert((DKind == OMPD_distribute || DKind == OMPD_for ||
2642 DKind == OMPD_sections) &&
2643 "Expected distribute, for, or sections directive kind");
2647 llvm::Value *Args[] = {
2650 (DKind == OMPD_target_teams_loop)
2651 ? OMP_IDENT_WORK_DISTRIBUTE
2653 ? OMP_IDENT_WORK_LOOP
2654 : OMP_IDENT_WORK_SECTIONS),
2658 CGM.getLangOpts().OpenMPIsTargetDevice &&
CGM.getTriple().isGPU())
2661 CGM.getModule(), OMPRTL___kmpc_distribute_static_fini),
2665 CGM.getModule(), OMPRTL___kmpc_for_static_fini),
2690 llvm::Value *Args[] = {
2698 OMPBuilder.createDispatchNextFunction(IVSize, IVSigned), Args);
2705 const Expr *Message,
2708 return llvm::ConstantPointerNull::get(CGF.
VoidPtrTy);
2725 return llvm::ConstantInt::get(
CGM.Int32Ty,
2726 Severity == OMPC_SEVERITY_warning ? 1 : 2);
2749 RuntimeFunction FnID = OMPRTL___kmpc_push_num_threads;
2750 if (Modifier == OMPC_NUMTHREADS_strict) {
2751 FnID = OMPRTL___kmpc_push_num_threads_strict;
2756 OMPBuilder.getOrCreateRuntimeFunction(
CGM.getModule(), FnID), Args);
2760 ProcBindKind ProcBind,
2764 assert(ProcBind != OMP_PROC_BIND_unknown &&
"Unsupported proc_bind value.");
2766 llvm::Value *Args[] = {
2768 llvm::ConstantInt::get(
CGM.IntTy,
unsigned(ProcBind),
true)};
2770 CGM.getModule(), OMPRTL___kmpc_push_proc_bind),
2783 CGM.getModule(), OMPRTL___kmpc_flush),
2790enum KmpTaskTFields {
2817 if (
CGM.getLangOpts().OpenMPSimd ||
OMPBuilder.OffloadInfoManager.empty())
2820 llvm::OpenMPIRBuilder::EmitMetadataErrorReportFunctionTy &&ErrorReportFn =
2821 [
this](llvm::OpenMPIRBuilder::EmitMetadataErrorKind Kind,
2822 const llvm::TargetRegionEntryInfo &EntryInfo) ->
void {
2824 if (Kind != llvm::OpenMPIRBuilder::EMIT_MD_GLOBAL_VAR_LINK_ERROR) {
2825 for (
auto I =
CGM.getContext().getSourceManager().fileinfo_begin(),
2826 E =
CGM.getContext().getSourceManager().fileinfo_end();
2828 if (I->getFirst().getUniqueID().getDevice() == EntryInfo.DeviceID &&
2829 I->getFirst().getUniqueID().getFile() == EntryInfo.FileID) {
2830 Loc =
CGM.getContext().getSourceManager().translateFileLineCol(
2831 I->getFirst(), EntryInfo.Line, 1);
2837 case llvm::OpenMPIRBuilder::EMIT_MD_TARGET_REGION_ERROR: {
2838 unsigned DiagID =
CGM.getDiags().getCustomDiagID(
2840 "%0 is incorrect: either the "
2841 "address or the ID is invalid.");
2842 CGM.getDiags().Report(Loc, DiagID) << EntryInfo.ParentName;
2844 case llvm::OpenMPIRBuilder::EMIT_MD_DECLARE_TARGET_ERROR: {
2845 unsigned DiagID =
CGM.getDiags().getCustomDiagID(
2847 "variable %0 is incorrect: the "
2848 "address is invalid.");
2849 CGM.getDiags().Report(Loc, DiagID) << EntryInfo.ParentName;
2851 case llvm::OpenMPIRBuilder::EMIT_MD_GLOBAL_VAR_LINK_ERROR: {
2852 unsigned DiagID =
CGM.getDiags().getCustomDiagID(
2854 "Offloading entry for declare target variable is incorrect: the "
2855 "address is invalid.");
2856 CGM.getDiags().Report(DiagID);
2861 OMPBuilder.createOffloadEntriesAndInfoMetadata(ErrorReportFn);
2868 QualType KmpRoutineEntryTyArgs[] = {KmpInt32Ty,
C.VoidPtrTy};
2871 C.getFunctionType(KmpInt32Ty, KmpRoutineEntryTyArgs, EPI));
2877struct PrivateHelpersTy {
2878 PrivateHelpersTy(
const Expr *OriginalRef,
const VarDecl *Original,
2880 : OriginalRef(OriginalRef), Original(Original), PrivateCopy(PrivateCopy),
2881 PrivateElemInit(PrivateElemInit) {}
2882 PrivateHelpersTy(
const VarDecl *Original) : Original(Original) {}
2883 const Expr *OriginalRef =
nullptr;
2884 const VarDecl *Original =
nullptr;
2885 const VarDecl *PrivateCopy =
nullptr;
2886 const VarDecl *PrivateElemInit =
nullptr;
2887 bool isLocalPrivate()
const {
2888 return !OriginalRef && !PrivateCopy && !PrivateElemInit;
2891typedef std::pair<CharUnits , PrivateHelpersTy> PrivateDataTy;
2896 if (!CVD->
hasAttr<OMPAllocateDeclAttr>())
2898 const auto *AA = CVD->
getAttr<OMPAllocateDeclAttr>();
2900 return !(AA->getAllocatorType() == OMPAllocateDeclAttr::OMPDefaultMemAlloc &&
2901 !AA->getAllocator());
2906 if (!Privates.empty()) {
2911 RecordDecl *RD =
C.buildImplicitRecord(
".kmp_privates.t");
2913 for (
const auto &Pair : Privates) {
2914 const VarDecl *VD = Pair.second.Original;
2918 if (Pair.second.isLocalPrivate()) {
2941 QualType KmpRoutineEntryPointerQTy) {
2961 CanQualType KmpCmplrdataTy =
C.getCanonicalTagType(UD);
2962 RecordDecl *RD =
C.buildImplicitRecord(
"kmp_task_t");
2992 RecordDecl *RD =
C.buildImplicitRecord(
"kmp_task_t_with_privates");
3012static llvm::Function *
3015 QualType KmpTaskTWithPrivatesPtrQTy,
3017 QualType SharedsPtrTy, llvm::Function *TaskFunction,
3018 llvm::Value *TaskPrivatesMap) {
3026 Args.push_back(&GtidArg);
3027 Args.push_back(&TaskTypeArg);
3028 const auto &TaskEntryFnInfo =
3030 llvm::FunctionType *TaskEntryTy =
3033 auto *TaskEntry = llvm::Function::Create(
3034 TaskEntryTy, llvm::GlobalValue::InternalLinkage, Name, &CGM.
getModule());
3036 TaskEntry->setDoesNotRecurse();
3051 const auto *KmpTaskTWithPrivatesQTyRD =
3056 auto PartIdFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTPartId);
3058 llvm::Value *PartidParam = PartIdLVal.getPointer(CGF);
3060 auto SharedsFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTShareds);
3066 auto PrivatesFI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin(), 1);
3067 llvm::Value *PrivatesParam;
3068 if (PrivatesFI != KmpTaskTWithPrivatesQTyRD->field_end()) {
3071 PrivatesLVal.getPointer(CGF), CGF.
VoidPtrTy);
3073 PrivatesParam = llvm::ConstantPointerNull::get(CGF.
VoidPtrTy);
3076 llvm::Value *CommonArgs[] = {
3077 GtidParam, PartidParam, PrivatesParam, TaskPrivatesMap,
3083 std::end(CommonArgs));
3085 auto LBFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTLowerBound);
3088 auto UBFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTUpperBound);
3091 auto StFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTStride);
3094 auto LIFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTLastIter);
3097 auto RFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTReductions);
3100 CallArgs.push_back(LBParam);
3101 CallArgs.push_back(UBParam);
3102 CallArgs.push_back(StParam);
3103 CallArgs.push_back(LIParam);
3104 CallArgs.push_back(RParam);
3106 CallArgs.push_back(SharedsParam);
3119 QualType KmpTaskTWithPrivatesPtrQTy,
3120 QualType KmpTaskTWithPrivatesQTy) {
3128 Args.push_back(&GtidArg);
3129 Args.push_back(&TaskTypeArg);
3130 const auto &DestructorFnInfo =
3132 llvm::FunctionType *DestructorFnTy =
3136 auto *DestructorFn =
3137 llvm::Function::Create(DestructorFnTy, llvm::GlobalValue::InternalLinkage,
3141 DestructorFn->setDoesNotRecurse();
3149 const auto *KmpTaskTWithPrivatesQTyRD =
3151 auto FI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin());
3153 for (
const auto *Field : FI->getType()->castAsRecordDecl()->fields()) {
3155 Field->getType().isDestructedType()) {
3157 CGF.
pushDestroy(DtorKind, FieldLValue.getAddress(), Field->getType());
3161 return DestructorFn;
3181 C,
nullptr, Loc,
nullptr,
3182 C.getPointerType(PrivatesQTy).withConst().withRestrict(),
3184 Args.push_back(&TaskPrivatesArg);
3185 llvm::DenseMap<CanonicalDeclPtr<const VarDecl>,
unsigned> PrivateVarsPos;
3186 unsigned Counter = 1;
3187 for (
const Expr *E :
Data.PrivateVars) {
3189 C,
nullptr, Loc,
nullptr,
3190 C.getPointerType(
C.getPointerType(E->
getType()))
3195 PrivateVarsPos[VD] = Counter;
3198 for (
const Expr *E :
Data.FirstprivateVars) {
3200 C,
nullptr, Loc,
nullptr,
3201 C.getPointerType(
C.getPointerType(E->
getType()))
3206 PrivateVarsPos[VD] = Counter;
3209 for (
const Expr *E :
Data.LastprivateVars) {
3211 C,
nullptr, Loc,
nullptr,
3212 C.getPointerType(
C.getPointerType(E->
getType()))
3217 PrivateVarsPos[VD] = Counter;
3223 Ty =
C.getPointerType(Ty);
3225 Ty =
C.getPointerType(Ty);
3227 C,
nullptr, Loc,
nullptr,
3228 C.getPointerType(
C.getPointerType(Ty)).withConst().withRestrict(),
3230 PrivateVarsPos[VD] = Counter;
3233 const auto &TaskPrivatesMapFnInfo =
3235 llvm::FunctionType *TaskPrivatesMapTy =
3239 auto *TaskPrivatesMap = llvm::Function::Create(
3240 TaskPrivatesMapTy, llvm::GlobalValue::InternalLinkage, Name,
3243 TaskPrivatesMapFnInfo);
3245 TaskPrivatesMap->removeFnAttr(llvm::Attribute::NoInline);
3246 TaskPrivatesMap->removeFnAttr(llvm::Attribute::OptimizeNone);
3247 TaskPrivatesMap->addFnAttr(llvm::Attribute::AlwaysInline);
3251 TaskPrivatesMapFnInfo, Args, Loc, Loc);
3259 for (
const FieldDecl *Field : PrivatesQTyRD->fields()) {
3261 const VarDecl *VD = Args[PrivateVarsPos[Privates[Counter].second.Original]];
3265 RefLVal.getAddress(), RefLVal.getType()->castAs<
PointerType>());
3270 return TaskPrivatesMap;
3276 Address KmpTaskSharedsPtr, LValue TDBase,
3282 auto FI = std::next(KmpTaskTWithPrivatesQTyRD->
field_begin());
3296 if ((!IsTargetTask && !
Data.FirstprivateVars.empty() && ForDup) ||
3297 (IsTargetTask && KmpTaskSharedsPtr.
isValid())) {
3304 FI = FI->getType()->castAsRecordDecl()->field_begin();
3305 for (
const PrivateDataTy &Pair : Privates) {
3307 if (Pair.second.isLocalPrivate()) {
3311 const VarDecl *VD = Pair.second.PrivateCopy;
3316 if (
const VarDecl *Elem = Pair.second.PrivateElemInit) {
3317 const VarDecl *OriginalVD = Pair.second.Original;
3320 LValue SharedRefLValue;
3323 if (IsTargetTask && !SharedField) {
3327 ->getNumParams() == 0 &&
3330 ->getDeclContext()) &&
3331 "Expected artificial target data variable.");
3334 }
else if (ForDup) {
3337 SharedRefLValue.getAddress().withAlignment(
3338 C.getDeclAlign(OriginalVD)),
3340 SharedRefLValue.getTBAAInfo());
3342 Pair.second.Original->getCanonicalDecl()) > 0 ||
3344 SharedRefLValue = CGF.
EmitLValue(Pair.second.OriginalRef);
3347 InlinedOpenMPRegionRAII Region(
3350 SharedRefLValue = CGF.
EmitLValue(Pair.second.OriginalRef);
3361 PrivateLValue.getAddress(), SharedRefLValue.getAddress(),
Type,
3362 [&CGF, Elem,
Init, &CapturesInfo](
Address DestElement,
3365 CodeGenFunction::OMPPrivateScope InitScope(CGF);
3366 InitScope.addPrivate(Elem, SrcElement);
3367 (void)InitScope.Privatize();
3369 CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(
3370 CGF, &CapturesInfo);
3371 CGF.EmitAnyExprToMem(Init, DestElement,
3372 Init->getType().getQualifiers(),
3378 InitScope.addPrivate(Elem, SharedRefLValue.getAddress());
3379 (void)InitScope.Privatize();
3395 bool InitRequired =
false;
3396 for (
const PrivateDataTy &Pair : Privates) {
3397 if (Pair.second.isLocalPrivate())
3399 const VarDecl *VD = Pair.second.PrivateCopy;
3401 InitRequired = InitRequired || (isa_and_nonnull<CXXConstructExpr>(
Init) &&
3406 return InitRequired;
3423 QualType KmpTaskTWithPrivatesPtrQTy,
3431 KmpTaskTWithPrivatesPtrQTy,
3434 KmpTaskTWithPrivatesPtrQTy,
3438 Args.push_back(&DstArg);
3439 Args.push_back(&SrcArg);
3440 Args.push_back(&LastprivArg);
3441 const auto &TaskDupFnInfo =
3445 auto *TaskDup = llvm::Function::Create(
3446 TaskDupTy, llvm::GlobalValue::InternalLinkage, Name, &CGM.
getModule());
3448 TaskDup->setDoesNotRecurse();
3458 auto LIFI = std::next(KmpTaskTQTyRD->
field_begin(), KmpTaskTLastIter);
3460 TDBase, *KmpTaskTWithPrivatesQTyRD->
field_begin());
3468 assert(!Privates.empty());
3470 if (!
Data.FirstprivateVars.empty()) {
3475 TDBase, *KmpTaskTWithPrivatesQTyRD->
field_begin());
3483 emitPrivatesInit(CGF, D, KmpTaskSharedsPtr, TDBase, KmpTaskTWithPrivatesQTyRD,
3484 SharedsTy, SharedsPtrTy,
Data, Privates,
true);
3494 for (
const PrivateDataTy &P : Privates) {
3495 if (P.second.isLocalPrivate())
3497 QualType Ty = P.second.Original->getType().getNonReferenceType();
3506class OMPIteratorGeneratorScope final
3508 CodeGenFunction &CGF;
3509 const OMPIteratorExpr *E =
nullptr;
3510 SmallVector<CodeGenFunction::JumpDest, 4> ContDests;
3511 SmallVector<CodeGenFunction::JumpDest, 4> ExitDests;
3512 OMPIteratorGeneratorScope() =
delete;
3513 OMPIteratorGeneratorScope(OMPIteratorGeneratorScope &) =
delete;
3516 OMPIteratorGeneratorScope(CodeGenFunction &CGF,
const OMPIteratorExpr *E)
3517 : CodeGenFunction::OMPPrivateScope(CGF), CGF(CGF), E(E) {
3520 SmallVector<llvm::Value *, 4> Uppers;
3522 Uppers.push_back(CGF.EmitScalarExpr(E->getHelper(I).Upper));
3523 const auto *VD = cast<VarDecl>(E->getIteratorDecl(I));
3524 addPrivate(VD, CGF.CreateMemTemp(VD->getType(), VD->getName()));
3525 const OMPIteratorHelperData &HelperData = E->getHelper(I);
3527 HelperData.CounterVD,
3528 CGF.CreateMemTemp(HelperData.CounterVD->getType(),
"counter.addr"));
3533 const OMPIteratorHelperData &HelperData = E->getHelper(I);
3535 CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(HelperData.CounterVD),
3536 HelperData.CounterVD->getType());
3538 CGF.EmitStoreOfScalar(
3539 llvm::ConstantInt::get(CLVal.getAddress().getElementType(), 0),
3541 CodeGenFunction::JumpDest &ContDest =
3542 ContDests.emplace_back(CGF.getJumpDestInCurrentScope(
"iter.cont"));
3543 CodeGenFunction::JumpDest &ExitDest =
3544 ExitDests.emplace_back(CGF.getJumpDestInCurrentScope(
"iter.exit"));
3546 llvm::Value *N = Uppers[I];
3549 CGF.EmitBlock(ContDest.getBlock());
3551 CGF.EmitLoadOfScalar(CLVal, HelperData.CounterVD->getLocation());
3553 HelperData.CounterVD->getType()->isSignedIntegerOrEnumerationType()
3554 ? CGF.Builder.CreateICmpSLT(CVal, N)
3555 : CGF.Builder.CreateICmpULT(CVal, N);
3556 llvm::BasicBlock *BodyBB = CGF.createBasicBlock(
"iter.body");
3557 CGF.Builder.CreateCondBr(Cmp, BodyBB, ExitDest.getBlock());
3559 CGF.EmitBlock(BodyBB);
3561 CGF.EmitIgnoredExpr(HelperData.Update);
3564 ~OMPIteratorGeneratorScope() {
3569 const OMPIteratorHelperData &HelperData = E->
getHelper(I - 1);
3574 CGF.
EmitBlock(ExitDests[I - 1].getBlock(), I == 1);
3580static std::pair<llvm::Value *, llvm::Value *>
3582 const auto *OASE = dyn_cast<OMPArrayShapingExpr>(E);
3585 const Expr *
Base = OASE->getBase();
3590 llvm::Value *SizeVal;
3593 SizeVal = CGF.
getTypeSize(OASE->getBase()->getType()->getPointeeType());
3594 for (
const Expr *SE : OASE->getDimensions()) {
3598 SizeVal = CGF.
Builder.CreateNUWMul(SizeVal, Sz);
3600 }
else if (
const auto *ASE =
3603 Address UpAddrAddress = UpAddrLVal.getAddress();
3604 llvm::Value *UpAddr = CGF.
Builder.CreateConstGEP1_32(
3608 llvm::Value *UpIntPtr = CGF.
Builder.CreatePtrToInt(UpAddr, CGF.
SizeTy);
3609 SizeVal = CGF.
Builder.CreateNUWSub(UpIntPtr, LowIntPtr);
3613 return std::make_pair(
Addr, SizeVal);
3618 QualType FlagsTy =
C.getIntTypeForBitwidth(32,
false);
3619 if (KmpTaskAffinityInfoTy.
isNull()) {
3621 C.buildImplicitRecord(
"kmp_task_affinity_info_t");
3627 KmpTaskAffinityInfoTy =
C.getCanonicalTagType(KmpAffinityInfoRD);
3634 llvm::Function *TaskFunction,
QualType SharedsTy,
3639 const auto *I =
Data.PrivateCopies.begin();
3640 for (
const Expr *E :
Data.PrivateVars) {
3642 Privates.emplace_back(
3648 I =
Data.FirstprivateCopies.begin();
3649 const auto *IElemInitRef =
Data.FirstprivateInits.begin();
3650 for (
const Expr *E :
Data.FirstprivateVars) {
3652 Privates.emplace_back(
3660 I =
Data.LastprivateCopies.begin();
3661 for (
const Expr *E :
Data.LastprivateVars) {
3663 Privates.emplace_back(
3671 Privates.emplace_back(
CGM.getPointerAlign(), PrivateHelpersTy(VD));
3673 Privates.emplace_back(
C.getDeclAlign(VD), PrivateHelpersTy(VD));
3675 llvm::stable_sort(Privates,
3676 [](
const PrivateDataTy &L,
const PrivateDataTy &R) {
3677 return L.first > R.first;
3679 QualType KmpInt32Ty =
C.getIntTypeForBitwidth(32, 1);
3690 assert((D.getDirectiveKind() == OMPD_task ||
3693 "Expected taskloop, task or target directive");
3700 const auto *KmpTaskTQTyRD =
KmpTaskTQTy->castAsRecordDecl();
3702 const RecordDecl *KmpTaskTWithPrivatesQTyRD =
3705 C.getCanonicalTagType(KmpTaskTWithPrivatesQTyRD);
3706 QualType KmpTaskTWithPrivatesPtrQTy =
3707 C.getPointerType(KmpTaskTWithPrivatesQTy);
3708 llvm::Type *KmpTaskTWithPrivatesPtrTy = CGF.
Builder.getPtrTy(0);
3709 llvm::Value *KmpTaskTWithPrivatesTySize =
3711 QualType SharedsPtrTy =
C.getPointerType(SharedsTy);
3714 llvm::Value *TaskPrivatesMap =
nullptr;
3715 llvm::Type *TaskPrivatesMapTy =
3716 std::next(TaskFunction->arg_begin(), 3)->getType();
3717 if (!Privates.empty()) {
3718 auto FI = std::next(KmpTaskTWithPrivatesQTyRD->
field_begin());
3722 TaskPrivatesMap, TaskPrivatesMapTy);
3724 TaskPrivatesMap = llvm::ConstantPointerNull::get(
3730 CGM, Loc, D.getDirectiveKind(), KmpInt32Ty, KmpTaskTWithPrivatesPtrQTy,
3731 KmpTaskTWithPrivatesQTy,
KmpTaskTQTy, SharedsPtrTy, TaskFunction,
3743 DestructorsFlag = 0x8,
3744 PriorityFlag = 0x20,
3745 DetachableFlag = 0x40,
3747 unsigned Flags =
Data.Tied ? TiedFlag : 0;
3748 bool NeedsCleanup =
false;
3749 if (!Privates.empty()) {
3753 Flags = Flags | DestructorsFlag;
3755 if (
Data.Priority.getInt())
3756 Flags = Flags | PriorityFlag;
3758 Flags = Flags | DetachableFlag;
3759 llvm::Value *TaskFlags =
3760 Data.Final.getPointer()
3761 ? CGF.
Builder.CreateSelect(
Data.Final.getPointer(),
3762 CGF.
Builder.getInt32(FinalFlag),
3764 : CGF.
Builder.getInt32(
Data.Final.getInt() ? FinalFlag : 0);
3765 TaskFlags = CGF.
Builder.CreateOr(TaskFlags, CGF.
Builder.getInt32(Flags));
3766 llvm::Value *SharedsSize =
CGM.getSize(
C.getTypeSizeInChars(SharedsTy));
3768 getThreadID(CGF, Loc), TaskFlags, KmpTaskTWithPrivatesTySize,
3771 llvm::Value *NewTask;
3778 llvm::Value *DeviceID;
3783 DeviceID = CGF.
Builder.getInt64(OMP_DEVICEID_UNDEF);
3784 AllocArgs.push_back(DeviceID);
3787 CGM.getModule(), OMPRTL___kmpc_omp_target_task_alloc),
3792 CGM.getModule(), OMPRTL___kmpc_omp_task_alloc),
3805 llvm::Value *Tid =
getThreadID(CGF, DC->getBeginLoc());
3806 Tid = CGF.
Builder.CreateIntCast(Tid, CGF.
IntTy,
false);
3809 CGM.getModule(), OMPRTL___kmpc_task_allow_completion_event),
3810 {Loc, Tid, NewTask});
3821 llvm::Value *NumOfElements =
nullptr;
3822 unsigned NumAffinities = 0;
3824 if (
const Expr *Modifier =
C->getModifier()) {
3826 for (
unsigned I = 0, E = IE->numOfIterators(); I < E; ++I) {
3830 NumOfElements ? CGF.
Builder.CreateNUWMul(NumOfElements, Sz) : Sz;
3833 NumAffinities +=
C->varlist_size();
3838 enum RTLAffinityInfoFieldsTy { BaseAddr, Len, Flags };
3840 QualType KmpTaskAffinityInfoArrayTy;
3841 if (NumOfElements) {
3842 NumOfElements = CGF.
Builder.CreateNUWAdd(
3843 llvm::ConstantInt::get(CGF.
SizeTy, NumAffinities), NumOfElements);
3846 C.getIntTypeForBitwidth(
C.getTypeSize(
C.getSizeType()), 0),
3850 KmpTaskAffinityInfoArrayTy =
C.getVariableArrayType(
3858 NumOfElements = CGF.
Builder.CreateIntCast(NumOfElements, CGF.
Int32Ty,
3861 KmpTaskAffinityInfoArrayTy =
C.getConstantArrayType(
3863 llvm::APInt(
C.getTypeSize(
C.getSizeType()), NumAffinities),
nullptr,
3866 CGF.
CreateMemTemp(KmpTaskAffinityInfoArrayTy,
".affs.arr.addr");
3868 NumOfElements = llvm::ConstantInt::get(
CGM.Int32Ty, NumAffinities,
3875 bool HasIterator =
false;
3877 if (
C->getModifier()) {
3881 for (
const Expr *E :
C->varlist()) {
3890 Base, *std::next(KmpAffinityInfoRD->field_begin(), BaseAddr));
3895 Base, *std::next(KmpAffinityInfoRD->field_begin(), Len));
3909 const Expr *Modifier =
C->getModifier();
3912 OMPIteratorGeneratorScope IteratorScope(
3914 for (
const Expr *E :
C->varlist()) {
3924 Base, *std::next(KmpAffinityInfoRD->field_begin(), BaseAddr));
3929 Base, *std::next(KmpAffinityInfoRD->field_begin(), Len));
3931 Idx = CGF.
Builder.CreateNUWAdd(
3932 Idx, llvm::ConstantInt::get(Idx->getType(), 1));
3947 CGM.getModule(), OMPRTL___kmpc_omp_reg_task_with_affinity),
3948 {LocRef, GTid, NewTask, NumOfElements, AffinListPtr});
3950 llvm::Value *NewTaskNewTaskTTy =
3952 NewTask, KmpTaskTWithPrivatesPtrTy);
3954 KmpTaskTWithPrivatesQTy);
3965 *std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTShareds)),
3967 CGF.
Int8Ty,
CGM.getNaturalTypeAlignment(SharedsTy));
3974 if (!Privates.empty()) {
3976 SharedsTy, SharedsPtrTy,
Data, Privates,
3981 CGM, Loc, D, KmpTaskTWithPrivatesPtrQTy, KmpTaskTWithPrivatesQTyRD,
3982 KmpTaskTQTyRD, SharedsTy, SharedsPtrTy,
Data, Privates,
3983 !
Data.LastprivateVars.empty());
3987 enum { Priority = 0, Destructors = 1 };
3989 auto FI = std::next(KmpTaskTQTyRD->field_begin(), Data1);
3990 const auto *KmpCmplrdataUD = (*FI)->getType()->castAsRecordDecl();
3991 assert(KmpCmplrdataUD->isUnion());
3994 CGM, Loc, KmpInt32Ty, KmpTaskTWithPrivatesPtrQTy,
3995 KmpTaskTWithPrivatesQTy);
3998 Data1LV, *std::next(KmpCmplrdataUD->field_begin(), Destructors));
4004 if (
Data.Priority.getInt()) {
4006 TDBase, *std::next(KmpTaskTQTyRD->field_begin(), Data2));
4008 Data2LV, *std::next(KmpCmplrdataUD->field_begin(), Priority));
4011 Result.NewTask = NewTask;
4012 Result.TaskEntry = TaskEntry;
4013 Result.NewTaskNewTaskTTy = NewTaskNewTaskTTy;
4015 Result.KmpTaskTQTyRD = KmpTaskTQTyRD;
4021 RTLDependenceKindTy DepKind;
4023 case OMPC_DEPEND_in:
4024 DepKind = RTLDependenceKindTy::DepIn;
4027 case OMPC_DEPEND_out:
4028 case OMPC_DEPEND_inout:
4029 DepKind = RTLDependenceKindTy::DepInOut;
4031 case OMPC_DEPEND_mutexinoutset:
4032 DepKind = RTLDependenceKindTy::DepMutexInOutSet;
4034 case OMPC_DEPEND_inoutset:
4035 DepKind = RTLDependenceKindTy::DepInOutSet;
4037 case OMPC_DEPEND_outallmemory:
4038 DepKind = RTLDependenceKindTy::DepOmpAllMem;
4040 case OMPC_DEPEND_source:
4041 case OMPC_DEPEND_sink:
4042 case OMPC_DEPEND_depobj:
4043 case OMPC_DEPEND_inoutallmemory:
4045 llvm_unreachable(
"Unknown task dependence type");
4053 FlagsTy =
C.getIntTypeForBitwidth(
C.getTypeSize(
C.BoolTy),
false);
4054 if (KmpDependInfoTy.
isNull()) {
4055 RecordDecl *KmpDependInfoRD =
C.buildImplicitRecord(
"kmp_depend_info");
4061 KmpDependInfoTy =
C.getCanonicalTagType(KmpDependInfoRD);
4065std::pair<llvm::Value *, LValue>
4078 CGF,
Base.getAddress(),
4079 llvm::ConstantInt::get(CGF.
IntPtrTy, -1,
true));
4085 *std::next(KmpDependInfoRD->field_begin(),
4086 static_cast<unsigned int>(RTLDependInfoFields::BaseAddr)));
4088 return std::make_pair(NumDeps,
Base);
4092 llvm::PointerUnion<unsigned *, LValue *> Pos,
4102 OMPIteratorGeneratorScope IteratorScope(
4103 CGF, cast_or_null<OMPIteratorExpr>(
4104 Data.IteratorExpr ?
Data.IteratorExpr->IgnoreParenImpCasts()
4106 for (
const Expr *E :
Data.DepExprs) {
4116 Size = llvm::ConstantInt::get(CGF.
SizeTy, 0);
4119 if (
unsigned *P = dyn_cast<unsigned *>(Pos)) {
4123 assert(E &&
"Expected a non-null expression");
4132 *std::next(KmpDependInfoRD->field_begin(),
4133 static_cast<unsigned int>(RTLDependInfoFields::BaseAddr)));
4137 Base, *std::next(KmpDependInfoRD->field_begin(),
4138 static_cast<unsigned int>(RTLDependInfoFields::Len)));
4144 *std::next(KmpDependInfoRD->field_begin(),
4145 static_cast<unsigned int>(RTLDependInfoFields::Flags)));
4147 llvm::ConstantInt::get(LLVMFlagsTy,
static_cast<unsigned int>(DepKind)),
4149 if (
unsigned *P = dyn_cast<unsigned *>(Pos)) {
4154 Idx = CGF.
Builder.CreateNUWAdd(Idx,
4155 llvm::ConstantInt::get(Idx->getType(), 1));
4164 assert(
Data.DepKind == OMPC_DEPEND_depobj &&
4165 "Expected depobj dependency kind.");
4170 OMPIteratorGeneratorScope IteratorScope(
4171 CGF, cast_or_null<OMPIteratorExpr>(
4172 Data.IteratorExpr ?
Data.IteratorExpr->IgnoreParenImpCasts()
4174 for (
const Expr *E :
Data.DepExprs) {
4175 llvm::Value *NumDeps;
4178 std::tie(NumDeps,
Base) =
4182 C.getUIntPtrType());
4186 llvm::Value *Add = CGF.
Builder.CreateNUWAdd(PrevVal, NumDeps);
4188 SizeLVals.push_back(NumLVal);
4191 for (
unsigned I = 0, E = SizeLVals.size(); I < E; ++I) {
4194 Sizes.push_back(Size);
4204 assert(
Data.DepKind == OMPC_DEPEND_depobj &&
4205 "Expected depobj dependency kind.");
4208 OMPIteratorGeneratorScope IteratorScope(
4209 CGF, cast_or_null<OMPIteratorExpr>(
4210 Data.IteratorExpr ?
Data.IteratorExpr->IgnoreParenImpCasts()
4212 for (
const Expr *E :
Data.DepExprs) {
4213 llvm::Value *NumDeps;
4216 std::tie(NumDeps,
Base) =
4220 llvm::Value *Size = CGF.
Builder.CreateNUWMul(
4229 llvm::Value *Add = CGF.
Builder.CreateNUWAdd(Pos, NumDeps);
4245 llvm::Value *NumOfElements =
nullptr;
4246 unsigned NumDependencies = std::accumulate(
4247 Dependencies.begin(), Dependencies.end(), 0,
4249 return D.DepKind == OMPC_DEPEND_depobj
4251 : (V + (D.IteratorExpr ? 0 : D.DepExprs.size()));
4255 bool HasDepobjDeps =
false;
4256 bool HasRegularWithIterators =
false;
4257 llvm::Value *NumOfDepobjElements = llvm::ConstantInt::get(CGF.
IntPtrTy, 0);
4258 llvm::Value *NumOfRegularWithIterators =
4259 llvm::ConstantInt::get(CGF.
IntPtrTy, 0);
4263 if (D.
DepKind == OMPC_DEPEND_depobj) {
4266 for (llvm::Value *Size : Sizes) {
4267 NumOfDepobjElements =
4268 CGF.
Builder.CreateNUWAdd(NumOfDepobjElements, Size);
4270 HasDepobjDeps =
true;
4275 if (
const auto *IE = cast_or_null<OMPIteratorExpr>(D.
IteratorExpr)) {
4276 llvm::Value *ClauseIteratorSpace =
4277 llvm::ConstantInt::get(CGF.
IntPtrTy, 1);
4281 ClauseIteratorSpace = CGF.
Builder.CreateNUWMul(Sz, ClauseIteratorSpace);
4283 llvm::Value *NumClauseDeps = CGF.
Builder.CreateNUWMul(
4284 ClauseIteratorSpace,
4286 NumOfRegularWithIterators =
4287 CGF.
Builder.CreateNUWAdd(NumOfRegularWithIterators, NumClauseDeps);
4288 HasRegularWithIterators =
true;
4294 if (HasDepobjDeps || HasRegularWithIterators) {
4295 NumOfElements = llvm::ConstantInt::get(
CGM.IntPtrTy, NumDependencies,
4297 if (HasDepobjDeps) {
4299 CGF.
Builder.CreateNUWAdd(NumOfDepobjElements, NumOfElements);
4301 if (HasRegularWithIterators) {
4303 CGF.
Builder.CreateNUWAdd(NumOfRegularWithIterators, NumOfElements);
4306 Loc,
C.getIntTypeForBitwidth(64, 0),
4310 KmpDependInfoArrayTy =
4319 NumOfElements = CGF.
Builder.CreateIntCast(NumOfElements, CGF.
Int32Ty,
4322 KmpDependInfoArrayTy =
C.getConstantArrayType(
4328 NumOfElements = llvm::ConstantInt::get(
CGM.Int32Ty, NumDependencies,
4333 if (Dep.DepKind == OMPC_DEPEND_depobj || Dep.IteratorExpr)
4339 CGF.
CreateMemTemp(
C.getSizeType(),
"dep.counter.addr"),
C.getSizeType());
4342 if (Dep.DepKind == OMPC_DEPEND_depobj || !Dep.IteratorExpr)
4347 if (HasDepobjDeps) {
4349 if (Dep.DepKind != OMPC_DEPEND_depobj)
4356 return std::make_pair(NumOfElements, DependenciesArray);
4367 unsigned NumDependencies = Dependencies.
DepExprs.size();
4377 llvm::Value *NumDepsVal;
4379 if (
const auto *IE =
4380 cast_or_null<OMPIteratorExpr>(Dependencies.
IteratorExpr)) {
4381 NumDepsVal = llvm::ConstantInt::get(CGF.
SizeTy, 1);
4385 NumDepsVal = CGF.
Builder.CreateNUWMul(NumDepsVal, Sz);
4387 Size = CGF.
Builder.CreateNUWAdd(llvm::ConstantInt::get(CGF.
SizeTy, 1),
4391 llvm::Value *RecSize =
CGM.getSize(SizeInBytes);
4392 Size = CGF.
Builder.CreateNUWMul(Size, RecSize);
4396 QualType KmpDependInfoArrayTy =
C.getConstantArrayType(
4399 CharUnits Sz =
C.getTypeSizeInChars(KmpDependInfoArrayTy);
4401 NumDepsVal = llvm::ConstantInt::get(CGF.
IntPtrTy, NumDependencies);
4406 llvm::Value *Allocator = llvm::ConstantPointerNull::get(CGF.
VoidPtrTy);
4407 llvm::Value *Args[] = {ThreadID, Size, Allocator};
4411 CGM.getModule(), OMPRTL___kmpc_alloc),
4412 Args,
".dep.arr.addr");
4416 DependenciesArray =
Address(
Addr, KmpDependInfoLlvmTy, Align);
4422 *std::next(KmpDependInfoRD->field_begin(),
4423 static_cast<unsigned int>(RTLDependInfoFields::BaseAddr)));
4425 llvm::PointerUnion<unsigned *, LValue *> Pos;
4442 return DependenciesArray;
4457 Addr.getElementType(),
Addr.emitRawPointer(CGF),
4458 llvm::ConstantInt::get(CGF.
IntPtrTy, -1,
true));
4463 llvm::Value *Allocator = llvm::ConstantPointerNull::get(CGF.
VoidPtrTy);
4464 llvm::Value *Args[] = {ThreadID, DepObjAddr, Allocator};
4468 CGM.getModule(), OMPRTL___kmpc_free),
4480 llvm::Value *NumDeps;
4491 llvm::BasicBlock *EntryBB = CGF.
Builder.GetInsertBlock();
4493 llvm::PHINode *ElementPHI =
4498 Base.getTBAAInfo());
4502 Base, *std::next(KmpDependInfoRD->field_begin(),
4503 static_cast<unsigned int>(RTLDependInfoFields::Flags)));
4505 llvm::ConstantInt::get(LLVMFlagsTy,
static_cast<unsigned int>(DepKind)),
4509 llvm::Value *ElementNext =
4512 ElementPHI->addIncoming(ElementNext, CGF.
Builder.GetInsertBlock());
4513 llvm::Value *IsEmpty =
4514 CGF.
Builder.CreateICmpEQ(ElementNext, End,
"omp.isempty");
4515 CGF.
Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);
4522 llvm::Function *TaskFunction,
4531 llvm::Value *NewTask =
Result.NewTask;
4532 llvm::Function *TaskEntry =
Result.TaskEntry;
4533 llvm::Value *NewTaskNewTaskTTy =
Result.NewTaskNewTaskTTy;
4538 llvm::Value *NumOfElements;
4539 std::tie(NumOfElements, DependenciesArray) =
4550 llvm::Value *TaskArgs[] = { UpLoc, ThreadID, NewTask };
4551 llvm::Value *DepTaskArgs[7];
4552 if (!
Data.Dependences.empty()) {
4553 DepTaskArgs[0] = UpLoc;
4554 DepTaskArgs[1] = ThreadID;
4555 DepTaskArgs[2] = NewTask;
4556 DepTaskArgs[3] = NumOfElements;
4558 DepTaskArgs[5] = CGF.
Builder.getInt32(0);
4559 DepTaskArgs[6] = llvm::ConstantPointerNull::get(CGF.
VoidPtrTy);
4561 auto &&ThenCodeGen = [
this, &
Data, TDBase, KmpTaskTQTyRD, &TaskArgs,
4564 auto PartIdFI = std::next(KmpTaskTQTyRD->
field_begin(), KmpTaskTPartId);
4568 if (!
Data.Dependences.empty()) {
4571 CGM.getModule(), OMPRTL___kmpc_omp_task_with_deps),
4575 CGM.getModule(), OMPRTL___kmpc_omp_task),
4581 Region->emitUntiedSwitch(CGF);
4584 llvm::Value *DepWaitTaskArgs[7];
4585 if (!
Data.Dependences.empty()) {
4586 DepWaitTaskArgs[0] = UpLoc;
4587 DepWaitTaskArgs[1] = ThreadID;
4588 DepWaitTaskArgs[2] = NumOfElements;
4590 DepWaitTaskArgs[4] = CGF.
Builder.getInt32(0);
4591 DepWaitTaskArgs[5] = llvm::ConstantPointerNull::get(CGF.
VoidPtrTy);
4592 DepWaitTaskArgs[6] =
4593 llvm::ConstantInt::get(CGF.
Int32Ty,
Data.HasNowaitClause);
4595 auto &M =
CGM.getModule();
4596 auto &&ElseCodeGen = [
this, &M, &TaskArgs, ThreadID, NewTaskNewTaskTTy,
4597 TaskEntry, &
Data, &DepWaitTaskArgs,
4604 if (!
Data.Dependences.empty())
4606 M, OMPRTL___kmpc_omp_taskwait_deps_51),
4609 auto &&
CodeGen = [TaskEntry, ThreadID, NewTaskNewTaskTTy,
4612 llvm::Value *OutlinedFnArgs[] = {ThreadID, NewTaskNewTaskTTy};
4613 CGF.CGM.getOpenMPRuntime().emitOutlinedFunctionCall(CGF, Loc, TaskEntry,
4622 CommonActionTy Action(
OMPBuilder.getOrCreateRuntimeFunction(
4623 M, OMPRTL___kmpc_omp_task_begin_if0),
4626 M, OMPRTL___kmpc_omp_task_complete_if0),
4642 llvm::Function *TaskFunction,
4662 IfVal = llvm::ConstantInt::getSigned(CGF.
IntTy, 1);
4667 *std::next(
Result.KmpTaskTQTyRD->field_begin(), KmpTaskTLowerBound));
4674 *std::next(
Result.KmpTaskTQTyRD->field_begin(), KmpTaskTUpperBound));
4681 *std::next(
Result.KmpTaskTQTyRD->field_begin(), KmpTaskTStride));
4689 *std::next(
Result.KmpTaskTQTyRD->field_begin(), KmpTaskTReductions));
4690 if (
Data.Reductions) {
4696 enum { NoSchedule = 0, Grainsize = 1, NumTasks = 2 };
4705 llvm::ConstantInt::getSigned(
4707 llvm::ConstantInt::getSigned(
4709 ?
Data.Schedule.getInt() ? NumTasks : Grainsize
4711 Data.Schedule.getPointer()
4714 : llvm::ConstantInt::get(CGF.
Int64Ty, 0)};
4715 if (
Data.HasModifier)
4716 TaskArgs.push_back(llvm::ConstantInt::get(CGF.
Int32Ty, 1));
4718 TaskArgs.push_back(
Result.TaskDupFn
4721 : llvm::ConstantPointerNull::get(CGF.
VoidPtrTy));
4723 CGM.getModule(),
Data.HasModifier
4724 ? OMPRTL___kmpc_taskloop_5
4725 : OMPRTL___kmpc_taskloop),
4742 const Expr *,
const Expr *)> &RedOpGen,
4743 const Expr *XExpr =
nullptr,
const Expr *EExpr =
nullptr,
4744 const Expr *UpExpr =
nullptr) {
4752 llvm::Value *NumElements = CGF.
emitArrayLength(ArrayTy, ElementTy, LHSAddr);
4757 llvm::Value *LHSEnd =
4762 llvm::Value *IsEmpty =
4763 CGF.
Builder.CreateICmpEQ(LHSBegin, LHSEnd,
"omp.arraycpy.isempty");
4764 CGF.
Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);
4767 llvm::BasicBlock *EntryBB = CGF.
Builder.GetInsertBlock();
4772 llvm::PHINode *RHSElementPHI = CGF.
Builder.CreatePHI(
4773 RHSBegin->getType(), 2,
"omp.arraycpy.srcElementPast");
4774 RHSElementPHI->addIncoming(RHSBegin, EntryBB);
4779 llvm::PHINode *LHSElementPHI = CGF.
Builder.CreatePHI(
4780 LHSBegin->getType(), 2,
"omp.arraycpy.destElementPast");
4781 LHSElementPHI->addIncoming(LHSBegin, EntryBB);
4788 Scope.addPrivate(LHSVar, LHSElementCurrent);
4789 Scope.addPrivate(RHSVar, RHSElementCurrent);
4791 RedOpGen(CGF, XExpr, EExpr, UpExpr);
4792 Scope.ForceCleanup();
4795 llvm::Value *LHSElementNext = CGF.
Builder.CreateConstGEP1_32(
4797 "omp.arraycpy.dest.element");
4798 llvm::Value *RHSElementNext = CGF.
Builder.CreateConstGEP1_32(
4800 "omp.arraycpy.src.element");
4803 CGF.
Builder.CreateICmpEQ(LHSElementNext, LHSEnd,
"omp.arraycpy.done");
4804 CGF.
Builder.CreateCondBr(Done, DoneBB, BodyBB);
4805 LHSElementPHI->addIncoming(LHSElementNext, CGF.
Builder.GetInsertBlock());
4806 RHSElementPHI->addIncoming(RHSElementNext, CGF.
Builder.GetInsertBlock());
4816 const Expr *ReductionOp) {
4817 if (
const auto *CE = dyn_cast<CallExpr>(ReductionOp))
4818 if (
const auto *OVE = dyn_cast<OpaqueValueExpr>(CE->getCallee()))
4819 if (
const auto *DRE =
4820 dyn_cast<DeclRefExpr>(OVE->getSourceExpr()->IgnoreImpCasts()))
4821 if (
const auto *DRD =
4822 dyn_cast<OMPDeclareReductionDecl>(DRE->getDecl())) {
4823 std::pair<llvm::Function *, llvm::Function *>
Reduction =
4834 StringRef ReducerName,
SourceLocation Loc, llvm::Type *ArgsElemType,
4845 Args.push_back(&LHSArg);
4846 Args.push_back(&RHSArg);
4848 CGM.getTypes().arrangeBuiltinFunctionDeclaration(
C.VoidTy, Args);
4850 auto *Fn = llvm::Function::Create(
CGM.getTypes().GetFunctionType(CGFI),
4851 llvm::GlobalValue::InternalLinkage, Name,
4854 Fn->setDoesNotRecurse();
4873 const auto *IPriv = Privates.begin();
4875 for (
unsigned I = 0, E = ReductionOps.size(); I < E; ++I, ++IPriv, ++Idx) {
4876 const auto *RHSVar =
4879 const auto *LHSVar =
4882 QualType PrivTy = (*IPriv)->getType();
4897 IPriv = Privates.begin();
4898 const auto *ILHS = LHSExprs.begin();
4899 const auto *IRHS = RHSExprs.begin();
4900 for (
const Expr *E : ReductionOps) {
4901 if ((*IPriv)->getType()->isArrayType()) {
4906 CGF, (*IPriv)->getType(), LHSVar, RHSVar,
4908 emitReductionCombiner(CGF, E);
4918 Scope.ForceCleanup();
4924 const Expr *ReductionOp,
4925 const Expr *PrivateRef,
4933 CGF, PrivateRef->
getType(), LHSVar, RHSVar,
4935 emitReductionCombiner(CGF, ReductionOp);
4944 llvm::StringRef Prefix,
const Expr *Ref);
4948 const Expr *LHSExprs,
const Expr *RHSExprs,
const Expr *ReductionOps) {
4975 std::string ReductionVarNameStr;
4977 ReductionVarNameStr =
4980 ReductionVarNameStr =
"unnamed_priv_var";
4983 std::string SharedName =
4984 CGM.getOpenMPRuntime().getName({
"internal_pivate_", ReductionVarNameStr});
4985 llvm::GlobalVariable *SharedVar =
OMPBuilder.getOrCreateInternalVariable(
4986 LLVMType,
".omp.reduction." + SharedName);
4988 SharedVar->setAlignment(
4996 llvm::Value *BarrierArgs[] = {BarrierLoc, ThreadId};
5001 llvm::Value *IsWorker = CGF.
Builder.CreateICmpEQ(
5002 ThreadId, llvm::ConstantInt::get(ThreadId->getType(), 0));
5003 CGF.
Builder.CreateCondBr(IsWorker, InitBB, InitEndBB);
5007 auto EmitSharedInit = [&]() {
5010 std::pair<llvm::Function *, llvm::Function *> FnPair =
5012 llvm::Function *InitializerFn = FnPair.second;
5013 if (InitializerFn) {
5014 if (
const auto *CE =
5015 dyn_cast<CallExpr>(UDRInitExpr->IgnoreParenImpCasts())) {
5022 LocalScope.addPrivate(OutVD, SharedResult);
5024 (void)LocalScope.Privatize();
5025 if (
const auto *OVE = dyn_cast<OpaqueValueExpr>(
5026 CE->getCallee()->IgnoreParenImpCasts())) {
5052 if (
const auto *DRE = dyn_cast<DeclRefExpr>(Privates)) {
5053 if (
const auto *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
5064 CGF.
Builder.CreateBr(InitEndBB);
5068 CGM.getModule(), OMPRTL___kmpc_barrier),
5071 const Expr *ReductionOp = ReductionOps;
5076 auto EmitCriticalReduction = [&](
auto ReductionGen) {
5077 std::string CriticalName =
getName({
"reduction_critical"});
5085 std::pair<llvm::Function *, llvm::Function *> FnPair =
5088 if (
const auto *CE = dyn_cast<CallExpr>(ReductionOp)) {
5100 (void)LocalScope.Privatize();
5105 EmitCriticalReduction(ReductionGen);
5110 if (
const auto *Cleanup = dyn_cast<ExprWithCleanups>(ReductionClauseExpr))
5113 const Expr *AssignRHS =
nullptr;
5114 if (
const auto *BinOp = dyn_cast<BinaryOperator>(ReductionClauseExpr)) {
5115 if (BinOp->getOpcode() == BO_Assign)
5116 AssignRHS = BinOp->getRHS();
5117 }
else if (
const auto *OpCall =
5118 dyn_cast<CXXOperatorCallExpr>(ReductionClauseExpr)) {
5119 if (OpCall->getOperator() == OO_Equal)
5120 AssignRHS = OpCall->getArg(1);
5124 "Private Variable Reduction : Invalid ReductionOp expression");
5129 const auto *OmpOutDRE =
5131 const auto *OmpInDRE =
5134 OmpOutDRE && OmpInDRE &&
5135 "Private Variable Reduction : LHSExpr/RHSExpr must be DeclRefExprs");
5139 LocalScope.addPrivate(OmpOutVD, SharedLV.
getAddress());
5140 LocalScope.addPrivate(OmpInVD, LHSLV.
getAddress());
5141 (void)LocalScope.Privatize();
5145 EmitCriticalReduction(ReductionGen);
5149 CGM.getModule(), OMPRTL___kmpc_barrier),
5155 llvm::Value *FinalResultVal =
nullptr;
5159 FinalResultAddr = SharedResult;
5173 CGM.getModule(), OMPRTL___kmpc_barrier),
5184 EmitCriticalReduction(OriginalListCombiner);
5236 if (SimpleReduction) {
5238 const auto *IPriv = OrgPrivates.begin();
5239 const auto *ILHS = OrgLHSExprs.begin();
5240 const auto *IRHS = OrgRHSExprs.begin();
5241 for (
const Expr *E : OrgReductionOps) {
5254 FilteredRHSExprs, FilteredReductionOps;
5255 for (
unsigned I : llvm::seq<unsigned>(
5256 std::min(OrgReductionOps.size(), OrgLHSExprs.size()))) {
5258 FilteredPrivates.emplace_back(OrgPrivates[I]);
5259 FilteredLHSExprs.emplace_back(OrgLHSExprs[I]);
5260 FilteredRHSExprs.emplace_back(OrgRHSExprs[I]);
5261 FilteredReductionOps.emplace_back(OrgReductionOps[I]);
5273 auto Size = RHSExprs.size();
5274 for (
const Expr *E : Privates) {
5279 llvm::APInt ArraySize(32, Size);
5280 QualType ReductionArrayTy =
C.getConstantArrayType(
5284 CGF.
CreateMemTemp(ReductionArrayTy,
".omp.reduction.red_list");
5285 const auto *IPriv = Privates.begin();
5287 for (
unsigned I = 0, E = RHSExprs.size(); I < E; ++I, ++IPriv, ++Idx) {
5293 if ((*IPriv)->getType()->isVariablyModifiedType()) {
5297 llvm::Value *Size = CGF.
Builder.CreateIntCast(
5310 Privates, LHSExprs, RHSExprs, ReductionOps);
5313 std::string Name =
getName({
"reduction"});
5320 llvm::Value *ReductionArrayTySize = CGF.
getTypeSize(ReductionArrayTy);
5323 llvm::Value *Args[] = {
5326 CGF.
Builder.getInt32(RHSExprs.size()),
5327 ReductionArrayTySize,
5335 WithNowait ? OMPRTL___kmpc_reduce_nowait : OMPRTL___kmpc_reduce),
5339 llvm::BasicBlock *DefaultBB = CGF.
createBasicBlock(
".omp.reduction.default");
5340 llvm::SwitchInst *SwInst =
5341 CGF.
Builder.CreateSwitch(Res, DefaultBB, 2);
5350 SwInst->addCase(CGF.
Builder.getInt32(1), Case1BB);
5354 llvm::Value *EndArgs[] = {
5359 auto &&
CodeGen = [Privates, LHSExprs, RHSExprs, ReductionOps](
5362 const auto *IPriv = Privates.begin();
5363 const auto *ILHS = LHSExprs.begin();
5364 const auto *IRHS = RHSExprs.begin();
5365 for (
const Expr *E : ReductionOps) {
5374 CommonActionTy Action(
5377 CGM.getModule(), WithNowait ? OMPRTL___kmpc_end_reduce_nowait
5378 : OMPRTL___kmpc_end_reduce),
5391 SwInst->addCase(CGF.
Builder.getInt32(2), Case2BB);
5394 auto &&AtomicCodeGen = [Loc, Privates, LHSExprs, RHSExprs, ReductionOps](
5396 const auto *ILHS = LHSExprs.begin();
5397 const auto *IRHS = RHSExprs.begin();
5398 const auto *IPriv = Privates.begin();
5399 for (
const Expr *E : ReductionOps) {
5400 const Expr *XExpr =
nullptr;
5401 const Expr *EExpr =
nullptr;
5402 const Expr *UpExpr =
nullptr;
5404 if (
const auto *BO = dyn_cast<BinaryOperator>(E)) {
5405 if (BO->getOpcode() == BO_Assign) {
5406 XExpr = BO->getLHS();
5407 UpExpr = BO->getRHS();
5411 const Expr *RHSExpr = UpExpr;
5414 if (
const auto *ACO = dyn_cast<AbstractConditionalOperator>(
5418 RHSExpr = ACO->getCond();
5420 if (
const auto *BORHS =
5422 EExpr = BORHS->getRHS();
5423 BO = BORHS->getOpcode();
5428 auto &&AtomicRedGen = [BO, VD,
5430 const Expr *EExpr,
const Expr *UpExpr) {
5431 LValue X = CGF.EmitLValue(XExpr);
5434 E = CGF.EmitAnyExpr(EExpr);
5435 CGF.EmitOMPAtomicSimpleUpdateExpr(
5437 llvm::AtomicOrdering::Monotonic, Loc,
5438 [&CGF, UpExpr, VD, Loc](
RValue XRValue) {
5440 Address LHSTemp = CGF.CreateMemTemp(VD->getType());
5441 CGF.emitOMPSimpleStore(
5442 CGF.MakeAddrLValue(LHSTemp, VD->
getType()), XRValue,
5443 VD->getType().getNonReferenceType(), Loc);
5446 return CGF.EmitAnyExpr(UpExpr);
5449 if ((*IPriv)->getType()->isArrayType()) {
5451 const auto *RHSVar =
5454 AtomicRedGen, XExpr, EExpr, UpExpr);
5457 AtomicRedGen(CGF, XExpr, EExpr, UpExpr);
5464 std::string Name = RT.
getName({
"atomic_reduction"});
5473 if ((*IPriv)->getType()->isArrayType()) {
5474 const auto *LHSVar =
5476 const auto *RHSVar =
5481 CritRedGen(CGF,
nullptr,
nullptr,
nullptr);
5492 llvm::Value *EndArgs[] = {
5497 CommonActionTy Action(
nullptr, {},
5499 CGM.getModule(), OMPRTL___kmpc_end_reduce),
5509 assert(OrgLHSExprs.size() == OrgPrivates.size() &&
5510 "PrivateVarReduction: Privates size mismatch");
5511 assert(OrgLHSExprs.size() == OrgReductionOps.size() &&
5512 "PrivateVarReduction: ReductionOps size mismatch");
5513 for (
unsigned I : llvm::seq<unsigned>(
5514 std::min(OrgReductionOps.size(), OrgLHSExprs.size()))) {
5517 OrgRHSExprs[I], OrgReductionOps[I]);
5526 llvm::raw_svector_ostream Out(Buffer);
5534 Out << Prefix << Name <<
"_"
5536 return std::string(Out.str());
5558 Args.emplace_back(&Param);
5559 Args.emplace_back(&ParamOrig);
5560 const auto &FnInfo =
5564 auto *Fn = llvm::Function::Create(FnTy, llvm::GlobalValue::InternalLinkage,
5567 Fn->setDoesNotRecurse();
5574 llvm::Value *Size =
nullptr;
5617 const Expr *ReductionOp,
5619 const Expr *PrivateRef) {
5628 Args.emplace_back(&ParamInOut);
5629 Args.emplace_back(&ParamIn);
5630 const auto &FnInfo =
5634 auto *Fn = llvm::Function::Create(FnTy, llvm::GlobalValue::InternalLinkage,
5637 Fn->setDoesNotRecurse();
5640 llvm::Value *Size =
nullptr;
5661 C.getPointerType(LHSVD->getType())->castAs<
PointerType>()));
5668 C.getPointerType(RHSVD->getType())->castAs<
PointerType>()));
5697 Args.emplace_back(&Param);
5698 const auto &FnInfo =
5702 auto *Fn = llvm::Function::Create(FnTy, llvm::GlobalValue::InternalLinkage,
5705 Fn->setDoesNotRecurse();
5710 llvm::Value *Size =
nullptr;
5745 RecordDecl *RD =
C.buildImplicitRecord(
"kmp_taskred_input_t");
5754 C, RD,
C.getIntTypeForBitwidth(32,
false));
5757 unsigned Size =
Data.ReductionVars.size();
5758 llvm::APInt ArraySize(64, Size);
5760 C.getConstantArrayType(RDType, ArraySize,
nullptr,
5765 Data.ReductionCopies,
Data.ReductionOps);
5766 for (
unsigned Cnt = 0; Cnt < Size; ++Cnt) {
5768 llvm::Value *Idxs[] = {llvm::ConstantInt::get(
CGM.SizeTy, 0),
5769 llvm::ConstantInt::get(
CGM.SizeTy, Cnt)};
5785 llvm::Value *SizeValInChars;
5786 llvm::Value *SizeVal;
5787 std::tie(SizeValInChars, SizeVal) = RCG.
getSizes(Cnt);
5793 bool DelayedCreation = !!SizeVal;
5794 SizeValInChars = CGF.
Builder.CreateIntCast(SizeValInChars,
CGM.SizeTy,
5805 llvm::Value *FiniAddr =
5806 Fini ? Fini : llvm::ConstantPointerNull::get(
CGM.VoidPtrTy);
5811 CGM, Loc, RCG, Cnt,
Data.ReductionOps[Cnt], LHSExprs[Cnt],
5812 RHSExprs[Cnt],
Data.ReductionCopies[Cnt]);
5816 if (DelayedCreation) {
5818 llvm::ConstantInt::get(
CGM.Int32Ty, 1,
true),
5823 if (
Data.IsReductionWithTaskMod) {
5829 llvm::Value *Args[] = {
5831 llvm::ConstantInt::get(
CGM.IntTy,
Data.IsWorksharingReduction ? 1 : 0,
5833 llvm::ConstantInt::get(
CGM.IntTy, Size,
true),
5838 CGM.getModule(), OMPRTL___kmpc_taskred_modifier_init),
5842 llvm::Value *Args[] = {
5845 llvm::ConstantInt::get(
CGM.IntTy, Size,
true),
5849 CGM.getModule(), OMPRTL___kmpc_taskred_init),
5855 bool IsWorksharingReduction) {
5861 llvm::Value *Args[] = {IdentTLoc, GTid,
5862 llvm::ConstantInt::get(
CGM.IntTy,
5863 IsWorksharingReduction ? 1 : 0,
5867 CGM.getModule(), OMPRTL___kmpc_task_reduction_modifier_fini),
5879 llvm::Value *SizeVal = CGF.
Builder.CreateIntCast(Sizes.second,
CGM.SizeTy,
5882 CGF,
CGM.getContext().getSizeType(),
5890 llvm::Value *ReductionsPtr,
5903 CGM.getModule(), OMPRTL___kmpc_task_reduction_get_th_data),
5919 auto &M =
CGM.getModule();
5921 llvm::Value *NumOfElements;
5922 std::tie(NumOfElements, DependenciesArray) =
5924 if (!
Data.Dependences.empty()) {
5925 llvm::Value *DepWaitTaskArgs[7];
5926 DepWaitTaskArgs[0] = UpLoc;
5927 DepWaitTaskArgs[1] = ThreadID;
5928 DepWaitTaskArgs[2] = NumOfElements;
5930 DepWaitTaskArgs[4] = CGF.
Builder.getInt32(0);
5931 DepWaitTaskArgs[5] = llvm::ConstantPointerNull::get(CGF.
VoidPtrTy);
5932 DepWaitTaskArgs[6] =
5933 llvm::ConstantInt::get(CGF.
Int32Ty,
Data.HasNowaitClause);
5942 M, OMPRTL___kmpc_omp_taskwait_deps_51),
5949 llvm::Value *Args[] = {UpLoc, ThreadID};
5952 OMPBuilder.getOrCreateRuntimeFunction(M, OMPRTL___kmpc_omp_taskwait),
5957 if (
auto *Region = dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.
CapturedStmtInfo))
5958 Region->emitUntiedSwitch(CGF);
5967 InlinedOpenMPRegionRAII Region(CGF,
CodeGen, InnerKind, HasCancel,
5968 InnerKind != OMPD_critical &&
5969 InnerKind != OMPD_master &&
5970 InnerKind != OMPD_masked);
5985 RTCancelKind CancelKind = CancelNoreq;
5986 if (CancelRegion == OMPD_parallel)
5987 CancelKind = CancelParallel;
5988 else if (CancelRegion == OMPD_for)
5989 CancelKind = CancelLoop;
5990 else if (CancelRegion == OMPD_sections)
5991 CancelKind = CancelSections;
5993 assert(CancelRegion == OMPD_taskgroup);
5994 CancelKind = CancelTaskgroup;
6006 if (
auto *OMPRegionInfo =
6010 if (CancelRegion == OMPD_taskgroup || OMPRegionInfo->hasCancel()) {
6011 llvm::Value *Args[] = {
6017 CGM.getModule(), OMPRTL___kmpc_cancellationpoint),
6026 CGF.
Builder.CreateCondBr(Cmp, ExitBB, ContBB);
6028 if (CancelRegion == OMPD_parallel)
6046 auto &M =
CGM.getModule();
6047 if (
auto *OMPRegionInfo =
6049 auto &&ThenGen = [
this, &M, Loc, CancelRegion,
6052 llvm::Value *Args[] = {
6056 llvm::Value *
Result = CGF.EmitRuntimeCall(
6057 OMPBuilder.getOrCreateRuntimeFunction(M, OMPRTL___kmpc_cancel), Args);
6062 llvm::BasicBlock *ExitBB = CGF.createBasicBlock(
".cancel.exit");
6063 llvm::BasicBlock *ContBB = CGF.createBasicBlock(
".cancel.continue");
6064 llvm::Value *Cmp = CGF.Builder.CreateIsNotNull(
Result);
6065 CGF.Builder.CreateCondBr(Cmp, ExitBB, ContBB);
6066 CGF.EmitBlock(ExitBB);
6067 if (CancelRegion == OMPD_parallel)
6071 CGF.getOMPCancelDestination(OMPRegionInfo->getDirectiveKind());
6072 CGF.EmitBranchThroughCleanup(CancelDest);
6073 CGF.EmitBlock(ContBB,
true);
6091 OMPUsesAllocatorsActionTy(
6092 ArrayRef<std::pair<const Expr *, const Expr *>> Allocators)
6093 : Allocators(Allocators) {}
6097 for (
const auto &AllocatorData : Allocators) {
6099 CGF, AllocatorData.first, AllocatorData.second);
6102 void Exit(CodeGenFunction &CGF)
override {
6105 for (
const auto &AllocatorData : Allocators) {
6107 AllocatorData.first);
6115 llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID,
6117 assert(!ParentName.empty() &&
"Invalid target entry parent name!");
6121 for (
unsigned I = 0, E =
C->getNumberOfAllocators(); I < E; ++I) {
6128 OMPUsesAllocatorsActionTy UsesAllocatorAction(Allocators);
6129 CodeGen.setAction(UsesAllocatorAction);
6135 const Expr *Allocator,
6136 const Expr *AllocatorTraits) {
6138 ThreadId = CGF.
Builder.CreateIntCast(ThreadId, CGF.
IntTy,
true);
6140 llvm::Value *MemSpaceHandle = llvm::ConstantPointerNull::get(CGF.
VoidPtrTy);
6141 llvm::Value *NumTraits = llvm::ConstantInt::get(
6145 .getLimitedValue());
6152 llvm::Value *Traits =
Addr.emitRawPointer(CGF);
6154 llvm::Value *AllocatorVal =
6156 CGM.getModule(), OMPRTL___kmpc_init_allocator),
6157 {ThreadId, MemSpaceHandle, NumTraits, Traits});
6169 const Expr *Allocator) {
6171 ThreadId = CGF.
Builder.CreateIntCast(ThreadId, CGF.
IntTy,
true);
6173 llvm::Value *AllocatorVal =
6180 OMPRTL___kmpc_destroy_allocator),
6181 {ThreadId, AllocatorVal});
6186 llvm::OpenMPIRBuilder::TargetKernelDefaultAttrs &Attrs) {
6187 assert(Attrs.MaxTeams.size() == 1 && Attrs.MaxThreads.size() == 1 &&
6188 "invalid default attrs structure");
6189 int32_t &MaxTeamsVal = Attrs.MaxTeams.front();
6190 int32_t &MaxThreadsVal = Attrs.MaxThreads.front();
6197 for (
auto *A :
C->getAttrs()) {
6198 int32_t AttrMinThreadsVal = 1, AttrMaxThreadsVal = -1;
6199 int32_t AttrMinBlocksVal = 1, AttrMaxBlocksVal = -1;
6200 if (
auto *
Attr = dyn_cast<CUDALaunchBoundsAttr>(A))
6201 CGM.handleCUDALaunchBoundsAttr(
nullptr,
Attr, &AttrMaxThreadsVal,
6202 &AttrMinBlocksVal, &AttrMaxBlocksVal);
6203 else if (
auto *
Attr = dyn_cast<AMDGPUFlatWorkGroupSizeAttr>(A))
6204 CGM.handleAMDGPUFlatWorkGroupSizeAttr(
6205 nullptr,
Attr,
nullptr, &AttrMinThreadsVal,
6206 &AttrMaxThreadsVal);
6210 Attrs.MinThreads = std::max(Attrs.MinThreads, AttrMinThreadsVal);
6211 if (AttrMaxThreadsVal > 0)
6212 MaxThreadsVal = MaxThreadsVal > 0
6213 ? std::min(MaxThreadsVal, AttrMaxThreadsVal)
6214 : AttrMaxThreadsVal;
6215 Attrs.MinTeams = std::max(Attrs.MinTeams, AttrMinBlocksVal);
6216 if (AttrMaxBlocksVal > 0)
6217 MaxTeamsVal = MaxTeamsVal > 0 ? std::min(MaxTeamsVal, AttrMaxBlocksVal)
6225 llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID,
6228 llvm::TargetRegionEntryInfo EntryInfo =
6232 llvm::OpenMPIRBuilder::FunctionGenCallback &&GenerateOutlinedFunction =
6233 [&CGF, &D, &
CodeGen](StringRef EntryFnName) {
6234 const CapturedStmt &CS = *D.getCapturedStmt(OMPD_target);
6236 CGOpenMPTargetRegionInfo CGInfo(CS,
CodeGen, EntryFnName);
6241 cantFail(
OMPBuilder.emitTargetRegionFunction(
6242 EntryInfo, GenerateOutlinedFunction, IsOffloadEntry, OutlinedFn,
6248 CGM.getTargetCodeGenInfo().setTargetAttributes(
nullptr, OutlinedFn,
CGM);
6251 for (
auto *A :
C->getAttrs()) {
6252 if (
auto *
Attr = dyn_cast<AMDGPUWavesPerEUAttr>(A))
6253 CGM.handleAMDGPUWavesPerEUAttr(OutlinedFn,
Attr);
6271 while (
const auto *
C = dyn_cast_or_null<CompoundStmt>(Child)) {
6273 for (
const Stmt *S :
C->body()) {
6274 if (
const auto *E = dyn_cast<Expr>(S)) {
6283 if (
const auto *DS = dyn_cast<DeclStmt>(S)) {
6284 if (llvm::all_of(DS->decls(), [](
const Decl *D) {
6285 if (isa<EmptyDecl>(D) || isa<DeclContext>(D) ||
6286 isa<TypeDecl>(D) || isa<PragmaCommentDecl>(D) ||
6287 isa<PragmaDetectMismatchDecl>(D) || isa<UsingDecl>(D) ||
6288 isa<UsingDirectiveDecl>(D) ||
6289 isa<OMPDeclareReductionDecl>(D) ||
6290 isa<OMPThreadPrivateDecl>(D) || isa<OMPAllocateDecl>(D))
6292 const auto *VD = dyn_cast<VarDecl>(D);
6295 return VD->hasGlobalStorage() || !VD->isUsed();
6305 Child = Child->IgnoreContainers();
6312 int32_t &MaxTeamsVal) {
6316 "Expected target-based executable directive.");
6317 switch (DirectiveKind) {
6319 const auto *CS = D.getInnermostCapturedStmt();
6322 const Stmt *ChildStmt =
6324 if (
const auto *NestedDir =
6325 dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
6334 MinTeamsVal = MaxTeamsVal = Constant->getExtValue();
6337 MinTeamsVal = MaxTeamsVal = 0;
6340 MinTeamsVal = MaxTeamsVal = 1;
6344 MinTeamsVal = MaxTeamsVal = -1;
6347 case OMPD_target_teams_loop:
6348 case OMPD_target_teams:
6349 case OMPD_target_teams_distribute:
6350 case OMPD_target_teams_distribute_simd:
6351 case OMPD_target_teams_distribute_parallel_for:
6352 case OMPD_target_teams_distribute_parallel_for_simd: {
6354 const Expr *NumTeams =
6358 MinTeamsVal = MaxTeamsVal = Constant->getExtValue();
6361 MinTeamsVal = MaxTeamsVal = 0;
6364 case OMPD_target_parallel:
6365 case OMPD_target_parallel_for:
6366 case OMPD_target_parallel_for_simd:
6367 case OMPD_target_parallel_loop:
6368 case OMPD_target_simd:
6369 MinTeamsVal = MaxTeamsVal = 1;
6373 case OMPD_parallel_for:
6374 case OMPD_parallel_loop:
6375 case OMPD_parallel_master:
6376 case OMPD_parallel_sections:
6378 case OMPD_parallel_for_simd:
6380 case OMPD_cancellation_point:
6382 case OMPD_threadprivate:
6393 case OMPD_taskyield:
6396 case OMPD_taskgroup:
6402 case OMPD_target_data:
6403 case OMPD_target_exit_data:
6404 case OMPD_target_enter_data:
6405 case OMPD_distribute:
6406 case OMPD_distribute_simd:
6407 case OMPD_distribute_parallel_for:
6408 case OMPD_distribute_parallel_for_simd:
6409 case OMPD_teams_distribute:
6410 case OMPD_teams_distribute_simd:
6411 case OMPD_teams_distribute_parallel_for:
6412 case OMPD_teams_distribute_parallel_for_simd:
6413 case OMPD_target_update:
6414 case OMPD_declare_simd:
6415 case OMPD_declare_variant:
6416 case OMPD_begin_declare_variant:
6417 case OMPD_end_declare_variant:
6418 case OMPD_declare_target:
6419 case OMPD_end_declare_target:
6420 case OMPD_declare_reduction:
6421 case OMPD_declare_mapper:
6423 case OMPD_taskloop_simd:
6424 case OMPD_master_taskloop:
6425 case OMPD_master_taskloop_simd:
6426 case OMPD_parallel_master_taskloop:
6427 case OMPD_parallel_master_taskloop_simd:
6429 case OMPD_metadirective:
6435 llvm_unreachable(
"Unexpected directive kind.");
6441 "Clauses associated with the teams directive expected to be emitted "
6442 "only for the host!");
6444 int32_t MinNT = -1, MaxNT = -1;
6445 const Expr *NumTeams =
6447 if (NumTeams !=
nullptr) {
6450 switch (DirectiveKind) {
6452 const auto *CS = D.getInnermostCapturedStmt();
6453 CGOpenMPInnerExprInfo CGInfo(CGF, *CS);
6457 return Bld.CreateIntCast(NumTeamsVal, CGF.
Int32Ty,
6460 case OMPD_target_teams:
6461 case OMPD_target_teams_distribute:
6462 case OMPD_target_teams_distribute_simd:
6463 case OMPD_target_teams_distribute_parallel_for:
6464 case OMPD_target_teams_distribute_parallel_for_simd: {
6468 return Bld.CreateIntCast(NumTeamsVal, CGF.
Int32Ty,
6476 assert(MinNT == MaxNT &&
"Num threads ranges require handling here.");
6477 return llvm::ConstantInt::get(CGF.
Int32Ty, MinNT);
6485 const Expr **E, int32_t &UpperBound,
6486 bool UpperBoundOnly, llvm::Value **CondVal) {
6489 const auto *Dir = dyn_cast_or_null<OMPExecutableDirective>(Child);
6496 if (CondVal && Dir->hasClausesOfKind<
OMPIfClause>()) {
6497 CGOpenMPInnerExprInfo CGInfo(CGF, *CS);
6500 for (
const auto *
C : Dir->getClausesOfKind<
OMPIfClause>()) {
6501 if (
C->getNameModifier() == OMPD_unknown ||
6502 C->getNameModifier() == OMPD_parallel) {
6517 if (
const auto *PreInit =
6519 for (
const auto *I : PreInit->decls()) {
6520 if (!I->hasAttr<OMPCaptureNoInitAttr>()) {
6536 CGOpenMPInnerExprInfo CGInfo(CGF, *CS);
6538 const auto *NumThreadsClause =
6540 const Expr *NTExpr = NumThreadsClause->getNumThreads();
6541 if (NTExpr->isIntegerConstantExpr(CGF.
getContext()))
6542 if (
auto Constant = NTExpr->getIntegerConstantExpr(CGF.
getContext()))
6545 ? Constant->getZExtValue()
6546 : std::min(UpperBound,
6547 static_cast<int32_t
>(Constant->getZExtValue()));
6550 if (UpperBound == -1)
6555 if (
const auto *PreInit =
6556 cast_or_null<DeclStmt>(NumThreadsClause->getPreInitStmt())) {
6557 for (
const auto *I : PreInit->decls()) {
6558 if (!I->hasAttr<OMPCaptureNoInitAttr>()) {
6577 bool UpperBoundOnly, llvm::Value **CondVal,
const Expr **ThreadLimitExpr) {
6578 assert((!CGF.
getLangOpts().OpenMPIsTargetDevice || UpperBoundOnly) &&
6579 "Clauses associated with the teams directive expected to be emitted "
6580 "only for the host!");
6583 "Expected target-based executable directive.");
6585 const Expr *NT =
nullptr;
6586 const Expr **NTPtr = UpperBoundOnly ?
nullptr : &NT;
6588 auto CheckForConstExpr = [&](
const Expr *E,
const Expr **EPtr) {
6591 UpperBound = UpperBound ? Constant->getZExtValue()
6592 : std::min(UpperBound,
6593 int32_t(Constant->getZExtValue()));
6597 if (UpperBound == -1)
6603 auto ReturnSequential = [&]() {
6608 switch (DirectiveKind) {
6611 getNumThreads(CGF, CS, NTPtr, UpperBound, UpperBoundOnly, CondVal);
6617 if (
const auto *Dir = dyn_cast_or_null<OMPExecutableDirective>(Child)) {
6619 ThreadLimitClause = TLC;
6620 if (ThreadLimitExpr) {
6621 CGOpenMPInnerExprInfo CGInfo(CGF, *CS);
6625 ThreadLimitClause->getThreadLimit().front()->getSourceRange());
6626 if (
const auto *PreInit =
6627 cast_or_null<DeclStmt>(ThreadLimitClause->getPreInitStmt())) {
6628 for (
const auto *I : PreInit->decls()) {
6629 if (!I->hasAttr<OMPCaptureNoInitAttr>()) {
6641 if (ThreadLimitClause)
6642 CheckForConstExpr(ThreadLimitClause->getThreadLimit().front(),
6644 if (
const auto *Dir = dyn_cast_or_null<OMPExecutableDirective>(Child)) {
6647 CS = Dir->getInnermostCapturedStmt();
6650 Dir = dyn_cast_or_null<OMPExecutableDirective>(Child);
6653 CS = Dir->getInnermostCapturedStmt();
6654 getNumThreads(CGF, CS, NTPtr, UpperBound, UpperBoundOnly, CondVal);
6656 return ReturnSequential();
6660 case OMPD_target_teams: {
6664 CheckForConstExpr(ThreadLimitClause->getThreadLimit().front(),
6668 getNumThreads(CGF, CS, NTPtr, UpperBound, UpperBoundOnly, CondVal);
6671 if (
const auto *Dir = dyn_cast_or_null<OMPExecutableDirective>(Child)) {
6672 if (Dir->getDirectiveKind() == OMPD_distribute) {
6673 CS = Dir->getInnermostCapturedStmt();
6674 getNumThreads(CGF, CS, NTPtr, UpperBound, UpperBoundOnly, CondVal);
6679 case OMPD_target_teams_distribute:
6683 CheckForConstExpr(ThreadLimitClause->getThreadLimit().front(),
6686 getNumThreads(CGF, D.getInnermostCapturedStmt(), NTPtr, UpperBound,
6687 UpperBoundOnly, CondVal);
6689 case OMPD_target_teams_loop:
6690 case OMPD_target_parallel_loop:
6691 case OMPD_target_parallel:
6692 case OMPD_target_parallel_for:
6693 case OMPD_target_parallel_for_simd:
6694 case OMPD_target_teams_distribute_parallel_for:
6695 case OMPD_target_teams_distribute_parallel_for_simd: {
6696 if (CondVal && D.hasClausesOfKind<
OMPIfClause>()) {
6698 for (
const auto *
C : D.getClausesOfKind<
OMPIfClause>()) {
6699 if (
C->getNameModifier() == OMPD_unknown ||
6700 C->getNameModifier() == OMPD_parallel) {
6710 return ReturnSequential();
6720 CheckForConstExpr(ThreadLimitClause->getThreadLimit().front(),
6726 CheckForConstExpr(NumThreadsClause->getNumThreads(),
nullptr);
6727 return NumThreadsClause->getNumThreads();
6731 case OMPD_target_teams_distribute_simd:
6732 case OMPD_target_simd:
6733 return ReturnSequential();
6737 llvm_unreachable(
"Unsupported directive kind.");
6742 llvm::Value *NumThreadsVal =
nullptr;
6743 llvm::Value *CondVal =
nullptr;
6744 llvm::Value *ThreadLimitVal =
nullptr;
6745 const Expr *ThreadLimitExpr =
nullptr;
6746 int32_t UpperBound = -1;
6749 CGF, D, UpperBound,
false, &CondVal,
6753 if (ThreadLimitExpr) {
6756 ThreadLimitVal = CGF.
Builder.CreateIntCast(ThreadLimitVal, CGF.
Int32Ty,
6761 if (UpperBound == 1) {
6762 NumThreadsVal = CGF.
Builder.getInt32(UpperBound);
6765 NumThreadsVal = CGF.
Builder.CreateIntCast(NumThreadsVal, CGF.
Int32Ty,
6767 }
else if (ThreadLimitVal) {
6770 NumThreadsVal = ThreadLimitVal;
6771 ThreadLimitVal =
nullptr;
6774 assert(!ThreadLimitVal &&
"Default not applicable with thread limit value");
6775 NumThreadsVal = CGF.
Builder.getInt32(0);
6782 NumThreadsVal = CGF.
Builder.CreateSelect(CondVal, NumThreadsVal,
6788 if (ThreadLimitVal) {
6789 NumThreadsVal = CGF.
Builder.CreateSelect(
6790 CGF.
Builder.CreateICmpULT(ThreadLimitVal, NumThreadsVal),
6791 ThreadLimitVal, NumThreadsVal);
6794 return NumThreadsVal;
6804class MappableExprsHandler {
6810 struct AttachPtrExprComparator {
6811 const MappableExprsHandler *Handler =
nullptr;
6813 mutable llvm::DenseMap<std::pair<const Expr *, const Expr *>,
bool>
6814 CachedEqualityComparisons;
6816 AttachPtrExprComparator(
const MappableExprsHandler *H) : Handler(H) {}
6819 bool operator()(
const Expr *LHS,
const Expr *RHS)
const {
6824 const auto ItLHS = Handler->AttachPtrComponentDepthMap.find(LHS);
6825 const auto ItRHS = Handler->AttachPtrComponentDepthMap.find(RHS);
6827 std::optional<size_t> DepthLHS =
6828 (ItLHS != Handler->AttachPtrComponentDepthMap.end()) ? ItLHS->second
6830 std::optional<size_t> DepthRHS =
6831 (ItRHS != Handler->AttachPtrComponentDepthMap.end()) ? ItRHS->second
6835 if (!DepthLHS.has_value() && !DepthRHS.has_value()) {
6837 if (areEqual(LHS, RHS))
6840 return wasComputedBefore(LHS, RHS);
6842 if (!DepthLHS.has_value())
6844 if (!DepthRHS.has_value())
6848 if (DepthLHS.value() != DepthRHS.value())
6849 return DepthLHS.value() < DepthRHS.value();
6852 if (areEqual(LHS, RHS))
6855 return wasComputedBefore(LHS, RHS);
6861 bool areEqual(
const Expr *LHS,
const Expr *RHS)
const {
6863 const auto CachedResultIt = CachedEqualityComparisons.find({LHS, RHS});
6864 if (CachedResultIt != CachedEqualityComparisons.end())
6865 return CachedResultIt->second;
6879 bool wasComputedBefore(
const Expr *LHS,
const Expr *RHS)
const {
6880 const size_t &OrderLHS = Handler->AttachPtrComputationOrderMap.at(LHS);
6881 const size_t &OrderRHS = Handler->AttachPtrComputationOrderMap.at(RHS);
6883 return OrderLHS < OrderRHS;
6892 bool areSemanticallyEqual(
const Expr *LHS,
const Expr *RHS)
const {
6914 if (
const auto *LD = dyn_cast<DeclRefExpr>(LHS)) {
6915 const auto *RD = dyn_cast<DeclRefExpr>(RHS);
6918 return LD->getDecl()->getCanonicalDecl() ==
6919 RD->getDecl()->getCanonicalDecl();
6923 if (
const auto *LA = dyn_cast<ArraySubscriptExpr>(LHS)) {
6924 const auto *RA = dyn_cast<ArraySubscriptExpr>(RHS);
6927 return areSemanticallyEqual(LA->getBase(), RA->getBase()) &&
6928 areSemanticallyEqual(LA->getIdx(), RA->getIdx());
6932 if (
const auto *LM = dyn_cast<MemberExpr>(LHS)) {
6933 const auto *RM = dyn_cast<MemberExpr>(RHS);
6936 if (LM->getMemberDecl()->getCanonicalDecl() !=
6937 RM->getMemberDecl()->getCanonicalDecl())
6939 return areSemanticallyEqual(LM->getBase(), RM->getBase());
6943 if (
const auto *LU = dyn_cast<UnaryOperator>(LHS)) {
6944 const auto *RU = dyn_cast<UnaryOperator>(RHS);
6947 if (LU->getOpcode() != RU->getOpcode())
6949 return areSemanticallyEqual(LU->getSubExpr(), RU->getSubExpr());
6953 if (
const auto *LB = dyn_cast<BinaryOperator>(LHS)) {
6954 const auto *RB = dyn_cast<BinaryOperator>(RHS);
6957 if (LB->getOpcode() != RB->getOpcode())
6959 return areSemanticallyEqual(LB->getLHS(), RB->getLHS()) &&
6960 areSemanticallyEqual(LB->getRHS(), RB->getRHS());
6966 if (
const auto *LAS = dyn_cast<ArraySectionExpr>(LHS)) {
6967 const auto *RAS = dyn_cast<ArraySectionExpr>(RHS);
6970 return areSemanticallyEqual(LAS->getBase(), RAS->getBase()) &&
6971 areSemanticallyEqual(LAS->getLowerBound(),
6972 RAS->getLowerBound()) &&
6973 areSemanticallyEqual(LAS->getLength(), RAS->getLength());
6977 if (
const auto *LC = dyn_cast<CastExpr>(LHS)) {
6978 const auto *RC = dyn_cast<CastExpr>(RHS);
6981 if (LC->getCastKind() != RC->getCastKind())
6983 return areSemanticallyEqual(LC->getSubExpr(), RC->getSubExpr());
6991 if (
const auto *LI = dyn_cast<IntegerLiteral>(LHS)) {
6992 const auto *RI = dyn_cast<IntegerLiteral>(RHS);
6995 return LI->getValue() == RI->getValue();
6999 if (
const auto *LC = dyn_cast<CharacterLiteral>(LHS)) {
7000 const auto *RC = dyn_cast<CharacterLiteral>(RHS);
7003 return LC->getValue() == RC->getValue();
7007 if (
const auto *LF = dyn_cast<FloatingLiteral>(LHS)) {
7008 const auto *RF = dyn_cast<FloatingLiteral>(RHS);
7012 return LF->getValue().bitwiseIsEqual(RF->getValue());
7016 if (
const auto *LS = dyn_cast<StringLiteral>(LHS)) {
7017 const auto *RS = dyn_cast<StringLiteral>(RHS);
7020 return LS->getString() == RS->getString();
7028 if (
const auto *LB = dyn_cast<CXXBoolLiteralExpr>(LHS)) {
7029 const auto *RB = dyn_cast<CXXBoolLiteralExpr>(RHS);
7032 return LB->getValue() == RB->getValue();
7041 static unsigned getFlagMemberOffset() {
7042 unsigned Offset = 0;
7043 for (uint64_t Remain =
7044 static_cast<std::underlying_type_t<OpenMPOffloadMappingFlags>
>(
7045 OpenMPOffloadMappingFlags::OMP_MAP_MEMBER_OF);
7046 !(Remain & 1); Remain = Remain >> 1)
7053 class MappingExprInfo {
7055 const ValueDecl *MapDecl =
nullptr;
7058 const Expr *MapExpr =
nullptr;
7061 MappingExprInfo(
const ValueDecl *MapDecl,
const Expr *MapExpr =
nullptr)
7062 : MapDecl(MapDecl), MapExpr(MapExpr) {}
7064 const ValueDecl *getMapDecl()
const {
return MapDecl; }
7065 const Expr *getMapExpr()
const {
return MapExpr; }
7068 using DeviceInfoTy = llvm::OpenMPIRBuilder::DeviceInfoTy;
7069 using MapBaseValuesArrayTy = llvm::OpenMPIRBuilder::MapValuesArrayTy;
7070 using MapValuesArrayTy = llvm::OpenMPIRBuilder::MapValuesArrayTy;
7071 using MapFlagsArrayTy = llvm::OpenMPIRBuilder::MapFlagsArrayTy;
7072 using MapDimArrayTy = llvm::OpenMPIRBuilder::MapDimArrayTy;
7073 using MapNonContiguousArrayTy =
7074 llvm::OpenMPIRBuilder::MapNonContiguousArrayTy;
7075 using MapExprsArrayTy = SmallVector<MappingExprInfo, 4>;
7076 using MapValueDeclsArrayTy = SmallVector<const ValueDecl *, 4>;
7080 bool ,
const ValueDecl *,
const Expr *>;
7081 using MapDataArrayTy = SmallVector<MapData, 4>;
7086 struct MapCombinedInfoTy : llvm::OpenMPIRBuilder::MapInfosTy {
7087 MapExprsArrayTy Exprs;
7088 MapValueDeclsArrayTy Mappers;
7089 MapValueDeclsArrayTy DevicePtrDecls;
7092 void append(MapCombinedInfoTy &CurInfo) {
7093 Exprs.append(CurInfo.Exprs.begin(), CurInfo.Exprs.end());
7094 DevicePtrDecls.append(CurInfo.DevicePtrDecls.begin(),
7095 CurInfo.DevicePtrDecls.end());
7096 Mappers.append(CurInfo.Mappers.begin(), CurInfo.Mappers.end());
7097 llvm::OpenMPIRBuilder::MapInfosTy::append(CurInfo);
7105 struct StructRangeInfoTy {
7106 MapCombinedInfoTy PreliminaryMapData;
7107 std::pair<
unsigned , Address > LowestElem = {
7109 std::pair<
unsigned , Address > HighestElem = {
7113 bool IsArraySection =
false;
7114 bool HasCompleteRecord =
false;
7119 struct AttachInfoTy {
7122 const ValueDecl *AttachPtrDecl =
nullptr;
7123 const Expr *AttachMapExpr =
nullptr;
7125 bool isValid()
const {
7132 bool hasAttachEntryForCapturedVar(
const ValueDecl *VD)
const {
7133 for (
const auto &AttachEntry : AttachPtrExprMap) {
7134 if (AttachEntry.second) {
7137 if (
const auto *DRE = dyn_cast<DeclRefExpr>(AttachEntry.second))
7138 if (DRE->getDecl() == VD)
7146 const Expr *getAttachPtrExpr(
7149 const auto It = AttachPtrExprMap.find(Components);
7150 if (It != AttachPtrExprMap.end())
7161 ArrayRef<OpenMPMapModifierKind> MapModifiers;
7162 ArrayRef<OpenMPMotionModifierKind> MotionModifiers;
7163 bool ReturnDevicePointer =
false;
7164 bool IsImplicit =
false;
7165 const ValueDecl *Mapper =
nullptr;
7166 const Expr *VarRef =
nullptr;
7167 bool ForDeviceAddr =
false;
7169 MapInfo() =
default;
7173 ArrayRef<OpenMPMapModifierKind> MapModifiers,
7174 ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
7175 bool ReturnDevicePointer,
bool IsImplicit,
7176 const ValueDecl *Mapper =
nullptr,
const Expr *VarRef =
nullptr,
7177 bool ForDeviceAddr =
false)
7178 : Components(Components), MapType(MapType), MapModifiers(MapModifiers),
7179 MotionModifiers(MotionModifiers),
7180 ReturnDevicePointer(ReturnDevicePointer), IsImplicit(IsImplicit),
7181 Mapper(Mapper), VarRef(VarRef), ForDeviceAddr(ForDeviceAddr) {}
7187 struct DeferredDevicePtrEntryTy {
7188 const Expr *IE =
nullptr;
7189 const ValueDecl *VD =
nullptr;
7190 bool ForDeviceAddr =
false;
7192 DeferredDevicePtrEntryTy(
const Expr *IE,
const ValueDecl *VD,
7194 : IE(IE), VD(VD), ForDeviceAddr(ForDeviceAddr) {}
7199 llvm::PointerUnion<
const OMPExecutableDirective *,
7200 const OMPDeclareMapperDecl *>
7204 CodeGenFunction &CGF;
7209 llvm::DenseMap<CanonicalDeclPtr<const VarDecl>,
bool> FirstPrivateDecls;
7215 SmallVector<OMPClauseMappableExprCommon::MappableExprComponentListRef, 4>>
7222 SmallVector<OMPClauseMappableExprCommon::MappableExprComponentListRef, 4>>
7226 llvm::DenseMap<const ValueDecl *, const OMPMapClause *> LambdasMap;
7241 llvm::DenseMap<const Expr *, std::optional<size_t>>
7242 AttachPtrComponentDepthMap = {{
nullptr, std::nullopt}};
7246 llvm::DenseMap<const Expr *, size_t> AttachPtrComputationOrderMap = {
7249 llvm::Value *getExprTypeSize(
const Expr *E)
const {
7253 if (
const auto *OAE = dyn_cast<OMPArrayShapingExpr>(E)) {
7255 CGF.
getTypeSize(OAE->getBase()->getType()->getPointeeType());
7256 for (
const Expr *SE : OAE->getDimensions()) {
7267 if (
const auto *RefTy = ExprTy->
getAs<ReferenceType>())
7273 if (
const auto *OAE = dyn_cast<ArraySectionExpr>(E)) {
7275 OAE->getBase()->IgnoreParenImpCasts())
7281 if (!OAE->getLength() && OAE->getColonLocFirst().isValid() &&
7282 !OAE->getLowerBound())
7285 llvm::Value *ElemSize;
7286 if (
const auto *PTy = BaseTy->
getAs<PointerType>()) {
7287 ElemSize = CGF.
getTypeSize(PTy->getPointeeType().getCanonicalType());
7290 assert(ATy &&
"Expecting array type if not a pointer type.");
7291 ElemSize = CGF.
getTypeSize(ATy->getElementType().getCanonicalType());
7296 if (!OAE->getLength() && OAE->getColonLocFirst().isInvalid())
7299 if (
const Expr *LenExpr = OAE->getLength()) {
7303 LenExpr->getExprLoc());
7304 return CGF.
Builder.CreateNUWMul(LengthVal, ElemSize);
7306 assert(!OAE->getLength() && OAE->getColonLocFirst().isValid() &&
7307 OAE->getLowerBound() &&
"expected array_section[lb:].");
7313 OAE->getLowerBound()->getExprLoc());
7314 LBVal = CGF.
Builder.CreateNUWMul(LBVal, ElemSize);
7315 llvm::Value *Cmp = CGF.
Builder.CreateICmpUGT(LengthVal, LBVal);
7316 llvm::Value *TrueVal = CGF.
Builder.CreateNUWSub(LengthVal, LBVal);
7317 LengthVal = CGF.
Builder.CreateSelect(
7318 Cmp, TrueVal, llvm::ConstantInt::get(CGF.
SizeTy, 0));
7328 OpenMPOffloadMappingFlags getMapTypeBits(
7330 ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
bool IsImplicit,
7331 bool AddPtrFlag,
bool AddIsTargetParamFlag,
bool IsNonContiguous)
const {
7332 OpenMPOffloadMappingFlags Bits =
7333 IsImplicit ? OpenMPOffloadMappingFlags::OMP_MAP_IMPLICIT
7334 : OpenMPOffloadMappingFlags::OMP_MAP_NONE;
7336 case OMPC_MAP_alloc:
7337 case OMPC_MAP_release:
7344 Bits |= OpenMPOffloadMappingFlags::OMP_MAP_TO;
7347 Bits |= OpenMPOffloadMappingFlags::OMP_MAP_FROM;
7349 case OMPC_MAP_tofrom:
7350 Bits |= OpenMPOffloadMappingFlags::OMP_MAP_TO |
7351 OpenMPOffloadMappingFlags::OMP_MAP_FROM;
7353 case OMPC_MAP_delete:
7354 Bits |= OpenMPOffloadMappingFlags::OMP_MAP_DELETE;
7357 llvm_unreachable(
"Unexpected map type!");
7360 Bits |= OpenMPOffloadMappingFlags::OMP_MAP_PTR_AND_OBJ;
7361 if (AddIsTargetParamFlag)
7362 Bits |= OpenMPOffloadMappingFlags::OMP_MAP_TARGET_PARAM;
7363 if (llvm::is_contained(MapModifiers, OMPC_MAP_MODIFIER_always))
7364 Bits |= OpenMPOffloadMappingFlags::OMP_MAP_ALWAYS;
7365 if (llvm::is_contained(MapModifiers, OMPC_MAP_MODIFIER_close))
7366 Bits |= OpenMPOffloadMappingFlags::OMP_MAP_CLOSE;
7367 if (llvm::is_contained(MapModifiers, OMPC_MAP_MODIFIER_present) ||
7368 llvm::is_contained(MotionModifiers, OMPC_MOTION_MODIFIER_present))
7369 Bits |= OpenMPOffloadMappingFlags::OMP_MAP_PRESENT;
7370 if (llvm::is_contained(MapModifiers, OMPC_MAP_MODIFIER_ompx_hold))
7371 Bits |= OpenMPOffloadMappingFlags::OMP_MAP_OMPX_HOLD;
7372 if (IsNonContiguous)
7373 Bits |= OpenMPOffloadMappingFlags::OMP_MAP_NON_CONTIG;
7379 bool isFinalArraySectionExpression(
const Expr *E)
const {
7380 const auto *OASE = dyn_cast<ArraySectionExpr>(E);
7387 if (OASE->getColonLocFirst().isInvalid())
7390 const Expr *Length = OASE->getLength();
7397 OASE->getBase()->IgnoreParenImpCasts())
7399 if (
const auto *ATy = dyn_cast<ConstantArrayType>(BaseQTy.
getTypePtr()))
7400 return ATy->getSExtSize() != 1;
7412 llvm::APSInt ConstLength =
Result.Val.getInt();
7413 return ConstLength.getSExtValue() != 1;
7420 class CopyOverlappedEntryGaps {
7421 CodeGenFunction &CGF;
7422 MapCombinedInfoTy &CombinedInfo;
7423 OpenMPOffloadMappingFlags Flags = OpenMPOffloadMappingFlags::OMP_MAP_NONE;
7424 const ValueDecl *MapDecl =
nullptr;
7425 const Expr *MapExpr =
nullptr;
7427 bool IsNonContiguous =
false;
7431 const RecordDecl *LastParent =
nullptr;
7433 unsigned LastIndex = -1u;
7437 CopyOverlappedEntryGaps(CodeGenFunction &CGF,
7438 MapCombinedInfoTy &CombinedInfo,
7439 OpenMPOffloadMappingFlags Flags,
7440 const ValueDecl *MapDecl,
const Expr *MapExpr,
7441 Address BP, Address LB,
bool IsNonContiguous,
7443 : CGF(CGF), CombinedInfo(CombinedInfo), Flags(Flags), MapDecl(MapDecl),
7444 MapExpr(MapExpr), BP(BP), IsNonContiguous(IsNonContiguous),
7445 DimSize(DimSize), LB(LB) {}
7448 const OMPClauseMappableExprCommon::MappableComponent &MC,
7449 const FieldDecl *FD,
7450 llvm::function_ref<LValue(CodeGenFunction &,
const MemberExpr *)>
7451 EmitMemberExprBase) {
7461 LValue BaseLVal = EmitMemberExprBase(CGF, ME);
7473 copyUntilField(FD, ComponentLB);
7476 if (((int64_t)FieldOffset - (int64_t)Cursor) > 0)
7477 copyUntilField(FD, ComponentLB);
7479 Cursor = FieldOffset + FieldSize;
7484 void copyUntilField(
const FieldDecl *FD, Address ComponentLB) {
7488 CGF.
Builder.CreatePtrDiff(CGF.
Int8Ty, ComponentLBPtr, LBPtr);
7489 copySizedChunk(LBPtr, Size);
7492 void copyUntilEnd(Address HB) {
7494 const ASTRecordLayout &RL =
7503 copySizedChunk(LBPtr, Size);
7506 void copySizedChunk(llvm::Value *Base, llvm::Value *Size) {
7507 CombinedInfo.Exprs.emplace_back(MapDecl, MapExpr);
7509 CombinedInfo.DevicePtrDecls.push_back(
nullptr);
7510 CombinedInfo.DevicePointers.push_back(DeviceInfoTy::None);
7511 CombinedInfo.Pointers.push_back(Base);
7512 CombinedInfo.Sizes.push_back(
7514 CombinedInfo.Types.push_back(Flags);
7515 CombinedInfo.Mappers.push_back(
nullptr);
7516 CombinedInfo.NonContigInfo.Dims.push_back(IsNonContiguous ? DimSize : 1);
7525 void generateInfoForComponentList(
7527 ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
7529 MapCombinedInfoTy &CombinedInfo,
7530 MapCombinedInfoTy &StructBaseCombinedInfo,
7531 StructRangeInfoTy &PartialStruct,
bool IsFirstComponentList,
7532 bool IsImplicit,
bool GenerateAllInfoForClauses,
7533 const ValueDecl *Mapper =
nullptr,
bool ForDeviceAddr =
false,
7534 const ValueDecl *BaseDecl =
nullptr,
const Expr *MapExpr =
nullptr,
7535 ArrayRef<OMPClauseMappableExprCommon::MappableExprComponentListRef>
7536 OverlappedElements = {},
7537 bool AreBothBasePtrAndPteeMapped =
false)
const {
7719 bool IsCaptureFirstInfo = IsFirstComponentList;
7723 bool RequiresReference =
false;
7726 auto CI = Components.rbegin();
7727 auto CE = Components.rend();
7732 bool IsExpressionFirstInfo =
true;
7733 bool FirstPointerInComplexData =
false;
7735 const Expr *AssocExpr = I->getAssociatedExpression();
7736 const auto *AE = dyn_cast<ArraySubscriptExpr>(AssocExpr);
7737 const auto *OASE = dyn_cast<ArraySectionExpr>(AssocExpr);
7738 const auto *OAShE = dyn_cast<OMPArrayShapingExpr>(AssocExpr);
7740 if (AreBothBasePtrAndPteeMapped && std::next(I) == CE)
7746 }
else if ((AE &&
isa<CXXThisExpr>(AE->getBase()->IgnoreParenImpCasts())) ||
7760 if (
const auto *VD =
7761 dyn_cast_or_null<VarDecl>(I->getAssociatedDeclaration())) {
7762 if (std::optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
7763 OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD)) {
7764 if ((*Res == OMPDeclareTargetDeclAttr::MT_Link) ||
7765 ((*Res == OMPDeclareTargetDeclAttr::MT_To ||
7766 *Res == OMPDeclareTargetDeclAttr::MT_Enter) &&
7768 RequiresReference =
true;
7778 I->getAssociatedDeclaration()->
getType().getNonReferenceType();
7783 const auto *VD = dyn_cast<VarDecl>(I->getAssociatedDeclaration());
7784 if (!AreBothBasePtrAndPteeMapped &&
7786 !VD || VD->hasLocalStorage()))
7789 FirstPointerInComplexData =
true;
7808 bool ShouldBeMemberOf =
false;
7817 const MemberExpr *EncounteredME =
nullptr;
7829 bool IsNonContiguous =
7830 CombinedInfo.NonContigInfo.IsNonContiguous ||
7831 any_of(Components, [&](
const auto &Component) {
7833 dyn_cast<ArraySectionExpr>(Component.getAssociatedExpression());
7837 const Expr *StrideExpr = OASE->getStride();
7841 const auto Constant =
7846 return !Constant->isOne();
7849 bool IsPrevMemberReference =
false;
7851 bool IsPartialMapped =
7852 !PartialStruct.PreliminaryMapData.BasePointers.empty();
7859 bool IsMappingWholeStruct =
true;
7860 if (!GenerateAllInfoForClauses) {
7861 IsMappingWholeStruct =
false;
7863 for (
auto TempI = I; TempI != CE; ++TempI) {
7864 const MemberExpr *PossibleME =
7865 dyn_cast<MemberExpr>(TempI->getAssociatedExpression());
7867 IsMappingWholeStruct =
false;
7873 for (; I != CE; ++I) {
7875 if (!EncounteredME) {
7876 EncounteredME = dyn_cast<MemberExpr>(I->getAssociatedExpression());
7879 if (EncounteredME) {
7880 ShouldBeMemberOf =
true;
7883 if (FirstPointerInComplexData) {
7884 QualType Ty = std::prev(I)
7885 ->getAssociatedDeclaration()
7887 .getNonReferenceType();
7889 FirstPointerInComplexData =
false;
7894 auto Next = std::next(I);
7904 bool IsFinalArraySection =
7906 isFinalArraySectionExpression(I->getAssociatedExpression());
7910 const ValueDecl *MapDecl = (I->getAssociatedDeclaration())
7911 ? I->getAssociatedDeclaration()
7913 MapExpr = (I->getAssociatedExpression()) ? I->getAssociatedExpression()
7920 dyn_cast<ArraySectionExpr>(I->getAssociatedExpression());
7922 dyn_cast<OMPArrayShapingExpr>(I->getAssociatedExpression());
7923 const auto *UO = dyn_cast<UnaryOperator>(I->getAssociatedExpression());
7924 const auto *BO = dyn_cast<BinaryOperator>(I->getAssociatedExpression());
7930 I->getAssociatedExpression()->getType()->isAnyPointerType();
7931 bool IsMemberReference =
isa<MemberExpr>(I->getAssociatedExpression()) &&
7934 bool IsNonDerefPointer = IsPointer &&
7935 !(UO && UO->getOpcode() != UO_Deref) && !BO &&
7941 if (
Next == CE || IsMemberReference || IsNonDerefPointer ||
7942 IsFinalArraySection) {
7945 assert((
Next == CE ||
7952 "Unexpected expression");
7956 auto &&EmitMemberExprBase = [](CodeGenFunction &CGF,
7957 const MemberExpr *E) {
7958 const Expr *BaseExpr = E->getBase();
7963 LValueBaseInfo BaseInfo;
7964 TBAAAccessInfo TBAAInfo;
7978 OAShE->getBase()->getType()->getPointeeType()),
7980 OAShE->getBase()->getType()));
7981 }
else if (IsMemberReference) {
7983 LValue BaseLVal = EmitMemberExprBase(CGF, ME);
7998 bool IsMemberPointerOrAddr =
8000 (((IsPointer || ForDeviceAddr) &&
8001 I->getAssociatedExpression() == EncounteredME) ||
8002 (IsPrevMemberReference && !IsPointer) ||
8003 (IsMemberReference &&
Next != CE &&
8004 !
Next->getAssociatedExpression()->getType()->isPointerType()));
8005 if (!OverlappedElements.empty() &&
Next == CE) {
8007 assert(!PartialStruct.Base.isValid() &&
"The base element is set.");
8008 assert(!IsPointer &&
8009 "Unexpected base element with the pointer type.");
8012 PartialStruct.LowestElem = {0, LowestElem};
8014 I->getAssociatedExpression()->getType());
8019 PartialStruct.HighestElem = {
8020 std::numeric_limits<
decltype(
8021 PartialStruct.HighestElem.first)>
::max(),
8023 PartialStruct.Base = BP;
8024 PartialStruct.LB = LB;
8026 PartialStruct.PreliminaryMapData.BasePointers.empty() &&
8027 "Overlapped elements must be used only once for the variable.");
8028 std::swap(PartialStruct.PreliminaryMapData, CombinedInfo);
8030 OpenMPOffloadMappingFlags Flags =
8031 OpenMPOffloadMappingFlags::OMP_MAP_MEMBER_OF |
8032 getMapTypeBits(MapType, MapModifiers, MotionModifiers, IsImplicit,
8034 false, IsNonContiguous);
8035 CopyOverlappedEntryGaps CopyGaps(CGF, CombinedInfo, Flags, MapDecl,
8036 MapExpr, BP, LB, IsNonContiguous,
8040 Component : OverlappedElements) {
8041 for (
const OMPClauseMappableExprCommon::MappableComponent &MC :
8044 if (
const auto *FD = dyn_cast<FieldDecl>(VD)) {
8045 CopyGaps.processField(MC, FD, EmitMemberExprBase);
8050 CopyGaps.copyUntilEnd(HB);
8053 llvm::Value *
Size = getExprTypeSize(I->getAssociatedExpression());
8060 if ((!IsMemberPointerOrAddr && !IsPartialMapped) ||
8062 if (!IsMappingWholeStruct) {
8063 CombinedInfo.Exprs.emplace_back(MapDecl, MapExpr);
8065 CombinedInfo.DevicePtrDecls.push_back(
nullptr);
8066 CombinedInfo.DevicePointers.push_back(DeviceInfoTy::None);
8068 CombinedInfo.Sizes.push_back(CGF.
Builder.CreateIntCast(
8070 CombinedInfo.NonContigInfo.Dims.push_back(IsNonContiguous ? DimSize
8073 StructBaseCombinedInfo.Exprs.emplace_back(MapDecl, MapExpr);
8074 StructBaseCombinedInfo.BasePointers.push_back(
8076 StructBaseCombinedInfo.DevicePtrDecls.push_back(
nullptr);
8077 StructBaseCombinedInfo.DevicePointers.push_back(DeviceInfoTy::None);
8078 StructBaseCombinedInfo.Pointers.push_back(LB.
emitRawPointer(CGF));
8079 StructBaseCombinedInfo.Sizes.push_back(CGF.
Builder.CreateIntCast(
8081 StructBaseCombinedInfo.NonContigInfo.Dims.push_back(
8082 IsNonContiguous ? DimSize : 1);
8086 bool HasMapper = Mapper &&
Next == CE;
8087 if (!IsMappingWholeStruct)
8088 CombinedInfo.Mappers.push_back(HasMapper ? Mapper :
nullptr);
8090 StructBaseCombinedInfo.Mappers.push_back(HasMapper ? Mapper
8097 OpenMPOffloadMappingFlags Flags =
8098 getMapTypeBits(MapType, MapModifiers, MotionModifiers, IsImplicit,
8099 !IsExpressionFirstInfo || RequiresReference ||
8100 FirstPointerInComplexData || IsMemberReference,
8101 AreBothBasePtrAndPteeMapped ||
8102 (IsCaptureFirstInfo && !RequiresReference),
8105 if (!IsExpressionFirstInfo || IsMemberReference) {
8108 if (IsPointer || (IsMemberReference &&
Next != CE))
8109 Flags &= ~(OpenMPOffloadMappingFlags::OMP_MAP_TO |
8110 OpenMPOffloadMappingFlags::OMP_MAP_FROM |
8111 OpenMPOffloadMappingFlags::OMP_MAP_ALWAYS |
8112 OpenMPOffloadMappingFlags::OMP_MAP_DELETE |
8113 OpenMPOffloadMappingFlags::OMP_MAP_CLOSE);
8115 if (ShouldBeMemberOf) {
8118 Flags |= OpenMPOffloadMappingFlags::OMP_MAP_MEMBER_OF;
8121 ShouldBeMemberOf =
false;
8125 if (!IsMappingWholeStruct)
8126 CombinedInfo.Types.push_back(Flags);
8128 StructBaseCombinedInfo.Types.push_back(Flags);
8134 if (EncounteredME) {
8139 if (!PartialStruct.Base.isValid()) {
8140 PartialStruct.LowestElem = {FieldIndex, LowestElem};
8141 if (IsFinalArraySection && OASE) {
8145 PartialStruct.HighestElem = {FieldIndex, HB};
8147 PartialStruct.HighestElem = {FieldIndex, LowestElem};
8149 PartialStruct.Base = BP;
8150 PartialStruct.LB = BP;
8151 }
else if (FieldIndex < PartialStruct.LowestElem.first) {
8152 PartialStruct.LowestElem = {FieldIndex, LowestElem};
8153 }
else if (FieldIndex > PartialStruct.HighestElem.first) {
8154 if (IsFinalArraySection && OASE) {
8158 PartialStruct.HighestElem = {FieldIndex, HB};
8160 PartialStruct.HighestElem = {FieldIndex, LowestElem};
8166 if (IsFinalArraySection || IsNonContiguous)
8167 PartialStruct.IsArraySection =
true;
8170 if (IsFinalArraySection)
8175 BP = IsMemberReference ? LowestElem : LB;
8176 if (!IsPartialMapped)
8177 IsExpressionFirstInfo =
false;
8178 IsCaptureFirstInfo =
false;
8179 FirstPointerInComplexData =
false;
8180 IsPrevMemberReference = IsMemberReference;
8181 }
else if (FirstPointerInComplexData) {
8182 QualType Ty = Components.rbegin()
8183 ->getAssociatedDeclaration()
8185 .getNonReferenceType();
8187 FirstPointerInComplexData =
false;
8193 PartialStruct.HasCompleteRecord =
true;
8195 if (!IsNonContiguous)
8198 const ASTContext &Context = CGF.
getContext();
8202 MapValuesArrayTy CurOffsets = {llvm::ConstantInt::get(CGF.
CGM.
Int64Ty, 0)};
8203 MapValuesArrayTy CurCounts = {llvm::ConstantInt::get(CGF.
CGM.
Int64Ty, 1)};
8204 MapValuesArrayTy CurStrides;
8205 MapValuesArrayTy DimSizes{llvm::ConstantInt::get(CGF.
CGM.
Int64Ty, 1)};
8211 for (
const OMPClauseMappableExprCommon::MappableComponent &Component :
8213 const Expr *AssocExpr = Component.getAssociatedExpression();
8214 const auto *OASE = dyn_cast<ArraySectionExpr>(AssocExpr);
8224 assert((VAT || CAT || &Component == &*Components.begin()) &&
8225 "Should be either ConstantArray or VariableArray if not the "
8229 if (CurStrides.empty()) {
8230 const Type *ElementType =
nullptr;
8232 ElementType = CAT->getElementType().getTypePtr();
8234 ElementType = VAT->getElementType().getTypePtr();
8236 assert(&Component == &*Components.begin() &&
8237 "Only expect pointer (non CAT or VAT) when this is the "
8245 if (&Component != &*Components.begin())
8249 CurStrides.push_back(
8250 llvm::ConstantInt::get(CGF.
Int64Ty, ElementTypeSize));
8255 if (DimSizes.size() < Components.size() - 1) {
8258 llvm::ConstantInt::get(CGF.
Int64Ty, CAT->getZExtSize()));
8260 DimSizes.push_back(CGF.
Builder.CreateIntCast(
8267 auto *DI = DimSizes.begin() + 1;
8269 llvm::Value *DimProd =
8270 llvm::ConstantInt::get(CGF.
CGM.
Int64Ty, ElementTypeSize);
8279 for (
const OMPClauseMappableExprCommon::MappableComponent &Component :
8281 const Expr *AssocExpr = Component.getAssociatedExpression();
8283 if (
const auto *AE = dyn_cast<ArraySubscriptExpr>(AssocExpr)) {
8284 llvm::Value *Offset = CGF.
Builder.CreateIntCast(
8287 CurOffsets.push_back(Offset);
8288 CurCounts.push_back(llvm::ConstantInt::get(CGF.
Int64Ty, 1));
8289 CurStrides.push_back(CurStrides.back());
8293 const auto *OASE = dyn_cast<ArraySectionExpr>(AssocExpr);
8299 const Expr *OffsetExpr = OASE->getLowerBound();
8300 llvm::Value *Offset =
nullptr;
8303 Offset = llvm::ConstantInt::get(CGF.
Int64Ty, 0);
8309 CurOffsets.push_back(Offset);
8312 const Expr *CountExpr = OASE->getLength();
8313 llvm::Value *Count =
nullptr;
8319 if (!OASE->getColonLocFirst().isValid() &&
8320 !OASE->getColonLocSecond().isValid()) {
8321 Count = llvm::ConstantInt::get(CGF.
Int64Ty, 1);
8327 const Expr *StrideExpr = OASE->getStride();
8328 llvm::Value *Stride =
8334 Count = CGF.
Builder.CreateUDiv(
8335 CGF.
Builder.CreateNUWSub(*DI, Offset), Stride);
8337 Count = CGF.
Builder.CreateNUWSub(*DI, Offset);
8343 CurCounts.push_back(Count);
8352 const Expr *StrideExpr = OASE->getStride();
8353 llvm::Value *Stride =
8358 DimProd = CGF.
Builder.CreateNUWMul(DimProd, *(DI - 1));
8360 CurStrides.push_back(CGF.
Builder.CreateNUWMul(DimProd, Stride));
8362 CurStrides.push_back(DimProd);
8363 if (DI != DimSizes.end())
8367 CombinedInfo.NonContigInfo.Offsets.push_back(CurOffsets);
8368 CombinedInfo.NonContigInfo.Counts.push_back(CurCounts);
8369 CombinedInfo.NonContigInfo.Strides.push_back(CurStrides);
8375 OpenMPOffloadMappingFlags
8376 getMapModifiersForPrivateClauses(
const CapturedStmt::Capture &Cap)
const {
8384 return OpenMPOffloadMappingFlags::OMP_MAP_TO |
8385 OpenMPOffloadMappingFlags::OMP_MAP_PTR_AND_OBJ;
8386 return OpenMPOffloadMappingFlags::OMP_MAP_PRIVATE |
8387 OpenMPOffloadMappingFlags::OMP_MAP_TO;
8390 if (I != LambdasMap.end())
8392 return getMapTypeBits(
8393 I->getSecond()->getMapType(), I->getSecond()->getMapTypeModifiers(),
8394 {}, I->getSecond()->isImplicit(),
8398 return OpenMPOffloadMappingFlags::OMP_MAP_TO |
8399 OpenMPOffloadMappingFlags::OMP_MAP_FROM;
8402 void getPlainLayout(
const CXXRecordDecl *RD,
8403 llvm::SmallVectorImpl<const FieldDecl *> &Layout,
8404 bool AsBase)
const {
8407 llvm::StructType *St =
8410 unsigned NumElements = St->getNumElements();
8412 llvm::PointerUnion<const CXXRecordDecl *, const FieldDecl *>, 4>
8413 RecordLayout(NumElements);
8416 for (
const auto &I : RD->
bases()) {
8420 QualType BaseTy = I.getType();
8431 RecordLayout[FieldIndex] =
Base;
8434 for (
const auto &I : RD->
vbases()) {
8435 QualType BaseTy = I.getType();
8442 if (RecordLayout[FieldIndex])
8444 RecordLayout[FieldIndex] =
Base;
8447 assert(!RD->
isUnion() &&
"Unexpected union.");
8448 for (
const auto *Field : RD->
fields()) {
8451 if (!
Field->isBitField() &&
8454 RecordLayout[FieldIndex] =
Field;
8457 for (
const llvm::PointerUnion<const CXXRecordDecl *, const FieldDecl *>
8458 &
Data : RecordLayout) {
8461 if (
const auto *Base = dyn_cast<const CXXRecordDecl *>(
Data))
8462 getPlainLayout(Base, Layout,
true);
8469 static Address getAttachPtrAddr(
const Expr *PointerExpr,
8470 CodeGenFunction &CGF) {
8471 assert(PointerExpr &&
"Cannot get addr from null attach-ptr expr");
8474 if (
auto *DRE = dyn_cast<DeclRefExpr>(PointerExpr)) {
8477 }
else if (
auto *OASE = dyn_cast<ArraySectionExpr>(PointerExpr)) {
8480 }
else if (
auto *ASE = dyn_cast<ArraySubscriptExpr>(PointerExpr)) {
8482 }
else if (
auto *ME = dyn_cast<MemberExpr>(PointerExpr)) {
8484 }
else if (
auto *UO = dyn_cast<UnaryOperator>(PointerExpr)) {
8485 assert(UO->getOpcode() == UO_Deref &&
8486 "Unexpected unary-operator on attach-ptr-expr");
8489 assert(AttachPtrAddr.
isValid() &&
8490 "Failed to get address for attach pointer expression");
8491 return AttachPtrAddr;
8498 static std::pair<Address, Address>
8499 getAttachPtrAddrAndPteeBaseAddr(
const Expr *AttachPtrExpr,
8500 CodeGenFunction &CGF) {
8505 Address AttachPtrAddr = getAttachPtrAddr(AttachPtrExpr, CGF);
8506 assert(AttachPtrAddr.
isValid() &&
"Invalid attach pointer addr");
8508 QualType AttachPtrType =
8513 AttachPtrAddr, AttachPtrType->
castAs<PointerType>());
8514 assert(AttachPteeBaseAddr.
isValid() &&
"Invalid attach pointee base addr");
8516 return {AttachPtrAddr, AttachPteeBaseAddr};
8522 shouldEmitAttachEntry(
const Expr *PointerExpr,
const ValueDecl *MapBaseDecl,
8523 CodeGenFunction &CGF,
8524 llvm::PointerUnion<
const OMPExecutableDirective *,
8525 const OMPDeclareMapperDecl *>
8535 ->getDirectiveKind());
8544 void collectAttachPtrExprInfo(
8546 llvm::PointerUnion<
const OMPExecutableDirective *,
8547 const OMPDeclareMapperDecl *>
8552 ? OMPD_declare_mapper
8555 const auto &[AttachPtrExpr, Depth] =
8559 AttachPtrComputationOrderMap.try_emplace(
8560 AttachPtrExpr, AttachPtrComputationOrderMap.size());
8561 AttachPtrComponentDepthMap.try_emplace(AttachPtrExpr, Depth);
8562 AttachPtrExprMap.try_emplace(Components, AttachPtrExpr);
8570 void generateAllInfoForClauses(
8571 ArrayRef<const OMPClause *> Clauses, MapCombinedInfoTy &CombinedInfo,
8572 llvm::OpenMPIRBuilder &OMPBuilder,
8573 const llvm::DenseSet<CanonicalDeclPtr<const Decl>> &SkipVarSet =
8574 llvm::DenseSet<CanonicalDeclPtr<const Decl>>())
const {
8579 llvm::MapVector<CanonicalDeclPtr<const Decl>,
8580 SmallVector<SmallVector<MapInfo, 8>, 4>>
8586 [&Info, &SkipVarSet](
8587 const ValueDecl *D, MapKind
Kind,
8590 ArrayRef<OpenMPMapModifierKind> MapModifiers,
8591 ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
8592 bool ReturnDevicePointer,
bool IsImplicit,
const ValueDecl *Mapper,
8593 const Expr *VarRef =
nullptr,
bool ForDeviceAddr =
false) {
8594 if (SkipVarSet.contains(D))
8596 auto It = Info.try_emplace(D, Total).first;
8597 It->second[
Kind].emplace_back(
8598 L, MapType, MapModifiers, MotionModifiers, ReturnDevicePointer,
8599 IsImplicit, Mapper, VarRef, ForDeviceAddr);
8602 for (
const auto *
Cl : Clauses) {
8603 const auto *
C = dyn_cast<OMPMapClause>(
Cl);
8607 if (llvm::is_contained(
C->getMapTypeModifiers(),
8608 OMPC_MAP_MODIFIER_present))
8610 else if (
C->getMapType() == OMPC_MAP_alloc)
8612 const auto *EI =
C->getVarRefs().begin();
8613 for (
const auto L :
C->component_lists()) {
8614 const Expr *E = (
C->getMapLoc().isValid()) ? *EI :
nullptr;
8615 InfoGen(std::get<0>(L), Kind, std::get<1>(L),
C->getMapType(),
8616 C->getMapTypeModifiers(), {},
8617 false,
C->isImplicit(), std::get<2>(L),
8622 for (
const auto *
Cl : Clauses) {
8623 const auto *
C = dyn_cast<OMPToClause>(
Cl);
8627 if (llvm::is_contained(
C->getMotionModifiers(),
8628 OMPC_MOTION_MODIFIER_present))
8630 const auto *EI =
C->getVarRefs().begin();
8631 for (
const auto L :
C->component_lists()) {
8632 InfoGen(std::get<0>(L), Kind, std::get<1>(L), OMPC_MAP_to, {},
8633 C->getMotionModifiers(),
false,
8634 C->isImplicit(), std::get<2>(L), *EI);
8638 for (
const auto *
Cl : Clauses) {
8639 const auto *
C = dyn_cast<OMPFromClause>(
Cl);
8643 if (llvm::is_contained(
C->getMotionModifiers(),
8644 OMPC_MOTION_MODIFIER_present))
8646 const auto *EI =
C->getVarRefs().begin();
8647 for (
const auto L :
C->component_lists()) {
8648 InfoGen(std::get<0>(L), Kind, std::get<1>(L), OMPC_MAP_from, {},
8649 C->getMotionModifiers(),
8650 false,
C->isImplicit(), std::get<2>(L),
8663 llvm::MapVector<CanonicalDeclPtr<const Decl>,
8664 SmallVector<DeferredDevicePtrEntryTy, 4>>
8666 MapCombinedInfoTy UseDeviceDataCombinedInfo;
8668 auto &&UseDeviceDataCombinedInfoGen =
8669 [&UseDeviceDataCombinedInfo](
const ValueDecl *VD, llvm::Value *Ptr,
8670 CodeGenFunction &CGF,
bool IsDevAddr) {
8671 UseDeviceDataCombinedInfo.Exprs.push_back(VD);
8672 UseDeviceDataCombinedInfo.BasePointers.emplace_back(Ptr);
8673 UseDeviceDataCombinedInfo.DevicePtrDecls.emplace_back(VD);
8674 UseDeviceDataCombinedInfo.DevicePointers.emplace_back(
8675 IsDevAddr ? DeviceInfoTy::Address : DeviceInfoTy::Pointer);
8676 UseDeviceDataCombinedInfo.Pointers.push_back(Ptr);
8677 UseDeviceDataCombinedInfo.Sizes.push_back(
8678 llvm::Constant::getNullValue(CGF.Int64Ty));
8679 UseDeviceDataCombinedInfo.Types.push_back(
8680 OpenMPOffloadMappingFlags::OMP_MAP_RETURN_PARAM);
8681 UseDeviceDataCombinedInfo.Mappers.push_back(
nullptr);
8685 [&DeferredInfo, &UseDeviceDataCombinedInfoGen,
8686 &InfoGen](CodeGenFunction &CGF,
const Expr *IE,
const ValueDecl *VD,
8689 bool IsImplicit,
bool IsDevAddr) {
8702 false, IsImplicit,
nullptr,
nullptr,
8704 DeferredInfo[
nullptr].emplace_back(IE, VD, IsDevAddr);
8708 if (IE->isGLValue())
8709 Ptr = CGF.EmitLValue(IE).getPointer(CGF);
8711 Ptr = CGF.EmitScalarExpr(IE);
8713 Ptr = CGF.EmitLoadOfScalar(CGF.EmitLValue(IE), IE->getExprLoc());
8715 UseDeviceDataCombinedInfoGen(VD, Ptr, CGF, IsDevAddr);
8719 auto &&IsMapInfoExist = [&Info](CodeGenFunction &CGF,
const ValueDecl *VD,
8720 const Expr *IE,
bool IsDevAddr) ->
bool {
8728 if (It != Info.end()) {
8730 for (
auto &
Data : It->second) {
8731 auto *CI = llvm::find_if(
Data, [VD](
const MapInfo &MI) {
8732 return MI.Components.back().getAssociatedDeclaration() == VD;
8740 if (CI !=
Data.end()) {
8742 CI->ForDeviceAddr = IsDevAddr;
8743 CI->ReturnDevicePointer =
true;
8747 auto PrevCI = std::next(CI->Components.rbegin());
8748 const auto *VarD = dyn_cast<VarDecl>(VD);
8751 !VD->getType().getNonReferenceType()->isPointerType() ||
8752 PrevCI == CI->Components.rend() ||
8754 VarD->hasLocalStorage()) {
8755 CI->ForDeviceAddr = IsDevAddr;
8756 CI->ReturnDevicePointer =
true;
8774 for (
const auto *
Cl : Clauses) {
8775 const auto *
C = dyn_cast<OMPUseDevicePtrClause>(
Cl);
8778 for (
const auto L :
C->component_lists()) {
8781 assert(!Components.empty() &&
8782 "Not expecting empty list of components!");
8783 const ValueDecl *VD = Components.back().getAssociatedDeclaration();
8785 const Expr *IE = Components.back().getAssociatedExpression();
8786 if (IsMapInfoExist(CGF, VD, IE,
false))
8788 MapInfoGen(CGF, IE, VD, Components,
C->isImplicit(),
8793 llvm::SmallDenseSet<CanonicalDeclPtr<const Decl>, 4> Processed;
8794 for (
const auto *
Cl : Clauses) {
8795 const auto *
C = dyn_cast<OMPUseDeviceAddrClause>(
Cl);
8798 for (
const auto L :
C->component_lists()) {
8801 assert(!std::get<1>(L).empty() &&
8802 "Not expecting empty list of components!");
8803 const ValueDecl *VD = std::get<1>(L).back().getAssociatedDeclaration();
8804 if (!Processed.insert(VD).second)
8807 const Expr *IE = std::get<1>(L).back().getAssociatedExpression();
8808 if (IsMapInfoExist(CGF, VD, IE,
true))
8810 MapInfoGen(CGF, IE, VD, Components,
C->isImplicit(),
8815 for (
const auto &
Data : Info) {
8816 StructRangeInfoTy PartialStruct;
8818 MapCombinedInfoTy CurInfo;
8820 MapCombinedInfoTy StructBaseCurInfo;
8822 const ValueDecl *VD = cast_or_null<ValueDecl>(D);
8823 bool HasMapBasePtr =
false;
8824 bool HasMapArraySec =
false;
8826 for (
const auto &M :
Data.second) {
8827 HasMapBasePtr = any_of(M, [](
const MapInfo &L) {
8828 return isa_and_present<DeclRefExpr>(L.VarRef);
8830 HasMapArraySec = any_of(M, [](
const MapInfo &L) {
8831 return isa_and_present<ArraySectionExpr, ArraySubscriptExpr>(
8834 if (HasMapBasePtr && HasMapArraySec)
8838 for (
const auto &M :
Data.second) {
8839 for (
const MapInfo &L : M) {
8840 assert(!L.Components.empty() &&
8841 "Not expecting declaration with no component lists.");
8844 unsigned CurrentBasePointersIdx = CurInfo.BasePointers.size();
8845 unsigned StructBasePointersIdx =
8846 StructBaseCurInfo.BasePointers.size();
8847 CurInfo.NonContigInfo.IsNonContiguous =
8848 L.Components.back().isNonContiguous();
8849 generateInfoForComponentList(
8850 L.MapType, L.MapModifiers, L.MotionModifiers, L.Components,
8851 CurInfo, StructBaseCurInfo, PartialStruct,
8852 false, L.IsImplicit,
8853 true, L.Mapper, L.ForDeviceAddr, VD,
8855 HasMapBasePtr && HasMapArraySec);
8859 if (L.ReturnDevicePointer) {
8863 assert((CurrentBasePointersIdx < CurInfo.BasePointers.size() ||
8864 StructBasePointersIdx <
8865 StructBaseCurInfo.BasePointers.size()) &&
8866 "Unexpected number of mapped base pointers.");
8869 const ValueDecl *RelevantVD =
8870 L.Components.back().getAssociatedDeclaration();
8871 assert(RelevantVD &&
8872 "No relevant declaration related with device pointer??");
8879 if (StructBasePointersIdx < StructBaseCurInfo.BasePointers.size()) {
8880 StructBaseCurInfo.DevicePtrDecls[StructBasePointersIdx] =
8882 StructBaseCurInfo.DevicePointers[StructBasePointersIdx] =
8883 L.ForDeviceAddr ? DeviceInfoTy::Address
8884 : DeviceInfoTy::Pointer;
8885 StructBaseCurInfo.Types[StructBasePointersIdx] |=
8886 OpenMPOffloadMappingFlags::OMP_MAP_RETURN_PARAM;
8888 CurInfo.DevicePtrDecls[CurrentBasePointersIdx] = RelevantVD;
8889 CurInfo.DevicePointers[CurrentBasePointersIdx] =
8890 L.ForDeviceAddr ? DeviceInfoTy::Address
8891 : DeviceInfoTy::Pointer;
8892 CurInfo.Types[CurrentBasePointersIdx] |=
8893 OpenMPOffloadMappingFlags::OMP_MAP_RETURN_PARAM;
8901 auto CI = DeferredInfo.find(
Data.first);
8902 if (CI != DeferredInfo.end()) {
8903 for (
const DeferredDevicePtrEntryTy &L : CI->second) {
8904 llvm::Value *BasePtr;
8906 if (L.ForDeviceAddr) {
8907 if (L.IE->isGLValue())
8915 CurInfo.Types.push_back(
8916 OpenMPOffloadMappingFlags::OMP_MAP_RETURN_PARAM |
8917 OpenMPOffloadMappingFlags::OMP_MAP_MEMBER_OF);
8921 L.IE->getExprLoc());
8925 CurInfo.Types.push_back(
8926 OpenMPOffloadMappingFlags::OMP_MAP_PTR_AND_OBJ |
8927 OpenMPOffloadMappingFlags::OMP_MAP_RETURN_PARAM |
8928 OpenMPOffloadMappingFlags::OMP_MAP_MEMBER_OF);
8930 CurInfo.Exprs.push_back(L.VD);
8931 CurInfo.BasePointers.emplace_back(BasePtr);
8932 CurInfo.DevicePtrDecls.emplace_back(L.VD);
8933 CurInfo.DevicePointers.emplace_back(
8934 L.ForDeviceAddr ? DeviceInfoTy::Address : DeviceInfoTy::Pointer);
8935 CurInfo.Pointers.push_back(Ptr);
8936 CurInfo.Sizes.push_back(
8937 llvm::Constant::getNullValue(this->CGF.
Int64Ty));
8938 CurInfo.Mappers.push_back(
nullptr);
8944 MapCombinedInfoTy UnionCurInfo;
8945 UnionCurInfo.append(StructBaseCurInfo);
8946 UnionCurInfo.append(CurInfo);
8950 if (PartialStruct.Base.isValid()) {
8951 UnionCurInfo.NonContigInfo.Dims.push_back(0);
8953 emitCombinedEntry(CombinedInfo, UnionCurInfo.Types, PartialStruct,
8954 !VD, OMPBuilder, VD);
8958 CombinedInfo.append(UnionCurInfo);
8961 CombinedInfo.append(UseDeviceDataCombinedInfo);
// Constructor for an executable directive: pre-scan the directive's clauses
// once and seed the lookup maps consulted later during map-information
// generation.
// NOTE(review): this excerpt has interior lines elided by extraction (the
// embedded original numbering is non-contiguous); code left byte-identical.
8965 MappableExprsHandler(
const OMPExecutableDirective &Dir, CodeGenFunction &CGF)
8966 : CurDir(&Dir), CGF(CGF) {
// Record variables named in firstprivate clauses (the emplaced value is
// elided here; presumably an implicit/explicit flag -- TODO confirm).
8968 for (
const auto *
C : Dir.getClausesOfKind<OMPFirstprivateClause>())
8969 for (
const auto *D :
C->varlist())
8970 FirstPrivateDecls.try_emplace(
// uses_allocators: allocator-traits variables, and allocators that are
// themselves variables, are also registered in FirstPrivateDecls.
8973 for (
const auto *
C : Dir.getClausesOfKind<OMPUsesAllocatorsClause>()) {
8974 for (unsigned I = 0, E = C->getNumberOfAllocators(); I < E; ++I) {
8975 OMPUsesAllocatorsClause::Data D = C->getAllocatorData(I);
8976 if (const auto *DRE = dyn_cast_or_null<DeclRefExpr>(D.AllocatorTraits))
8977 FirstPrivateDecls.try_emplace(cast<VarDecl>(DRE->getDecl()),
8979 else if (const auto *VD = dyn_cast<VarDecl>(
8980 cast<DeclRefExpr>(D.Allocator->IgnoreParenImpCasts())
8982 FirstPrivateDecls.try_emplace(VD, true);
// is_device_ptr: remember each declaration's component lists.
8986 for (
const auto *
C : Dir.getClausesOfKind<OMPIsDevicePtrClause>())
8987 for (
auto L :
C->component_lists())
8988 DevPointersMap[std::get<0>(L)].push_back(std::get<1>(L));
// has_device_addr: likewise, keyed by the mapped declaration.
8990 for (
const auto *
C : Dir.getClausesOfKind<OMPHasDeviceAddrClause>())
8991 for (
auto L :
C->component_lists())
8992 HasDevAddrsMap[std::get<0>(L)].push_back(std::get<1>(L));
// map(to:) clauses whose mapped variable is of lambda (closure) class type
// are recorded so lambda captures can be handled specially later.
8994 for (
const auto *
C : Dir.getClausesOfKind<OMPMapClause>()) {
8995 if (C->getMapType() != OMPC_MAP_to)
8997 for (auto L : C->component_lists()) {
8998 const ValueDecl *VD = std::get<0>(L);
8999 const auto *RD = VD ? VD->getType()
9001 .getNonReferenceType()
9002 ->getAsCXXRecordDecl()
9004 if (RD && RD->isLambda())
9005 LambdasMap.try_emplace(std::get<0>(L), C);
// Constructor for a declare-mapper directive. No clause pre-scanning is
// needed; the directive itself is the only context stored.
9011 MappableExprsHandler(
const OMPDeclareMapperDecl &Dir,
CodeGenFunction &CGF)
9012 : CurDir(&Dir), CGF(CGF) {}
// Emit one combined map entry covering a partially mapped struct: appends a
// base entry (base pointer, pointer, size, flags) for the spanned region of
// the record to CombinedInfo, then rewrites the member entries' flags in
// CurTypes so they reference the combined entry via MEMBER_OF.
// NOTE(review): interior lines are elided by extraction (non-contiguous
// embedded numbering); code left byte-identical.
9017 void emitCombinedEntry(MapCombinedInfoTy &CombinedInfo,
9018 MapFlagsArrayTy &CurTypes,
9019 const StructRangeInfoTy &PartialStruct,
bool IsMapThis,
9020 llvm::OpenMPIRBuilder &OMPBuilder,
9021 const ValueDecl *VD =
nullptr,
9022 unsigned OffsetForMemberOfFlag = 0,
9023 bool NotTargetParams =
true)
const {
// A single non-MEMBER_OF, non-array-section entry presumably needs no
// combined parent entry -- the elided branch body is not visible here.
9024 if (CurTypes.size() == 1 &&
9025 ((CurTypes.back() & OpenMPOffloadMappingFlags::OMP_MAP_MEMBER_OF) !=
9026 OpenMPOffloadMappingFlags::OMP_MAP_MEMBER_OF) &&
9027 !PartialStruct.IsArraySection)
// Span of the mapped region: lowest/highest mapped element, or the whole
// record when it was mapped completely.
9029 Address LBAddr = PartialStruct.LowestElem.second;
9030 Address HBAddr = PartialStruct.HighestElem.second;
9031 if (PartialStruct.HasCompleteRecord) {
9032 LBAddr = PartialStruct.LB;
9033 HBAddr = PartialStruct.LB;
9035 CombinedInfo.Exprs.push_back(VD);
9037 CombinedInfo.BasePointers.push_back(PartialStruct.Base.emitRawPointer(CGF));
9038 CombinedInfo.DevicePtrDecls.push_back(
nullptr);
9039 CombinedInfo.DevicePointers.push_back(DeviceInfoTy::None);
// When mapping `this`, a record with base classes changes how the size of
// the combined entry is computed (details elided).
9042 const CXXMethodDecl *MD =
9044 const CXXRecordDecl *RD = MD ? MD->
getParent() :
nullptr;
9045 bool HasBaseClass = RD && IsMapThis ? RD->
getNumBases() > 0 :
false;
9055 CombinedInfo.Pointers.push_back(PartialStruct.Base.emitRawPointer(CGF));
9060 CombinedInfo.Sizes.push_back(Size);
9062 CombinedInfo.Pointers.push_back(LB);
// Size of the spanned region = (highest element end) - (lowest element),
// computed as a pointer difference over i8.
9065 llvm::Value *HAddr = CGF.
Builder.CreateConstGEP1_32(
9069 llvm::Value *Diff = CGF.
Builder.CreatePtrDiff(CGF.
Int8Ty, CHAddr, CLAddr);
9072 CombinedInfo.Sizes.push_back(Size);
9074 CombinedInfo.Mappers.push_back(
nullptr);
// Flags for the combined entry: NONE when it is not a target parameter,
// otherwise PTR_AND_OBJ if there is preliminary map data, else TARGET_PARAM.
9076 CombinedInfo.Types.push_back(
9077 NotTargetParams ? OpenMPOffloadMappingFlags::OMP_MAP_NONE
9078 : !PartialStruct.PreliminaryMapData.BasePointers.empty()
9079 ? OpenMPOffloadMappingFlags::OMP_MAP_PTR_AND_OBJ
9080 : OpenMPOffloadMappingFlags::OMP_MAP_TARGET_PARAM);
// Propagate the `present` map modifier from any member entry to the
// combined entry.
9083 if (CurTypes.end() !=
9084 llvm::find_if(CurTypes, [](OpenMPOffloadMappingFlags
Type) {
9085 return static_cast<std::underlying_type_t<OpenMPOffloadMappingFlags>>(
9086 Type & OpenMPOffloadMappingFlags::OMP_MAP_PRESENT);
9088 CombinedInfo.Types.back() |= OpenMPOffloadMappingFlags::OMP_MAP_PRESENT;
// The first member entry must not carry TARGET_PARAM once the combined
// entry exists.
9090 (*CurTypes.begin()) &= ~OpenMPOffloadMappingFlags::OMP_MAP_TARGET_PARAM;
// Propagate `ompx_hold` to the combined entry and to every member entry.
9097 if (CurTypes.end() !=
9098 llvm::find_if(CurTypes, [](OpenMPOffloadMappingFlags
Type) {
9099 return static_cast<std::underlying_type_t<OpenMPOffloadMappingFlags>>(
9100 Type & OpenMPOffloadMappingFlags::OMP_MAP_OMPX_HOLD);
9102 CombinedInfo.Types.back() |= OpenMPOffloadMappingFlags::OMP_MAP_OMPX_HOLD;
9103 for (
auto &M : CurTypes)
9104 M |= OpenMPOffloadMappingFlags::OMP_MAP_OMPX_HOLD;
// Point all member entries at the just-emitted combined entry (its index is
// the last BasePointers slot, offset by OffsetForMemberOfFlag).
9110 OpenMPOffloadMappingFlags MemberOfFlag = OMPBuilder.getMemberOfFlag(
9111 OffsetForMemberOfFlag + CombinedInfo.BasePointers.size() - 1);
9112 for (
auto &M : CurTypes)
9113 OMPBuilder.setCorrectMemberOfFlag(M, MemberOfFlag);
// Generate all map information for the clauses of the current executable
// directive, skipping any declaration listed in SkipVarSet. Delegates to
// generateAllInfoForClauses.
9121 void generateAllInfo(
9122 MapCombinedInfoTy &CombinedInfo, llvm::OpenMPIRBuilder &OMPBuilder,
9123 const llvm::DenseSet<CanonicalDeclPtr<const Decl>> &SkipVarSet =
9124 llvm::DenseSet<CanonicalDeclPtr<const Decl>>())
const {
9126 "Expect a executable directive");
9128 generateAllInfoForClauses(CurExecDir->clauses(), CombinedInfo, OMPBuilder,
// Same as generateAllInfo, but driven by the clauses of the current
// declare-mapper directive instead of an executable directive.
9135 void generateAllInfoForMapper(MapCombinedInfoTy &CombinedInfo,
9136 llvm::OpenMPIRBuilder &OMPBuilder)
const {
9138 "Expect a declare mapper directive");
9140 generateAllInfoForClauses(CurMapperDir->clauses(), CombinedInfo,
// Generate map entries for the captures of a mapped lambda object VD (whose
// device copy is reachable through Arg): one PTR_AND_OBJ|LITERAL|MEMBER_OF|
// IMPLICIT entry per capture, plus one for the captured `this` pointer.
// LambdaPointers records capture-address -> lambda-address so MEMBER_OF can
// be fixed up later by adjustMemberOfForLambdaCaptures.
// NOTE(review): interior lines elided by extraction; code left byte-identical.
9145 void generateInfoForLambdaCaptures(
9146 const ValueDecl *VD, llvm::Value *Arg, MapCombinedInfoTy &CombinedInfo,
9147 llvm::DenseMap<llvm::Value *, llvm::Value *> &LambdaPointers)
const {
9155 llvm::DenseMap<const ValueDecl *, FieldDecl *> Captures;
9156 FieldDecl *ThisCapture =
nullptr;
// Entry for the captured `this` (only reached when ThisCapture is set --
// the guarding condition is elided here).
9162 LambdaPointers.try_emplace(ThisLVal.getPointer(CGF),
9163 VDLVal.getPointer(CGF));
9164 CombinedInfo.Exprs.push_back(VD);
9165 CombinedInfo.BasePointers.push_back(ThisLVal.getPointer(CGF));
9166 CombinedInfo.DevicePtrDecls.push_back(
nullptr);
9167 CombinedInfo.DevicePointers.push_back(DeviceInfoTy::None);
9168 CombinedInfo.Pointers.push_back(ThisLValVal.getPointer(CGF));
9169 CombinedInfo.Sizes.push_back(
9172 CombinedInfo.Types.push_back(
9173 OpenMPOffloadMappingFlags::OMP_MAP_PTR_AND_OBJ |
9174 OpenMPOffloadMappingFlags::OMP_MAP_LITERAL |
9175 OpenMPOffloadMappingFlags::OMP_MAP_MEMBER_OF |
9176 OpenMPOffloadMappingFlags::OMP_MAP_IMPLICIT);
9177 CombinedInfo.Mappers.push_back(
nullptr);
// One entry per variable capture; by-reference and by-copy captures take
// different branches (the branch structure is partially elided).
9179 for (
const LambdaCapture &LC : RD->
captures()) {
9180 if (!LC.capturesVariable())
9185 auto It = Captures.find(VD);
9186 assert(It != Captures.end() &&
"Found lambda capture without field.");
9190 LambdaPointers.try_emplace(VarLVal.getPointer(CGF),
9191 VDLVal.getPointer(CGF));
9192 CombinedInfo.Exprs.push_back(VD);
9193 CombinedInfo.BasePointers.push_back(VarLVal.getPointer(CGF));
9194 CombinedInfo.DevicePtrDecls.push_back(
nullptr);
9195 CombinedInfo.DevicePointers.push_back(DeviceInfoTy::None);
9196 CombinedInfo.Pointers.push_back(VarLValVal.getPointer(CGF));
9197 CombinedInfo.Sizes.push_back(CGF.
Builder.CreateIntCast(
// Non-lvalue capture path: scalar value pointer with zero size.
9203 LambdaPointers.try_emplace(VarLVal.getPointer(CGF),
9204 VDLVal.getPointer(CGF));
9205 CombinedInfo.Exprs.push_back(VD);
9206 CombinedInfo.BasePointers.push_back(VarLVal.getPointer(CGF));
9207 CombinedInfo.DevicePtrDecls.push_back(
nullptr);
9208 CombinedInfo.DevicePointers.push_back(DeviceInfoTy::None);
9209 CombinedInfo.Pointers.push_back(VarRVal.
getScalarVal());
9210 CombinedInfo.Sizes.push_back(llvm::ConstantInt::get(CGF.
Int64Ty, 0));
9212 CombinedInfo.Types.push_back(
9213 OpenMPOffloadMappingFlags::OMP_MAP_PTR_AND_OBJ |
9214 OpenMPOffloadMappingFlags::OMP_MAP_LITERAL |
9215 OpenMPOffloadMappingFlags::OMP_MAP_MEMBER_OF |
9216 OpenMPOffloadMappingFlags::OMP_MAP_IMPLICIT);
9217 CombinedInfo.Mappers.push_back(
nullptr);
// Fix up the MEMBER_OF bits of lambda-capture map entries: for every entry
// whose flags exactly match the capture signature emitted by
// generateInfoForLambdaCaptures, find the earlier entry that maps the owning
// lambda object (via the LambdaPointers side table) and point MEMBER_OF at it.
9222 void adjustMemberOfForLambdaCaptures(
9223 llvm::OpenMPIRBuilder &OMPBuilder,
9224 const llvm::DenseMap<llvm::Value *, llvm::Value *> &LambdaPointers,
9225 MapBaseValuesArrayTy &BasePointers, MapValuesArrayTy &Pointers,
9226 MapFlagsArrayTy &Types)
const {
9227 for (
unsigned I = 0, E = Types.size(); I < E; ++I) {
// Only entries with this exact flag combination are lambda captures.
9229 if (Types[I] != (OpenMPOffloadMappingFlags::OMP_MAP_PTR_AND_OBJ |
9230 OpenMPOffloadMappingFlags::OMP_MAP_LITERAL |
9231 OpenMPOffloadMappingFlags::OMP_MAP_MEMBER_OF |
9232 OpenMPOffloadMappingFlags::OMP_MAP_IMPLICIT))
9234 llvm::Value *BasePtr = LambdaPointers.lookup(BasePointers[I]);
9235 assert(BasePtr &&
"Unable to find base lambda address.");
// Scan backwards for the entry whose pointer is the lambda's address;
// its index becomes the MEMBER_OF target (TgtIdx assignment elided).
9237 for (
unsigned J = I; J > 0; --J) {
9238 unsigned Idx = J - 1;
9239 if (Pointers[Idx] != BasePtr)
9244 assert(TgtIdx != -1 &&
"Unable to find parent lambda.");
9248 OpenMPOffloadMappingFlags MemberOfFlag =
9249 OMPBuilder.getMemberOfFlag(TgtIdx);
9250 OMPBuilder.setCorrectMemberOfFlag(Types[I], MemberOfFlag);
// Generate map information for one captured variable of a target region,
// driven by the clauses that mention it: collects the declaration's component
// lists from is_device_ptr / has_device_addr and map clauses, orders them,
// and emits entries into CurCaptureVarInfo.
// NOTE(review): interior lines elided by extraction; code left byte-identical.
9257 void generateInfoForCaptureFromClauseInfo(
9258 const CapturedStmt::Capture *Cap, llvm::Value *Arg,
9259 MapCombinedInfoTy &CurCaptureVarInfo, llvm::OpenMPIRBuilder &OMPBuilder,
9260 unsigned OffsetForMemberOfFlag)
const {
9262 "Not expecting to generate map info for a variable array type!");
// Lambda-typed variables are handled elsewhere (generateInfoForLambdaCaptures).
9271 if (LambdasMap.count(VD))
// Fast path: a device pointer/address capture becomes a single
// LITERAL|TARGET_PARAM entry passing Arg through unchanged.
9277 if (VD && (DevPointersMap.count(VD) || HasDevAddrsMap.count(VD))) {
9278 CurCaptureVarInfo.Exprs.push_back(VD);
9279 CurCaptureVarInfo.BasePointers.emplace_back(Arg);
9280 CurCaptureVarInfo.DevicePtrDecls.emplace_back(VD);
9281 CurCaptureVarInfo.DevicePointers.emplace_back(DeviceInfoTy::Pointer);
9282 CurCaptureVarInfo.Pointers.push_back(Arg);
9283 CurCaptureVarInfo.Sizes.push_back(CGF.
Builder.CreateIntCast(
9286 CurCaptureVarInfo.Types.push_back(
9287 OpenMPOffloadMappingFlags::OMP_MAP_LITERAL |
9288 OpenMPOffloadMappingFlags::OMP_MAP_TARGET_PARAM);
9289 CurCaptureVarInfo.Mappers.push_back(
nullptr);
// Collect every component list that refers to VD.
9293 MapDataArrayTy DeclComponentLists;
// is_device_ptr lists are treated as map(to:)...
9297 auto It = DevPointersMap.find(VD);
9298 if (It != DevPointersMap.end())
9299 for (
const auto &MCL : It->second)
9300 DeclComponentLists.emplace_back(MCL, OMPC_MAP_to,
Unknown,
// ...and has_device_addr lists as map(tofrom:).
9303 auto I = HasDevAddrsMap.find(VD);
9304 if (I != HasDevAddrsMap.end())
9305 for (
const auto &MCL : I->second)
9306 DeclComponentLists.emplace_back(MCL, OMPC_MAP_tofrom,
Unknown,
9310 "Expect a executable directive");
// Scan map clauses mentioning VD; remember whether both the base pointer
// and an array section of it are mapped (the detecting conditions are
// elided between the flag assignments).
9312 bool HasMapBasePtr =
false;
9313 bool HasMapArraySec =
false;
9314 for (
const auto *
C : CurExecDir->getClausesOfKind<OMPMapClause>()) {
9315 const auto *EI =
C->getVarRefs().begin();
9316 for (
const auto L :
C->decl_component_lists(VD)) {
9317 const ValueDecl *VDecl, *Mapper;
9319 const Expr *E = (
C->getMapLoc().isValid()) ? *EI :
nullptr;
9321 std::tie(VDecl, Components, Mapper) = L;
9322 assert(VDecl == VD &&
"We got information for the wrong declaration??");
9323 assert(!Components.empty() &&
9324 "Not expecting declaration with no component lists.");
9326 HasMapBasePtr =
true;
9329 HasMapArraySec =
true;
9330 DeclComponentLists.emplace_back(Components,
C->getMapType(),
9331 C->getMapTypeModifiers(),
9332 C->isImplicit(), Mapper, E);
// Stable-sort so lists with the `present` modifier (then alloc maps) come
// first.
// NOTE(review): the comparator reads MapModifiers from RHS but re-reads
// MapType from LHS before computing HasPresentR/HasAllocsR -- the LHS/RHS
// pairing looks asymmetric; verify against upstream before changing.
9336 llvm::stable_sort(DeclComponentLists, [](
const MapData &LHS,
9337 const MapData &RHS) {
9338 ArrayRef<OpenMPMapModifierKind> MapModifiers = std::get<2>(LHS);
9341 llvm::is_contained(MapModifiers, clang::OMPC_MAP_MODIFIER_present);
9342 bool HasAllocs = MapType == OMPC_MAP_alloc;
9343 MapModifiers = std::get<2>(RHS);
9344 MapType = std::get<1>(LHS);
9346 llvm::is_contained(MapModifiers, clang::OMPC_MAP_MODIFIER_present);
9347 bool HasAllocsR = MapType == OMPC_MAP_alloc;
9348 return (HasPresent && !HasPresentR) || (HasAllocs && !HasAllocsR);
// Emit the entries for a batch of component lists and, when a struct was
// partially mapped, the combined parent entry.
9351 auto GenerateInfoForComponentLists =
9352 [&](ArrayRef<MapData> DeclComponentLists,
9353 bool IsEligibleForTargetParamFlag) {
9354 MapCombinedInfoTy CurInfoForComponentLists;
9355 StructRangeInfoTy PartialStruct;
9357 if (DeclComponentLists.empty())
9360 generateInfoForCaptureFromComponentLists(
9361 VD, DeclComponentLists, CurInfoForComponentLists, PartialStruct,
9362 IsEligibleForTargetParamFlag,
9363 HasMapBasePtr && HasMapArraySec);
9368 if (PartialStruct.Base.isValid()) {
9369 CurCaptureVarInfo.append(PartialStruct.PreliminaryMapData);
9371 CurCaptureVarInfo, CurInfoForComponentLists.Types,
9372 PartialStruct, Cap->
capturesThis(), OMPBuilder,
nullptr,
9373 OffsetForMemberOfFlag,
9374 !IsEligibleForTargetParamFlag);
9378 if (CurInfoForComponentLists.BasePointers.empty())
9381 CurCaptureVarInfo.append(CurInfoForComponentLists);
9384 GenerateInfoForComponentLists(DeclComponentLists,
// Generate map entries for one captured variable from its collected component
// lists. First detects pairs of lists that overlap (one is a prefix of the
// other up to a pointer dereference), sorts the overlap sets by field layout
// order, then emits the overlapped lists (with their overlap information) and
// finally all remaining lists.
// NOTE(review): interior lines elided by extraction; code left byte-identical.
9391 void generateInfoForCaptureFromComponentLists(
9392 const ValueDecl *VD, ArrayRef<MapData> DeclComponentLists,
9393 MapCombinedInfoTy &CurComponentListInfo, StructRangeInfoTy &PartialStruct,
9394 bool IsListEligibleForTargetParamFlag,
9395 bool AreBothBasePtrAndPteeMapped =
false)
const {
// Map from a "base" list to the lists that overlap with it.
9397 llvm::SmallDenseMap<
// Pairwise comparison: walk both component lists from the innermost
// component outwards until they diverge or one is exhausted.
9404 for (
const MapData &L : DeclComponentLists) {
9407 ArrayRef<OpenMPMapModifierKind> MapModifiers;
9409 const ValueDecl *Mapper;
9411 std::tie(Components, MapType, MapModifiers, IsImplicit, Mapper, VarRef) =
9414 for (
const MapData &L1 : ArrayRef(DeclComponentLists).slice(Count)) {
9416 std::tie(Components1, MapType, MapModifiers, IsImplicit, Mapper,
9418 auto CI = Components.rbegin();
9419 auto CE = Components.rend();
9420 auto SI = Components1.rbegin();
9421 auto SE = Components1.rend();
9422 for (; CI != CE && SI != SE; ++CI, ++SI) {
9423 if (CI->getAssociatedExpression()->getStmtClass() !=
9424 SI->getAssociatedExpression()->getStmtClass())
9427 if (CI->getAssociatedDeclaration() != SI->getAssociatedDeclaration())
// One list is a prefix of the other: decide whether this counts as an
// overlap (pointer components break the overlap -- conditions partially
// elided).
9432 if (CI == CE || SI == SE) {
9434 if (CI == CE && SI == SE)
9436 const auto It = (SI == SE) ? CI : SI;
9443 (std::prev(It)->getAssociatedDeclaration() &&
9445 ->getAssociatedDeclaration()
9447 ->isPointerType()) ||
9448 (It->getAssociatedDeclaration() &&
9449 It->getAssociatedDeclaration()->getType()->isPointerType() &&
9450 std::next(It) != CE && std::next(It) != SE))
// The longer list is the base; the shorter one is recorded as overlapping.
9452 const MapData &BaseData = CI == CE ? L : L1;
9454 SI == SE ? Components : Components1;
9455 OverlappedData[&BaseData].push_back(SubData);
// If overlaps exist, compute the record's plain field layout so overlap
// sets can be sorted in declaration/layout order.
9460 llvm::SmallVector<const FieldDecl *, 4> Layout;
9461 if (!OverlappedData.empty()) {
9464 while (BaseType != OrigType) {
9470 getPlainLayout(CRD, Layout,
false);
// Sort each overlap set: equal-parent fields by field index, otherwise by
// position in the computed layout (sort call itself elided).
9476 for (
auto &Pair : OverlappedData) {
9483 auto CI = First.rbegin();
9484 auto CE = First.rend();
9485 auto SI = Second.rbegin();
9486 auto SE = Second.rend();
9487 for (; CI != CE && SI != SE; ++CI, ++SI) {
9488 if (CI->getAssociatedExpression()->getStmtClass() !=
9489 SI->getAssociatedExpression()->getStmtClass())
9492 if (CI->getAssociatedDeclaration() !=
9493 SI->getAssociatedDeclaration())
9498 if (CI == CE && SI == SE)
9502 if (CI == CE || SI == SE)
9507 if (FD1->getParent() == FD2->getParent())
9508 return FD1->getFieldIndex() < FD2->getFieldIndex();
9510 llvm::find_if(Layout, [FD1, FD2](
const FieldDecl *FD) {
9511 return FD == FD1 || FD == FD2;
// Emit entries: first the base lists that have overlaps (passing their
// overlap sets), then every non-overlapped list. Only the first emitted
// list may carry the TARGET_PARAM flag.
9519 bool AddTargetParamFlag = IsListEligibleForTargetParamFlag;
9520 MapCombinedInfoTy StructBaseCombinedInfo;
9521 for (
const auto &Pair : OverlappedData) {
9522 const MapData &L = *Pair.getFirst();
9525 ArrayRef<OpenMPMapModifierKind> MapModifiers;
9527 const ValueDecl *Mapper;
9529 std::tie(Components, MapType, MapModifiers, IsImplicit, Mapper, VarRef) =
9531 ArrayRef<OMPClauseMappableExprCommon::MappableExprComponentListRef>
9532 OverlappedComponents = Pair.getSecond();
9533 generateInfoForComponentList(
9534 MapType, MapModifiers, {}, Components, CurComponentListInfo,
9535 StructBaseCombinedInfo, PartialStruct, AddTargetParamFlag, IsImplicit,
9537 false, VD, VarRef, OverlappedComponents);
9538 AddTargetParamFlag =
false;
9541 for (
const MapData &L : DeclComponentLists) {
9544 ArrayRef<OpenMPMapModifierKind> MapModifiers;
9546 const ValueDecl *Mapper;
9548 std::tie(Components, MapType, MapModifiers, IsImplicit, Mapper, VarRef) =
9550 auto It = OverlappedData.find(&L);
9551 if (It == OverlappedData.end())
9552 generateInfoForComponentList(
9553 MapType, MapModifiers, {}, Components, CurComponentListInfo,
9554 StructBaseCombinedInfo, PartialStruct, AddTargetParamFlag,
9555 IsImplicit,
false, Mapper,
9557 {}, AreBothBasePtrAndPteeMapped);
9558 AddTargetParamFlag =
false;
// Generate the default (implicit) map entry for a captured variable that has
// no explicit map clause: by-reference captures get TO|FROM, by-copy captures
// a LITERAL or zero-sized NONE entry, private captures the modifiers from
// getMapModifiersForPrivateClauses. The branch conditions selecting between
// these cases are elided in this excerpt; code left byte-identical.
9564 void generateDefaultMapInfo(
const CapturedStmt::Capture &CI,
9565 const FieldDecl &RI, llvm::Value *CV,
9566 MapCombinedInfoTy &CombinedInfo)
const {
// Default maps are implicit unless a firstprivate entry says otherwise.
9567 bool IsImplicit =
true;
// By-reference capture: map the pointee tofrom.
9570 CombinedInfo.Exprs.push_back(
nullptr);
9571 CombinedInfo.BasePointers.push_back(CV);
9572 CombinedInfo.DevicePtrDecls.push_back(
nullptr);
9573 CombinedInfo.DevicePointers.push_back(DeviceInfoTy::None);
9574 CombinedInfo.Pointers.push_back(CV);
9576 CombinedInfo.Sizes.push_back(
9580 CombinedInfo.Types.push_back(OpenMPOffloadMappingFlags::OMP_MAP_TO |
9581 OpenMPOffloadMappingFlags::OMP_MAP_FROM);
// By-copy capture: either a LITERAL entry with the value's size, or a
// zero-sized NONE entry.
9585 CombinedInfo.BasePointers.push_back(CV)
9586 CombinedInfo.DevicePtrDecls.push_back(
nullptr);
9587 CombinedInfo.DevicePointers.push_back(DeviceInfoTy::None);
9588 CombinedInfo.Pointers.push_back(CV);
9592 CombinedInfo.Types.push_back(
9593 OpenMPOffloadMappingFlags::OMP_MAP_LITERAL);
9594 CombinedInfo.Sizes.push_back(CGF.
Builder.CreateIntCast(
9599 CombinedInfo.Types.push_back(OpenMPOffloadMappingFlags::OMP_MAP_NONE);
9600 CombinedInfo.Sizes.push_back(llvm::Constant::getNullValue(CGF.
Int64Ty));
9602 auto I = FirstPrivateDecls.find(VD);
9603 if (I != FirstPrivateDecls.end())
9604 IsImplicit = I->getSecond();
// Private capture: sizes/types come from the private-clause modifiers.
9609 CombinedInfo.Sizes.push_back(CGF.
Builder.CreateIntCast(
9614 CombinedInfo.Types.push_back(getMapModifiersForPrivateClauses(CI));
9616 auto I = FirstPrivateDecls.find(VD);
9618 CombinedInfo.BasePointers.push_back(CV);
9619 CombinedInfo.DevicePtrDecls.push_back(
nullptr);
9620 CombinedInfo.DevicePointers.push_back(DeviceInfoTy::None);
9627 CombinedInfo.Pointers.push_back(CV);
9629 if (I != FirstPrivateDecls.end())
9630 IsImplicit = I->getSecond();
// Every default-mapped capture is a kernel argument...
9633 CombinedInfo.Types.back() |=
9634 OpenMPOffloadMappingFlags::OMP_MAP_TARGET_PARAM;
// ...and flagged IMPLICIT when no clause made it explicit.
9638 CombinedInfo.Types.back() |= OpenMPOffloadMappingFlags::OMP_MAP_IMPLICIT;
9641 CombinedInfo.Mappers.push_back(
nullptr);
9653 dyn_cast<MemberExpr>(OASE->getBase()->IgnoreParenImpCasts()))
9654 return ME->getMemberDecl();
// Build the source-location string constant describing one mapped expression
// or declaration (used for runtime map diagnostics). Returns the default
// location string when neither an expression nor a declaration is available.
// NOTE(review): the function-name line is elided in this excerpt; code left
// byte-identical.
9660static llvm::Constant *
9662 MappableExprsHandler::MappingExprInfo &MapExprs) {
9664 uint32_t SrcLocStrSize;
9665 if (!MapExprs.getMapDecl() && !MapExprs.getMapExpr())
9666 return OMPBuilder.getOrCreateDefaultSrcLocStr(SrcLocStrSize);
// Prefer the expression's location; fall back to the declaration's.
9669 if (!MapExprs.getMapDecl() && MapExprs.getMapExpr()) {
9673 Loc = MapExprs.getMapExpr()->getExprLoc();
9675 Loc = MapExprs.getMapDecl()->getLocation();
// Pretty-print the mapped expression, or use the declaration's name.
9678 std::string ExprName;
9679 if (MapExprs.getMapExpr()) {
9681 llvm::raw_string_ostream OS(ExprName);
9682 MapExprs.getMapExpr()->printPretty(OS,
nullptr, P);
9684 ExprName = MapExprs.getMapDecl()->getNameAsString();
9693 return OMPBuilder.getOrCreateSrcLocStr(
FileName, ExprName, PLoc.
getLine(),
9700 CodeGenFunction &CGF, MappableExprsHandler::MapCombinedInfoTy &CombinedInfo,
9702 bool IsNonContiguous =
false,
bool ForEndCall =
false) {
9705 using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;
9708 InsertPointTy CodeGenIP(CGF.
Builder.GetInsertBlock(),
9709 CGF.
Builder.GetInsertPoint());
9711 auto DeviceAddrCB = [&](
unsigned int I, llvm::Value *NewDecl) {
9712 if (
const ValueDecl *DevVD = CombinedInfo.DevicePtrDecls[I]) {
9717 auto CustomMapperCB = [&](
unsigned int I) {
9718 llvm::Function *MFunc =
nullptr;
9719 if (CombinedInfo.Mappers[I]) {
9720 Info.HasMapper =
true;
9726 cantFail(OMPBuilder.emitOffloadingArraysAndArgs(
9727 AllocaIP, CodeGenIP, Info, Info.RTArgs, CombinedInfo, CustomMapperCB,
9728 IsNonContiguous, ForEndCall, DeviceAddrCB));
// Inspect the innermost captured statement of directive D and, depending on
// D's kind, return the nested OpenMP directive of interest (or presumably
// nullptr when none applies -- return statements are elided in this excerpt).
// The large case list enumerates directive kinds that share elided handling.
// NOTE(review): the function-name line is elided; code left byte-identical.
9732static const OMPExecutableDirective *
9734 const auto *CS = D.getInnermostCapturedStmt();
9737 const Stmt *ChildStmt =
9740 if (
const auto *NestedDir =
9741 dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
9743 switch (D.getDirectiveKind()) {
// For a nested `teams` directive, look one level deeper for the directive
// nested inside it.
9749 if (DKind == OMPD_teams) {
9750 Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
9755 if (
const auto *NND =
9756 dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
9757 DKind = NND->getDirectiveKind();
9763 case OMPD_target_teams:
9767 case OMPD_target_parallel:
9768 case OMPD_target_simd:
9769 case OMPD_target_parallel_for:
9770 case OMPD_target_parallel_for_simd:
9772 case OMPD_target_teams_distribute:
9773 case OMPD_target_teams_distribute_simd:
9774 case OMPD_target_teams_distribute_parallel_for:
9775 case OMPD_target_teams_distribute_parallel_for_simd:
9778 case OMPD_parallel_for:
9779 case OMPD_parallel_master:
9780 case OMPD_parallel_sections:
9782 case OMPD_parallel_for_simd:
9784 case OMPD_cancellation_point:
9786 case OMPD_threadprivate:
9797 case OMPD_taskyield:
9800 case OMPD_taskgroup:
9806 case OMPD_target_data:
9807 case OMPD_target_exit_data:
9808 case OMPD_target_enter_data:
9809 case OMPD_distribute:
9810 case OMPD_distribute_simd:
9811 case OMPD_distribute_parallel_for:
9812 case OMPD_distribute_parallel_for_simd:
9813 case OMPD_teams_distribute:
9814 case OMPD_teams_distribute_simd:
9815 case OMPD_teams_distribute_parallel_for:
9816 case OMPD_teams_distribute_parallel_for_simd:
9817 case OMPD_target_update:
9818 case OMPD_declare_simd:
9819 case OMPD_declare_variant:
9820 case OMPD_begin_declare_variant:
9821 case OMPD_end_declare_variant:
9822 case OMPD_declare_target:
9823 case OMPD_end_declare_target:
9824 case OMPD_declare_reduction:
9825 case OMPD_declare_mapper:
9827 case OMPD_taskloop_simd:
9828 case OMPD_master_taskloop:
9829 case OMPD_master_taskloop_simd:
9830 case OMPD_parallel_master_taskloop:
9831 case OMPD_parallel_master_taskloop_simd:
9833 case OMPD_metadirective:
// Directive kinds that can never carry the nested structure looked for.
9836 llvm_unreachable(
"Unexpected directive.");
9881 auto *MapperVarDecl =
9883 CharUnits ElementSize =
C.getTypeSizeInChars(Ty);
9884 llvm::Type *ElemTy =
CGM.getTypes().ConvertTypeForMem(Ty);
9887 MappableExprsHandler::MapCombinedInfoTy CombinedInfo;
9888 auto PrivatizeAndGenMapInfoCB =
9889 [&](llvm::OpenMPIRBuilder::InsertPointTy CodeGenIP, llvm::Value *PtrPHI,
9890 llvm::Value *BeginArg) -> llvm::OpenMPIRBuilder::MapInfosTy & {
9891 MapperCGF.
Builder.restoreIP(CodeGenIP);
9901 Scope.addPrivate(MapperVarDecl, PtrCurrent);
9902 (void)
Scope.Privatize();
9905 MappableExprsHandler MEHandler(*D, MapperCGF);
9906 MEHandler.generateAllInfoForMapper(CombinedInfo,
OMPBuilder);
9908 auto FillInfoMap = [&](MappableExprsHandler::MappingExprInfo &MapExpr) {
9911 if (
CGM.getCodeGenOpts().getDebugInfo() !=
9912 llvm::codegenoptions::NoDebugInfo) {
9913 CombinedInfo.Names.resize(CombinedInfo.Exprs.size());
9914 llvm::transform(CombinedInfo.Exprs, CombinedInfo.Names.begin(),
9918 return CombinedInfo;
9921 auto CustomMapperCB = [&](
unsigned I) {
9922 llvm::Function *MapperFunc =
nullptr;
9923 if (CombinedInfo.Mappers[I]) {
9927 assert(MapperFunc &&
"Expect a valid mapper function is available.");
9933 llvm::raw_svector_ostream Out(TyStr);
9934 CGM.getCXXABI().getMangleContext().mangleCanonicalTypeName(Ty, Out);
9937 llvm::Function *NewFn = cantFail(
OMPBuilder.emitUserDefinedMapper(
9938 PrivatizeAndGenMapInfoCB, ElemTy, Name, CustomMapperCB));
9939 UDMMap.try_emplace(D, NewFn);
9963 Kind != OMPD_target_teams_loop)
9966 return llvm::ConstantInt::get(CGF.
Int64Ty, 0);
9969 if (llvm::Value *NumIterations = SizeEmitter(CGF, *LD))
9970 return NumIterations;
9971 return llvm::ConstantInt::get(CGF.
Int64Ty, 0);
9980 if (OffloadingMandatory) {
9981 CGF.
Builder.CreateUnreachable();
9983 if (RequiresOuterTask) {
9984 CapturedVars.clear();
9993 llvm::PointerIntPair<const Expr *, 2, OpenMPDeviceClauseModifier>
Device,
9996 llvm::Value *DeviceID;
9997 if (
Device.getPointer()) {
9999 Device.getInt() == OMPC_DEVICE_device_num) &&
10000 "Expected device_num modifier.");
10005 DeviceID = CGF.
Builder.getInt64(OMP_DEVICEID_UNDEF);
10012 llvm::Value *DynCGroupMem = CGF.
Builder.getInt32(0);
10017 DynMemClause->getSize(),
true);
10018 DynCGroupMem = CGF.
Builder.CreateIntCast(DynCGroupMemVal, CGF.
Int32Ty,
10021 return DynCGroupMem;
10026 llvm::OpenMPIRBuilder &OMPBuilder,
10028 MappableExprsHandler::MapCombinedInfoTy &CombinedInfo) {
10030 llvm::DenseMap<llvm::Value *, llvm::Value *> LambdaPointers;
10032 auto *CV = CapturedVars.begin();
10035 CI != CE; ++CI, ++RI, ++CV) {
10036 MappableExprsHandler::MapCombinedInfoTy CurInfo;
10041 CurInfo.Exprs.push_back(
nullptr);
10042 CurInfo.BasePointers.push_back(*CV);
10043 CurInfo.DevicePtrDecls.push_back(
nullptr);
10044 CurInfo.DevicePointers.push_back(
10045 MappableExprsHandler::DeviceInfoTy::None);
10046 CurInfo.Pointers.push_back(*CV);
10047 CurInfo.Sizes.push_back(CGF.
Builder.CreateIntCast(
10050 CurInfo.Types.push_back(OpenMPOffloadMappingFlags::OMP_MAP_LITERAL |
10051 OpenMPOffloadMappingFlags::OMP_MAP_TARGET_PARAM |
10052 OpenMPOffloadMappingFlags::OMP_MAP_IMPLICIT);
10053 CurInfo.Mappers.push_back(
nullptr);
10057 MEHandler.generateInfoForCaptureFromClauseInfo(
10058 CI, *CV, CurInfo, OMPBuilder,
10059 CombinedInfo.BasePointers.size());
10064 MappedVarSet.insert(
nullptr);
10066 if (CurInfo.BasePointers.empty())
10067 MEHandler.generateDefaultMapInfo(*CI, **RI, *CV, CurInfo);
10072 MEHandler.generateInfoForLambdaCaptures(CI->
getCapturedVar(), *CV,
10073 CurInfo, LambdaPointers);
10076 assert(!CurInfo.BasePointers.empty() &&
10077 "Non-existing map pointer for capture!");
10078 assert(CurInfo.BasePointers.size() == CurInfo.Pointers.size() &&
10079 CurInfo.BasePointers.size() == CurInfo.Sizes.size() &&
10080 CurInfo.BasePointers.size() == CurInfo.Types.size() &&
10081 CurInfo.BasePointers.size() == CurInfo.Mappers.size() &&
10082 "Inconsistent map information sizes!");
10085 CombinedInfo.append(CurInfo);
10088 MEHandler.adjustMemberOfForLambdaCaptures(
10089 OMPBuilder, LambdaPointers, CombinedInfo.BasePointers,
10090 CombinedInfo.Pointers, CombinedInfo.Types);
10094 MappableExprsHandler::MapCombinedInfoTy &CombinedInfo,
10095 llvm::OpenMPIRBuilder &OMPBuilder,
10102 MEHandler.generateAllInfo(CombinedInfo, OMPBuilder, SkippedVarSet);
10104 auto FillInfoMap = [&](MappableExprsHandler::MappingExprInfo &MapExpr) {
10108 llvm::codegenoptions::NoDebugInfo) {
10109 CombinedInfo.Names.resize(CombinedInfo.Exprs.size());
10110 llvm::transform(CombinedInfo.Exprs, CombinedInfo.Names.begin(),
10118 llvm::OpenMPIRBuilder &OMPBuilder,
10119 MappableExprsHandler::MapCombinedInfoTy &CombinedInfo) {
10121 MappableExprsHandler MEHandler(D, CGF);
10122 llvm::DenseSet<CanonicalDeclPtr<const Decl>> MappedVarSet;
10125 MappedVarSet, CombinedInfo);
10126 genMapInfo(MEHandler, CGF, CombinedInfo, OMPBuilder, MappedVarSet);
10129template <
typename ClauseTy>
10134 const auto *
C = D.getSingleClause<ClauseTy>();
10135 assert(!
C->varlist_empty() &&
10136 "ompx_bare requires explicit num_teams and thread_limit");
10138 for (
auto *E :
C->varlist()) {
10150 llvm::PointerIntPair<const Expr *, 2, OpenMPDeviceClauseModifier>
Device,
10152 llvm::Value *&MapTypesArray, llvm::Value *&MapNamesArray,
10157 llvm::OpenMPIRBuilder &OMPBuilder = OMPRuntime->
getOMPBuilder();
10160 MappableExprsHandler::MapCombinedInfoTy CombinedInfo;
10162 genMapInfo(D, CGF, CS, CapturedVars, OMPBuilder, CombinedInfo);
10176 MapTypesArray = Info.RTArgs.MapTypesArray;
10177 MapNamesArray = Info.RTArgs.MapNamesArray;
10179 auto &&ThenGen = [&OMPRuntime, OutlinedFn, &D, &CapturedVars,
10180 RequiresOuterTask, &CS, OffloadingMandatory,
Device,
10181 OutlinedFnID, &InputInfo, &MapTypesArray, &MapNamesArray,
10183 bool IsReverseOffloading =
Device.getInt() == OMPC_DEVICE_ancestor;
10185 if (IsReverseOffloading) {
10191 RequiresOuterTask, CS, OffloadingMandatory, CGF);
10196 unsigned NumTargetItems = InputInfo.NumberOfTargetItems;
10198 llvm::Value *BasePointersArray =
10199 InputInfo.BasePointersArray.emitRawPointer(CGF);
10200 llvm::Value *PointersArray = InputInfo.PointersArray.emitRawPointer(CGF);
10201 llvm::Value *SizesArray = InputInfo.SizesArray.emitRawPointer(CGF);
10202 llvm::Value *MappersArray = InputInfo.MappersArray.emitRawPointer(CGF);
10204 auto &&EmitTargetCallFallbackCB =
10205 [&OMPRuntime, OutlinedFn, &D, &CapturedVars, RequiresOuterTask, &CS,
10206 OffloadingMandatory, &CGF](llvm::OpenMPIRBuilder::InsertPointTy IP)
10207 -> llvm::OpenMPIRBuilder::InsertPointTy {
10210 RequiresOuterTask, CS, OffloadingMandatory, CGF);
10223 NumThreads.push_back(
10229 llvm::Value *NumIterations =
10232 llvm::OpenMPIRBuilder::InsertPointTy AllocaIP(
10235 llvm::OpenMPIRBuilder::TargetDataRTArgs RTArgs(
10236 BasePointersArray, PointersArray, SizesArray, MapTypesArray,
10237 nullptr , MappersArray, MapNamesArray);
10239 llvm::OpenMPIRBuilder::TargetKernelArgs Args(
10240 NumTargetItems, RTArgs, NumIterations, NumTeams, NumThreads,
10241 DynCGGroupMem, HasNoWait);
10243 llvm::OpenMPIRBuilder::InsertPointTy AfterIP =
10245 CGF.
Builder, OutlinedFnID, EmitTargetCallFallbackCB, Args, DeviceID,
10247 CGF.
Builder.restoreIP(AfterIP);
10250 if (RequiresOuterTask)
10265 [&OMPRuntime, OutlinedFn, &D, &CapturedVars, RequiresOuterTask, &CS,
10268 RequiresOuterTask, CS, OffloadingMandatory, CGF);
10271 if (RequiresOuterTask) {
10281 llvm::Function *OutlinedFn, llvm::Value *OutlinedFnID,
const Expr *IfCond,
10282 llvm::PointerIntPair<const Expr *, 2, OpenMPDeviceClauseModifier>
Device,
10289 const bool OffloadingMandatory = !
CGM.getLangOpts().OpenMPIsTargetDevice &&
10290 CGM.getLangOpts().OpenMPOffloadMandatory;
10292 assert((OffloadingMandatory || OutlinedFn) &&
"Invalid outlined function!");
10294 const bool RequiresOuterTask =
10298 (
CGM.getLangOpts().OpenMP >= 51 &&
10302 const CapturedStmt &CS = *D.getCapturedStmt(OMPD_target);
10310 llvm::Value *MapTypesArray =
nullptr;
10311 llvm::Value *MapNamesArray =
nullptr;
10313 auto &&TargetThenGen = [
this, OutlinedFn, &D, &CapturedVars,
10314 RequiresOuterTask, &CS, OffloadingMandatory,
Device,
10315 OutlinedFnID, &InputInfo, &MapTypesArray,
10319 RequiresOuterTask, CS, OffloadingMandatory,
10320 Device, OutlinedFnID, InputInfo, MapTypesArray,
10321 MapNamesArray, SizeEmitter, CGF,
CGM);
10324 auto &&TargetElseGen =
10325 [
this, OutlinedFn, &D, &CapturedVars, RequiresOuterTask, &CS,
10328 CS, OffloadingMandatory, CGF);
10335 if (OutlinedFnID) {
10337 emitIfClause(CGF, IfCond, TargetThenGen, TargetElseGen);
10349 StringRef ParentName) {
10354 bool RequiresDeviceCodegen =
10359 if (RequiresDeviceCodegen) {
10367 if (!
OMPBuilder.OffloadInfoManager.hasTargetRegionEntryInfo(EntryInfo))
10370 switch (E.getDirectiveKind()) {
10375 case OMPD_target_parallel:
10379 case OMPD_target_teams:
10383 case OMPD_target_teams_distribute:
10387 case OMPD_target_teams_distribute_simd:
10391 case OMPD_target_parallel_for:
10395 case OMPD_target_parallel_for_simd:
10399 case OMPD_target_simd:
10403 case OMPD_target_teams_distribute_parallel_for:
10408 case OMPD_target_teams_distribute_parallel_for_simd:
10414 case OMPD_target_teams_loop:
10418 case OMPD_target_parallel_loop:
10422 case OMPD_parallel:
10424 case OMPD_parallel_for:
10425 case OMPD_parallel_master:
10426 case OMPD_parallel_sections:
10427 case OMPD_for_simd:
10428 case OMPD_parallel_for_simd:
10430 case OMPD_cancellation_point:
10432 case OMPD_threadprivate:
10433 case OMPD_allocate:
10438 case OMPD_sections:
10442 case OMPD_critical:
10443 case OMPD_taskyield:
10445 case OMPD_taskwait:
10446 case OMPD_taskgroup:
10452 case OMPD_target_data:
10453 case OMPD_target_exit_data:
10454 case OMPD_target_enter_data:
10455 case OMPD_distribute:
10456 case OMPD_distribute_simd:
10457 case OMPD_distribute_parallel_for:
10458 case OMPD_distribute_parallel_for_simd:
10459 case OMPD_teams_distribute:
10460 case OMPD_teams_distribute_simd:
10461 case OMPD_teams_distribute_parallel_for:
10462 case OMPD_teams_distribute_parallel_for_simd:
10463 case OMPD_target_update:
10464 case OMPD_declare_simd:
10465 case OMPD_declare_variant:
10466 case OMPD_begin_declare_variant:
10467 case OMPD_end_declare_variant:
10468 case OMPD_declare_target:
10469 case OMPD_end_declare_target:
10470 case OMPD_declare_reduction:
10471 case OMPD_declare_mapper:
10472 case OMPD_taskloop:
10473 case OMPD_taskloop_simd:
10474 case OMPD_master_taskloop:
10475 case OMPD_master_taskloop_simd:
10476 case OMPD_parallel_master_taskloop:
10477 case OMPD_parallel_master_taskloop_simd:
10478 case OMPD_requires:
10479 case OMPD_metadirective:
10482 llvm_unreachable(
"Unknown target directive for OpenMP device codegen.");
10487 if (
const auto *E = dyn_cast<OMPExecutableDirective>(S)) {
10488 if (!E->hasAssociatedStmt() || !E->getAssociatedStmt())
10496 if (
const auto *L = dyn_cast<LambdaExpr>(S))
10505 std::optional<OMPDeclareTargetDeclAttr::DevTypeTy> DevTy =
10506 OMPDeclareTargetDeclAttr::getDeviceType(VD);
10510 if (!IsDevice && DevTy == OMPDeclareTargetDeclAttr::DT_NoHost)
10513 if (IsDevice && DevTy == OMPDeclareTargetDeclAttr::DT_Host)
10521 if (!
CGM.getLangOpts().OpenMPIsTargetDevice) {
10522 if (
const auto *FD = dyn_cast<FunctionDecl>(GD.
getDecl()))
10524 CGM.getLangOpts().OpenMPIsTargetDevice))
10531 if (
const auto *FD = dyn_cast<FunctionDecl>(VD)) {
10532 StringRef Name =
CGM.getMangledName(GD);
10535 CGM.getLangOpts().OpenMPIsTargetDevice))
10540 return !OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD) &&
10546 CGM.getLangOpts().OpenMPIsTargetDevice))
10549 if (!
CGM.getLangOpts().OpenMPIsTargetDevice)
10558 StringRef ParentName =
10563 StringRef ParentName =
10570 std::optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
10571 OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(
10573 if (!Res || *Res == OMPDeclareTargetDeclAttr::MT_Link ||
10574 ((*Res == OMPDeclareTargetDeclAttr::MT_To ||
10575 *Res == OMPDeclareTargetDeclAttr::MT_Enter) &&
10584 llvm::Constant *
Addr) {
10585 if (
CGM.getLangOpts().OMPTargetTriples.empty() &&
10586 !
CGM.getLangOpts().OpenMPIsTargetDevice)
10589 std::optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
10590 OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
10594 if (Res && *Res != OMPDeclareTargetDeclAttr::MT_Link &&
10599 if (
CGM.getLangOpts().OpenMPIsTargetDevice) {
10602 StringRef VarName =
CGM.getMangledName(VD);
10608 auto AddrOfGlobal = [&VD,
this]() {
return CGM.GetAddrOfGlobal(VD); };
10609 auto LinkageForVariable = [&VD,
this]() {
10610 return CGM.getLLVMLinkageVarDefinition(VD);
10613 std::vector<llvm::GlobalVariable *> GeneratedRefs;
10620 CGM.getMangledName(VD), GeneratedRefs,
CGM.getLangOpts().OpenMPSimd,
10621 CGM.getLangOpts().OMPTargetTriples, AddrOfGlobal, LinkageForVariable,
10622 CGM.getTypes().ConvertTypeForMem(
10623 CGM.getContext().getPointerType(VD->
getType())),
10626 for (
auto *ref : GeneratedRefs)
10627 CGM.addCompilerUsedGlobal(ref);
10640 std::optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
10641 OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
10644 if ((*Res == OMPDeclareTargetDeclAttr::MT_To ||
10645 *Res == OMPDeclareTargetDeclAttr::MT_Enter) &&
10647 CGM.EmitGlobal(VD);
10649 assert((*Res == OMPDeclareTargetDeclAttr::MT_Link ||
10650 ((*Res == OMPDeclareTargetDeclAttr::MT_To ||
10651 *Res == OMPDeclareTargetDeclAttr::MT_Enter) &&
10653 "Expected link clause or to clause with unified memory.");
10654 (void)
CGM.getOpenMPRuntime().getAddrOfDeclareTargetVar(VD);
10662 " Expected target-based directive.");
10667 if (Clause->getClauseKind() == OMPC_unified_shared_memory) {
10669 OMPBuilder.Config.setHasRequiresUnifiedSharedMemory(
true);
10670 }
else if (
const auto *AC =
10671 dyn_cast<OMPAtomicDefaultMemOrderClause>(Clause)) {
10672 switch (AC->getAtomicDefaultMemOrderKind()) {
10673 case OMPC_ATOMIC_DEFAULT_MEM_ORDER_acq_rel:
10676 case OMPC_ATOMIC_DEFAULT_MEM_ORDER_seq_cst:
10679 case OMPC_ATOMIC_DEFAULT_MEM_ORDER_relaxed:
10695 if (!VD || !VD->
hasAttr<OMPAllocateDeclAttr>())
10697 const auto *A = VD->
getAttr<OMPAllocateDeclAttr>();
10698 switch(A->getAllocatorType()) {
10699 case OMPAllocateDeclAttr::OMPNullMemAlloc:
10700 case OMPAllocateDeclAttr::OMPDefaultMemAlloc:
10702 case OMPAllocateDeclAttr::OMPLargeCapMemAlloc:
10703 case OMPAllocateDeclAttr::OMPCGroupMemAlloc:
10704 case OMPAllocateDeclAttr::OMPHighBWMemAlloc:
10705 case OMPAllocateDeclAttr::OMPLowLatMemAlloc:
10706 case OMPAllocateDeclAttr::OMPThreadMemAlloc:
10707 case OMPAllocateDeclAttr::OMPConstMemAlloc:
10708 case OMPAllocateDeclAttr::OMPPTeamMemAlloc:
10711 case OMPAllocateDeclAttr::OMPUserDefinedMemAlloc:
10712 llvm_unreachable(
"Expected predefined allocator for the variables with the "
10713 "static storage.");
10725 if (CGM.getLangOpts().OpenMPIsTargetDevice) {
10726 SavedShouldMarkAsGlobal = CGM.getOpenMPRuntime().ShouldMarkAsGlobal;
10727 CGM.getOpenMPRuntime().ShouldMarkAsGlobal = false;
10732 if (CGM.getLangOpts().OpenMPIsTargetDevice)
10733 CGM.getOpenMPRuntime().ShouldMarkAsGlobal = SavedShouldMarkAsGlobal;
10743 if (OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(D)) {
10745 if (
auto *F = dyn_cast_or_null<llvm::Function>(
10746 CGM.GetGlobalValue(
CGM.getMangledName(GD))))
10747 return !F->isDeclaration();
10759 llvm::Function *OutlinedFn,
10768 llvm::Value *Args[] = {
10770 CGF.
Builder.getInt32(CapturedVars.size()),
10773 RealArgs.append(std::begin(Args), std::end(Args));
10774 RealArgs.append(CapturedVars.begin(), CapturedVars.end());
10776 llvm::FunctionCallee RTLFn =
OMPBuilder.getOrCreateRuntimeFunction(
10777 CGM.getModule(), OMPRTL___kmpc_fork_teams);
10782 const Expr *NumTeams,
10783 const Expr *ThreadLimit,
10790 llvm::Value *NumTeamsVal =
10796 llvm::Value *ThreadLimitVal =
10803 llvm::Value *PushNumTeamsArgs[] = {RTLoc,
getThreadID(CGF, Loc), NumTeamsVal,
10806 CGM.getModule(), OMPRTL___kmpc_push_num_teams),
10811 const Expr *ThreadLimit,
10814 llvm::Value *ThreadLimitVal =
10821 llvm::Value *ThreadLimitArgs[] = {RTLoc,
getThreadID(CGF, Loc),
10824 CGM.getModule(), OMPRTL___kmpc_set_thread_limit),
10839 using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;
10841 llvm::Value *IfCondVal =
nullptr;
10846 llvm::Value *DeviceID =
nullptr;
10851 DeviceID = CGF.
Builder.getInt64(OMP_DEVICEID_UNDEF);
10855 MappableExprsHandler::MapCombinedInfoTy CombinedInfo;
10856 auto GenMapInfoCB =
10857 [&](InsertPointTy CodeGenIP) -> llvm::OpenMPIRBuilder::MapInfosTy & {
10858 CGF.
Builder.restoreIP(CodeGenIP);
10860 MappableExprsHandler MEHandler(D, CGF);
10861 MEHandler.generateAllInfo(CombinedInfo,
OMPBuilder);
10863 auto FillInfoMap = [&](MappableExprsHandler::MappingExprInfo &MapExpr) {
10866 if (
CGM.getCodeGenOpts().getDebugInfo() !=
10867 llvm::codegenoptions::NoDebugInfo) {
10868 CombinedInfo.Names.resize(CombinedInfo.Exprs.size());
10869 llvm::transform(CombinedInfo.Exprs, CombinedInfo.Names.begin(),
10873 return CombinedInfo;
10875 using BodyGenTy = llvm::OpenMPIRBuilder::BodyGenTy;
10876 auto BodyCB = [&](InsertPointTy CodeGenIP, BodyGenTy BodyGenType) {
10877 CGF.
Builder.restoreIP(CodeGenIP);
10878 switch (BodyGenType) {
10879 case BodyGenTy::Priv:
10883 case BodyGenTy::DupNoPriv:
10885 CodeGen.setAction(NoPrivAction);
10889 case BodyGenTy::NoPriv:
10891 CodeGen.setAction(NoPrivAction);
10896 return InsertPointTy(CGF.
Builder.GetInsertBlock(),
10897 CGF.
Builder.GetInsertPoint());
10900 auto DeviceAddrCB = [&](
unsigned int I, llvm::Value *NewDecl) {
10901 if (
const ValueDecl *DevVD = CombinedInfo.DevicePtrDecls[I]) {
10906 auto CustomMapperCB = [&](
unsigned int I) {
10907 llvm::Function *MFunc =
nullptr;
10908 if (CombinedInfo.Mappers[I]) {
10909 Info.HasMapper =
true;
10921 InsertPointTy CodeGenIP(CGF.
Builder.GetInsertBlock(),
10922 CGF.
Builder.GetInsertPoint());
10923 llvm::OpenMPIRBuilder::LocationDescription OmpLoc(CodeGenIP);
10924 llvm::OpenMPIRBuilder::InsertPointTy AfterIP =
10926 OmpLoc, AllocaIP, CodeGenIP, DeviceID, IfCondVal, Info, GenMapInfoCB,
10928 nullptr, BodyCB, DeviceAddrCB, RTLoc));
10929 CGF.
Builder.restoreIP(AfterIP);
10941 "Expecting either target enter, exit data, or update directives.");
10944 llvm::Value *MapTypesArray =
nullptr;
10945 llvm::Value *MapNamesArray =
nullptr;
10947 auto &&ThenGen = [
this, &D,
Device, &InputInfo, &MapTypesArray,
10950 llvm::Value *DeviceID =
nullptr;
10955 DeviceID = CGF.
Builder.getInt64(OMP_DEVICEID_UNDEF);
10959 llvm::Constant *PointerNum =
10966 {RTLoc, DeviceID, PointerNum,
10975 RuntimeFunction RTLFn;
10976 switch (D.getDirectiveKind()) {
10977 case OMPD_target_enter_data:
10978 RTLFn = HasNowait ? OMPRTL___tgt_target_data_begin_nowait_mapper
10979 : OMPRTL___tgt_target_data_begin_mapper;
10981 case OMPD_target_exit_data:
10982 RTLFn = HasNowait ? OMPRTL___tgt_target_data_end_nowait_mapper
10983 : OMPRTL___tgt_target_data_end_mapper;
10985 case OMPD_target_update:
10986 RTLFn = HasNowait ? OMPRTL___tgt_target_data_update_nowait_mapper
10987 : OMPRTL___tgt_target_data_update_mapper;
10989 case OMPD_parallel:
10991 case OMPD_parallel_for:
10992 case OMPD_parallel_master:
10993 case OMPD_parallel_sections:
10994 case OMPD_for_simd:
10995 case OMPD_parallel_for_simd:
10997 case OMPD_cancellation_point:
10999 case OMPD_threadprivate:
11000 case OMPD_allocate:
11005 case OMPD_sections:
11009 case OMPD_critical:
11010 case OMPD_taskyield:
11012 case OMPD_taskwait:
11013 case OMPD_taskgroup:
11019 case OMPD_target_data:
11020 case OMPD_distribute:
11021 case OMPD_distribute_simd:
11022 case OMPD_distribute_parallel_for:
11023 case OMPD_distribute_parallel_for_simd:
11024 case OMPD_teams_distribute:
11025 case OMPD_teams_distribute_simd:
11026 case OMPD_teams_distribute_parallel_for:
11027 case OMPD_teams_distribute_parallel_for_simd:
11028 case OMPD_declare_simd:
11029 case OMPD_declare_variant:
11030 case OMPD_begin_declare_variant:
11031 case OMPD_end_declare_variant:
11032 case OMPD_declare_target:
11033 case OMPD_end_declare_target:
11034 case OMPD_declare_reduction:
11035 case OMPD_declare_mapper:
11036 case OMPD_taskloop:
11037 case OMPD_taskloop_simd:
11038 case OMPD_master_taskloop:
11039 case OMPD_master_taskloop_simd:
11040 case OMPD_parallel_master_taskloop:
11041 case OMPD_parallel_master_taskloop_simd:
11043 case OMPD_target_simd:
11044 case OMPD_target_teams_distribute:
11045 case OMPD_target_teams_distribute_simd:
11046 case OMPD_target_teams_distribute_parallel_for:
11047 case OMPD_target_teams_distribute_parallel_for_simd:
11048 case OMPD_target_teams:
11049 case OMPD_target_parallel:
11050 case OMPD_target_parallel_for:
11051 case OMPD_target_parallel_for_simd:
11052 case OMPD_requires:
11053 case OMPD_metadirective:
11056 llvm_unreachable(
"Unexpected standalone target data directive.");
11060 OffloadingArgs.push_back(llvm::Constant::getNullValue(CGF.
Int32Ty));
11061 OffloadingArgs.push_back(llvm::Constant::getNullValue(CGF.
VoidPtrTy));
11062 OffloadingArgs.push_back(llvm::Constant::getNullValue(CGF.
Int32Ty));
11063 OffloadingArgs.push_back(llvm::Constant::getNullValue(CGF.
VoidPtrTy));
11066 OMPBuilder.getOrCreateRuntimeFunction(
CGM.getModule(), RTLFn),
11070 auto &&TargetThenGen = [
this, &ThenGen, &D, &InputInfo, &MapTypesArray,
11074 MappableExprsHandler::MapCombinedInfoTy CombinedInfo;
11076 MappableExprsHandler MEHandler(D, CGF);
11088 CGM.getPointerAlign());
11093 MapTypesArray = Info.RTArgs.MapTypesArray;
11094 MapNamesArray = Info.RTArgs.MapNamesArray;
11095 if (RequiresOuterTask)
11121struct ParamAttrTy {
11122 ParamKindTy Kind =
Vector;
11123 llvm::APSInt StrideOrArg;
11124 llvm::APSInt Alignment;
11125 bool HasVarStride =
false;
11158 unsigned Offset = 0;
11159 if (
const auto *MD = dyn_cast<CXXMethodDecl>(FD)) {
11160 if (ParamAttrs[Offset].Kind ==
Vector)
11161 CDT =
C.getPointerType(
C.getCanonicalTagType(MD->
getParent()));
11165 for (
unsigned I = 0, E = FD->
getNumParams(); I < E; ++I) {
11166 if (ParamAttrs[I + Offset].Kind ==
Vector) {
11178 return C.getTypeSize(CDT);
11186 llvm::raw_svector_ostream Out(Buffer);
11187 for (
const auto &ParamAttr : ParamAttrs) {
11188 switch (ParamAttr.Kind) {
11208 if (ParamAttr.HasVarStride)
11209 Out <<
"s" << ParamAttr.StrideOrArg;
11210 else if (ParamAttr.Kind == Linear || ParamAttr.Kind == LinearRef ||
11211 ParamAttr.Kind == LinearUVal || ParamAttr.Kind == LinearVal) {
11214 if (ParamAttr.StrideOrArg < 0)
11215 Out <<
'n' << -ParamAttr.StrideOrArg;
11216 else if (ParamAttr.StrideOrArg != 1)
11217 Out << ParamAttr.StrideOrArg;
11220 if (!!ParamAttr.Alignment)
11221 Out <<
'a' << ParamAttr.Alignment;
11224 return std::string(Out.str());
11229 const llvm::APSInt &VLENVal,
11231 OMPDeclareSimdDeclAttr::BranchStateTy State) {
11234 unsigned VecRegSize;
11236 ISADataTy ISAData[] = {
11252 case OMPDeclareSimdDeclAttr::BS_Undefined:
11253 Masked.push_back(
'N');
11254 Masked.push_back(
'M');
11256 case OMPDeclareSimdDeclAttr::BS_Notinbranch:
11257 Masked.push_back(
'N');
11259 case OMPDeclareSimdDeclAttr::BS_Inbranch:
11260 Masked.push_back(
'M');
11263 for (
char Mask : Masked) {
11264 for (
const ISADataTy &
Data : ISAData) {
11266 llvm::raw_svector_ostream Out(Buffer);
11267 Out <<
"_ZGV" <<
Data.ISA << Mask;
11270 assert(NumElts &&
"Non-zero simdlen/cdtsize expected");
11271 Out << llvm::APSInt::getUnsigned(
Data.VecRegSize / NumElts);
11276 Out <<
'_' << Fn->getName();
11277 Fn->addFnAttr(Out.str());
11295 if (Kind == ParamKindTy::Uniform)
11298 if (Kind == ParamKindTy::LinearUVal || Kind == ParamKindTy::LinearRef)
11301 if ((Kind == ParamKindTy::Linear || Kind == ParamKindTy::LinearVal) &&
11311 unsigned Size =
C.getTypeSize(QT);
11314 if (Size != 8 && Size != 16 && Size != 32 && Size != 64 && Size != 128)
11338 return C.getTypeSize(PTy);
11341 return C.getTypeSize(QT);
11343 return C.getTypeSize(
C.getUIntPtrType());
11349static std::tuple<unsigned, unsigned, bool>
11355 bool OutputBecomesInput =
false;
11359 Sizes.push_back(
getAArch64LS(RetType, ParamKindTy::Vector,
C));
11361 OutputBecomesInput =
true;
11363 for (
unsigned I = 0, E = FD->
getNumParams(); I < E; ++I) {
11368 assert(!Sizes.empty() &&
"Unable to determine NDS and WDS.");
11371 assert(llvm::all_of(Sizes,
11372 [](
unsigned Size) {
11373 return Size == 8 || Size == 16 || Size == 32 ||
11374 Size == 64 || Size == 128;
11378 return std::make_tuple(*llvm::min_element(Sizes), *llvm::max_element(Sizes),
11379 OutputBecomesInput);
11385template <
typename T>
11387 char ISA, StringRef ParSeq,
11388 StringRef MangledName,
bool OutputBecomesInput,
11389 llvm::Function *Fn) {
11391 llvm::raw_svector_ostream Out(Buffer);
11392 Out << Prefix << ISA << LMask << VLEN;
11393 if (OutputBecomesInput)
11395 Out << ParSeq <<
"_" << MangledName;
11396 Fn->addFnAttr(Out.str());
11402 StringRef Prefix,
char ISA,
11403 StringRef ParSeq, StringRef MangledName,
11404 bool OutputBecomesInput,
11405 llvm::Function *Fn) {
11409 OutputBecomesInput, Fn);
11411 OutputBecomesInput, Fn);
11415 OutputBecomesInput, Fn);
11417 OutputBecomesInput, Fn);
11421 OutputBecomesInput, Fn);
11423 OutputBecomesInput, Fn);
11428 OutputBecomesInput, Fn);
11431 llvm_unreachable(
"Scalar type is too wide.");
11439 OMPDeclareSimdDeclAttr::BranchStateTy State, StringRef MangledName,
11440 char ISA,
unsigned VecRegSize, llvm::Function *Fn,
SourceLocation SLoc) {
11444 const unsigned NDS = std::get<0>(
Data);
11445 const unsigned WDS = std::get<1>(
Data);
11446 const bool OutputBecomesInput = std::get<2>(
Data);
11450 if (UserVLEN == 1) {
11453 "The clause simdlen(1) has no effect when targeting aarch64.");
11460 if (ISA ==
'n' && UserVLEN && !llvm::isPowerOf2_32(UserVLEN)) {
11463 "power of 2 when targeting Advanced SIMD.");
11470 if (ISA ==
's' && UserVLEN != 0) {
11471 if ((UserVLEN * WDS > 2048) || (UserVLEN * WDS % 128 != 0)) {
11474 "lanes in the architectural constraints "
11475 "for SVE (min is 128-bit, max is "
11476 "2048-bit, by steps of 128-bit)");
11484 StringRef Prefix =
"_ZGV";
11490 OutputBecomesInput, Fn);
11492 assert(ISA ==
'n' &&
"Expected ISA either 's' or 'n'.");
11496 case OMPDeclareSimdDeclAttr::BS_Undefined:
11498 OutputBecomesInput, Fn);
11500 OutputBecomesInput, Fn);
11502 case OMPDeclareSimdDeclAttr::BS_Notinbranch:
11504 OutputBecomesInput, Fn);
11506 case OMPDeclareSimdDeclAttr::BS_Inbranch:
11508 OutputBecomesInput, Fn);
11518 OutputBecomesInput, Fn);
11520 assert(ISA ==
'n' &&
"Expected ISA either 's' or 'n'.");
11525 case OMPDeclareSimdDeclAttr::BS_Undefined:
11527 OutputBecomesInput, Fn);
11529 OutputBecomesInput, Fn);
11531 case OMPDeclareSimdDeclAttr::BS_Notinbranch:
11533 OutputBecomesInput, Fn);
11535 case OMPDeclareSimdDeclAttr::BS_Inbranch:
11537 OutputBecomesInput, Fn);
11545 llvm::Function *Fn) {
11550 llvm::DenseMap<const Decl *, unsigned> ParamPositions;
11552 ParamPositions.try_emplace(FD, 0);
11553 unsigned ParamPos = ParamPositions.size();
11555 ParamPositions.try_emplace(P->getCanonicalDecl(), ParamPos);
11561 for (
const Expr *E :
Attr->uniforms()) {
11565 Pos = ParamPositions[FD];
11568 ->getCanonicalDecl();
11569 auto It = ParamPositions.find(PVD);
11570 assert(It != ParamPositions.end() &&
"Function parameter not found");
11573 ParamAttrs[Pos].Kind = Uniform;
11576 auto *NI =
Attr->alignments_begin();
11577 for (
const Expr *E :
Attr->aligneds()) {
11582 Pos = ParamPositions[FD];
11586 ->getCanonicalDecl();
11587 auto It = ParamPositions.find(PVD);
11588 assert(It != ParamPositions.end() &&
"Function parameter not found");
11590 ParmTy = PVD->getType();
11592 ParamAttrs[Pos].Alignment =
11594 ? (*NI)->EvaluateKnownConstInt(
C)
11595 : llvm::APSInt::getUnsigned(
11596 C.toCharUnitsFromBits(
C.getOpenMPDefaultSimdAlign(ParmTy))
11601 auto *SI =
Attr->steps_begin();
11602 auto *MI =
Attr->modifiers_begin();
11603 for (
const Expr *E :
Attr->linears()) {
11606 bool IsReferenceType =
false;
11609 unsigned PtrRescalingFactor = 1;
11611 Pos = ParamPositions[FD];
11613 PtrRescalingFactor =
CGM.getContext()
11614 .getTypeSizeInChars(P->getPointeeType())
11618 ->getCanonicalDecl();
11619 auto It = ParamPositions.find(PVD);
11620 assert(It != ParamPositions.end() &&
"Function parameter not found");
11622 if (
auto *P = dyn_cast<PointerType>(PVD->getType()))
11623 PtrRescalingFactor =
CGM.getContext()
11624 .getTypeSizeInChars(P->getPointeeType())
11626 else if (PVD->getType()->isReferenceType()) {
11627 IsReferenceType =
true;
11628 PtrRescalingFactor =
11630 .getTypeSizeInChars(PVD->getType().getNonReferenceType())
11634 ParamAttrTy &ParamAttr = ParamAttrs[Pos];
11635 if (*MI == OMPC_LINEAR_ref)
11636 ParamAttr.Kind = LinearRef;
11637 else if (*MI == OMPC_LINEAR_uval)
11638 ParamAttr.Kind = LinearUVal;
11639 else if (IsReferenceType)
11640 ParamAttr.Kind = LinearVal;
11642 ParamAttr.Kind = Linear;
11644 ParamAttr.StrideOrArg = llvm::APSInt::getUnsigned(1);
11648 if (
const auto *DRE =
11650 if (
const auto *StridePVD =
11651 dyn_cast<ParmVarDecl>(DRE->getDecl())) {
11652 ParamAttr.HasVarStride =
true;
11653 auto It = ParamPositions.find(StridePVD->getCanonicalDecl());
11654 assert(It != ParamPositions.end() &&
11655 "Function parameter not found");
11656 ParamAttr.StrideOrArg = llvm::APSInt::getUnsigned(It->second);
11660 ParamAttr.StrideOrArg =
Result.Val.getInt();
11666 if (!ParamAttr.HasVarStride &&
11667 (ParamAttr.Kind == Linear || ParamAttr.Kind == LinearRef))
11668 ParamAttr.StrideOrArg = ParamAttr.StrideOrArg * PtrRescalingFactor;
11672 llvm::APSInt VLENVal;
11674 const Expr *VLENExpr =
Attr->getSimdlen();
11679 OMPDeclareSimdDeclAttr::BranchStateTy State =
Attr->getBranchState();
11680 if (
CGM.getTriple().isX86()) {
11682 }
else if (
CGM.getTriple().getArch() == llvm::Triple::aarch64) {
11683 unsigned VLEN = VLENVal.getExtValue();
11684 StringRef MangledName = Fn->getName();
11685 if (
CGM.getTarget().hasFeature(
"sve"))
11687 MangledName,
's', 128, Fn, ExprLoc);
11688 else if (
CGM.getTarget().hasFeature(
"neon"))
11690 MangledName,
'n', 128, Fn, ExprLoc);
11699class DoacrossCleanupTy final :
public EHScopeStack::Cleanup {
11701 static const int DoacrossFinArgs = 2;
11704 llvm::FunctionCallee RTLFn;
11705 llvm::Value *Args[DoacrossFinArgs];
11708 DoacrossCleanupTy(llvm::FunctionCallee RTLFn,
11711 assert(CallArgs.size() == DoacrossFinArgs);
11712 std::copy(CallArgs.begin(), CallArgs.end(), std::begin(Args));
11729 QualType Int64Ty =
C.getIntTypeForBitwidth(64,
true);
11737 RD =
C.buildImplicitRecord(
"kmp_dim");
11745 RD =
KmpDimTy->castAsRecordDecl();
11747 llvm::APInt Size(32, NumIterations.size());
11753 enum { LowerFD = 0, UpperFD, StrideFD };
11755 for (
unsigned I = 0, E = NumIterations.size(); I < E; ++I) {
11760 DimsLVal, *std::next(RD->
field_begin(), UpperFD));
11762 CGF.
EmitScalarExpr(NumIterations[I]), NumIterations[I]->getType(),
11763 Int64Ty, NumIterations[I]->getExprLoc());
11767 DimsLVal, *std::next(RD->
field_begin(), StrideFD));
11774 llvm::Value *Args[] = {
11777 llvm::ConstantInt::getSigned(
CGM.Int32Ty, NumIterations.size()),
11782 llvm::FunctionCallee RTLFn =
OMPBuilder.getOrCreateRuntimeFunction(
11783 CGM.getModule(), OMPRTL___kmpc_doacross_init);
11785 llvm::Value *FiniArgs[DoacrossCleanupTy::DoacrossFinArgs] = {
11787 llvm::FunctionCallee FiniRTLFn =
OMPBuilder.getOrCreateRuntimeFunction(
11788 CGM.getModule(), OMPRTL___kmpc_doacross_fini);
11793template <
typename T>
11795 const T *
C, llvm::Value *ULoc,
11796 llvm::Value *ThreadID) {
11799 llvm::APInt Size(32,
C->getNumLoops());
11803 for (
unsigned I = 0, E =
C->getNumLoops(); I < E; ++I) {
11804 const Expr *CounterVal =
C->getLoopData(I);
11805 assert(CounterVal);
11812 llvm::Value *Args[] = {
11815 llvm::FunctionCallee RTLFn;
11817 OMPDoacrossKind<T> ODK;
11818 if (ODK.isSource(
C)) {
11820 OMPRTL___kmpc_doacross_post);
11822 assert(ODK.isSink(
C) &&
"Expect sink modifier.");
11824 OMPRTL___kmpc_doacross_wait);
11844 llvm::FunctionCallee Callee,
11846 assert(Loc.
isValid() &&
"Outlined function call location must be valid.");
11849 if (
auto *Fn = dyn_cast<llvm::Function>(Callee.getCallee())) {
11850 if (Fn->doesNotThrow()) {
11861 emitCall(CGF, Loc, OutlinedFn, Args);
11865 if (
const auto *FD = dyn_cast<FunctionDecl>(D))
11866 if (OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(FD))
11872 const VarDecl *TargetParam)
const {
11879 const Expr *Allocator) {
11880 llvm::Value *AllocVal;
11890 AllocVal = llvm::Constant::getNullValue(
11900 if (!AllocateAlignment)
11903 return llvm::ConstantInt::get(
CGM.
SizeTy, AllocateAlignment->getQuantity());
11916 auto I = UntiedData.find(VD);
11917 if (I != UntiedData.end()) {
11918 UntiedAddr = I->second.first;
11919 UntiedRealAddr = I->second.second;
11923 if (CVD->
hasAttr<OMPAllocateDeclAttr>()) {
11932 Size = CGF.
Builder.CreateNUWAdd(
11934 Size = CGF.
Builder.CreateUDiv(Size,
CGM.getSize(Align));
11935 Size = CGF.
Builder.CreateNUWMul(Size,
CGM.getSize(Align));
11941 const auto *AA = CVD->
getAttr<OMPAllocateDeclAttr>();
11942 const Expr *Allocator = AA->getAllocator();
11946 Args.push_back(ThreadID);
11948 Args.push_back(Alignment);
11949 Args.push_back(Size);
11950 Args.push_back(AllocVal);
11951 llvm::omp::RuntimeFunction FnID =
11952 Alignment ? OMPRTL___kmpc_aligned_alloc : OMPRTL___kmpc_alloc;
11954 OMPBuilder.getOrCreateRuntimeFunction(
CGM.getModule(), FnID), Args,
11956 llvm::FunctionCallee FiniRTLFn =
OMPBuilder.getOrCreateRuntimeFunction(
11957 CGM.getModule(), OMPRTL___kmpc_free);
11965 class OMPAllocateCleanupTy final :
public EHScopeStack::Cleanup {
11966 llvm::FunctionCallee RTLFn;
11969 const Expr *AllocExpr;
11972 OMPAllocateCleanupTy(llvm::FunctionCallee RTLFn,
11974 const Expr *AllocExpr)
11975 : RTLFn(RTLFn), LocEncoding(LocEncoding),
Addr(
Addr),
11976 AllocExpr(AllocExpr) {}
11980 llvm::Value *Args[3];
11986 Args[2] = AllocVal;
11994 CGF.
EHStack.pushCleanup<OMPAllocateCleanupTy>(
11996 VDAddr, Allocator);
11997 if (UntiedRealAddr.
isValid())
12000 Region->emitUntiedSwitch(CGF);
12017 assert(CGM.getLangOpts().OpenMP &&
"Not in OpenMP mode.");
12021 CGM.getOpenMPRuntime().NontemporalDeclsStack.emplace_back();
12023 for (
const Stmt *Ref :
C->private_refs()) {
12024 const auto *SimpleRefExpr =
cast<Expr>(Ref)->IgnoreParenImpCasts();
12026 if (
const auto *DRE = dyn_cast<DeclRefExpr>(SimpleRefExpr)) {
12027 VD = DRE->getDecl();
12030 assert((ME->isImplicitCXXThis() ||
12032 "Expected member of current class.");
12033 VD = ME->getMemberDecl();
12043 CGM.getOpenMPRuntime().NontemporalDeclsStack.pop_back();
12049 std::pair<Address, Address>> &LocalVars)
12050 : CGM(CGF.CGM), NeedToPush(!LocalVars.empty()) {
12054 CGF.
CurFn, CGM.getOpenMPRuntime().UntiedLocalVarsStack.size());
12055 CGM.getOpenMPRuntime().UntiedLocalVarsStack.push_back(LocalVars);
12061 CGM.getOpenMPRuntime().UntiedLocalVarsStack.pop_back();
12065 assert(
CGM.getLangOpts().OpenMP &&
"Not in OpenMP mode.");
12067 return llvm::any_of(
12068 CGM.getOpenMPRuntime().NontemporalDeclsStack,
12072void CGOpenMPRuntime::LastprivateConditionalRAII::tryToDisableInnerAnalysis(
12076 llvm::DenseSet<CanonicalDeclPtr<const Decl>> NeedToCheckForLPCs;
12082 const CapturedStmt *CS = S.getCapturedStmt(CaptureRegions.front());
12090 for (
const Expr *Ref :
C->varlist()) {
12091 if (!Ref->getType()->isScalarType())
12093 const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
12096 NeedToCheckForLPCs.insert(DRE->getDecl());
12099 for (
const auto *
C : S.getClausesOfKind<OMPFirstprivateClause>()) {
12100 for (
const Expr *Ref :
C->varlist()) {
12101 if (!Ref->getType()->isScalarType())
12103 const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
12106 NeedToCheckForLPCs.insert(DRE->getDecl());
12109 for (
const auto *
C : S.getClausesOfKind<OMPLastprivateClause>()) {
12110 for (
const Expr *Ref :
C->varlist()) {
12111 if (!Ref->getType()->isScalarType())
12113 const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
12116 NeedToCheckForLPCs.insert(DRE->getDecl());
12119 for (
const auto *
C : S.getClausesOfKind<OMPReductionClause>()) {
12120 for (
const Expr *Ref :
C->varlist()) {
12121 if (!Ref->getType()->isScalarType())
12123 const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
12126 NeedToCheckForLPCs.insert(DRE->getDecl());
12129 for (
const auto *
C : S.getClausesOfKind<OMPLinearClause>()) {
12130 for (
const Expr *Ref :
C->varlist()) {
12131 if (!Ref->getType()->isScalarType())
12133 const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
12136 NeedToCheckForLPCs.insert(DRE->getDecl());
12139 for (
const Decl *VD : NeedToCheckForLPCs) {
12141 llvm::reverse(
CGM.getOpenMPRuntime().LastprivateConditionalStack)) {
12142 if (
Data.DeclToUniqueName.count(VD) > 0) {
12143 if (!
Data.Disabled)
12144 NeedToAddForLPCsAsDisabled.insert(VD);
12151CGOpenMPRuntime::LastprivateConditionalRAII::LastprivateConditionalRAII(
12154 Action((CGM.getLangOpts().OpenMP >= 50 &&
12157 return C->getKind() ==
12158 OMPC_LASTPRIVATE_conditional;
12160 ? ActionToDo::PushAsLastprivateConditional
12161 : ActionToDo::DoNotPush) {
12162 assert(
CGM.getLangOpts().OpenMP &&
"Not in OpenMP mode.");
12163 if (
CGM.getLangOpts().OpenMP < 50 || Action == ActionToDo::DoNotPush)
12165 assert(Action == ActionToDo::PushAsLastprivateConditional &&
12166 "Expected a push action.");
12168 CGM.getOpenMPRuntime().LastprivateConditionalStack.emplace_back();
12170 if (
C->getKind() != OMPC_LASTPRIVATE_conditional)
12173 for (
const Expr *Ref :
C->varlist()) {
12174 Data.DeclToUniqueName.insert(std::make_pair(
12179 Data.IVLVal = IVLVal;
12183CGOpenMPRuntime::LastprivateConditionalRAII::LastprivateConditionalRAII(
12185 :
CGM(CGF.
CGM), Action(ActionToDo::DoNotPush) {
12189 llvm::DenseSet<CanonicalDeclPtr<const Decl>> NeedToAddForLPCsAsDisabled;
12190 tryToDisableInnerAnalysis(S, NeedToAddForLPCsAsDisabled);
12191 if (!NeedToAddForLPCsAsDisabled.empty()) {
12192 Action = ActionToDo::DisableLastprivateConditional;
12193 LastprivateConditionalData &
Data =
12195 for (
const Decl *VD : NeedToAddForLPCsAsDisabled)
12196 Data.DeclToUniqueName.try_emplace(VD);
12198 Data.Disabled =
true;
12202CGOpenMPRuntime::LastprivateConditionalRAII
12205 return LastprivateConditionalRAII(CGF, S);
12209 if (CGM.getLangOpts().OpenMP < 50)
12211 if (Action == ActionToDo::DisableLastprivateConditional) {
12212 assert(CGM.getOpenMPRuntime().LastprivateConditionalStack.back().Disabled &&
12213 "Expected list of disabled private vars.");
12214 CGM.getOpenMPRuntime().LastprivateConditionalStack.pop_back();
12216 if (Action == ActionToDo::PushAsLastprivateConditional) {
12218 !CGM.getOpenMPRuntime().LastprivateConditionalStack.back().Disabled &&
12219 "Expected list of lastprivate conditional vars.");
12220 CGM.getOpenMPRuntime().LastprivateConditionalStack.pop_back();
12232 auto VI = I->getSecond().find(VD);
12233 if (VI == I->getSecond().end()) {
12234 RecordDecl *RD =
C.buildImplicitRecord(
"lasprivate.conditional");
12239 NewType =
C.getCanonicalTagType(RD);
12242 I->getSecond().try_emplace(VD, NewType, VDField, FiredField, BaseLVal);
12244 NewType = std::get<0>(VI->getSecond());
12245 VDField = std::get<1>(VI->getSecond());
12246 FiredField = std::get<2>(VI->getSecond());
12247 BaseLVal = std::get<3>(VI->getSecond());
12259class LastprivateConditionalRefChecker final
12262 const Expr *FoundE =
nullptr;
12263 const Decl *FoundD =
nullptr;
12264 StringRef UniqueDeclName;
12266 llvm::Function *FoundFn =
nullptr;
12272 llvm::reverse(LPM)) {
12273 auto It = D.DeclToUniqueName.find(E->
getDecl());
12274 if (It == D.DeclToUniqueName.end())
12280 UniqueDeclName = It->second;
12285 return FoundE == E;
12291 llvm::reverse(LPM)) {
12293 if (It == D.DeclToUniqueName.end())
12299 UniqueDeclName = It->second;
12304 return FoundE == E;
12306 bool VisitStmt(
const Stmt *S) {
12307 for (
const Stmt *Child : S->
children()) {
12310 if (
const auto *E = dyn_cast<Expr>(Child))
12318 explicit LastprivateConditionalRefChecker(
12319 ArrayRef<CGOpenMPRuntime::LastprivateConditionalData> LPM)
12321 std::tuple<const Expr *, const Decl *, StringRef, LValue, llvm::Function *>
12322 getFoundData()
const {
12323 return std::make_tuple(FoundE, FoundD, UniqueDeclName, IVLVal, FoundFn);
12330 StringRef UniqueDeclName,
12336 llvm::Constant *LastIV =
OMPBuilder.getOrCreateInternalVariable(
12337 LLIVTy,
getName({UniqueDeclName,
"iv"}));
12345 llvm::GlobalVariable *
Last =
OMPBuilder.getOrCreateInternalVariable(
12361 auto &&
CodeGen = [&LastIVLVal, &IVLVal, IVVal, &LVal, &LastLVal,
12367 llvm::Value *CmpRes;
12369 CmpRes = CGF.
Builder.CreateICmpSLE(LastIVVal, IVVal);
12372 "Loop iteration variable must be integer.");
12373 CmpRes = CGF.
Builder.CreateICmpULE(LastIVVal, IVVal);
12377 CGF.
Builder.CreateCondBr(CmpRes, ThenBB, ExitBB);
12398 "Aggregates are not supported in lastprivate conditional.");
12407 if (
CGM.getLangOpts().OpenMPSimd) {
12421 if (!Checker.Visit(LHS))
12423 const Expr *FoundE;
12424 const Decl *FoundD;
12425 StringRef UniqueDeclName;
12427 llvm::Function *FoundFn;
12428 std::tie(FoundE, FoundD, UniqueDeclName, IVLVal, FoundFn) =
12429 Checker.getFoundData();
12430 if (FoundFn != CGF.
CurFn) {
12435 "Lastprivate conditional is not found in outer region.");
12436 QualType StructTy = std::get<0>(It->getSecond());
12437 const FieldDecl* FiredDecl = std::get<2>(It->getSecond());
12448 FiredLVal, llvm::AtomicOrdering::Unordered,
12466 auto It = llvm::find_if(
12468 if (It == Range.end() || It->Fn != CGF.
CurFn)
12472 "Lastprivates must be registered already.");
12475 const CapturedStmt *CS = D.getCapturedStmt(CaptureRegions.back());
12476 for (
const auto &Pair : It->DeclToUniqueName) {
12477 const auto *VD =
cast<VarDecl>(Pair.first->getCanonicalDecl());
12480 auto I = LPCI->getSecond().find(Pair.first);
12481 assert(I != LPCI->getSecond().end() &&
12482 "Lastprivate must be rehistered already.");
12484 LValue BaseLVal = std::get<3>(I->getSecond());
12488 llvm::Value *Cmp = CGF.
Builder.CreateIsNotNull(Res);
12492 CGF.
Builder.CreateCondBr(Cmp, ThenBB, DoneBB);
12517 "Unknown lastprivate conditional variable.");
12518 StringRef UniqueName = It->second;
12519 llvm::GlobalVariable *GV =
CGM.getModule().getNamedGlobal(UniqueName);
12533 llvm_unreachable(
"Not supported in SIMD-only mode");
12540 llvm_unreachable(
"Not supported in SIMD-only mode");
12547 bool Tied,
unsigned &NumberOfParts) {
12548 llvm_unreachable(
"Not supported in SIMD-only mode");
12556 llvm_unreachable(
"Not supported in SIMD-only mode");
12562 const Expr *Hint) {
12563 llvm_unreachable(
"Not supported in SIMD-only mode");
12569 llvm_unreachable(
"Not supported in SIMD-only mode");
12575 const Expr *Filter) {
12576 llvm_unreachable(
"Not supported in SIMD-only mode");
12581 llvm_unreachable(
"Not supported in SIMD-only mode");
12587 llvm_unreachable(
"Not supported in SIMD-only mode");
12595 llvm_unreachable(
"Not supported in SIMD-only mode");
12602 llvm_unreachable(
"Not supported in SIMD-only mode");
12609 bool ForceSimpleCall) {
12610 llvm_unreachable(
"Not supported in SIMD-only mode");
12617 llvm_unreachable(
"Not supported in SIMD-only mode");
12622 llvm_unreachable(
"Not supported in SIMD-only mode");
12628 llvm_unreachable(
"Not supported in SIMD-only mode");
12634 llvm_unreachable(
"Not supported in SIMD-only mode");
12641 llvm_unreachable(
"Not supported in SIMD-only mode");
12647 llvm_unreachable(
"Not supported in SIMD-only mode");
12652 unsigned IVSize,
bool IVSigned,
12655 llvm_unreachable(
"Not supported in SIMD-only mode");
12663 llvm_unreachable(
"Not supported in SIMD-only mode");
12667 ProcBindKind ProcBind,
12669 llvm_unreachable(
"Not supported in SIMD-only mode");
12676 llvm_unreachable(
"Not supported in SIMD-only mode");
12682 llvm_unreachable(
"Not supported in SIMD-only mode");
12687 llvm_unreachable(
"Not supported in SIMD-only mode");
12693 llvm::AtomicOrdering AO) {
12694 llvm_unreachable(
"Not supported in SIMD-only mode");
12699 llvm::Function *TaskFunction,
12701 const Expr *IfCond,
12703 llvm_unreachable(
"Not supported in SIMD-only mode");
12710 llvm_unreachable(
"Not supported in SIMD-only mode");
12717 assert(Options.
SimpleReduction &&
"Only simple reduction is expected.");
12719 ReductionOps, Options);
12725 llvm_unreachable(
"Not supported in SIMD-only mode");
12730 bool IsWorksharingReduction) {
12731 llvm_unreachable(
"Not supported in SIMD-only mode");
12738 llvm_unreachable(
"Not supported in SIMD-only mode");
12743 llvm::Value *ReductionsPtr,
12745 llvm_unreachable(
"Not supported in SIMD-only mode");
12751 llvm_unreachable(
"Not supported in SIMD-only mode");
12757 llvm_unreachable(
"Not supported in SIMD-only mode");
12763 llvm_unreachable(
"Not supported in SIMD-only mode");
12768 llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID,
12770 llvm_unreachable(
"Not supported in SIMD-only mode");
12775 llvm::Function *OutlinedFn, llvm::Value *OutlinedFnID,
const Expr *IfCond,
12776 llvm::PointerIntPair<const Expr *, 2, OpenMPDeviceClauseModifier>
Device,
12780 llvm_unreachable(
"Not supported in SIMD-only mode");
12784 llvm_unreachable(
"Not supported in SIMD-only mode");
12788 llvm_unreachable(
"Not supported in SIMD-only mode");
12798 llvm::Function *OutlinedFn,
12800 llvm_unreachable(
"Not supported in SIMD-only mode");
12804 const Expr *NumTeams,
12805 const Expr *ThreadLimit,
12807 llvm_unreachable(
"Not supported in SIMD-only mode");
12814 llvm_unreachable(
"Not supported in SIMD-only mode");
12820 llvm_unreachable(
"Not supported in SIMD-only mode");
12826 llvm_unreachable(
"Not supported in SIMD-only mode");
12831 llvm_unreachable(
"Not supported in SIMD-only mode");
12836 llvm_unreachable(
"Not supported in SIMD-only mode");
12841 const VarDecl *NativeParam)
const {
12842 llvm_unreachable(
"Not supported in SIMD-only mode");
12848 const VarDecl *TargetParam)
const {
12849 llvm_unreachable(
"Not supported in SIMD-only mode");
static llvm::Value * emitCopyprivateCopyFunction(CodeGenModule &CGM, llvm::Type *ArgsElemType, ArrayRef< const Expr * > CopyprivateVars, ArrayRef< const Expr * > DestExprs, ArrayRef< const Expr * > SrcExprs, ArrayRef< const Expr * > AssignmentOps, SourceLocation Loc)
static StringRef getIdentStringFromSourceLocation(CodeGenFunction &CGF, SourceLocation Loc, SmallString< 128 > &Buffer)
static void emitOffloadingArraysAndArgs(CodeGenFunction &CGF, MappableExprsHandler::MapCombinedInfoTy &CombinedInfo, CGOpenMPRuntime::TargetDataInfo &Info, llvm::OpenMPIRBuilder &OMPBuilder, bool IsNonContiguous=false, bool ForEndCall=false)
Emit the arrays used to pass the captures and map information to the offloading runtime library.
static RecordDecl * createKmpTaskTWithPrivatesRecordDecl(CodeGenModule &CGM, QualType KmpTaskTQTy, ArrayRef< PrivateDataTy > Privates)
static void emitInitWithReductionInitializer(CodeGenFunction &CGF, const OMPDeclareReductionDecl *DRD, const Expr *InitOp, Address Private, Address Original, QualType Ty)
static Address castToBase(CodeGenFunction &CGF, QualType BaseTy, QualType ElTy, Address OriginalBaseAddress, llvm::Value *Addr)
static void emitPrivatesInit(CodeGenFunction &CGF, const OMPExecutableDirective &D, Address KmpTaskSharedsPtr, LValue TDBase, const RecordDecl *KmpTaskTWithPrivatesQTyRD, QualType SharedsTy, QualType SharedsPtrTy, const OMPTaskDataTy &Data, ArrayRef< PrivateDataTy > Privates, bool ForDup)
Emit initialization for private variables in task-based directives.
static void emitClauseForBareTargetDirective(CodeGenFunction &CGF, const OMPExecutableDirective &D, llvm::SmallVectorImpl< llvm::Value * > &Values)
static llvm::Value * emitDestructorsFunction(CodeGenModule &CGM, SourceLocation Loc, QualType KmpInt32Ty, QualType KmpTaskTWithPrivatesPtrQTy, QualType KmpTaskTWithPrivatesQTy)
static unsigned evaluateCDTSize(const FunctionDecl *FD, ArrayRef< ParamAttrTy > ParamAttrs)
static void EmitOMPAggregateReduction(CodeGenFunction &CGF, QualType Type, const VarDecl *LHSVar, const VarDecl *RHSVar, const llvm::function_ref< void(CodeGenFunction &CGF, const Expr *, const Expr *, const Expr *)> &RedOpGen, const Expr *XExpr=nullptr, const Expr *EExpr=nullptr, const Expr *UpExpr=nullptr)
Emit reduction operation for each element of array (required for array sections) LHS op = RHS.
static void emitTargetCallFallback(CGOpenMPRuntime *OMPRuntime, llvm::Function *OutlinedFn, const OMPExecutableDirective &D, llvm::SmallVectorImpl< llvm::Value * > &CapturedVars, bool RequiresOuterTask, const CapturedStmt &CS, bool OffloadingMandatory, CodeGenFunction &CGF)
static llvm::Value * emitReduceInitFunction(CodeGenModule &CGM, SourceLocation Loc, ReductionCodeGen &RCG, unsigned N)
Emits reduction initializer function:
static RTCancelKind getCancellationKind(OpenMPDirectiveKind CancelRegion)
static void emitDependData(CodeGenFunction &CGF, QualType &KmpDependInfoTy, llvm::PointerUnion< unsigned *, LValue * > Pos, const OMPTaskDataTy::DependData &Data, Address DependenciesArray)
static llvm::Value * emitTaskPrivateMappingFunction(CodeGenModule &CGM, SourceLocation Loc, const OMPTaskDataTy &Data, QualType PrivatesQTy, ArrayRef< PrivateDataTy > Privates)
Emit a privates mapping function for correct handling of private and firstprivate variables.
static llvm::Value * emitReduceCombFunction(CodeGenModule &CGM, SourceLocation Loc, ReductionCodeGen &RCG, unsigned N, const Expr *ReductionOp, const Expr *LHS, const Expr *RHS, const Expr *PrivateRef)
Emits reduction combiner function:
static RecordDecl * createPrivatesRecordDecl(CodeGenModule &CGM, ArrayRef< PrivateDataTy > Privates)
static llvm::Value * getAllocatorVal(CodeGenFunction &CGF, const Expr *Allocator)
Return allocator value from expression, or return a null allocator (the default when no allocator is specified).
static llvm::Function * emitProxyTaskFunction(CodeGenModule &CGM, SourceLocation Loc, OpenMPDirectiveKind Kind, QualType KmpInt32Ty, QualType KmpTaskTWithPrivatesPtrQTy, QualType KmpTaskTWithPrivatesQTy, QualType KmpTaskTQTy, QualType SharedsPtrTy, llvm::Function *TaskFunction, llvm::Value *TaskPrivatesMap)
Emit a proxy function which accepts kmp_task_t as the second argument.
static void addAArch64VectorName(T VLEN, StringRef LMask, StringRef Prefix, char ISA, StringRef ParSeq, StringRef MangledName, bool OutputBecomesInput, llvm::Function *Fn)
static bool isAllocatableDecl(const VarDecl *VD)
static llvm::Value * getAlignmentValue(CodeGenModule &CGM, const VarDecl *VD)
Return the alignment from an allocate directive if present.
static void emitTargetCallKernelLaunch(CGOpenMPRuntime *OMPRuntime, llvm::Function *OutlinedFn, const OMPExecutableDirective &D, llvm::SmallVectorImpl< llvm::Value * > &CapturedVars, bool RequiresOuterTask, const CapturedStmt &CS, bool OffloadingMandatory, llvm::PointerIntPair< const Expr *, 2, OpenMPDeviceClauseModifier > Device, llvm::Value *OutlinedFnID, CodeGenFunction::OMPTargetDataInfo &InputInfo, llvm::Value *&MapTypesArray, llvm::Value *&MapNamesArray, llvm::function_ref< llvm::Value *(CodeGenFunction &CGF, const OMPLoopDirective &D)> SizeEmitter, CodeGenFunction &CGF, CodeGenModule &CGM)
static llvm::OffloadEntriesInfoManager::OMPTargetGlobalVarEntryKind convertCaptureClause(const VarDecl *VD)
static std::tuple< unsigned, unsigned, bool > getNDSWDS(const FunctionDecl *FD, ArrayRef< ParamAttrTy > ParamAttrs)
static const OMPExecutableDirective * getNestedDistributeDirective(ASTContext &Ctx, const OMPExecutableDirective &D)
Check for inner distribute directive.
static std::pair< llvm::Value *, llvm::Value * > getPointerAndSize(CodeGenFunction &CGF, const Expr *E)
static const VarDecl * getBaseDecl(const Expr *Ref, const DeclRefExpr *&DE)
static bool isTrivial(ASTContext &Ctx, const Expr *E)
Checks if the expression is constant or does not have non-trivial function calls.
static OpenMPSchedType getRuntimeSchedule(OpenMPScheduleClauseKind ScheduleKind, bool Chunked, bool Ordered)
Map the OpenMP loop schedule to the runtime enumeration.
static void getNumThreads(CodeGenFunction &CGF, const CapturedStmt *CS, const Expr **E, int32_t &UpperBound, bool UpperBoundOnly, llvm::Value **CondVal)
Check for a num threads constant value (stored in DefaultVal), or expression (stored in E).
static llvm::Value * emitDeviceID(llvm::PointerIntPair< const Expr *, 2, OpenMPDeviceClauseModifier > Device, CodeGenFunction &CGF)
static const OMPDeclareReductionDecl * getReductionInit(const Expr *ReductionOp)
Check if the combiner is a call to a UDR combiner and, if so, return the UDR decl used for the reduction.
static bool checkInitIsRequired(CodeGenFunction &CGF, ArrayRef< PrivateDataTy > Privates)
Check if duplication function is required for taskloops.
static bool checkDestructorsRequired(const RecordDecl *KmpTaskTWithPrivatesQTyRD, ArrayRef< PrivateDataTy > Privates)
Checks if destructor function is required to be generated.
static llvm::TargetRegionEntryInfo getEntryInfoFromPresumedLoc(CodeGenModule &CGM, llvm::OpenMPIRBuilder &OMPBuilder, SourceLocation BeginLoc, llvm::StringRef ParentName="")
static void genMapInfo(MappableExprsHandler &MEHandler, CodeGenFunction &CGF, MappableExprsHandler::MapCombinedInfoTy &CombinedInfo, llvm::OpenMPIRBuilder &OMPBuilder, const llvm::DenseSet< CanonicalDeclPtr< const Decl > > &SkippedVarSet=llvm::DenseSet< CanonicalDeclPtr< const Decl > >())
static void emitForStaticInitCall(CodeGenFunction &CGF, llvm::Value *UpdateLocation, llvm::Value *ThreadId, llvm::FunctionCallee ForStaticInitFunction, OpenMPSchedType Schedule, OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2, const CGOpenMPRuntime::StaticRTInput &Values)
static LValue loadToBegin(CodeGenFunction &CGF, QualType BaseTy, QualType ElTy, LValue BaseLV)
static void getKmpAffinityType(ASTContext &C, QualType &KmpTaskAffinityInfoTy)
Builds the kmp_task_affinity_info type, if it is not built yet.
static llvm::Constant * emitMappingInformation(CodeGenFunction &CGF, llvm::OpenMPIRBuilder &OMPBuilder, MappableExprsHandler::MappingExprInfo &MapExprs)
Emit a string constant containing the names of the values mapped to the offloading runtime library.
static void getDependTypes(ASTContext &C, QualType &KmpDependInfoTy, QualType &FlagsTy)
Builds kmp_depend_info, if it is not built yet, and builds flags type.
static llvm::Value * emitTaskDupFunction(CodeGenModule &CGM, SourceLocation Loc, const OMPExecutableDirective &D, QualType KmpTaskTWithPrivatesPtrQTy, const RecordDecl *KmpTaskTWithPrivatesQTyRD, const RecordDecl *KmpTaskTQTyRD, QualType SharedsTy, QualType SharedsPtrTy, const OMPTaskDataTy &Data, ArrayRef< PrivateDataTy > Privates, bool WithLastIter)
Emit task_dup function (for initialization of private/firstprivate/lastprivate vars and last_iter flag).
static llvm::OffloadEntriesInfoManager::OMPTargetDeviceClauseKind convertDeviceClause(const VarDecl *VD)
static llvm::Value * emitReduceFiniFunction(CodeGenModule &CGM, SourceLocation Loc, ReductionCodeGen &RCG, unsigned N)
Emits reduction finalizer function:
static void EmitOMPAggregateInit(CodeGenFunction &CGF, Address DestAddr, QualType Type, bool EmitDeclareReductionInit, const Expr *Init, const OMPDeclareReductionDecl *DRD, Address SrcAddr=Address::invalid())
Emit initialization of arrays of complex types.
static bool getAArch64PBV(QualType QT, ASTContext &C)
Pass By Value (PBV), as defined in 3.1.2 of the AAVFABI.
static unsigned getAArch64LS(QualType QT, ParamKindTy Kind, ASTContext &C)
Computes the lane size (LS) of a return type or of an input parameter, as defined by LS(P) in 3.2.1 of the AAVFABI.
static void EmitDoacrossOrdered(CodeGenFunction &CGF, CodeGenModule &CGM, const T *C, llvm::Value *ULoc, llvm::Value *ThreadID)
static RTLDependenceKindTy translateDependencyKind(OpenMPDependClauseKind K)
Translates internal dependency kind into the runtime kind.
static void emitTargetCallElse(CGOpenMPRuntime *OMPRuntime, llvm::Function *OutlinedFn, const OMPExecutableDirective &D, llvm::SmallVectorImpl< llvm::Value * > &CapturedVars, bool RequiresOuterTask, const CapturedStmt &CS, bool OffloadingMandatory, CodeGenFunction &CGF)
static llvm::Function * emitCombinerOrInitializer(CodeGenModule &CGM, QualType Ty, const Expr *CombinerInitializer, const VarDecl *In, const VarDecl *Out, bool IsCombiner)
static void emitX86DeclareSimdFunction(const FunctionDecl *FD, llvm::Function *Fn, const llvm::APSInt &VLENVal, ArrayRef< ParamAttrTy > ParamAttrs, OMPDeclareSimdDeclAttr::BranchStateTy State)
static void emitReductionCombiner(CodeGenFunction &CGF, const Expr *ReductionOp)
Emit reduction combiner.
static std::string mangleVectorParameters(ArrayRef< ParamAttrTy > ParamAttrs)
Mangle the parameter part of the vector function name according to their OpenMP classification.
static std::string generateUniqueName(CodeGenModule &CGM, llvm::StringRef Prefix, const Expr *Ref)
static llvm::Function * emitParallelOrTeamsOutlinedFunction(CodeGenModule &CGM, const OMPExecutableDirective &D, const CapturedStmt *CS, const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind, const StringRef OutlinedHelperName, const RegionCodeGenTy &CodeGen)
static void emitAArch64DeclareSimdFunction(CodeGenModule &CGM, const FunctionDecl *FD, unsigned UserVLEN, ArrayRef< ParamAttrTy > ParamAttrs, OMPDeclareSimdDeclAttr::BranchStateTy State, StringRef MangledName, char ISA, unsigned VecRegSize, llvm::Function *Fn, SourceLocation SLoc)
Emit vector function attributes for AArch64, as defined in the AAVFABI.
static Address emitAddrOfVarFromArray(CodeGenFunction &CGF, Address Array, unsigned Index, const VarDecl *Var)
Given an array of pointers to variables, project the address of a given variable.
static llvm::Value * emitDynCGGroupMem(const OMPExecutableDirective &D, CodeGenFunction &CGF)
static bool isAssumedToBeNotEmitted(const ValueDecl *VD, bool IsDevice)
static void addAArch64AdvSIMDNDSNames(unsigned NDS, StringRef Mask, StringRef Prefix, char ISA, StringRef ParSeq, StringRef MangledName, bool OutputBecomesInput, llvm::Function *Fn)
static FieldDecl * addFieldToRecordDecl(ASTContext &C, DeclContext *DC, QualType FieldTy)
static ValueDecl * getDeclFromThisExpr(const Expr *E)
static void genMapInfoForCaptures(MappableExprsHandler &MEHandler, CodeGenFunction &CGF, const CapturedStmt &CS, llvm::SmallVectorImpl< llvm::Value * > &CapturedVars, llvm::OpenMPIRBuilder &OMPBuilder, llvm::DenseSet< CanonicalDeclPtr< const Decl > > &MappedVarSet, MappableExprsHandler::MapCombinedInfoTy &CombinedInfo)
static RecordDecl * createKmpTaskTRecordDecl(CodeGenModule &CGM, OpenMPDirectiveKind Kind, QualType KmpInt32Ty, QualType KmpRoutineEntryPointerQTy)
static int addMonoNonMonoModifier(CodeGenModule &CGM, OpenMPSchedType Schedule, OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2)
static bool getAArch64MTV(QualType QT, ParamKindTy Kind)
Maps To Vector (MTV), as defined in 4.1.1 of the AAVFABI (2021Q1).
@ LLVM_MARK_AS_BITMASK_ENUM
This file defines OpenMP AST classes for clauses.
Defines some OpenMP-specific enums and functions.
Defines the SourceManager interface.
This file defines OpenMP AST classes for executable directives and clauses.
__DEVICE__ int max(int __a, int __b)
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
SourceManager & getSourceManager()
const ConstantArrayType * getAsConstantArrayType(QualType T) const
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
const ASTRecordLayout & getASTRecordLayout(const RecordDecl *D) const
Get or compute information about the layout of the specified record (struct/union/class) D,...
bool hasSameType(QualType T1, QualType T2) const
Determine whether the given types T1 and T2 are equivalent.
QualType getPointerType(QualType T) const
Return the uniqued reference to the type for a pointer to the specified type.
QualType getConstantArrayType(QualType EltTy, const llvm::APInt &ArySize, const Expr *SizeExpr, ArraySizeModifier ASM, unsigned IndexTypeQuals) const
Return the unique reference to the type for a constant array of the specified element type.
const LangOptions & getLangOpts() const
QualType getIntTypeForBitwidth(unsigned DestWidth, unsigned Signed) const
getIntTypeForBitwidth - sets integer QualTy according to specified details: bitwidth,...
CharUnits getDeclAlign(const Decl *D, bool ForAlignof=false) const
Return a conservative estimate of the alignment of the specified decl D.
int64_t toBits(CharUnits CharSize) const
Convert a size in characters to a size in bits.
const ArrayType * getAsArrayType(QualType T) const
Type Query functions.
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
const VariableArrayType * getAsVariableArrayType(QualType T) const
QualType getSizeType() const
Return the unique type for "size_t" (C99 7.17), defined in <stddef.h>.
unsigned getTypeAlign(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in bits.
CharUnits getSize() const
getSize - Get the record size in characters.
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
CharUnits getNonVirtualSize() const
getNonVirtualSize - Get the non-virtual size (in chars) of an object, which is the size of the object without virtual bases.
static QualType getBaseOriginalType(const Expr *Base)
Return original type of the base expression for array section.
Represents an array type, per C99 6.7.5.2 - Array Declarators.
Attr - This represents one attribute.
Represents a C++ constructor within a class.
Represents a C++ destructor within a class.
const CXXRecordDecl * getParent() const
Return the parent of this method declaration, which is the class in which this method is defined.
QualType getFunctionObjectParameterType() const
bool isLambda() const
Determine whether this class describes a lambda function object.
void getCaptureFields(llvm::DenseMap< const ValueDecl *, FieldDecl * > &Captures, FieldDecl *&ThisCapture) const
For a closure type, retrieve the mapping from captured variables and 'this' to the non-static data members that capture them.
unsigned getNumBases() const
Retrieves the number of base classes of this class.
base_class_range vbases()
capture_const_range captures() const
CXXDestructorDecl * getDestructor() const
Returns the destructor decl for this class.
CanProxy< U > castAs() const
A wrapper class around a pointer that always points to its canonical declaration.
Describes the capture of either a variable, or 'this', or variable-length array type.
bool capturesVariableByCopy() const
Determine whether this capture handles a variable by copy.
VarDecl * getCapturedVar() const
Retrieve the declaration of the variable being captured.
bool capturesVariableArrayType() const
Determine whether this capture handles a variable-length array type.
bool capturesThis() const
Determine whether this capture handles the C++ 'this' pointer.
bool capturesVariable() const
Determine whether this capture handles a variable (by reference).
This captures a statement into a function.
const Capture * const_capture_iterator
capture_iterator capture_end() const
Retrieve an iterator pointing past the end of the sequence of captures.
const RecordDecl * getCapturedRecordDecl() const
Retrieve the record declaration for captured variables.
Stmt * getCapturedStmt()
Retrieve the statement being captured.
bool capturesVariable(const VarDecl *Var) const
True if this variable has been captured.
capture_iterator capture_begin()
Retrieve an iterator pointing to the first capture.
CharUnits - This is an opaque type for sizes expressed in character units.
bool isZero() const
isZero - Test whether the quantity equals zero.
llvm::Align getAsAlign() const
getAsAlign - Returns Quantity as a valid llvm::Align. Beware: llvm::Align assumes power-of-two 8-bit bytes.
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
CharUnits alignmentOfArrayElement(CharUnits elementSize) const
Given that this is the alignment of the first element of an array, return the minimum alignment of an...
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
CharUnits alignTo(const CharUnits &Align) const
alignTo - Returns the next integer (mod 2**64) that is greater than or equal to this quantity and is a multiple of Align.
Like RawAddress, an abstract representation of an aligned address, but the pointer contained in this class is possibly signed.
llvm::Value * emitRawPointer(CodeGenFunction &CGF) const
Return the pointer contained in this class after authenticating it and adding offset to it if necessa...
CharUnits getAlignment() const
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Address withPointer(llvm::Value *NewPointer, KnownNonNull_t IsKnownNonNull) const
Return address with different pointer, but same element type and alignment.
Address withElementType(llvm::Type *ElemTy) const
Return address with different element type, but same pointer and alignment.
llvm::PointerType * getType() const
Return the type of the pointer value.
static ApplyDebugLocation CreateArtificial(CodeGenFunction &CGF)
Apply TemporaryLocation if it is valid.
static ApplyDebugLocation CreateDefaultArtificial(CodeGenFunction &CGF, SourceLocation TemporaryLocation)
Apply TemporaryLocation if it is valid.
static ApplyDebugLocation CreateEmpty(CodeGenFunction &CGF)
Set the IRBuilder to not attach debug locations.
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
Address CreateGEP(CodeGenFunction &CGF, Address Addr, llvm::Value *Index, const llvm::Twine &Name="")
Address CreatePointerBitCastOrAddrSpaceCast(Address Addr, llvm::Type *Ty, llvm::Type *ElementTy, const llvm::Twine &Name="")
Address CreateConstArrayGEP(Address Addr, uint64_t Index, const llvm::Twine &Name="")
Given addr = [n x T]* ... produce name = getelementptr inbounds addr, i64 0, i64 index where i64 is a...
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
llvm::CallInst * CreateMemCpy(Address Dest, Address Src, llvm::Value *Size, bool IsVolatile=false)
Address CreateConstGEP(Address Addr, uint64_t Index, const llvm::Twine &Name="")
Given addr = T* ... produce name = getelementptr inbounds addr, i64 index where i64 is actually the t...
CGFunctionInfo - Class to encapsulate the information about a function definition.
DisableAutoDeclareTargetRAII(CodeGenModule &CGM)
~DisableAutoDeclareTargetRAII()
~LastprivateConditionalRAII()
static LastprivateConditionalRAII disable(CodeGenFunction &CGF, const OMPExecutableDirective &S)
NontemporalDeclsRAII(CodeGenModule &CGM, const OMPLoopDirective &S)
Struct that keeps all the relevant information that should be kept throughout a 'target data' region.
llvm::DenseMap< const ValueDecl *, llvm::Value * > CaptureDeviceAddrMap
Map between a declaration of a capture and the corresponding new llvm address where the runtime returns the device pointer.
~UntiedTaskLocalDeclsRAII()
UntiedTaskLocalDeclsRAII(CodeGenFunction &CGF, const llvm::MapVector< CanonicalDeclPtr< const VarDecl >, std::pair< Address, Address > > &LocalVars)
virtual Address emitThreadIDAddress(CodeGenFunction &CGF, SourceLocation Loc)
Emits address of the word in a memory where current thread id is stored.
llvm::StringSet ThreadPrivateWithDefinition
Set of threadprivate variables with the generated initializer.
CGOpenMPRuntime(CodeGenModule &CGM)
virtual void emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc, const OMPExecutableDirective &D, llvm::Function *TaskFunction, QualType SharedsTy, Address Shareds, const Expr *IfCond, const OMPTaskDataTy &Data)
Emit task region for the task directive.
void createOffloadEntriesAndInfoMetadata()
Creates all the offload entries in the current compilation unit along with the associated metadata.
const Expr * getNumTeamsExprForTargetDirective(CodeGenFunction &CGF, const OMPExecutableDirective &D, int32_t &MinTeamsVal, int32_t &MaxTeamsVal)
Emit the number of teams for a target directive.
virtual Address getAddrOfThreadPrivate(CodeGenFunction &CGF, const VarDecl *VD, Address VDAddr, SourceLocation Loc)
Returns address of the threadprivate variable for the current thread.
void emitDeferredTargetDecls() const
Emit deferred declare target variables marked for deferred emission.
virtual llvm::Value * emitForNext(CodeGenFunction &CGF, SourceLocation Loc, unsigned IVSize, bool IVSigned, Address IL, Address LB, Address UB, Address ST)
Call __kmpc_dispatch_next( ident_t *loc, kmp_int32 tid, kmp_int32 *p_lastiter, kmp_int[32|64] *p_lowe...
bool markAsGlobalTarget(GlobalDecl GD)
Marks the declaration as already emitted for the device code and returns true, if it was marked alrea...
virtual void emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc, llvm::Function *OutlinedFn, ArrayRef< llvm::Value * > CapturedVars, const Expr *IfCond, llvm::Value *NumThreads, OpenMPNumThreadsClauseModifier NumThreadsModifier=OMPC_NUMTHREADS_unknown, OpenMPSeverityClauseKind Severity=OMPC_SEVERITY_fatal, const Expr *Message=nullptr)
Emits code for parallel or serial call of the OutlinedFn with variables captured in a record which ad...
llvm::SmallDenseSet< CanonicalDeclPtr< const Decl > > NontemporalDeclsSet
virtual void emitTargetDataStandAloneCall(CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond, const Expr *Device)
Emit the data mapping/movement code associated with the directive D that should be of the form 'targe...
virtual void emitNumThreadsClause(CodeGenFunction &CGF, llvm::Value *NumThreads, SourceLocation Loc, OpenMPNumThreadsClauseModifier Modifier=OMPC_NUMTHREADS_unknown, OpenMPSeverityClauseKind Severity=OMPC_SEVERITY_fatal, SourceLocation SeverityLoc=SourceLocation(), const Expr *Message=nullptr, SourceLocation MessageLoc=SourceLocation())
Emits call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32 global_tid, kmp_int32 num_threads) ...
QualType SavedKmpTaskloopTQTy
Saved kmp_task_t for taskloop-based directive.
virtual void emitSingleRegion(CodeGenFunction &CGF, const RegionCodeGenTy &SingleOpGen, SourceLocation Loc, ArrayRef< const Expr * > CopyprivateVars, ArrayRef< const Expr * > DestExprs, ArrayRef< const Expr * > SrcExprs, ArrayRef< const Expr * > AssignmentOps)
Emits a single region.
virtual bool emitTargetGlobal(GlobalDecl GD)
Emit the global GD if it is meaningful for the target.
void setLocThreadIdInsertPt(CodeGenFunction &CGF, bool AtCurrentPoint=false)
std::string getOutlinedHelperName(StringRef Name) const
Get the function name of an outlined region.
bool HasEmittedDeclareTargetRegion
Flag for keeping track of whether a device routine has been emitted.
llvm::Constant * getOrCreateThreadPrivateCache(const VarDecl *VD)
If the specified mangled name is not in the module, create and return threadprivate cache object.
virtual Address getTaskReductionItem(CodeGenFunction &CGF, SourceLocation Loc, llvm::Value *ReductionsPtr, LValue SharedLVal)
Get the address of void * type of the private copy of the reduction item specified by the SharedLVal...
virtual void emitForDispatchDeinit(CodeGenFunction &CGF, SourceLocation Loc)
This is used for non static scheduled types and when the ordered clause is present on the loop constr...
void emitCall(CodeGenFunction &CGF, SourceLocation Loc, llvm::FunctionCallee Callee, ArrayRef< llvm::Value * > Args={}) const
Emits Callee function call with arguments Args with location Loc.
virtual void getDefaultScheduleAndChunk(CodeGenFunction &CGF, const OMPLoopDirective &S, OpenMPScheduleClauseKind &ScheduleKind, const Expr *&ChunkExpr) const
Choose default schedule type and chunk value for the schedule clause.
virtual std::pair< llvm::Function *, llvm::Function * > getUserDefinedReduction(const OMPDeclareReductionDecl *D)
Get combiner/initializer for the specified user-defined reduction, if any.
virtual bool isGPU() const
Returns true if the current target is a GPU.
static const Stmt * getSingleCompoundChild(ASTContext &Ctx, const Stmt *Body)
Checks if the Body is the CompoundStmt and returns its child statement iff there is only one that is ...
virtual void emitDeclareTargetFunction(const FunctionDecl *FD, llvm::GlobalValue *GV)
Emit code for handling declare target functions in the runtime.
bool HasRequiresUnifiedSharedMemory
Flag for keeping track of whether a requires unified_shared_memory directive is present.
llvm::Value * emitUpdateLocation(CodeGenFunction &CGF, SourceLocation Loc, unsigned Flags=0, bool EmitLoc=false)
Emits object of ident_t type with info for source location.
bool isLocalVarInUntiedTask(CodeGenFunction &CGF, const VarDecl *VD) const
Returns true if the variable is a local variable in untied task.
virtual void emitTeamsCall(CodeGenFunction &CGF, const OMPExecutableDirective &D, SourceLocation Loc, llvm::Function *OutlinedFn, ArrayRef< llvm::Value * > CapturedVars)
Emits code for teams call of the OutlinedFn with variables captured in a record which address is stor...
virtual void emitCancellationPointCall(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind CancelRegion)
Emit code for 'cancellation point' construct.
virtual llvm::Function * emitThreadPrivateVarDefinition(const VarDecl *VD, Address VDAddr, SourceLocation Loc, bool PerformInit, CodeGenFunction *CGF=nullptr)
Emit a code for initialization of threadprivate variable.
FunctionUDMMapTy FunctionUDMMap
virtual ConstantAddress getAddrOfDeclareTargetVar(const VarDecl *VD)
Returns the address of the variable marked as declare target with link clause OR as declare target wi...
llvm::Function * getOrCreateUserDefinedMapperFunc(const OMPDeclareMapperDecl *D)
Get the function for the specified user-defined mapper.
OpenMPLocThreadIDMapTy OpenMPLocThreadIDMap
virtual void functionFinished(CodeGenFunction &CGF)
Cleans up references to the objects in finished function.
virtual llvm::Function * emitTeamsOutlinedFunction(CodeGenFunction &CGF, const OMPExecutableDirective &D, const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen)
Emits outlined function for the specified OpenMP teams directive D.
QualType KmpTaskTQTy
Type typedef struct kmp_task { void * shareds; /**< pointer to block of pointers to shared vars / k...
llvm::OpenMPIRBuilder OMPBuilder
An OpenMP-IR-Builder instance.
virtual void emitDoacrossInit(CodeGenFunction &CGF, const OMPLoopDirective &D, ArrayRef< Expr * > NumIterations)
Emit initialization for doacross loop nesting support.
virtual void adjustTargetSpecificDataForLambdas(CodeGenFunction &CGF, const OMPExecutableDirective &D) const
Adjust some parameters for the target-based directives, like addresses of the variables captured by r...
FunctionUDRMapTy FunctionUDRMap
virtual void emitTargetDataCalls(CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond, const Expr *Device, const RegionCodeGenTy &CodeGen, CGOpenMPRuntime::TargetDataInfo &Info)
Emit the target data mapping code associated with D.
virtual unsigned getDefaultLocationReserved2Flags() const
Returns additional flags that can be stored in reserved_2 field of the default location.
virtual Address getParameterAddress(CodeGenFunction &CGF, const VarDecl *NativeParam, const VarDecl *TargetParam) const
Gets the address of the native argument basing on the address of the target-specific parameter.
void emitUsesAllocatorsFini(CodeGenFunction &CGF, const Expr *Allocator)
Destroys user defined allocators specified in the uses_allocators clause.
QualType KmpTaskAffinityInfoTy
Type typedef struct kmp_task_affinity_info { kmp_intptr_t base_addr; size_t len; struct { bool flag1 ...
void emitPrivateReduction(CodeGenFunction &CGF, SourceLocation Loc, const Expr *Privates, const Expr *LHSExprs, const Expr *RHSExprs, const Expr *ReductionOps)
Emits code for private variable reduction.
llvm::Value * emitNumTeamsForTargetDirective(CodeGenFunction &CGF, const OMPExecutableDirective &D)
virtual void emitTargetOutlinedFunctionHelper(const OMPExecutableDirective &D, StringRef ParentName, llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID, bool IsOffloadEntry, const RegionCodeGenTy &CodeGen)
Helper to emit outlined function for 'target' directive.
void scanForTargetRegionsFunctions(const Stmt *S, StringRef ParentName)
Start scanning from statement S and emit all target regions found along the way.
SmallVector< llvm::Value *, 4 > emitDepobjElementsSizes(CodeGenFunction &CGF, QualType &KmpDependInfoTy, const OMPTaskDataTy::DependData &Data)
virtual llvm::Value * emitMessageClause(CodeGenFunction &CGF, const Expr *Message, SourceLocation Loc)
virtual void emitTaskgroupRegion(CodeGenFunction &CGF, const RegionCodeGenTy &TaskgroupOpGen, SourceLocation Loc)
Emit a taskgroup region.
llvm::DenseMap< llvm::Function *, llvm::DenseMap< CanonicalDeclPtr< const Decl >, std::tuple< QualType, const FieldDecl *, const FieldDecl *, LValue > > > LastprivateConditionalToTypes
Maps local variables marked as lastprivate conditional to their internal types.
virtual bool emitTargetGlobalVariable(GlobalDecl GD)
Emit the global variable if it is a valid device global variable.
virtual void emitNumTeamsClause(CodeGenFunction &CGF, const Expr *NumTeams, const Expr *ThreadLimit, SourceLocation Loc)
Emits call to void __kmpc_push_num_teams(ident_t *loc, kmp_int32 global_tid, kmp_int32 num_teams,...
bool hasRequiresUnifiedSharedMemory() const
Return whether the unified_shared_memory has been specified.
virtual Address getAddrOfArtificialThreadPrivate(CodeGenFunction &CGF, QualType VarType, StringRef Name)
Creates artificial threadprivate variable with name Name and type VarType.
void emitUserDefinedMapper(const OMPDeclareMapperDecl *D, CodeGenFunction *CGF=nullptr)
Emit the function for the user defined mapper construct.
bool HasEmittedTargetRegion
Flag for keeping track of whether a target region has been emitted.
void emitDepobjElements(CodeGenFunction &CGF, QualType &KmpDependInfoTy, LValue PosLVal, const OMPTaskDataTy::DependData &Data, Address DependenciesArray)
std::string getReductionFuncName(StringRef Name) const
Get the function name of a reduction function.
virtual void processRequiresDirective(const OMPRequiresDecl *D)
Perform check on requires decl to ensure that target architecture supports unified addressing.
llvm::DenseSet< CanonicalDeclPtr< const Decl > > AlreadyEmittedTargetDecls
List of the emitted declarations.
virtual llvm::Value * emitTaskReductionInit(CodeGenFunction &CGF, SourceLocation Loc, ArrayRef< const Expr * > LHSExprs, ArrayRef< const Expr * > RHSExprs, const OMPTaskDataTy &Data)
Emit a code for initialization of task reduction clause.
llvm::Value * getThreadID(CodeGenFunction &CGF, SourceLocation Loc)
Gets thread id value for the current thread.
void emitUpdateClause(CodeGenFunction &CGF, LValue DepobjLVal, OpenMPDependClauseKind NewDepKind, SourceLocation Loc)
Updates the dependency kind in the specified depobj object.
virtual void emitLastprivateConditionalFinalUpdate(CodeGenFunction &CGF, LValue PrivLVal, const VarDecl *VD, SourceLocation Loc)
Gets the address of the global copy used for lastprivate conditional update, if any.
llvm::MapVector< CanonicalDeclPtr< const VarDecl >, std::pair< Address, Address > > UntiedLocalVarsAddressesMap
virtual void emitErrorCall(CodeGenFunction &CGF, SourceLocation Loc, Expr *ME, bool IsFatal)
Emit __kmpc_error call for error directive extern void __kmpc_error(ident_t *loc, int severity,...
void clearLocThreadIdInsertPt(CodeGenFunction &CGF)
virtual void emitTaskyieldCall(CodeGenFunction &CGF, SourceLocation Loc)
Emits code for a taskyield directive.
std::string getName(ArrayRef< StringRef > Parts) const
Get the platform-specific name separator.
QualType KmpRoutineEntryPtrQTy
void computeMinAndMaxThreadsAndTeams(const OMPExecutableDirective &D, CodeGenFunction &CGF, llvm::OpenMPIRBuilder::TargetKernelDefaultAttrs &Attrs)
Helper to determine the min/max number of threads/teams for D.
virtual void emitFlush(CodeGenFunction &CGF, ArrayRef< const Expr * > Vars, SourceLocation Loc, llvm::AtomicOrdering AO)
Emit flush of the variables specified in 'omp flush' directive.
virtual void emitTaskwaitCall(CodeGenFunction &CGF, SourceLocation Loc, const OMPTaskDataTy &Data)
Emit code for 'taskwait' directive.
virtual void emitProcBindClause(CodeGenFunction &CGF, llvm::omp::ProcBindKind ProcBind, SourceLocation Loc)
Emit call to void __kmpc_push_proc_bind(ident_t *loc, kmp_int32 global_tid, int proc_bind) to generate...
void emitLastprivateConditionalUpdate(CodeGenFunction &CGF, LValue IVLVal, StringRef UniqueDeclName, LValue LVal, SourceLocation Loc)
Emit update for lastprivate conditional data.
virtual void emitTaskLoopCall(CodeGenFunction &CGF, SourceLocation Loc, const OMPLoopDirective &D, llvm::Function *TaskFunction, QualType SharedsTy, Address Shareds, const Expr *IfCond, const OMPTaskDataTy &Data)
Emit task region for the taskloop directive.
virtual void emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind Kind, bool EmitChecks=true, bool ForceSimpleCall=false)
Emit an implicit/explicit barrier for OpenMP threads.
static unsigned getDefaultFlagsForBarriers(OpenMPDirectiveKind Kind)
Returns default flags for the barriers depending on the directive, for which this barrier is going to ...
virtual bool emitTargetFunctions(GlobalDecl GD)
Emit the target regions enclosed in GD function definition or the function itself in case it is a val...
TaskResultTy emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc, const OMPExecutableDirective &D, llvm::Function *TaskFunction, QualType SharedsTy, Address Shareds, const OMPTaskDataTy &Data)
Emit task region for the task directive.
llvm::Value * emitTargetNumIterationsCall(CodeGenFunction &CGF, const OMPExecutableDirective &D, llvm::function_ref< llvm::Value *(CodeGenFunction &CGF, const OMPLoopDirective &D)> SizeEmitter)
Return the trip count of loops associated with constructs / 'target teams distribute' and 'teams dist...
llvm::StringMap< llvm::AssertingVH< llvm::GlobalVariable >, llvm::BumpPtrAllocator > InternalVars
An ordered map of auto-generated variables to their unique names.
virtual void emitDistributeStaticInit(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDistScheduleClauseKind SchedKind, const StaticRTInput &Values)
llvm::SmallVector< UntiedLocalVarsAddressesMap, 4 > UntiedLocalVarsStack
virtual void emitForStaticFinish(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind DKind)
Call the appropriate runtime routine to notify that we finished all the work with current loop.
virtual void emitThreadLimitClause(CodeGenFunction &CGF, const Expr *ThreadLimit, SourceLocation Loc)
Emits call to void __kmpc_set_thread_limit(ident_t *loc, kmp_int32 global_tid, kmp_int32 thread_limit)...
void emitIfClause(CodeGenFunction &CGF, const Expr *Cond, const RegionCodeGenTy &ThenGen, const RegionCodeGenTy &ElseGen)
Emits code for OpenMP 'if' clause using specified CodeGen function.
Address emitDepobjDependClause(CodeGenFunction &CGF, const OMPTaskDataTy::DependData &Dependencies, SourceLocation Loc)
Emits list of dependencies based on the provided data (array of dependence/expression pairs) for depob...
bool isNontemporalDecl(const ValueDecl *VD) const
Checks if the VD variable is marked as nontemporal declaration in current context.
virtual llvm::Function * emitParallelOutlinedFunction(CodeGenFunction &CGF, const OMPExecutableDirective &D, const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen)
Emits outlined function for the specified OpenMP parallel directive D.
const Expr * getNumThreadsExprForTargetDirective(CodeGenFunction &CGF, const OMPExecutableDirective &D, int32_t &UpperBound, bool UpperBoundOnly, llvm::Value **CondExpr=nullptr, const Expr **ThreadLimitExpr=nullptr)
Check for a number of threads upper bound constant value (stored in UpperBound), or expression (retur...
virtual llvm::Value * emitSeverityClause(OpenMPSeverityClauseKind Severity, SourceLocation Loc)
llvm::SmallVector< LastprivateConditionalData, 4 > LastprivateConditionalStack
Stack for list of addresses of declarations in current context marked as lastprivate conditional.
virtual void emitForStaticInit(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind DKind, const OpenMPScheduleTy &ScheduleKind, const StaticRTInput &Values)
Call the appropriate runtime routine to initialize it before start of loop.
virtual void emitDeclareSimdFunction(const FunctionDecl *FD, llvm::Function *Fn)
Marks function Fn with properly mangled versions of vector functions.
llvm::AtomicOrdering getDefaultMemoryOrdering() const
Gets default memory ordering as specified in requires directive.
virtual bool isStaticNonchunked(OpenMPScheduleClauseKind ScheduleKind, bool Chunked) const
Check if the specified ScheduleKind is static non-chunked.
llvm::Value * getCriticalRegionLock(StringRef CriticalName)
Returns corresponding lock object for the specified critical region name.
virtual void emitCancelCall(CodeGenFunction &CGF, SourceLocation Loc, const Expr *IfCond, OpenMPDirectiveKind CancelRegion)
Emit code for 'cancel' construct.
QualType SavedKmpTaskTQTy
Saved kmp_task_t for task directive.
virtual void emitMasterRegion(CodeGenFunction &CGF, const RegionCodeGenTy &MasterOpGen, SourceLocation Loc)
Emits a master region.
virtual llvm::Function * emitTaskOutlinedFunction(const OMPExecutableDirective &D, const VarDecl *ThreadIDVar, const VarDecl *PartIDVar, const VarDecl *TaskTVar, OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen, bool Tied, unsigned &NumberOfParts)
Emits outlined function for the OpenMP task directive D.
llvm::DenseMap< llvm::Function *, unsigned > FunctionToUntiedTaskStackMap
Maps function to the position of the untied task locals stack.
void emitDestroyClause(CodeGenFunction &CGF, LValue DepobjLVal, SourceLocation Loc)
Emits the code to destroy the dependency object provided in depobj directive.
virtual void emitTaskReductionFixups(CodeGenFunction &CGF, SourceLocation Loc, ReductionCodeGen &RCG, unsigned N)
Required to resolve existing problems in the runtime.
llvm::ArrayType * KmpCriticalNameTy
Type kmp_critical_name, originally defined as typedef kmp_int32 kmp_critical_name[8];.
virtual void emitDoacrossOrdered(CodeGenFunction &CGF, const OMPDependClause *C)
Emit code for doacross ordered directive with 'depend' clause.
llvm::DenseMap< const OMPDeclareMapperDecl *, llvm::Function * > UDMMap
Map from the user-defined mapper declaration to its corresponding functions.
virtual void checkAndEmitLastprivateConditional(CodeGenFunction &CGF, const Expr *LHS)
Checks if the provided LVal is lastprivate conditional and emits the code to update the value of the ...
std::pair< llvm::Value *, LValue > getDepobjElements(CodeGenFunction &CGF, LValue DepobjLVal, SourceLocation Loc)
Returns the number of the elements and the address of the depobj dependency array.
llvm::SmallDenseSet< const VarDecl * > DeferredGlobalVariables
List of variables that can become declare target implicitly and, thus, must be emitted.
void emitUsesAllocatorsInit(CodeGenFunction &CGF, const Expr *Allocator, const Expr *AllocatorTraits)
Initializes user defined allocators specified in the uses_allocators clauses.
llvm::Type * KmpRoutineEntryPtrTy
Type typedef kmp_int32 (* kmp_routine_entry_t)(kmp_int32, void *);.
llvm::Type * getIdentTyPointerTy()
Returns pointer to ident_t type.
void emitSingleReductionCombiner(CodeGenFunction &CGF, const Expr *ReductionOp, const Expr *PrivateRef, const DeclRefExpr *LHS, const DeclRefExpr *RHS)
Emits single reduction combiner.
llvm::OpenMPIRBuilder & getOMPBuilder()
virtual void emitTargetOutlinedFunction(const OMPExecutableDirective &D, StringRef ParentName, llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID, bool IsOffloadEntry, const RegionCodeGenTy &CodeGen)
Emit outlined function for 'target' directive.
virtual void emitCriticalRegion(CodeGenFunction &CGF, StringRef CriticalName, const RegionCodeGenTy &CriticalOpGen, SourceLocation Loc, const Expr *Hint=nullptr)
Emits a critical region.
virtual void emitForOrderedIterationEnd(CodeGenFunction &CGF, SourceLocation Loc, unsigned IVSize, bool IVSigned)
Call the appropriate runtime routine to notify that we finished iteration of the ordered loop with th...
virtual void emitOutlinedFunctionCall(CodeGenFunction &CGF, SourceLocation Loc, llvm::FunctionCallee OutlinedFn, ArrayRef< llvm::Value * > Args={}) const
Emits call of the outlined function with the provided arguments, translating these arguments to corre...
llvm::Value * emitNumThreadsForTargetDirective(CodeGenFunction &CGF, const OMPExecutableDirective &D)
Emit an expression that denotes the number of threads a target region shall use.
void emitThreadPrivateVarInit(CodeGenFunction &CGF, Address VDAddr, llvm::Value *Ctor, llvm::Value *CopyCtor, llvm::Value *Dtor, SourceLocation Loc)
Emits initialization code for the threadprivate variables.
virtual void emitUserDefinedReduction(CodeGenFunction *CGF, const OMPDeclareReductionDecl *D)
Emit code for the specified user defined reduction construct.
virtual void checkAndEmitSharedLastprivateConditional(CodeGenFunction &CGF, const OMPExecutableDirective &D, const llvm::DenseSet< CanonicalDeclPtr< const VarDecl > > &IgnoredDecls)
Checks if the lastprivate conditional was updated in inner region and writes the value.
QualType KmpDimTy
struct kmp_dim { // loop bounds info casted to kmp_int64 kmp_int64 lo; // lower kmp_int64 up; // uppe...
virtual void emitInlinedDirective(CodeGenFunction &CGF, OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen, bool HasCancel=false)
Emit code for the directive that does not require outlining.
virtual void registerTargetGlobalVariable(const VarDecl *VD, llvm::Constant *Addr)
Checks if the provided global decl GD is a declare target variable and registers it when emitting cod...
virtual void emitFunctionProlog(CodeGenFunction &CGF, const Decl *D)
Emits OpenMP-specific function prolog.
void emitKmpRoutineEntryT(QualType KmpInt32Ty)
Build type kmp_routine_entry_t (if not built yet).
virtual bool isStaticChunked(OpenMPScheduleClauseKind ScheduleKind, bool Chunked) const
Check if the specified ScheduleKind is static chunked.
virtual void emitTargetCall(CodeGenFunction &CGF, const OMPExecutableDirective &D, llvm::Function *OutlinedFn, llvm::Value *OutlinedFnID, const Expr *IfCond, llvm::PointerIntPair< const Expr *, 2, OpenMPDeviceClauseModifier > Device, llvm::function_ref< llvm::Value *(CodeGenFunction &CGF, const OMPLoopDirective &D)> SizeEmitter)
Emit the target offloading code associated with D.
virtual bool hasAllocateAttributeForGlobalVar(const VarDecl *VD, LangAS &AS)
Checks if the variable has associated OMPAllocateDeclAttr attribute with the predefined allocator and...
llvm::AtomicOrdering RequiresAtomicOrdering
Atomic ordering from the omp requires directive.
virtual void emitReduction(CodeGenFunction &CGF, SourceLocation Loc, ArrayRef< const Expr * > Privates, ArrayRef< const Expr * > LHSExprs, ArrayRef< const Expr * > RHSExprs, ArrayRef< const Expr * > ReductionOps, ReductionOptionsTy Options)
Emit a code for reduction clause.
std::pair< llvm::Value *, Address > emitDependClause(CodeGenFunction &CGF, ArrayRef< OMPTaskDataTy::DependData > Dependencies, SourceLocation Loc)
Emits list of dependencies based on the provided data (array of dependence/expression pairs).
llvm::StringMap< llvm::WeakTrackingVH > EmittedNonTargetVariables
List of the global variables with their addresses that should not be emitted for the target.
virtual bool isDynamic(OpenMPScheduleClauseKind ScheduleKind) const
Check if the specified ScheduleKind is dynamic.
Address emitLastprivateConditionalInit(CodeGenFunction &CGF, const VarDecl *VD)
Create specialized alloca to handle lastprivate conditionals.
virtual void emitOrderedRegion(CodeGenFunction &CGF, const RegionCodeGenTy &OrderedOpGen, SourceLocation Loc, bool IsThreads)
Emit an ordered region.
virtual Address getAddressOfLocalVariable(CodeGenFunction &CGF, const VarDecl *VD)
Gets the OpenMP-specific address of the local variable.
virtual void emitTaskReductionFini(CodeGenFunction &CGF, SourceLocation Loc, bool IsWorksharingReduction)
Emits the following code for reduction clause with task modifier:
virtual void emitMaskedRegion(CodeGenFunction &CGF, const RegionCodeGenTy &MaskedOpGen, SourceLocation Loc, const Expr *Filter=nullptr)
Emits a masked region.
QualType KmpDependInfoTy
Type typedef struct kmp_depend_info { kmp_intptr_t base_addr; size_t len; struct { bool in:1; bool ou...
llvm::Function * emitReductionFunction(StringRef ReducerName, SourceLocation Loc, llvm::Type *ArgsElemType, ArrayRef< const Expr * > Privates, ArrayRef< const Expr * > LHSExprs, ArrayRef< const Expr * > RHSExprs, ArrayRef< const Expr * > ReductionOps)
Emits reduction function.
virtual void emitForDispatchInit(CodeGenFunction &CGF, SourceLocation Loc, const OpenMPScheduleTy &ScheduleKind, unsigned IVSize, bool IVSigned, bool Ordered, const DispatchRTInput &DispatchValues)
Call the appropriate runtime routine to initialize it before start of loop.
Address getTaskReductionItem(CodeGenFunction &CGF, SourceLocation Loc, llvm::Value *ReductionsPtr, LValue SharedLVal) override
Get the address of void * type of the private copy of the reduction item specified by the SharedLVal...
void emitCriticalRegion(CodeGenFunction &CGF, StringRef CriticalName, const RegionCodeGenTy &CriticalOpGen, SourceLocation Loc, const Expr *Hint=nullptr) override
Emits a critical region.
void emitDistributeStaticInit(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDistScheduleClauseKind SchedKind, const StaticRTInput &Values) override
void emitForStaticInit(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind DKind, const OpenMPScheduleTy &ScheduleKind, const StaticRTInput &Values) override
Call the appropriate runtime routine to initialize it before start of loop.
bool emitTargetGlobalVariable(GlobalDecl GD) override
Emit the global variable if it is a valid device global variable.
llvm::Value * emitForNext(CodeGenFunction &CGF, SourceLocation Loc, unsigned IVSize, bool IVSigned, Address IL, Address LB, Address UB, Address ST) override
Call __kmpc_dispatch_next( ident_t *loc, kmp_int32 tid, kmp_int32 *p_lastiter, kmp_int[32|64] *p_lowe...
llvm::Function * emitThreadPrivateVarDefinition(const VarDecl *VD, Address VDAddr, SourceLocation Loc, bool PerformInit, CodeGenFunction *CGF=nullptr) override
Emit a code for initialization of threadprivate variable.
void emitTargetDataStandAloneCall(CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond, const Expr *Device) override
Emit the data mapping/movement code associated with the directive D that should be of the form 'targe...
llvm::Function * emitTeamsOutlinedFunction(CodeGenFunction &CGF, const OMPExecutableDirective &D, const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) override
Emits outlined function for the specified OpenMP teams directive D.
void emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc, llvm::Function *OutlinedFn, ArrayRef< llvm::Value * > CapturedVars, const Expr *IfCond, llvm::Value *NumThreads, OpenMPNumThreadsClauseModifier NumThreadsModifier=OMPC_NUMTHREADS_unknown, OpenMPSeverityClauseKind Severity=OMPC_SEVERITY_fatal, const Expr *Message=nullptr) override
Emits code for parallel or serial call of the OutlinedFn with variables captured in a record which ad...
void emitReduction(CodeGenFunction &CGF, SourceLocation Loc, ArrayRef< const Expr * > Privates, ArrayRef< const Expr * > LHSExprs, ArrayRef< const Expr * > RHSExprs, ArrayRef< const Expr * > ReductionOps, ReductionOptionsTy Options) override
Emit a code for reduction clause.
void emitFlush(CodeGenFunction &CGF, ArrayRef< const Expr * > Vars, SourceLocation Loc, llvm::AtomicOrdering AO) override
Emit flush of the variables specified in 'omp flush' directive.
void emitDoacrossOrdered(CodeGenFunction &CGF, const OMPDependClause *C) override
Emit code for doacross ordered directive with 'depend' clause.
void emitTaskyieldCall(CodeGenFunction &CGF, SourceLocation Loc) override
Emits code for a taskyield directive.
Address getAddrOfArtificialThreadPrivate(CodeGenFunction &CGF, QualType VarType, StringRef Name) override
Creates artificial threadprivate variable with name Name and type VarType.
Address getAddrOfThreadPrivate(CodeGenFunction &CGF, const VarDecl *VD, Address VDAddr, SourceLocation Loc) override
Returns address of the threadprivate variable for the current thread.
void emitSingleRegion(CodeGenFunction &CGF, const RegionCodeGenTy &SingleOpGen, SourceLocation Loc, ArrayRef< const Expr * > CopyprivateVars, ArrayRef< const Expr * > DestExprs, ArrayRef< const Expr * > SrcExprs, ArrayRef< const Expr * > AssignmentOps) override
Emits a single region.
void emitTaskReductionFixups(CodeGenFunction &CGF, SourceLocation Loc, ReductionCodeGen &RCG, unsigned N) override
Required to resolve existing problems in the runtime.
llvm::Function * emitParallelOutlinedFunction(CodeGenFunction &CGF, const OMPExecutableDirective &D, const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) override
Emits outlined function for the specified OpenMP parallel directive D.
void emitCancellationPointCall(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind CancelRegion) override
Emit code for 'cancellation point' construct.
void emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind Kind, bool EmitChecks=true, bool ForceSimpleCall=false) override
Emit an implicit/explicit barrier for OpenMP threads.
Address getParameterAddress(CodeGenFunction &CGF, const VarDecl *NativeParam, const VarDecl *TargetParam) const override
Gets the address of the native argument basing on the address of the target-specific parameter.
void emitTeamsCall(CodeGenFunction &CGF, const OMPExecutableDirective &D, SourceLocation Loc, llvm::Function *OutlinedFn, ArrayRef< llvm::Value * > CapturedVars) override
Emits code for teams call of the OutlinedFn with variables captured in a record which address is stor...
void emitForOrderedIterationEnd(CodeGenFunction &CGF, SourceLocation Loc, unsigned IVSize, bool IVSigned) override
Call the appropriate runtime routine to notify that we finished iteration of the ordered loop with th...
bool emitTargetGlobal(GlobalDecl GD) override
Emit the global GD if it is meaningful for the target.
void emitTaskReductionFini(CodeGenFunction &CGF, SourceLocation Loc, bool IsWorksharingReduction) override
Emits the following code for reduction clause with task modifier:
void emitOrderedRegion(CodeGenFunction &CGF, const RegionCodeGenTy &OrderedOpGen, SourceLocation Loc, bool IsThreads) override
Emit an ordered region.
void emitForStaticFinish(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind DKind) override
Call the appropriate runtime routine to notify that we finished all the work with current loop.
llvm::Value * emitTaskReductionInit(CodeGenFunction &CGF, SourceLocation Loc, ArrayRef< const Expr * > LHSExprs, ArrayRef< const Expr * > RHSExprs, const OMPTaskDataTy &Data) override
Emit a code for initialization of task reduction clause.
void emitProcBindClause(CodeGenFunction &CGF, llvm::omp::ProcBindKind ProcBind, SourceLocation Loc) override
Emit call to void __kmpc_push_proc_bind(ident_t *loc, kmp_int32 global_tid, int proc_bind) to generate...
void emitTargetOutlinedFunction(const OMPExecutableDirective &D, StringRef ParentName, llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID, bool IsOffloadEntry, const RegionCodeGenTy &CodeGen) override
Emit outlined function for 'target' directive.
void emitMasterRegion(CodeGenFunction &CGF, const RegionCodeGenTy &MasterOpGen, SourceLocation Loc) override
Emits a master region.
void emitNumTeamsClause(CodeGenFunction &CGF, const Expr *NumTeams, const Expr *ThreadLimit, SourceLocation Loc) override
Emits call to void __kmpc_push_num_teams(ident_t *loc, kmp_int32 global_tid, kmp_int32 num_teams,...
void emitForDispatchDeinit(CodeGenFunction &CGF, SourceLocation Loc) override
This is used for non static scheduled types and when the ordered clause is present on the loop constr...
const VarDecl * translateParameter(const FieldDecl *FD, const VarDecl *NativeParam) const override
Translates the native parameter of outlined function if this is required for target.
void emitNumThreadsClause(CodeGenFunction &CGF, llvm::Value *NumThreads, SourceLocation Loc, OpenMPNumThreadsClauseModifier Modifier=OMPC_NUMTHREADS_unknown, OpenMPSeverityClauseKind Severity=OMPC_SEVERITY_fatal, SourceLocation SeverityLoc=SourceLocation(), const Expr *Message=nullptr, SourceLocation MessageLoc=SourceLocation()) override
Emits call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32 global_tid, kmp_int32 num_threads) ...
void emitMaskedRegion(CodeGenFunction &CGF, const RegionCodeGenTy &MaskedOpGen, SourceLocation Loc, const Expr *Filter=nullptr) override
Emits a masked region.
void emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc, const OMPExecutableDirective &D, llvm::Function *TaskFunction, QualType SharedsTy, Address Shareds, const Expr *IfCond, const OMPTaskDataTy &Data) override
Emit task region for the task directive.
void emitTargetCall(CodeGenFunction &CGF, const OMPExecutableDirective &D, llvm::Function *OutlinedFn, llvm::Value *OutlinedFnID, const Expr *IfCond, llvm::PointerIntPair< const Expr *, 2, OpenMPDeviceClauseModifier > Device, llvm::function_ref< llvm::Value *(CodeGenFunction &CGF, const OMPLoopDirective &D)> SizeEmitter) override
Emit the target offloading code associated with D.
bool emitTargetFunctions(GlobalDecl GD) override
Emit the target regions enclosed in GD function definition or the function itself in case it is a val...
void emitDoacrossInit(CodeGenFunction &CGF, const OMPLoopDirective &D, ArrayRef< Expr * > NumIterations) override
Emit initialization for doacross loop nesting support.
void emitCancelCall(CodeGenFunction &CGF, SourceLocation Loc, const Expr *IfCond, OpenMPDirectiveKind CancelRegion) override
Emit code for 'cancel' construct.
void emitTaskwaitCall(CodeGenFunction &CGF, SourceLocation Loc, const OMPTaskDataTy &Data) override
Emit code for 'taskwait' directive.
void emitTaskgroupRegion(CodeGenFunction &CGF, const RegionCodeGenTy &TaskgroupOpGen, SourceLocation Loc) override
Emit a taskgroup region.
void emitTargetDataCalls(CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond, const Expr *Device, const RegionCodeGenTy &CodeGen, CGOpenMPRuntime::TargetDataInfo &Info) override
Emit the target data mapping code associated with D.
void emitForDispatchInit(CodeGenFunction &CGF, SourceLocation Loc, const OpenMPScheduleTy &ScheduleKind, unsigned IVSize, bool IVSigned, bool Ordered, const DispatchRTInput &DispatchValues) override
This is used for non static scheduled types and when the ordered clause is present on the loop constr...
llvm::Function * emitTaskOutlinedFunction(const OMPExecutableDirective &D, const VarDecl *ThreadIDVar, const VarDecl *PartIDVar, const VarDecl *TaskTVar, OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen, bool Tied, unsigned &NumberOfParts) override
Emits outlined function for the OpenMP task directive D.
void emitTaskLoopCall(CodeGenFunction &CGF, SourceLocation Loc, const OMPLoopDirective &D, llvm::Function *TaskFunction, QualType SharedsTy, Address Shareds, const Expr *IfCond, const OMPTaskDataTy &Data) override
Emit task region for the taskloop directive.
unsigned getNonVirtualBaseLLVMFieldNo(const CXXRecordDecl *RD) const
llvm::StructType * getLLVMType() const
Return the "complete object" LLVM type associated with this record.
llvm::StructType * getBaseSubobjectLLVMType() const
Return the "base subobject" LLVM type associated with this record.
unsigned getLLVMFieldNo(const FieldDecl *FD) const
Return llvm::StructType element number that corresponds to the field FD.
unsigned getVirtualBaseIndex(const CXXRecordDecl *base) const
Return the LLVM field index corresponding to the given virtual base.
API for captured statement code generation.
virtual void EmitBody(CodeGenFunction &CGF, const Stmt *S)
Emit the captured statement body.
virtual const FieldDecl * lookup(const VarDecl *VD) const
Lookup the captured field decl for a variable.
RAII for correct setting/restoring of CapturedStmtInfo.
The scope used to remap some variables as private in the OpenMP loop body (or other captured region e...
bool Privatize()
Privatizes local variables previously registered as private.
bool addPrivate(const VarDecl *LocalVD, Address Addr)
Registers LocalVD variable as a private with Addr as the address of the corresponding private variabl...
An RAII object to set (and then clear) a mapping for an OpaqueValueExpr.
Enters a new scope for capturing cleanups, all of which will be executed once the scope is exited.
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
LValue EmitLoadOfReferenceLValue(LValue RefLVal)
void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock, llvm::BasicBlock *FalseBlock, uint64_t TrueCount, Stmt::Likelihood LH=Stmt::LH_None, const Expr *ConditionalOp=nullptr, const VarDecl *ConditionalDecl=nullptr)
EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g.
void emitDestroy(Address addr, QualType type, Destroyer *destroyer, bool useEHCleanupForArray)
emitDestroy - Immediately perform the destruction of the given object.
JumpDest getJumpDestInCurrentScope(llvm::BasicBlock *Target)
The given basic block lies in the current EH scope, but may be a target of a potentially scope-crossi...
static void EmitOMPTargetParallelDeviceFunction(CodeGenModule &CGM, StringRef ParentName, const OMPTargetParallelDirective &S)
void EmitNullInitialization(Address DestPtr, QualType Ty)
EmitNullInitialization - Generate code to set a value of the given type to null, If the type contains...
Address LoadCXXThisAddress()
CGCapturedStmtInfo * CapturedStmtInfo
ComplexPairTy EmitLoadOfComplex(LValue src, SourceLocation loc)
EmitLoadOfComplex - Load a complex number from the specified l-value.
static void EmitOMPTargetDeviceFunction(CodeGenModule &CGM, StringRef ParentName, const OMPTargetDirective &S)
Emit device code for the target directive.
static void EmitOMPTargetTeamsDeviceFunction(CodeGenModule &CGM, StringRef ParentName, const OMPTargetTeamsDirective &S)
Emit device code for the target teams directive.
static void EmitOMPTargetTeamsDistributeDeviceFunction(CodeGenModule &CGM, StringRef ParentName, const OMPTargetTeamsDistributeDirective &S)
Emit device code for the target teams distribute directive.
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
const LangOptions & getLangOpts() const
AutoVarEmission EmitAutoVarAlloca(const VarDecl &var)
EmitAutoVarAlloca - Emit the alloca and debug information for a local variable.
void pushDestroy(QualType::DestructionKind dtorKind, Address addr, QualType type)
pushDestroy - Push the standard destructor for the given type as at least a normal cleanup.
Address EmitLoadOfPointer(Address Ptr, const PointerType *PtrTy, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
Load a pointer with type PtrTy stored at address Ptr.
void EmitBranchThroughCleanup(JumpDest Dest)
EmitBranchThroughCleanup - Emit a branch from the current insert block through the normal cleanup han...
const Decl * CurCodeDecl
CurCodeDecl - This is the inner-most code context, which includes blocks.
Destroyer * getDestroyer(QualType::DestructionKind destructionKind)
llvm::AssertingVH< llvm::Instruction > AllocaInsertPt
AllocaInsertPoint - This is an instruction in the entry block before which we prefer to insert alloca...
void EmitAggregateAssign(LValue Dest, LValue Src, QualType EltTy)
Emit an aggregate assignment.
JumpDest ReturnBlock
ReturnBlock - Unified return block.
void EmitAggregateCopy(LValue Dest, LValue Src, QualType EltTy, AggValueSlot::Overlap_t MayOverlap, bool isVolatile=false)
EmitAggregateCopy - Emit an aggregate copy.
LValue EmitLValueForField(LValue Base, const FieldDecl *Field, bool IsInBounds=true)
RawAddress CreateDefaultAlignTempAlloca(llvm::Type *Ty, const Twine &Name="tmp")
CreateDefaultAlignedTempAlloca - This creates an alloca with the default ABI alignment of the given L...
void GenerateOpenMPCapturedVars(const CapturedStmt &S, SmallVectorImpl< llvm::Value * > &CapturedVars)
void EmitIgnoredExpr(const Expr *E)
EmitIgnoredExpr - Emit an expression in a context which ignores the result.
RValue EmitLoadOfLValue(LValue V, SourceLocation Loc)
EmitLoadOfLValue - Given an expression that represents a value lvalue, this method emits the address ...
LValue EmitArraySectionExpr(const ArraySectionExpr *E, bool IsLowerBound=true)
LValue EmitOMPSharedLValue(const Expr *E)
Emits the lvalue for the expression with possibly captured variable.
void StartFunction(GlobalDecl GD, QualType RetTy, llvm::Function *Fn, const CGFunctionInfo &FnInfo, const FunctionArgList &Args, SourceLocation Loc=SourceLocation(), SourceLocation StartLoc=SourceLocation())
Emit code for the start of a function.
void EmitOMPCopy(QualType OriginalType, Address DestAddr, Address SrcAddr, const VarDecl *DestVD, const VarDecl *SrcVD, const Expr *Copy)
Emit proper copying of data from one variable to another.
llvm::Value * EvaluateExprAsBool(const Expr *E)
EvaluateExprAsBool - Perform the usual unary conversions on the specified expression and compare the ...
JumpDest getOMPCancelDestination(OpenMPDirectiveKind Kind)
llvm::Value * emitArrayLength(const ArrayType *arrayType, QualType &baseType, Address &addr)
emitArrayLength - Compute the length of an array, even if it's a VLA, and drill down to the base elem...
void EmitOMPAggregateAssign(Address DestAddr, Address SrcAddr, QualType OriginalType, const llvm::function_ref< void(Address, Address)> CopyGen)
Perform element by element copying of arrays with type OriginalType from SrcAddr to DestAddr using co...
bool HaveInsertPoint() const
HaveInsertPoint - True if an insertion point is defined.
CGDebugInfo * getDebugInfo()
llvm::Value * getTypeSize(QualType Ty)
Returns calculated size of the specified type.
LValue MakeRawAddrLValue(llvm::Value *V, QualType T, CharUnits Alignment, AlignmentSource Source=AlignmentSource::Type)
Same as MakeAddrLValue above except that the pointer is known to be unsigned.
LValue EmitLValueForFieldInitialization(LValue Base, const FieldDecl *Field)
EmitLValueForFieldInitialization - Like EmitLValueForField, except that if the Field is a reference,...
void incrementProfileCounter(const Stmt *S, llvm::Value *StepV=nullptr)
Increment the profiler's counter for the given statement by StepV.
VlaSizePair getVLASize(const VariableArrayType *vla)
Returns an LLVM value that corresponds to the size, in non-variably-sized elements,...
llvm::CallInst * EmitNounwindRuntimeCall(llvm::FunctionCallee callee, const Twine &name="")
ASTContext & getContext() const
llvm::Value * EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty, SourceLocation Loc, AlignmentSource Source=AlignmentSource::Type, bool isNontemporal=false)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit)
EmitStoreOfComplex - Store a complex number into the specified l-value.
const Decl * CurFuncDecl
CurFuncDecl - Holds the Decl for the current outermost non-closure context.
void EmitAutoVarCleanups(const AutoVarEmission &emission)
void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false)
EmitStoreThroughLValue - Store the specified rvalue into the specified lvalue, where both are guarant...
LValue EmitLoadOfPointerLValue(Address Ptr, const PointerType *PtrTy)
void EmitAnyExprToMem(const Expr *E, Address Location, Qualifiers Quals, bool IsInitializer)
EmitAnyExprToMem - Emits the code necessary to evaluate an arbitrary expression into the given memory...
bool needsEHCleanup(QualType::DestructionKind kind)
Determines whether an EH cleanup is required to destroy a type with the given destruction kind.
llvm::DenseMap< const ValueDecl *, FieldDecl * > LambdaCaptureFields
llvm::CallInst * EmitRuntimeCall(llvm::FunctionCallee callee, const Twine &name="")
llvm::Type * ConvertTypeForMem(QualType T)
static void EmitOMPTargetTeamsDistributeParallelForDeviceFunction(CodeGenModule &CGM, StringRef ParentName, const OMPTargetTeamsDistributeParallelForDirective &S)
static void EmitOMPTargetParallelForSimdDeviceFunction(CodeGenModule &CGM, StringRef ParentName, const OMPTargetParallelForSimdDirective &S)
Emit device code for the target parallel for simd directive.
CodeGenTypes & getTypes() const
static TypeEvaluationKind getEvaluationKind(QualType T)
getEvaluationKind - Return the TypeEvaluationKind of QualType T.
void EmitOMPTargetTaskBasedDirective(const OMPExecutableDirective &S, const RegionCodeGenTy &BodyGen, OMPTargetDataInfo &InputInfo)
Address EmitPointerWithAlignment(const Expr *Addr, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitPointerWithAlignment - Given an expression with a pointer type, emit the value and compute our be...
static void EmitOMPTargetTeamsDistributeParallelForSimdDeviceFunction(CodeGenModule &CGM, StringRef ParentName, const OMPTargetTeamsDistributeParallelForSimdDirective &S)
Emit device code for the target teams distribute parallel for simd directive.
void EmitBranch(llvm::BasicBlock *Block)
EmitBranch - Emit a branch to the specified basic block from the current insert block,...
llvm::Function * GenerateOpenMPCapturedStmtFunction(const CapturedStmt &S, const OMPExecutableDirective &D)
RawAddress CreateMemTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment and cas...
Address EmitLoadOfReference(LValue RefLVal, LValueBaseInfo *PointeeBaseInfo=nullptr, TBAAAccessInfo *PointeeTBAAInfo=nullptr)
void EmitVarDecl(const VarDecl &D)
EmitVarDecl - Emit a local variable declaration.
llvm::Value * EmitCheckedInBoundsGEP(llvm::Type *ElemTy, llvm::Value *Ptr, ArrayRef< llvm::Value * > IdxList, bool SignedIndices, bool IsSubtraction, SourceLocation Loc, const Twine &Name="")
Same as IRBuilder::CreateInBoundsGEP, but additionally emits a check to detect undefined behavior whe...
static void EmitOMPTargetParallelGenericLoopDeviceFunction(CodeGenModule &CGM, StringRef ParentName, const OMPTargetParallelGenericLoopDirective &S)
Emit device code for the target parallel loop directive.
llvm::Value * EmitScalarExpr(const Expr *E, bool IgnoreResultAssign=false)
EmitScalarExpr - Emit the computation of the specified expression of LLVM scalar type,...
static bool IsWrappedCXXThis(const Expr *E)
Check if E is a C++ "this" pointer wrapped in value-preserving casts.
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
void FinishFunction(SourceLocation EndLoc=SourceLocation())
FinishFunction - Complete IR generation of the current function.
void EmitAtomicStore(RValue rvalue, LValue lvalue, bool isInit)
static void EmitOMPTargetSimdDeviceFunction(CodeGenModule &CGM, StringRef ParentName, const OMPTargetSimdDirective &S)
Emit device code for the target simd directive.
static void EmitOMPTargetParallelForDeviceFunction(CodeGenModule &CGM, StringRef ParentName, const OMPTargetParallelForDirective &S)
Emit device code for the target parallel for directive.
Address GetAddrOfLocalVar(const VarDecl *VD)
GetAddrOfLocalVar - Return the address of a local variable.
bool ConstantFoldsToSimpleInteger(const Expr *Cond, bool &Result, bool AllowLabels=false)
ConstantFoldsToSimpleInteger - If the specified expression does not fold to a constant,...
static void EmitOMPTargetTeamsGenericLoopDeviceFunction(CodeGenModule &CGM, StringRef ParentName, const OMPTargetTeamsGenericLoopDirective &S)
Emit device code for the target teams loop directive.
LValue EmitMemberExpr(const MemberExpr *E)
std::pair< llvm::Value *, llvm::Value * > ComplexPairTy
Address ReturnValue
ReturnValue - The temporary alloca to hold the return value.
LValue EmitLValue(const Expr *E, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitLValue - Emit code to compute a designator that specifies the location of the expression.
static void EmitOMPTargetTeamsDistributeSimdDeviceFunction(CodeGenModule &CGM, StringRef ParentName, const OMPTargetTeamsDistributeSimdDirective &S)
Emit device code for the target teams distribute simd directive.
llvm::Value * EmitScalarConversion(llvm::Value *Src, QualType SrcTy, QualType DstTy, SourceLocation Loc)
Emit a conversion from the specified type to the specified destination type, both of which are LLVM s...
void EmitVariablyModifiedType(QualType Ty)
EmitVLASize - Capture all the sizes for the VLA expressions in the given variably-modified type and s...
bool isTrivialInitializer(const Expr *Init)
Determine whether the given initializer is trivial in the sense that it requires no code to be genera...
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource Source=AlignmentSource::Type, bool isInit=false, bool isNontemporal=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
void EmitExprAsInit(const Expr *init, const ValueDecl *D, LValue lvalue, bool capturedByInit)
EmitExprAsInit - Emits the code necessary to initialize a location in memory with the given initializ...
LValue MakeNaturalAlignRawAddrLValue(llvm::Value *V, QualType T)
This class organizes the cross-function state that is used while generating LLVM code.
void SetInternalFunctionAttributes(GlobalDecl GD, llvm::Function *F, const CGFunctionInfo &FI)
Set the attributes on the LLVM function for the given decl and function info.
llvm::Module & getModule() const
const IntrusiveRefCntPtr< llvm::vfs::FileSystem > & getFileSystem() const
DiagnosticsEngine & getDiags() const
const LangOptions & getLangOpts() const
CharUnits getNaturalTypeAlignment(QualType T, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, bool forPointeeType=false)
CodeGenTypes & getTypes()
CGOpenMPRuntime & getOpenMPRuntime()
Return a reference to the configured OpenMP runtime.
TBAAAccessInfo getTBAAInfoForSubobject(LValue Base, QualType AccessType)
getTBAAInfoForSubobject - Get TBAA information for an access with a given base lvalue.
ASTContext & getContext() const
const CodeGenOptions & getCodeGenOpts() const
StringRef getMangledName(GlobalDecl GD)
std::optional< CharUnits > getOMPAllocateAlignment(const VarDecl *VD)
Return the alignment specified in an allocate directive, if present.
llvm::Constant * EmitNullConstant(QualType T)
Return the result of value-initializing the given type, i.e.
llvm::Type * ConvertType(QualType T)
ConvertType - Convert type T into a llvm::Type.
llvm::FunctionType * GetFunctionType(const CGFunctionInfo &Info)
GetFunctionType - Get the LLVM function type for.
const CGFunctionInfo & arrangeBuiltinFunctionDeclaration(QualType resultType, const FunctionArgList &args)
A builtin function is a freestanding function using the default C conventions.
const CGRecordLayout & getCGRecordLayout(const RecordDecl *)
getCGRecordLayout - Return record layout info for the given record decl.
A specialization of Address that requires the address to be an LLVM Constant.
static ConstantAddress invalid()
bool requiresLandingPad() const
void pushTerminate()
Push a terminate handler on the stack.
void popTerminate()
Pops a terminate handler off the stack.
FunctionArgList - Type for representing both the decl and type of parameters to a function.
LValue - This represents an lvalue reference.
CharUnits getAlignment() const
llvm::Value * getPointer(CodeGenFunction &CGF) const
const Qualifiers & getQuals() const
Address getAddress() const
LValueBaseInfo getBaseInfo() const
TBAAAccessInfo getTBAAInfo() const
A basic class for pre|post-action for advanced codegen sequence for OpenMP region.
virtual void Enter(CodeGenFunction &CGF)
RValue - This trivial value class is used to represent the result of an expression that is evaluated.
static RValue get(llvm::Value *V)
static RValue getComplex(llvm::Value *V1, llvm::Value *V2)
llvm::Value * getScalarVal() const
getScalarVal() - Return the Value* of this scalar value.
An abstract representation of an aligned address.
llvm::Type * getElementType() const
Return the type of the values stored in this address.
llvm::Value * getPointer() const
static RawAddress invalid()
Class intended to support codegen of all kind of the reduction clauses.
LValue getSharedLValue(unsigned N) const
Returns LValue for the reduction item.
const Expr * getRefExpr(unsigned N) const
Returns the base declaration of the reduction item.
LValue getOrigLValue(unsigned N) const
Returns LValue for the original reduction item.
bool needCleanups(unsigned N)
Returns true if the private copy requires cleanups.
void emitAggregateType(CodeGenFunction &CGF, unsigned N)
Emits the code for the variable-modified type, if required.
const VarDecl * getBaseDecl(unsigned N) const
Returns the base declaration of the reduction item.
QualType getPrivateType(unsigned N) const
Return the type of the private item.
bool usesReductionInitializer(unsigned N) const
Returns true if the initialization of the reduction item uses initializer from declare reduction cons...
void emitSharedOrigLValue(CodeGenFunction &CGF, unsigned N)
Emits lvalue for the shared and original reduction item.
void emitInitialization(CodeGenFunction &CGF, unsigned N, Address PrivateAddr, Address SharedAddr, llvm::function_ref< bool(CodeGenFunction &)> DefaultInit)
Performs initialization of the private copy for the reduction item.
std::pair< llvm::Value *, llvm::Value * > getSizes(unsigned N) const
Returns the size of the reduction item (in chars and total number of elements in the item),...
ReductionCodeGen(ArrayRef< const Expr * > Shareds, ArrayRef< const Expr * > Origs, ArrayRef< const Expr * > Privates, ArrayRef< const Expr * > ReductionOps)
void emitCleanups(CodeGenFunction &CGF, unsigned N, Address PrivateAddr)
Emits cleanup code for the reduction item.
Address adjustPrivateAddress(CodeGenFunction &CGF, unsigned N, Address PrivateAddr)
Adjusts PrivateAddr for using instead of the original variable address in normal operations.
Class provides a way to call simple version of codegen for OpenMP region, or an advanced with possibl...
void operator()(CodeGenFunction &CGF) const
void setAction(PrePostActionTy &Action) const
ConstStmtVisitor - This class implements a simple visitor for Stmt subclasses.
DeclContext - This is used only as base class of specific decl types that can act as declaration cont...
void addDecl(Decl *D)
Add the declaration D into this context.
A reference to a declared variable, function, enum, etc.
Decl - This represents one declaration (or definition), e.g.
ASTContext & getASTContext() const LLVM_READONLY
virtual Stmt * getBody() const
getBody - If this Decl represents a declaration for a body of code, such as a function or method defi...
llvm::iterator_range< specific_attr_iterator< T > > specific_attrs() const
SourceLocation getLocation() const
DeclContext * getDeclContext()
virtual Decl * getCanonicalDecl()
Retrieves the "canonical" declaration of the given declaration.
SourceLocation getBeginLoc() const LLVM_READONLY
DiagnosticBuilder Report(SourceLocation Loc, unsigned DiagID)
Issue the message to the client.
unsigned getCustomDiagID(Level L, const char(&FormatString)[N])
Return an ID for a diagnostic with the specified format string and level.
This represents one expression.
bool isIntegerConstantExpr(const ASTContext &Ctx) const
Expr * IgnoreParenNoopCasts(const ASTContext &Ctx) LLVM_READONLY
Skip past any parentheses and casts which do not change the value (including ptr->int casts of the sa...
@ SE_AllowSideEffects
Allow any unmodeled side effect.
@ SE_AllowUndefinedBehavior
Allow UB that we can give a value, but not arbitrary unmodeled side effects.
Expr * IgnoreParenCasts() LLVM_READONLY
Skip past any parentheses and casts which might surround this expression until reaching a fixed point...
llvm::APSInt EvaluateKnownConstInt(const ASTContext &Ctx) const
EvaluateKnownConstInt - Call EvaluateAsRValue and return the folded integer.
Expr * IgnoreParenImpCasts() LLVM_READONLY
Skip past any parentheses and implicit casts which might surround this expression until reaching a fi...
bool isEvaluatable(const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects) const
isEvaluatable - Call EvaluateAsRValue to see if this expression can be constant folded without side-e...
std::optional< llvm::APSInt > getIntegerConstantExpr(const ASTContext &Ctx) const
isIntegerConstantExpr - Return the value if this expression is a valid integer constant expression.
bool HasSideEffects(const ASTContext &Ctx, bool IncludePossibleEffects=true) const
HasSideEffects - This routine returns true for all those expressions which have any effect other than...
bool EvaluateAsBooleanCondition(bool &Result, const ASTContext &Ctx, bool InConstantContext=false) const
EvaluateAsBooleanCondition - Return true if this is a constant which we can fold and convert to a boo...
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
static bool isSameComparisonOperand(const Expr *E1, const Expr *E2)
Checks that the two Expr's will refer to the same value as a comparison operand.
bool hasNonTrivialCall(const ASTContext &Ctx) const
Determine whether this expression involves a call to any function that is not trivial.
Represents a member of a struct/union/class.
unsigned getFieldIndex() const
Returns the index of this field within its record, as appropriate for passing to ASTRecordLayout::get...
const RecordDecl * getParent() const
Returns the parent of this field declaration, which is the struct in which this field is defined.
static FieldDecl * Create(const ASTContext &C, DeclContext *DC, SourceLocation StartLoc, SourceLocation IdLoc, const IdentifierInfo *Id, QualType T, TypeSourceInfo *TInfo, Expr *BW, bool Mutable, InClassInitStyle InitStyle)
Represents a function declaration or definition.
const ParmVarDecl * getParamDecl(unsigned i) const
QualType getReturnType() const
ArrayRef< ParmVarDecl * > parameters() const
FunctionDecl * getCanonicalDecl() override
Retrieves the "canonical" declaration of the given declaration.
FunctionDecl * getMostRecentDecl()
Returns the most recent (re)declaration of this declaration.
unsigned getNumParams() const
Return the number of parameters this function must have based on its FunctionType.
FunctionDecl * getPreviousDecl()
Return the previous declaration of this declaration or NULL if this is the first declaration.
GlobalDecl - represents a global declaration.
const Decl * getDecl() const
static ImplicitParamDecl * Create(ASTContext &C, DeclContext *DC, SourceLocation IdLoc, IdentifierInfo *Id, QualType T, ImplicitParamKind ParamKind)
Create implicit parameter.
static IntegerLiteral * Create(const ASTContext &C, const llvm::APInt &V, QualType type, SourceLocation l)
Returns a new integer literal with value 'V' and type 'type'.
MemberExpr - [C99 6.5.2.3] Structure and Union Members.
ValueDecl * getMemberDecl() const
Retrieve the member declaration to which this expression refers.
StringRef getName() const
Get the name of identifier for this declaration as a StringRef.
bool isExternallyVisible() const
This represents clause 'affinity' in the 'pragma omp task'-based directives.
Expr * getAssociatedExpression() const
ValueDecl * getAssociatedDeclaration() const
ArrayRef< MappableComponent > MappableExprComponentListRef
static std::pair< const Expr *, std::optional< size_t > > findAttachPtrExpr(MappableExprComponentListRef Components, OpenMPDirectiveKind CurDirKind)
Find the attach pointer expression from a list of mappable expression components.
static QualType getComponentExprElementType(const Expr *Exp)
Get the type of an element of a ComponentList Expr Exp.
const Stmt * getPreInitStmt() const
Get pre-initialization statement for the clause.
This is a basic class for representing single OpenMP clause.
SourceLocation getBeginLoc() const
Returns the starting location of the clause.
This represents 'pragma omp declare mapper ...' directive.
Expr * getMapperVarRef()
Get the variable declared in the mapper.
This represents 'pragma omp declare reduction ...' directive.
Expr * getInitializer()
Get initializer expression (if specified) of the declare reduction construct.
Expr * getInitPriv()
Get Priv variable of the initializer.
Expr * getCombinerOut()
Get Out variable of the combiner.
Expr * getCombinerIn()
Get In variable of the combiner.
Expr * getCombiner()
Get combiner expression of the declare reduction construct.
Expr * getInitOrig()
Get Orig variable of the initializer.
OMPDeclareReductionInitKind getInitializerKind() const
Get initializer kind.
This represents implicit clause 'depend' for the 'pragma omp task' directive.
This represents 'detach' clause in the 'pragma omp task' directive.
This represents 'device' clause in the 'pragma omp ...' directive.
This represents the 'doacross' clause for the 'pragma omp ordered' directive.
This represents 'if' clause in the 'pragma omp ...' directive.
Expr * getCondition() const
Returns condition.
This represents clause 'in_reduction' in the 'pragma omp task' directives.
OMPIteratorHelperData & getHelper(unsigned I)
Fetches helper data for the specified iteration space.
unsigned numOfIterators() const
Returns number of iterator definitions.
This represents clause 'lastprivate' in the 'pragma omp ...' directives.
This represents the 'message' clause in the 'pragma omp error' and the 'pragma omp parallel' directiv...
Expr * getMessageString() const
Returns message string of the clause.
This represents clause 'nontemporal' in the 'pragma omp ...' directives.
This represents 'nowait' clause in the 'pragma omp ...' directive.
This represents 'num_teams' clause in the 'pragma omp ...' directive.
This represents 'num_threads' clause in the 'pragma omp ...' directive.
This represents 'ordered' clause in the 'pragma omp ...' directive.
This represents clause 'private' in the 'pragma omp ...' directives.
This represents 'pragma omp requires...' directive.
clauselist_range clauselists()
This represents the 'severity' clause in the 'pragma omp error' and the 'pragma omp parallel' directi...
OpenMPSeverityClauseKind getSeverityKind() const
Returns kind of the clause.
This represents 'thread_limit' clause in the 'pragma omp ...' directive.
This represents clause 'uses_allocators' in the 'pragma omp target'-based directives.
This represents 'ompx_attribute' clause in a directive that might generate an outlined function.
This represents 'ompx_bare' clause in the 'pragma omp target teams ...' directive.
This represents 'ompx_dyn_cgroup_mem' clause in the 'pragma omp target ...' directive.
OpaqueValueExpr - An expression referring to an opaque object of a fixed type and value class.
Represents a parameter to a function.
PointerType - C99 6.7.5.1 - Pointer Declarators.
Represents an unpacked "presumed" location which can be presented to the user.
unsigned getColumn() const
Return the presumed column number of this location.
const char * getFilename() const
Return the presumed filename of this location.
unsigned getLine() const
Return the presumed line number of this location.
A (possibly-)qualified type.
void addRestrict()
Add the restrict qualifier to this QualType.
QualType withRestrict() const
bool isNull() const
Return true if this QualType doesn't point to a type yet.
const Type * getTypePtr() const
Retrieves a pointer to the underlying (unqualified) type.
Qualifiers getQualifiers() const
Retrieve the set of qualifiers applied to this type.
QualType getNonReferenceType() const
If Type is a reference type (e.g., const int&), returns the type that the reference refers to ("const int").
QualType getCanonicalType() const
DestructionKind isDestructedType() const
Returns a nonzero value if objects of this type require non-trivial work to clean up after.
Represents a struct/union/class.
field_iterator field_end() const
field_range fields() const
virtual void completeDefinition()
Note that the definition of this type is now complete.
field_iterator field_begin() const
Scope - A scope is a transient data structure that is used while parsing the program.
Encodes a location in the source.
static SourceLocation getFromRawEncoding(UIntTy Encoding)
Turn a raw encoding of a SourceLocation object into a real SourceLocation.
bool isValid() const
Return true if this is a valid SourceLocation object.
UIntTy getRawEncoding() const
When a SourceLocation itself cannot be used, this returns an (opaque) 32-bit integer encoding for it.
This class handles loading and caching of source files into memory.
PresumedLoc getPresumedLoc(SourceLocation Loc, bool UseLineDirectives=true) const
Returns the "presumed" location that a SourceLocation specifies.
Stmt - This represents one statement.
StmtClass getStmtClass() const
SourceRange getSourceRange() const LLVM_READONLY
SourceLocation tokens are not useful in isolation - they are low level value objects created/interpreted by SourceManager.
Stmt * IgnoreContainers(bool IgnoreCaptured=false)
Skip no-op (attributed, compound) container stmts and skip captured stmt at the top,...
SourceLocation getBeginLoc() const LLVM_READONLY
void startDefinition()
Starts the definition of this tag declaration.
The base class of the type hierarchy.
const Type * getPointeeOrArrayElementType() const
If this is a pointer type, return the pointee type.
bool isSignedIntegerType() const
Return true if this is an integer type that is signed, according to C99 6.2.5p4 [char,...
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
RecordDecl * getAsRecordDecl() const
Retrieves the RecordDecl this type refers to.
bool isPointerType() const
CanQualType getCanonicalTypeUnqualified() const
bool isIntegerType() const
isIntegerType() does not include complex integers (a GCC extension).
const T * castAs() const
Member-template castAs<specific type>.
bool isReferenceType() const
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
bool isLValueReferenceType() const
bool isAggregateType() const
Determines whether the type is a C++ aggregate type or C aggregate or union type.
RecordDecl * castAsRecordDecl() const
QualType getCanonicalTypeInternal() const
const Type * getBaseElementTypeUnsafe() const
Get the base element type of this type, potentially discarding type qualifiers.
bool isVariablyModifiedType() const
Whether this type is a variably-modified type (C99 6.7.5).
const ArrayType * getAsArrayTypeUnsafe() const
A variant of getAs<> for array types which silently discards qualifiers from the outermost type.
bool isFloatingType() const
bool isUnsignedIntegerType() const
Return true if this is an integer type that is unsigned, according to C99 6.2.5p6 [which returns true...
bool isAnyPointerType() const
const T * getAs() const
Member-template getAs<specific type>'.
bool isRecordType() const
Represent the declaration of a variable (in which case it is an lvalue) a function (in which case it ...
Represents a variable declaration or definition.
VarDecl * getCanonicalDecl() override
Retrieves the "canonical" declaration of the given declaration.
VarDecl * getDefinition(ASTContext &)
Get the real (not just tentative) definition for this declaration.
const Expr * getInit() const
bool hasExternalStorage() const
Returns true if a variable has extern or private_extern storage.
@ DeclarationOnly
This declaration is only a declaration.
DefinitionKind hasDefinition(ASTContext &) const
Check whether this variable is defined in this translation unit.
bool isLocalVarDeclOrParm() const
Similar to isLocalVarDecl but also includes parameters.
const Expr * getAnyInitializer() const
Get the initializer for this variable, no matter which declaration it is attached to.
Represents a C array with a specified size that is not an integer-constant-expression.
Expr * getSizeExpr() const
specific_attr_iterator - Iterates over a subrange of an AttrVec, only providing attributes that are of a specific type.
bool isEmptyRecordForLayout(const ASTContext &Context, QualType T)
isEmptyRecordForLayout - Return true iff a structure contains only empty base classes (per isEmptyRec...
@ Type
The l-value was considered opaque, so the alignment was determined from a type.
@ Decl
The l-value was an access to a declared entity or something equivalently strong, like the address of ...
bool isEmptyFieldForLayout(const ASTContext &Context, const FieldDecl *FD)
isEmptyFieldForLayout - Return true iff the field is "empty", that is, either a zero-width bit-field ...
ComparisonResult
Indicates the result of a tentative comparison.
The JSON file list parser is used to communicate input to InstallAPI.
bool isOpenMPWorksharingDirective(OpenMPDirectiveKind DKind)
Checks if the specified directive is a worksharing directive.
CanQual< Type > CanQualType
Represents a canonical, potentially-qualified type.
bool needsTaskBasedThreadLimit(OpenMPDirectiveKind DKind)
Checks if the specified target directive, combined or not, needs task based thread_limit.
@ Ctor_Complete
Complete object ctor.
bool isa(CodeGen::Address addr)
if(T->getSizeExpr()) TRY_TO(TraverseStmt(const_cast< Expr * >(T -> getSizeExpr())))
bool isOpenMPTargetDataManagementDirective(OpenMPDirectiveKind DKind)
Checks if the specified directive is a target data offload directive.
@ Conditional
A conditional (?:) operator.
@ ICIS_NoInit
No in-class initializer.
bool isOpenMPDistributeDirective(OpenMPDirectiveKind DKind)
Checks if the specified directive is a distribute directive.
@ LCK_ByRef
Capturing by reference.
LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE()
@ Private
'private' clause, allowed on 'parallel', 'serial', 'loop', 'parallel loop', and 'serial loop' constructs.
@ Vector
'vector' clause, allowed on 'loop', Combined, and 'routine' directives.
@ Reduction
'reduction' clause, allowed on Parallel, Serial, Loop, and the combined constructs.
@ Present
'present' clause, allowed on Compute and Combined constructs, plus 'data' and 'declare'.
OpenMPScheduleClauseModifier
OpenMP modifiers for 'schedule' clause.
@ OMPC_SCHEDULE_MODIFIER_last
@ OMPC_SCHEDULE_MODIFIER_unknown
nullptr
This class represents a compute construct, representing a 'Kind' of 'parallel', 'serial',...
bool isOpenMPParallelDirective(OpenMPDirectiveKind DKind)
Checks if the specified directive is a parallel-kind directive.
OpenMPDistScheduleClauseKind
OpenMP attributes for 'dist_schedule' clause.
static bool classof(const Stmt *T)
bool isOpenMPTaskingDirective(OpenMPDirectiveKind Kind)
Checks if the specified directive kind is one of tasking directives - task, taskloop,...
bool isOpenMPTargetExecutionDirective(OpenMPDirectiveKind DKind)
Checks if the specified directive is a target code offload directive.
@ Result
The result type of a method or function.
bool isOpenMPTeamsDirective(OpenMPDirectiveKind DKind)
Checks if the specified directive is a teams-kind directive.
const FunctionProtoType * T
OpenMPDependClauseKind
OpenMP attributes for 'depend' clause.
@ Dtor_Complete
Complete object dtor.
@ Union
The "union" keyword.
bool isOpenMPTargetMapEnteringDirective(OpenMPDirectiveKind DKind)
Checks if the specified directive is a map-entering target directive.
@ Type
The name was classified as a type.
bool isOpenMPLoopDirective(OpenMPDirectiveKind DKind)
Checks if the specified directive is a directive with an associated loop construct.
OpenMPSeverityClauseKind
OpenMP attributes for 'severity' clause.
LangAS
Defines the address space values used by the address space qualifier of QualType.
llvm::omp::Directive OpenMPDirectiveKind
OpenMP directives.
bool isOpenMPSimdDirective(OpenMPDirectiveKind DKind)
Checks if the specified directive is a simd directive.
@ VK_PRValue
A pr-value expression (in the C++11 taxonomy) produces a temporary value.
@ VK_LValue
An l-value expression is a reference to an object with independent storage.
for(const auto &A :T->param_types())
void getOpenMPCaptureRegions(llvm::SmallVectorImpl< OpenMPDirectiveKind > &CaptureRegions, OpenMPDirectiveKind DKind)
Return the captured regions of an OpenMP directive.
OpenMPNumThreadsClauseModifier
@ OMPC_ATOMIC_DEFAULT_MEM_ORDER_unknown
U cast(CodeGen::Address addr)
OpenMPMapModifierKind
OpenMP modifier kind for 'map' clause.
@ OMPC_MAP_MODIFIER_unknown
@ Other
Other implicit parameter.
OpenMPScheduleClauseKind
OpenMP attributes for 'schedule' clause.
bool isOpenMPTaskLoopDirective(OpenMPDirectiveKind DKind)
Checks if the specified directive is a taskloop directive.
OpenMPMapClauseKind
OpenMP mapping kind for 'map' clause.
Diagnostic wrappers for TextAPI types for error reporting.
Maps the expression for the lastprivate variable to the global copy used to store new value because o...
llvm::SmallVector< bool, 8 > IsPrivateVarReduction
A jump destination is an abstract label, branching to which may require a jump out through normal cle...
llvm::BasicBlock * getBlock() const
unsigned NumberOfTargetItems
Address BasePointersArray
llvm::PointerType * VoidPtrTy
llvm::IntegerType * Int64Ty
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
llvm::CallingConv::ID getRuntimeCC() const
llvm::IntegerType * SizeTy
llvm::PointerType * VoidPtrPtrTy
llvm::IntegerType * Int32Ty
llvm::IntegerType * IntPtrTy
llvm::IntegerType * IntTy
int
CharUnits getPointerAlign() const
OpenMPDependClauseKind DepKind
const Expr * IteratorExpr
SmallVector< const Expr *, 4 > DepExprs
EvalResult is a struct with detailed info about an evaluated expression.
Extra information about a function prototype.
Expr * CounterUpdate
Updater for the internal counter: ++CounterVD;.
Data for list of allocators.
Expr * AllocatorTraits
Allocator traits.
Expr * Allocator
Allocator.
Scheduling data for loop-based OpenMP directives.
OpenMPScheduleClauseModifier M2
OpenMPScheduleClauseModifier M1
OpenMPScheduleClauseKind Schedule
Describes how types, statements, expressions, and declarations should be printed.