Source listing obtained via codestin.com; credit to clang.llvm.org.

clang 22.0.0git — CGStmt.cpp (doxygen source listing)
See the official clang documentation for this file.
Note: this scraped listing interleaves the original doxygen line numbers with
the code, and some original lines were dropped during extraction.
1//===--- CGStmt.cpp - Emit LLVM Code from Statements ----------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Stmt nodes as LLVM code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CGDebugInfo.h"
14#include "CGOpenMPRuntime.h"
15#include "CodeGenFunction.h"
16#include "CodeGenModule.h"
17#include "CodeGenPGO.h"
18#include "TargetInfo.h"
19#include "clang/AST/Attr.h"
20#include "clang/AST/Expr.h"
21#include "clang/AST/Stmt.h"
28#include "llvm/ADT/ArrayRef.h"
29#include "llvm/ADT/DenseMap.h"
30#include "llvm/ADT/SmallSet.h"
31#include "llvm/ADT/StringExtras.h"
32#include "llvm/IR/Assumptions.h"
33#include "llvm/IR/DataLayout.h"
34#include "llvm/IR/InlineAsm.h"
35#include "llvm/IR/Intrinsics.h"
36#include "llvm/IR/MDBuilder.h"
37#include "llvm/Support/SaveAndRestore.h"
38#include <optional>
39
40using namespace clang;
41using namespace CodeGen;
42
43//===----------------------------------------------------------------------===//
44// Statement Emission
45//===----------------------------------------------------------------------===//
46
47namespace llvm {
48extern cl::opt<bool> EnableSingleByteCoverage;
49} // namespace llvm
50
52 if (CGDebugInfo *DI = getDebugInfo()) {
54 Loc = S->getBeginLoc();
55 DI->EmitLocation(Builder, Loc);
56
57 LastStopPoint = Loc;
58 }
59}
60
62 assert(S && "Null statement?");
63 PGO->setCurrentStmt(S);
64
65 // These statements have their own debug info handling.
66 if (EmitSimpleStmt(S, Attrs))
67 return;
68
69 // Check if we are generating unreachable code.
70 if (!HaveInsertPoint()) {
71 // If so, and the statement doesn't contain a label, then we do not need to
72 // generate actual code. This is safe because (1) the current point is
73 // unreachable, so we don't need to execute the code, and (2) we've already
74 // handled the statements which update internal data structures (like the
75 // local variable map) which could be used by subsequent statements.
76 if (!ContainsLabel(S)) {
77 // Verify that any decl statements were handled as simple, they may be in
78 // scope of subsequent reachable statements.
79 assert(!isa<DeclStmt>(*S) && "Unexpected DeclStmt!");
80 PGO->markStmtMaybeUsed(S);
81 return;
82 }
83
84 // Otherwise, make a new block to hold the code.
86 }
87
88 // Generate a stoppoint if we are emitting debug info.
90
91 // Ignore all OpenMP directives except for simd if OpenMP with Simd is
92 // enabled.
93 if (getLangOpts().OpenMP && getLangOpts().OpenMPSimd) {
94 if (const auto *D = dyn_cast<OMPExecutableDirective>(S)) {
96 return;
97 }
98 }
99
100 switch (S->getStmtClass()) {
102 case Stmt::CXXCatchStmtClass:
103 case Stmt::SEHExceptStmtClass:
104 case Stmt::SEHFinallyStmtClass:
105 case Stmt::MSDependentExistsStmtClass:
106 llvm_unreachable("invalid statement class to emit generically");
107 case Stmt::NullStmtClass:
108 case Stmt::CompoundStmtClass:
109 case Stmt::DeclStmtClass:
110 case Stmt::LabelStmtClass:
111 case Stmt::AttributedStmtClass:
112 case Stmt::GotoStmtClass:
113 case Stmt::BreakStmtClass:
114 case Stmt::ContinueStmtClass:
115 case Stmt::DefaultStmtClass:
116 case Stmt::CaseStmtClass:
117 case Stmt::SEHLeaveStmtClass:
118 case Stmt::SYCLKernelCallStmtClass:
119 llvm_unreachable("should have emitted these statements as simple");
120
121#define STMT(Type, Base)
122#define ABSTRACT_STMT(Op)
123#define EXPR(Type, Base) \
124 case Stmt::Type##Class:
125#include "clang/AST/StmtNodes.inc"
126 {
127 // Remember the block we came in on.
128 llvm::BasicBlock *incoming = Builder.GetInsertBlock();
129 assert(incoming && "expression emission must have an insertion point");
130
132
133 llvm::BasicBlock *outgoing = Builder.GetInsertBlock();
134 assert(outgoing && "expression emission cleared block!");
135
136 // The expression emitters assume (reasonably!) that the insertion
137 // point is always set. To maintain that, the call-emission code
138 // for noreturn functions has to enter a new block with no
139 // predecessors. We want to kill that block and mark the current
140 // insertion point unreachable in the common case of a call like
141 // "exit();". Since expression emission doesn't otherwise create
142 // blocks with no predecessors, we can just test for that.
143 // However, we must be careful not to do this to our incoming
144 // block, because *statement* emission does sometimes create
145 // reachable blocks which will have no predecessors until later in
146 // the function. This occurs with, e.g., labels that are not
147 // reachable by fallthrough.
148 if (incoming != outgoing && outgoing->use_empty()) {
149 outgoing->eraseFromParent();
150 Builder.ClearInsertionPoint();
151 }
152 break;
153 }
154
155 case Stmt::IndirectGotoStmtClass:
157
158 case Stmt::IfStmtClass: EmitIfStmt(cast<IfStmt>(*S)); break;
159 case Stmt::WhileStmtClass: EmitWhileStmt(cast<WhileStmt>(*S), Attrs); break;
160 case Stmt::DoStmtClass: EmitDoStmt(cast<DoStmt>(*S), Attrs); break;
161 case Stmt::ForStmtClass: EmitForStmt(cast<ForStmt>(*S), Attrs); break;
162
163 case Stmt::ReturnStmtClass: EmitReturnStmt(cast<ReturnStmt>(*S)); break;
164
165 case Stmt::SwitchStmtClass: EmitSwitchStmt(cast<SwitchStmt>(*S)); break;
166 case Stmt::GCCAsmStmtClass: // Intentional fall-through.
167 case Stmt::MSAsmStmtClass: EmitAsmStmt(cast<AsmStmt>(*S)); break;
168 case Stmt::CoroutineBodyStmtClass:
170 break;
171 case Stmt::CoreturnStmtClass:
173 break;
174 case Stmt::CapturedStmtClass: {
175 const CapturedStmt *CS = cast<CapturedStmt>(S);
177 }
178 break;
179 case Stmt::ObjCAtTryStmtClass:
181 break;
182 case Stmt::ObjCAtCatchStmtClass:
183 llvm_unreachable(
184 "@catch statements should be handled by EmitObjCAtTryStmt");
185 case Stmt::ObjCAtFinallyStmtClass:
186 llvm_unreachable(
187 "@finally statements should be handled by EmitObjCAtTryStmt");
188 case Stmt::ObjCAtThrowStmtClass:
190 break;
191 case Stmt::ObjCAtSynchronizedStmtClass:
193 break;
194 case Stmt::ObjCForCollectionStmtClass:
196 break;
197 case Stmt::ObjCAutoreleasePoolStmtClass:
199 break;
200
201 case Stmt::CXXTryStmtClass:
203 break;
204 case Stmt::CXXForRangeStmtClass:
206 break;
207 case Stmt::SEHTryStmtClass:
209 break;
210 case Stmt::OMPMetaDirectiveClass:
212 break;
213 case Stmt::OMPCanonicalLoopClass:
215 break;
216 case Stmt::OMPParallelDirectiveClass:
218 break;
219 case Stmt::OMPSimdDirectiveClass:
221 break;
222 case Stmt::OMPTileDirectiveClass:
224 break;
225 case Stmt::OMPStripeDirectiveClass:
227 break;
228 case Stmt::OMPUnrollDirectiveClass:
230 break;
231 case Stmt::OMPReverseDirectiveClass:
233 break;
234 case Stmt::OMPInterchangeDirectiveClass:
236 break;
237 case Stmt::OMPForDirectiveClass:
239 break;
240 case Stmt::OMPForSimdDirectiveClass:
242 break;
243 case Stmt::OMPSectionsDirectiveClass:
245 break;
246 case Stmt::OMPSectionDirectiveClass:
248 break;
249 case Stmt::OMPSingleDirectiveClass:
251 break;
252 case Stmt::OMPMasterDirectiveClass:
254 break;
255 case Stmt::OMPCriticalDirectiveClass:
257 break;
258 case Stmt::OMPParallelForDirectiveClass:
260 break;
261 case Stmt::OMPParallelForSimdDirectiveClass:
263 break;
264 case Stmt::OMPParallelMasterDirectiveClass:
266 break;
267 case Stmt::OMPParallelSectionsDirectiveClass:
269 break;
270 case Stmt::OMPTaskDirectiveClass:
272 break;
273 case Stmt::OMPTaskyieldDirectiveClass:
275 break;
276 case Stmt::OMPErrorDirectiveClass:
278 break;
279 case Stmt::OMPBarrierDirectiveClass:
281 break;
282 case Stmt::OMPTaskwaitDirectiveClass:
284 break;
285 case Stmt::OMPTaskgroupDirectiveClass:
287 break;
288 case Stmt::OMPFlushDirectiveClass:
290 break;
291 case Stmt::OMPDepobjDirectiveClass:
293 break;
294 case Stmt::OMPScanDirectiveClass:
296 break;
297 case Stmt::OMPOrderedDirectiveClass:
299 break;
300 case Stmt::OMPAtomicDirectiveClass:
302 break;
303 case Stmt::OMPTargetDirectiveClass:
305 break;
306 case Stmt::OMPTeamsDirectiveClass:
308 break;
309 case Stmt::OMPCancellationPointDirectiveClass:
311 break;
312 case Stmt::OMPCancelDirectiveClass:
314 break;
315 case Stmt::OMPTargetDataDirectiveClass:
317 break;
318 case Stmt::OMPTargetEnterDataDirectiveClass:
320 break;
321 case Stmt::OMPTargetExitDataDirectiveClass:
323 break;
324 case Stmt::OMPTargetParallelDirectiveClass:
326 break;
327 case Stmt::OMPTargetParallelForDirectiveClass:
329 break;
330 case Stmt::OMPTaskLoopDirectiveClass:
332 break;
333 case Stmt::OMPTaskLoopSimdDirectiveClass:
335 break;
336 case Stmt::OMPMasterTaskLoopDirectiveClass:
338 break;
339 case Stmt::OMPMaskedTaskLoopDirectiveClass:
341 break;
342 case Stmt::OMPMasterTaskLoopSimdDirectiveClass:
345 break;
346 case Stmt::OMPMaskedTaskLoopSimdDirectiveClass:
349 break;
350 case Stmt::OMPParallelMasterTaskLoopDirectiveClass:
353 break;
354 case Stmt::OMPParallelMaskedTaskLoopDirectiveClass:
357 break;
358 case Stmt::OMPParallelMasterTaskLoopSimdDirectiveClass:
361 break;
362 case Stmt::OMPParallelMaskedTaskLoopSimdDirectiveClass:
365 break;
366 case Stmt::OMPDistributeDirectiveClass:
368 break;
369 case Stmt::OMPTargetUpdateDirectiveClass:
371 break;
372 case Stmt::OMPDistributeParallelForDirectiveClass:
375 break;
376 case Stmt::OMPDistributeParallelForSimdDirectiveClass:
379 break;
380 case Stmt::OMPDistributeSimdDirectiveClass:
382 break;
383 case Stmt::OMPTargetParallelForSimdDirectiveClass:
386 break;
387 case Stmt::OMPTargetSimdDirectiveClass:
389 break;
390 case Stmt::OMPTeamsDistributeDirectiveClass:
392 break;
393 case Stmt::OMPTeamsDistributeSimdDirectiveClass:
396 break;
397 case Stmt::OMPTeamsDistributeParallelForSimdDirectiveClass:
400 break;
401 case Stmt::OMPTeamsDistributeParallelForDirectiveClass:
404 break;
405 case Stmt::OMPTargetTeamsDirectiveClass:
407 break;
408 case Stmt::OMPTargetTeamsDistributeDirectiveClass:
411 break;
412 case Stmt::OMPTargetTeamsDistributeParallelForDirectiveClass:
415 break;
416 case Stmt::OMPTargetTeamsDistributeParallelForSimdDirectiveClass:
419 break;
420 case Stmt::OMPTargetTeamsDistributeSimdDirectiveClass:
423 break;
424 case Stmt::OMPInteropDirectiveClass:
426 break;
427 case Stmt::OMPDispatchDirectiveClass:
428 CGM.ErrorUnsupported(S, "OpenMP dispatch directive");
429 break;
430 case Stmt::OMPScopeDirectiveClass:
432 break;
433 case Stmt::OMPMaskedDirectiveClass:
435 break;
436 case Stmt::OMPGenericLoopDirectiveClass:
438 break;
439 case Stmt::OMPTeamsGenericLoopDirectiveClass:
441 break;
442 case Stmt::OMPTargetTeamsGenericLoopDirectiveClass:
445 break;
446 case Stmt::OMPParallelGenericLoopDirectiveClass:
449 break;
450 case Stmt::OMPTargetParallelGenericLoopDirectiveClass:
453 break;
454 case Stmt::OMPParallelMaskedDirectiveClass:
456 break;
457 case Stmt::OMPAssumeDirectiveClass:
459 break;
460 case Stmt::OpenACCComputeConstructClass:
462 break;
463 case Stmt::OpenACCLoopConstructClass:
465 break;
466 case Stmt::OpenACCCombinedConstructClass:
468 break;
469 case Stmt::OpenACCDataConstructClass:
471 break;
472 case Stmt::OpenACCEnterDataConstructClass:
474 break;
475 case Stmt::OpenACCExitDataConstructClass:
477 break;
478 case Stmt::OpenACCHostDataConstructClass:
480 break;
481 case Stmt::OpenACCWaitConstructClass:
483 break;
484 case Stmt::OpenACCInitConstructClass:
486 break;
487 case Stmt::OpenACCShutdownConstructClass:
489 break;
490 case Stmt::OpenACCSetConstructClass:
492 break;
493 case Stmt::OpenACCUpdateConstructClass:
495 break;
496 case Stmt::OpenACCAtomicConstructClass:
498 break;
499 case Stmt::OpenACCCacheConstructClass:
501 break;
502 }
503}
504
507 switch (S->getStmtClass()) {
508 default:
509 return false;
510 case Stmt::NullStmtClass:
511 break;
512 case Stmt::CompoundStmtClass:
514 break;
515 case Stmt::DeclStmtClass:
517 break;
518 case Stmt::LabelStmtClass:
520 break;
521 case Stmt::AttributedStmtClass:
523 break;
524 case Stmt::GotoStmtClass:
526 break;
527 case Stmt::BreakStmtClass:
529 break;
530 case Stmt::ContinueStmtClass:
532 break;
533 case Stmt::DefaultStmtClass:
535 break;
536 case Stmt::CaseStmtClass:
537 EmitCaseStmt(cast<CaseStmt>(*S), Attrs);
538 break;
539 case Stmt::SEHLeaveStmtClass:
541 break;
542 case Stmt::SYCLKernelCallStmtClass:
543 // SYCL kernel call statements are generated as wrappers around the body
544 // of functions declared with the sycl_kernel_entry_point attribute. Such
545 // functions are used to specify how a SYCL kernel (a function object) is
546 // to be invoked; the SYCL kernel call statement contains a transformed
547 // variation of the function body and is used to generate a SYCL kernel
548 // caller function; a function that serves as the device side entry point
549 // used to execute the SYCL kernel. The sycl_kernel_entry_point attributed
550 // function is invoked by host code in order to trigger emission of the
551 // device side SYCL kernel caller function and to generate metadata needed
552 // by SYCL run-time library implementations; the function is otherwise
553 // intended to have no effect. As such, the function body is not evaluated
554 // as part of the invocation during host compilation (and the function
555 // should not be called or emitted during device compilation); the SYCL
556 // kernel call statement is thus handled as a null statement for the
557 // purpose of code generation.
558 break;
559 }
560 return true;
561}
562
563/// EmitCompoundStmt - Emit a compound statement {..} node. If GetLast is true,
564/// this captures the expression result of the last sub-statement and returns it
565/// (for use by the statement expression extension).
567 AggValueSlot AggSlot) {
568 PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),S.getLBracLoc(),
569 "LLVM IR generation of compound statement ('{}')");
570
571 // Keep track of the current cleanup stack depth, including debug scopes.
573
574 return EmitCompoundStmtWithoutScope(S, GetLast, AggSlot);
575}
576
579 bool GetLast,
580 AggValueSlot AggSlot) {
581
582 const Stmt *ExprResult = S.getStmtExprResult();
583 assert((!GetLast || (GetLast && ExprResult)) &&
584 "If GetLast is true then the CompoundStmt must have a StmtExprResult");
585
586 Address RetAlloca = Address::invalid();
587
588 for (auto *CurStmt : S.body()) {
589 if (GetLast && ExprResult == CurStmt) {
590 // We have to special case labels here. They are statements, but when put
591 // at the end of a statement expression, they yield the value of their
592 // subexpression. Handle this by walking through all labels we encounter,
593 // emitting them before we evaluate the subexpr.
594 // Similar issues arise for attributed statements.
595 while (!isa<Expr>(ExprResult)) {
596 if (const auto *LS = dyn_cast<LabelStmt>(ExprResult)) {
597 EmitLabel(LS->getDecl());
598 ExprResult = LS->getSubStmt();
599 } else if (const auto *AS = dyn_cast<AttributedStmt>(ExprResult)) {
600 // FIXME: Update this if we ever have attributes that affect the
601 // semantics of an expression.
602 ExprResult = AS->getSubStmt();
603 } else {
604 llvm_unreachable("unknown value statement");
605 }
606 }
607
609
610 const Expr *E = cast<Expr>(ExprResult);
611 QualType ExprTy = E->getType();
612 if (hasAggregateEvaluationKind(ExprTy)) {
613 EmitAggExpr(E, AggSlot);
614 } else {
615 // We can't return an RValue here because there might be cleanups at
616 // the end of the StmtExpr. Because of that, we have to emit the result
617 // here into a temporary alloca.
618 RetAlloca = CreateMemTemp(ExprTy);
619 EmitAnyExprToMem(E, RetAlloca, Qualifiers(),
620 /*IsInit*/ false);
621 }
622 } else {
623 EmitStmt(CurStmt);
624 }
625 }
626
627 return RetAlloca;
628}
629
// Try to fold a forwarding basic block into its single successor: if the
// block consists of nothing but an unconditional branch, redirect all of the
// block's users to the branch target and delete the block.
// NOTE(review): the function signature line (original line 630) is missing
// from this scraped listing; per the call site below this appears to be
// CodeGenFunction::SimplifyForwardingBlocks(llvm::BasicBlock *BB) — confirm
// against upstream.
631 llvm::BranchInst *BI = dyn_cast<llvm::BranchInst>(BB->getTerminator());
632
633 // If there is a cleanup stack, then it isn't worth trying to
634 // simplify this block (we would need to remove it from the scope map
635 // and cleanup entry).
636 if (!EHStack.empty())
637 return;
638
639 // Can only simplify direct branches.
640 if (!BI || !BI->isUnconditional())
641 return;
642
643 // Can only simplify empty blocks.
644 if (BI->getIterator() != BB->begin())
645 return;
646
 // Forward every use of the block to the branch target, then drop both
 // the branch and the now-unreferenced block.
647 BB->replaceAllUsesWith(BI->getSuccessor(0));
648 BI->eraseFromParent();
649 BB->eraseFromParent();
650}
651
652void CodeGenFunction::EmitBlock(llvm::BasicBlock *BB, bool IsFinished) {
653 llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
654
655 // Fall out of the current block (if necessary).
656 EmitBranch(BB);
657
658 if (IsFinished && BB->use_empty()) {
659 delete BB;
660 return;
661 }
662
663 // Place the block after the current block, if possible, or else at
664 // the end of the function.
665 if (CurBB && CurBB->getParent())
666 CurFn->insert(std::next(CurBB->getIterator()), BB);
667 else
668 CurFn->insert(CurFn->end(), BB);
669 Builder.SetInsertPoint(BB);
670}
671
672void CodeGenFunction::EmitBranch(llvm::BasicBlock *Target) {
673 // Emit a branch from the current block to the target one if this
674 // was a real block. If this was just a fall-through block after a
675 // terminator, don't emit it.
676 llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
677
678 if (!CurBB || CurBB->getTerminator()) {
679 // If there is no insert point or the previous block is already
680 // terminated, don't touch it.
681 } else {
682 // Otherwise, create a fall-through branch.
683 Builder.CreateBr(Target);
684 }
685
686 Builder.ClearInsertionPoint();
687}
688
689void CodeGenFunction::EmitBlockAfterUses(llvm::BasicBlock *block) {
690 bool inserted = false;
691 for (llvm::User *u : block->users()) {
692 if (llvm::Instruction *insn = dyn_cast<llvm::Instruction>(u)) {
693 CurFn->insert(std::next(insn->getParent()->getIterator()), block);
694 inserted = true;
695 break;
696 }
697 }
698
699 if (!inserted)
700 CurFn->insert(CurFn->end(), block);
701
702 Builder.SetInsertPoint(block);
703}
704
707 JumpDest &Dest = LabelMap[D];
708 if (Dest.isValid()) return Dest;
709
710 // Create, but don't insert, the new block.
711 Dest = JumpDest(createBasicBlock(D->getName()),
714 return Dest;
715}
716
718 // Add this label to the current lexical scope if we're within any
719 // normal cleanups. Jumps "in" to this label --- when permitted by
720 // the language --- may need to be routed around such cleanups.
721 if (EHStack.hasNormalCleanups() && CurLexicalScope)
722 CurLexicalScope->addLabel(D);
723
724 JumpDest &Dest = LabelMap[D];
725
726 // If we didn't need a forward reference to this label, just go
727 // ahead and create a destination at the current scope.
728 if (!Dest.isValid()) {
730
731 // Otherwise, we need to give this label a target depth and remove
732 // it from the branch-fixups list.
733 } else {
734 assert(!Dest.getScopeDepth().isValid() && "already emitted label!");
735 Dest.setScopeDepth(EHStack.stable_begin());
737 }
738
739 EmitBlock(Dest.getBlock());
740
741 // Emit debug info for labels.
742 if (CGDebugInfo *DI = getDebugInfo()) {
743 if (CGM.getCodeGenOpts().hasReducedDebugInfo()) {
744 DI->setLocation(D->getLocation());
745 DI->EmitLabel(D, Builder);
746 }
747 }
748
750}
751
752/// Change the cleanup scope of the labels in this lexical scope to
753/// match the scope of the enclosing context.
 // NOTE(review): the function signature line (original line 754) is missing
 // from this scraped listing; judging by the members it touches (CGF,
 // Labels, ParentScope) this is the body of
 // CodeGenFunction::LexicalScope::rescopeLabels() — confirm against upstream.
755 assert(!Labels.empty());
756 EHScopeStack::stable_iterator innermostScope
757 = CGF.EHStack.getInnermostNormalCleanup();
758
759 // Change the scope depth of all the labels.
760 for (const LabelDecl *Label : Labels) {
761 assert(CGF.LabelMap.count(Label));
762 JumpDest &dest = CGF.LabelMap.find(Label)->second;
 // Each label must already have a valid depth that the new scope
 // encloses, otherwise hoisting it would be unsound.
763 assert(dest.getScopeDepth().isValid());
764 assert(innermostScope.encloses(dest.getScopeDepth()));
765 dest.setScopeDepth(innermostScope);
766 }
767
768 // Reparent the labels if the new scope also has cleanups.
769 if (innermostScope != EHScopeStack::stable_end() && ParentScope) {
770 ParentScope->Labels.append(Labels.begin(), Labels.end());
771 }
772}
773
774
776 EmitLabel(S.getDecl());
777
778 // IsEHa - emit eha.scope.begin if it's a side entry of a scope
779 if (getLangOpts().EHAsynch && S.isSideEntry())
781
782 EmitStmt(S.getSubStmt());
783}
784
 // NOTE(review): the function signature line (original line 785) is missing
 // from this scraped listing; per the uses of S.getAttrs()/S.getSubStmt()
 // this appears to be
 // CodeGenFunction::EmitAttributedStmt(const AttributedStmt &S) — confirm
 // against upstream.
 // Collect the statement attributes that influence codegen, then emit the
 // sub-statement with the corresponding state scoped via SaveAndRestore so
 // the flags apply only for the duration of this statement.
786 bool nomerge = false;
787 bool noinline = false;
788 bool alwaysinline = false;
789 bool noconvergent = false;
790 HLSLControlFlowHintAttr::Spelling flattenOrBranch =
791 HLSLControlFlowHintAttr::SpellingNotCalculated;
792 const CallExpr *musttail = nullptr;
793 const AtomicAttr *AA = nullptr;
794
795 for (const auto *A : S.getAttrs()) {
796 switch (A->getKind()) {
797 default:
798 break;
799 case attr::NoMerge:
800 nomerge = true;
801 break;
802 case attr::NoInline:
803 noinline = true;
804 break;
805 case attr::AlwaysInline:
806 alwaysinline = true;
807 break;
808 case attr::NoConvergent:
809 noconvergent = true;
810 break;
 // [[clang::musttail]]: the sub-statement is required to be a return of
 // a call expression; the casts below encode that invariant.
811 case attr::MustTail: {
812 const Stmt *Sub = S.getSubStmt();
813 const ReturnStmt *R = cast<ReturnStmt>(Sub);
814 musttail = cast<CallExpr>(R->getRetValue()->IgnoreParens());
815 } break;
 // [[assume(...)]]: only lower to llvm.assume when assumptions are
 // enabled, there is an insertion point, and the expression is
 // side-effect free (it must not be evaluated for its effects).
816 case attr::CXXAssume: {
817 const Expr *Assumption = cast<CXXAssumeAttr>(A)->getAssumption();
818 if (getLangOpts().CXXAssumptions && Builder.GetInsertBlock() &&
819 !Assumption->HasSideEffects(getContext())) {
820 llvm::Value *AssumptionVal = EmitCheckedArgForAssume(Assumption);
821 Builder.CreateAssumption(AssumptionVal);
822 }
823 } break;
824 case attr::Atomic:
825 AA = cast<AtomicAttr>(A);
826 break;
827 case attr::HLSLControlFlowHint: {
828 flattenOrBranch = cast<HLSLControlFlowHintAttr>(A)->getSemanticSpelling();
829 } break;
830 }
831 }
 // Scope the collected flags to this statement's emission only; they are
 // restored automatically when these objects are destroyed.
832 SaveAndRestore save_nomerge(InNoMergeAttributedStmt, nomerge);
833 SaveAndRestore save_noinline(InNoInlineAttributedStmt, noinline);
834 SaveAndRestore save_alwaysinline(InAlwaysInlineAttributedStmt, alwaysinline);
835 SaveAndRestore save_noconvergent(InNoConvergentAttributedStmt, noconvergent);
836 SaveAndRestore save_musttail(MustTailCall, musttail);
837 SaveAndRestore save_flattenOrBranch(HLSLControlFlowAttr, flattenOrBranch);
838 CGAtomicOptionsRAII AORAII(CGM, AA);
839 EmitStmt(S.getSubStmt(), S.getAttrs());
840}
841
843 // If this code is reachable then emit a stop point (if generating
844 // debug info). We have to do this ourselves because we are on the
845 // "simple" statement path.
846 if (HaveInsertPoint())
847 EmitStopPoint(&S);
848
851}
852
853
856 if (const LabelDecl *Target = S.getConstantTarget()) {
858 return;
859 }
860
861 // Ensure that we have an i8* for our PHI node.
862 llvm::Value *V = Builder.CreateBitCast(EmitScalarExpr(S.getTarget()),
863 Int8PtrTy, "addr");
864 llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
865
866 // Get the basic block for the indirect goto.
867 llvm::BasicBlock *IndGotoBB = GetIndirectGotoBlock();
868
869 // The first instruction in the block has to be the PHI for the switch dest,
870 // add an entry for this branch.
871 cast<llvm::PHINode>(IndGotoBB->begin())->addIncoming(V, CurBB);
872
873 EmitBranch(IndGotoBB);
874 if (CurBB && CurBB->getTerminator())
875 addInstToCurrentSourceAtom(CurBB->getTerminator(), nullptr);
876}
877
879 const Stmt *Else = S.getElse();
880
881 // The else branch of a consteval if statement is always the only branch that
882 // can be runtime evaluated.
883 if (S.isConsteval()) {
884 const Stmt *Executed = S.isNegatedConsteval() ? S.getThen() : Else;
885 if (Executed) {
886 RunCleanupsScope ExecutedScope(*this);
887 EmitStmt(Executed);
888 }
889 return;
890 }
891
892 // C99 6.8.4.1: The first substatement is executed if the expression compares
893 // unequal to 0. The condition must be a scalar type.
894 LexicalScope ConditionScope(*this, S.getCond()->getSourceRange());
895 ApplyDebugLocation DL(*this, S.getCond());
896
897 if (S.getInit())
898 EmitStmt(S.getInit());
899
900 if (S.getConditionVariable())
902
903 // If the condition constant folds and can be elided, try to avoid emitting
904 // the condition and the dead arm of the if/else.
905 bool CondConstant;
906 if (ConstantFoldsToSimpleInteger(S.getCond(), CondConstant,
907 S.isConstexpr())) {
908 // Figure out which block (then or else) is executed.
909 const Stmt *Executed = S.getThen();
910 const Stmt *Skipped = Else;
911 if (!CondConstant) // Condition false?
912 std::swap(Executed, Skipped);
913
914 // If the skipped block has no labels in it, just emit the executed block.
915 // This avoids emitting dead code and simplifies the CFG substantially.
916 if (S.isConstexpr() || !ContainsLabel(Skipped)) {
917 if (CondConstant)
919 if (Executed) {
921 RunCleanupsScope ExecutedScope(*this);
922 EmitStmt(Executed);
923 }
924 PGO->markStmtMaybeUsed(Skipped);
925 return;
926 }
927 }
928
929 // Otherwise, the condition did not fold, or we couldn't elide it. Just emit
930 // the conditional branch.
931 llvm::BasicBlock *ThenBlock = createBasicBlock("if.then");
932 llvm::BasicBlock *ContBlock = createBasicBlock("if.end");
933 llvm::BasicBlock *ElseBlock = ContBlock;
934 if (Else)
935 ElseBlock = createBasicBlock("if.else");
936
937 // Prefer the PGO based weights over the likelihood attribute.
938 // When the build isn't optimized the metadata isn't used, so don't generate
939 // it.
940 // Also, differentiate between disabled PGO and a never executed branch with
941 // PGO. Assuming PGO is in use:
942 // - we want to ignore the [[likely]] attribute if the branch is never
943 // executed,
944 // - assuming the profile is poor, preserving the attribute may still be
945 // beneficial.
946 // As an approximation, preserve the attribute only if both the branch and the
947 // parent context were not executed.
949 uint64_t ThenCount = getProfileCount(S.getThen());
950 if (!ThenCount && !getCurrentProfileCount() &&
951 CGM.getCodeGenOpts().OptimizationLevel)
952 LH = Stmt::getLikelihood(S.getThen(), Else);
953
954 // When measuring MC/DC, always fully evaluate the condition up front using
955 // EvaluateExprAsBool() so that the test vector bitmap can be updated prior to
956 // executing the body of the if.then or if.else. This is useful for when
957 // there is a 'return' within the body, but this is particularly beneficial
958 // when one if-stmt is nested within another if-stmt so that all of the MC/DC
959 // updates are kept linear and consistent.
960 if (!CGM.getCodeGenOpts().MCDCCoverage) {
961 EmitBranchOnBoolExpr(S.getCond(), ThenBlock, ElseBlock, ThenCount, LH,
962 /*ConditionalOp=*/nullptr,
963 /*ConditionalDecl=*/S.getConditionVariable());
964 } else {
965 llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
967 Builder.CreateCondBr(BoolCondVal, ThenBlock, ElseBlock);
968 }
969
970 // Emit the 'then' code.
971 EmitBlock(ThenBlock);
974 else
976 {
977 RunCleanupsScope ThenScope(*this);
978 EmitStmt(S.getThen());
979 }
980 EmitBranch(ContBlock);
981
982 // Emit the 'else' code if present.
983 if (Else) {
984 {
985 // There is no need to emit line number for an unconditional branch.
986 auto NL = ApplyDebugLocation::CreateEmpty(*this);
987 EmitBlock(ElseBlock);
988 }
989 // When single byte coverage mode is enabled, add a counter to else block.
992 {
993 RunCleanupsScope ElseScope(*this);
994 EmitStmt(Else);
995 }
996 {
997 // There is no need to emit line number for an unconditional branch.
998 auto NL = ApplyDebugLocation::CreateEmpty(*this);
999 EmitBranch(ContBlock);
1000 }
1001 }
1002
1003 // Emit the continuation block for code after the if.
1004 EmitBlock(ContBlock, true);
1005
1006 // When single byte coverage mode is enabled, add a counter to continuation
1007 // block.
1010}
1011
1012bool CodeGenFunction::checkIfLoopMustProgress(const Expr *ControllingExpression,
1013 bool HasEmptyBody) {
 // Returns true when the loop being emitted must be assumed to make forward
 // progress under the active language/codegen rules.
 // NOTE(review): this scraped listing dropped original lines 1015, 1023 and
 // 1042-1043; see the inline notes below and confirm against upstream.
1014 if (CGM.getCodeGenOpts().getFiniteLoops() ==
 // NOTE(review): missing line here — the FiniteLoopsKind enumerator being
 // compared against (presumably the "never finite" setting).
1016 return false;
1017
1018 // Now apply rules for plain C (see 6.8.5.6 in C11).
1019 // Loops with constant conditions do not have to make progress in any C
1020 // version.
1021 // As an extension, we consider loops whose constant expression
1022 // can be constant-folded.
 // NOTE(review): missing line here — presumably the declaration of
 // 'Result' (an Expr::EvalResult) used by the two statements below.
1024 bool CondIsConstInt =
1025 !ControllingExpression ||
1026 (ControllingExpression->EvaluateAsInt(Result, getContext()) &&
1027 Result.Val.isInt());
1028
 // A null controlling expression (e.g. 'for(;;)') is treated as a constant
 // true condition; otherwise use the constant-folded value.
1029 bool CondIsTrue = CondIsConstInt && (!ControllingExpression ||
1030 Result.Val.getInt().getBoolValue());
1031
1032 // Loops with non-constant conditions must make progress in C11 and later.
1033 if (getLangOpts().C11 && !CondIsConstInt)
1034 return true;
1035
1036 // [C++26][intro.progress] (DR)
1037 // The implementation may assume that any thread will eventually do one of the
1038 // following:
1039 // [...]
1040 // - continue execution of a trivial infinite loop ([stmt.iter.general]).
1041 if (CGM.getCodeGenOpts().getFiniteLoops() ==
 // NOTE(review): missing line(s) here — the FiniteLoopsKind enumerator for
 // this comparison.
1044 if (HasEmptyBody && CondIsTrue) {
 // A trivial infinite loop is permitted; drop the mustprogress attribute
 // so the optimizer does not delete the loop.
1045 CurFn->removeFnAttr(llvm::Attribute::MustProgress);
1046 return false;
1047 }
1048 return true;
1049 }
1050 return false;
1051}
1052
1053// [C++26][stmt.iter.general] (DR)
1054// A trivially empty iteration statement is an iteration statement matching one
1055// of the following forms:
1056// - while ( expression ) ;
1057// - while ( expression ) { }
1058// - do ; while ( expression ) ;
1059// - do { } while ( expression ) ;
1060// - for ( init-statement expression(opt); ) ;
1061// - for ( init-statement expression(opt); ) { }
1062template <typename LoopStmt> static bool hasEmptyLoopBody(const LoopStmt &S) {
1063 if constexpr (std::is_same_v<LoopStmt, ForStmt>) {
1064 if (S.getInc())
1065 return false;
1066 }
1067 const Stmt *Body = S.getBody();
1068 if (!Body || isa<NullStmt>(Body))
1069 return true;
1070 if (const CompoundStmt *Compound = dyn_cast<CompoundStmt>(Body))
1071 return Compound->body_empty();
1072 return false;
1073}
1074
1076 ArrayRef<const Attr *> WhileAttrs) {
1077 // Emit the header for the loop, which will also become
1078 // the continue target.
1079 JumpDest LoopHeader = getJumpDestInCurrentScope("while.cond");
1080 EmitBlock(LoopHeader.getBlock());
1081
1082 if (CGM.shouldEmitConvergenceTokens())
1083 ConvergenceTokenStack.push_back(
1084 emitConvergenceLoopToken(LoopHeader.getBlock()));
1085
1086 // Create an exit block for when the condition fails, which will
1087 // also become the break target.
1089
1090 // Store the blocks to use for break and continue.
1091 BreakContinueStack.push_back(BreakContinue(S, LoopExit, LoopHeader));
1092
1093 // C++ [stmt.while]p2:
1094 // When the condition of a while statement is a declaration, the
1095 // scope of the variable that is declared extends from its point
1096 // of declaration (3.3.2) to the end of the while statement.
1097 // [...]
1098 // The object created in a condition is destroyed and created
1099 // with each iteration of the loop.
1100 RunCleanupsScope ConditionScope(*this);
1101
1102 if (S.getConditionVariable())
1104
1105 // Evaluate the conditional in the while header. C99 6.8.5.1: The
1106 // evaluation of the controlling expression takes place before each
1107 // execution of the loop body.
1108 llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
1109
1111
1112 // while(1) is common, avoid extra exit blocks. Be sure
1113 // to correctly handle break/continue though.
1114 llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal);
1115 bool EmitBoolCondBranch = !C || !C->isOne();
1116 const SourceRange &R = S.getSourceRange();
1117 LoopStack.push(LoopHeader.getBlock(), CGM.getContext(), CGM.getCodeGenOpts(),
1118 WhileAttrs, SourceLocToDebugLoc(R.getBegin()),
1121
1122 // When single byte coverage mode is enabled, add a counter to loop condition.
1125
1126 // As long as the condition is true, go to the loop body.
1127 llvm::BasicBlock *LoopBody = createBasicBlock("while.body");
1128 if (EmitBoolCondBranch) {
1129 llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
1130 if (ConditionScope.requiresCleanups())
1131 ExitBlock = createBasicBlock("while.exit");
1132 llvm::MDNode *Weights =
1133 createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
1134 if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
1135 BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
1136 BoolCondVal, Stmt::getLikelihood(S.getBody()));
1137 auto *I = Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock, Weights);
1138 // Key Instructions: Emit the condition and branch as separate source
1139 // location atoms otherwise we may omit a step onto the loop condition in
1140 // favour of the `while` keyword.
1141 // FIXME: We could have the branch as the backup location for the condition,
1142 // which would probably be a better experience. Explore this later.
1143 if (auto *CondI = dyn_cast<llvm::Instruction>(BoolCondVal))
1144 addInstToNewSourceAtom(CondI, nullptr);
1145 addInstToNewSourceAtom(I, nullptr);
1146
1147 if (ExitBlock != LoopExit.getBlock()) {
1148 EmitBlock(ExitBlock);
1150 }
1151 } else if (const Attr *A = Stmt::getLikelihoodAttr(S.getBody())) {
1152 CGM.getDiags().Report(A->getLocation(),
1153 diag::warn_attribute_has_no_effect_on_infinite_loop)
1154 << A << A->getRange();
1155 CGM.getDiags().Report(
1156 S.getWhileLoc(),
1157 diag::note_attribute_has_no_effect_on_infinite_loop_here)
1159 }
1160
1161 // Emit the loop body. We have to emit this in a cleanup scope
1162 // because it might be a singleton DeclStmt.
1163 {
1164 RunCleanupsScope BodyScope(*this);
1165 EmitBlock(LoopBody);
1166 // When single byte coverage mode is enabled, add a counter to the body.
1169 else
1171 EmitStmt(S.getBody());
1172 }
1173
1174 BreakContinueStack.pop_back();
1175
1176 // Immediately force cleanup.
1177 ConditionScope.ForceCleanup();
1178
1179 EmitStopPoint(&S);
1180 // Branch to the loop header again.
1181 EmitBranch(LoopHeader.getBlock());
1182
1183 LoopStack.pop();
1184
1185 // Emit the exit block.
1186 EmitBlock(LoopExit.getBlock(), true);
1187
1188 // The LoopHeader typically is just a branch if we skipped emitting
1189 // a branch, try to erase it.
1190 if (!EmitBoolCondBranch)
1191 SimplifyForwardingBlocks(LoopHeader.getBlock());
1192
1193 // When single byte coverage mode is enabled, add a counter to continuation
1194 // block.
1197
1198 if (CGM.shouldEmitConvergenceTokens())
1199 ConvergenceTokenStack.pop_back();
1200}
1201
                                  ArrayRef<const Attr *> DoAttrs) {
  JumpDest LoopCond = getJumpDestInCurrentScope("do.cond");

  // Snapshot the parent region's count; used below to compute the backedge
  // weight for profile-guided branch weights.
  uint64_t ParentCount = getCurrentProfileCount();

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(S, LoopExit, LoopCond));

  // Emit the body of the loop.
  llvm::BasicBlock *LoopBody = createBasicBlock("do.body");

    EmitBlockWithFallThrough(LoopBody, S.getBody());
  else
    EmitBlockWithFallThrough(LoopBody, &S);

  if (CGM.shouldEmitConvergenceTokens())
    ConvergenceTokenStack.push_back(emitConvergenceLoopToken(LoopBody));

  // Emit the body in its own cleanup scope because it might be a singleton
  // DeclStmt whose cleanups must run every iteration.
  {
    RunCleanupsScope BodyScope(*this);
    EmitStmt(S.getBody());
  }

  EmitBlock(LoopCond.getBlock());
  // When single byte coverage mode is enabled, add a counter to loop condition.

  // C99 6.8.5.2: "The evaluation of the controlling expression takes place
  // after each execution of the loop body."

  // Evaluate the conditional in the while header.
  // C99 6.8.5p2/p4: The first substatement is executed if the expression
  // compares unequal to 0. The condition must be a scalar type.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());

  BreakContinueStack.pop_back();

  // "do {} while (0)" is common in macros, avoid extra blocks. Be sure
  // to correctly handle break/continue though.
  llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal);
  bool EmitBoolCondBranch = !C || !C->isZero();

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(LoopBody, CGM.getContext(), CGM.getCodeGenOpts(), DoAttrs,

  // As long as the condition is true, iterate the loop.
  if (EmitBoolCondBranch) {
    uint64_t BackedgeCount = getProfileCount(S.getBody()) - ParentCount;
    auto *I = Builder.CreateCondBr(
        BoolCondVal, LoopBody, LoopExit.getBlock(),
        createProfileWeightsForLoop(S.getCond(), BackedgeCount));

    // Key Instructions: Emit the condition and branch as separate source
    // location atoms otherwise we may omit a step onto the loop condition in
    // favour of the closing brace.
    // FIXME: We could have the branch as the backup location for the condition,
    // which would probably be a better experience (no jumping to the brace).
    if (auto *CondI = dyn_cast<llvm::Instruction>(BoolCondVal))
      addInstToNewSourceAtom(CondI, nullptr);
    addInstToNewSourceAtom(I, nullptr);
  }

  LoopStack.pop();

  // Emit the exit block.
  EmitBlock(LoopExit.getBlock());

  // The DoCond block typically is just a branch if we skipped
  // emitting a branch, try to erase it.
  if (!EmitBoolCondBranch)

  // When single byte coverage mode is enabled, add a counter to continuation
  // block.

  if (CGM.shouldEmitConvergenceTokens())
    ConvergenceTokenStack.pop_back();
}
1289
                                  ArrayRef<const Attr *> ForAttrs) {

  // Lexical scope covering the whole statement, including init-statement
  // declarations; cleanups registered here run when the loop is exited.
  std::optional<LexicalScope> ForScope;
    ForScope.emplace(*this, S.getSourceRange());

  // Evaluate the first part before the loop.
  if (S.getInit())
    EmitStmt(S.getInit());

  // Start the loop with a block that tests the condition.
  // If there's an increment, the continue scope will be overwritten
  // later.
  JumpDest CondDest = getJumpDestInCurrentScope("for.cond");
  llvm::BasicBlock *CondBlock = CondDest.getBlock();
  EmitBlock(CondBlock);

  if (CGM.shouldEmitConvergenceTokens())
    ConvergenceTokenStack.push_back(emitConvergenceLoopToken(CondBlock));

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), ForAttrs,

  // Create a cleanup scope for the condition variable cleanups.
  LexicalScope ConditionScope(*this, S.getSourceRange());

  // If the for loop doesn't have an increment we can just use the condition as
  // the continue block. Otherwise, if there is no condition variable, we can
  // form the continue block now. If there is a condition variable, we can't
  // form the continue block until after we've emitted the condition, because
  // the condition is in scope in the increment, but Sema's jump diagnostics
  // ensure that there are no continues from the condition variable that jump
  // to the loop increment.
  JumpDest Continue;
  if (!S.getInc())
    Continue = CondDest;
  else if (!S.getConditionVariable())
    Continue = getJumpDestInCurrentScope("for.inc");
  BreakContinueStack.push_back(BreakContinue(S, LoopExit, Continue));

  if (S.getCond()) {
    // If the for statement has a condition scope, emit the local variable
    // declaration.
    if (S.getConditionVariable()) {

      // We have entered the condition variable's scope, so we're now able to
      // jump to the continue block.
      Continue = S.getInc() ? getJumpDestInCurrentScope("for.inc") : CondDest;
      BreakContinueStack.back().ContinueBlock = Continue;
    }

    // When single byte coverage mode is enabled, add a counter to loop
    // condition.

    llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
    // If there are any cleanups between here and the loop-exit scope,
    // create a block to stage a loop exit along.
    if (ForScope && ForScope->requiresCleanups())
      ExitBlock = createBasicBlock("for.cond.cleanup");

    // As long as the condition is true, iterate the loop.
    llvm::BasicBlock *ForBody = createBasicBlock("for.body");

    // C99 6.8.5p2/p4: The first substatement is executed if the expression
    // compares unequal to 0. The condition must be a scalar type.
    llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());

    llvm::MDNode *Weights =
        createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
    if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
      BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
          BoolCondVal, Stmt::getLikelihood(S.getBody()));

    auto *I = Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock, Weights);
    // Key Instructions: Emit the condition and branch as separate atoms to
    // match existing loop stepping behaviour. FIXME: We could have the branch
    // as the backup location for the condition, which would probably be a
    // better experience (no jumping to the brace).
    if (auto *CondI = dyn_cast<llvm::Instruction>(BoolCondVal))
      addInstToNewSourceAtom(CondI, nullptr);
    addInstToNewSourceAtom(I, nullptr);

    if (ExitBlock != LoopExit.getBlock()) {
      EmitBlock(ExitBlock);
    }

    EmitBlock(ForBody);
  } else {
    // Treat it as a non-zero constant. Don't even create a new block for the
    // body, just fall into it.
  }

  // When single byte coverage mode is enabled, add a counter to the body.
  else
  {
    // Create a separate cleanup scope for the body, in case it is not
    // a compound statement.
    RunCleanupsScope BodyScope(*this);
    EmitStmt(S.getBody());
  }

  // The last block in the loop's body (which unconditionally branches to the
  // `inc` block if there is one).
  auto *FinalBodyBB = Builder.GetInsertBlock();

  // If there is an increment, emit it next.
  if (S.getInc()) {
    EmitBlock(Continue.getBlock());
    EmitStmt(S.getInc());
  }

  BreakContinueStack.pop_back();

  ConditionScope.ForceCleanup();

  EmitStopPoint(&S);
  EmitBranch(CondBlock);

  if (ForScope)
    ForScope->ForceCleanup();

  LoopStack.pop();

  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock(), true);

  // When single byte coverage mode is enabled, add a counter to continuation
  // block.

  if (CGM.shouldEmitConvergenceTokens())
    ConvergenceTokenStack.pop_back();

  if (FinalBodyBB) {
    // Key Instructions: We want the for closing brace to be step-able on to
    // match existing behaviour.
    addInstToNewSourceAtom(FinalBodyBB->getTerminator(), nullptr);
  }
}
1446
/// Emit a C++ range-based for statement: the condition is re-tested in
/// "for.cond" each iteration, the loop variable lives in the body's lexical
/// scope, and 'continue' branches to the "for.inc" block.
void
                                     ArrayRef<const Attr *> ForAttrs) {

  LexicalScope ForScope(*this, S.getSourceRange());

  // Evaluate the first pieces before the loop.
  if (S.getInit())
    EmitStmt(S.getInit());
    EmitStmt(S.getEndStmt());

  // Start the loop with a block that tests the condition.
  // If there's an increment, the continue scope will be overwritten
  // later.
  llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
  EmitBlock(CondBlock);

  if (CGM.shouldEmitConvergenceTokens())
    ConvergenceTokenStack.push_back(emitConvergenceLoopToken(CondBlock));

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), ForAttrs,

  // If there are any cleanups between here and the loop-exit scope,
  // create a block to stage a loop exit along.
  llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
  if (ForScope.requiresCleanups())
    ExitBlock = createBasicBlock("for.cond.cleanup");

  // The loop body, consisting of the specified body and the loop variable.
  llvm::BasicBlock *ForBody = createBasicBlock("for.body");

  // The body is executed if the expression, contextually converted
  // to bool, is true.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
  llvm::MDNode *Weights =
      createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
  if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
    BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
        BoolCondVal, Stmt::getLikelihood(S.getBody()));
  auto *I = Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock, Weights);
  // Key Instructions: Emit the condition and branch as separate atoms to
  // match existing loop stepping behaviour. FIXME: We could have the branch as
  // the backup location for the condition, which would probably be a better
  // experience.
  if (auto *CondI = dyn_cast<llvm::Instruction>(BoolCondVal))
    addInstToNewSourceAtom(CondI, nullptr);
  addInstToNewSourceAtom(I, nullptr);

  if (ExitBlock != LoopExit.getBlock()) {
    EmitBlock(ExitBlock);
  }

  EmitBlock(ForBody);
  else

  // Create a block for the increment. In case of a 'continue', we jump there.
  JumpDest Continue = getJumpDestInCurrentScope("for.inc");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(S, LoopExit, Continue));

  {
    // Create a separate cleanup scope for the loop variable and body.
    LexicalScope BodyScope(*this, S.getSourceRange());
    EmitStmt(S.getBody());
  }
  // The last block in the loop's body (which unconditionally branches to the
  // `inc` block if there is one).
  auto *FinalBodyBB = Builder.GetInsertBlock();

  EmitStopPoint(&S);
  // If there is an increment, emit it next.
  EmitBlock(Continue.getBlock());
  EmitStmt(S.getInc());

  BreakContinueStack.pop_back();

  EmitBranch(CondBlock);

  ForScope.ForceCleanup();

  LoopStack.pop();

  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock(), true);

  // When single byte coverage mode is enabled, add a counter to continuation
  // block.

  if (CGM.shouldEmitConvergenceTokens())
    ConvergenceTokenStack.pop_back();

  if (FinalBodyBB) {
    // We want the for closing brace to be step-able on to match existing
    // behaviour.
    addInstToNewSourceAtom(FinalBodyBB->getTerminator(), nullptr);
  }
}
1558
/// Store an already-computed RValue of type \p Ty into the function's return
/// slot (ReturnValue), dispatching on the value's kind: a scalar is stored
/// directly; an aggregate is copied through an LValue for the return slot;
/// otherwise the remaining branch handles the last RValue kind (presumably
/// complex — TODO confirm, the branch body was elided in this listing).
void CodeGenFunction::EmitReturnOfRValue(RValue RV, QualType Ty) {
  if (RV.isScalar()) {
    Builder.CreateStore(RV.getScalarVal(), ReturnValue);
  } else if (RV.isAggregate()) {
    LValue Dest = MakeAddrLValue(ReturnValue, Ty);
  } else {
                     /*init*/ true);
  }
}
1572
1573namespace {
1574// RAII struct used to save and restore a return statment's result expression.
1575struct SaveRetExprRAII {
1576 SaveRetExprRAII(const Expr *RetExpr, CodeGenFunction &CGF)
1577 : OldRetExpr(CGF.RetExpr), CGF(CGF) {
1578 CGF.RetExpr = RetExpr;
1579 }
1580 ~SaveRetExprRAII() { CGF.RetExpr = OldRetExpr; }
1581 const Expr *OldRetExpr;
1582 CodeGenFunction &CGF;
1583};
1584} // namespace
1585
1586/// Determine if the given call uses the swiftasync calling convention.
1587static bool isSwiftAsyncCallee(const CallExpr *CE) {
1588 auto calleeQualType = CE->getCallee()->getType();
1589 const FunctionType *calleeType = nullptr;
1590 if (calleeQualType->isFunctionPointerType() ||
1591 calleeQualType->isFunctionReferenceType() ||
1592 calleeQualType->isBlockPointerType() ||
1593 calleeQualType->isMemberFunctionPointerType()) {
1594 calleeType = calleeQualType->getPointeeType()->castAs<FunctionType>();
1595 } else if (auto *ty = dyn_cast<FunctionType>(calleeQualType)) {
1596 calleeType = ty;
1597 } else if (auto CMCE = dyn_cast<CXXMemberCallExpr>(CE)) {
1598 if (auto methodDecl = CMCE->getMethodDecl()) {
1599 // getMethodDecl() doesn't handle member pointers at the moment.
1600 calleeType = methodDecl->getType()->castAs<FunctionType>();
1601 } else {
1602 return false;
1603 }
1604 } else {
1605 return false;
1606 }
1607 return calleeType->getCallConv() == CallingConv::CC_SwiftAsync;
1608}
1609
/// EmitReturnStmt - Note that due to GCC extensions, this can have an operand
/// if the function returns void, or may be missing one if the function returns
/// non-void. Fun stuff :).
  if (requiresReturnValueCheck()) {
    // Stash this return's source location into ReturnLocation (a private,
    // sanitizer-exempt global) so a later return-value check can report
    // which return statement was taken.
    llvm::Constant *SLoc = EmitCheckSourceLocation(S.getBeginLoc());
    auto *SLocPtr =
        new llvm::GlobalVariable(CGM.getModule(), SLoc->getType(), false,
                                 llvm::GlobalVariable::PrivateLinkage, SLoc);
    SLocPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
    CGM.getSanitizerMetadata()->disableSanitizerForGlobal(SLocPtr);
    assert(ReturnLocation.isValid() && "No valid return location");
    Builder.CreateStore(SLocPtr, ReturnLocation);
  }

  // Returning from an outlined SEH helper is UB, and we already warn on it.
  if (IsOutlinedSEHHelper) {
    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();
  }

  // Emit the result value, even if unused, to evaluate the side effects.
  const Expr *RV = S.getRetValue();

  // Record the result expression of the return statement. The recorded
  // expression is used to determine whether a block capture's lifetime should
  // end at the end of the full expression as opposed to the end of the scope
  // enclosing the block expression.
  //
  // This permits a small, easily-implemented exception to our over-conservative
  // rules about not jumping to statements following block literals with
  // non-trivial cleanups.
  SaveRetExprRAII SaveRetExpr(RV, *this);

  RunCleanupsScope cleanupScope(*this);
  if (const auto *EWC = dyn_cast_or_null<ExprWithCleanups>(RV))
    RV = EWC->getSubExpr();

  // If we're in a swiftasynccall function, and the return expression is a
  // call to a swiftasynccall function, mark the call as the musttail call.
  std::optional<llvm::SaveAndRestore<const CallExpr *>> SaveMustTail;
  if (RV && CurFnInfo &&
      CurFnInfo->getASTCallingConvention() == CallingConv::CC_SwiftAsync) {
    if (auto CE = dyn_cast<CallExpr>(RV)) {
      if (isSwiftAsyncCallee(CE)) {
        SaveMustTail.emplace(MustTailCall, CE);
      }
    }
  }

  // FIXME: Clean this up by using an LValue for ReturnTemp,
  // EmitStoreThroughLValue, and EmitAnyExpr.
  // Check if the NRVO candidate was not globalized in OpenMP mode.
  if (getLangOpts().ElideConstructors && S.getNRVOCandidate() &&
      (!getLangOpts().OpenMP ||
       !CGM.getOpenMPRuntime()
            .getAddressOfLocalVariable(*this, S.getNRVOCandidate())
            .isValid())) {
    // Apply the named return value optimization for this return statement,
    // which means doing nothing: the appropriate result has already been
    // constructed into the NRVO variable.

    // If there is an NRVO flag for this variable, set it to 1 to indicate
    // that the cleanup code should not destroy the variable.
    if (llvm::Value *NRVOFlag = NRVOFlags[S.getNRVOCandidate()])
      Builder.CreateFlagStore(Builder.getTrue(), NRVOFlag);
  } else if (!ReturnValue.isValid() || (RV && RV->getType()->isVoidType())) {
    // Make sure not to return anything, but evaluate the expression
    // for side effects.
    if (RV) {
      EmitAnyExpr(RV);
    }
  } else if (!RV) {
    // Do nothing (return value is left uninitialized)
  } else if (FnRetTy->isReferenceType()) {
    // If this function returns a reference, take the address of the expression
    // rather than the value.
    auto *I = Builder.CreateStore(Result.getScalarVal(), ReturnValue);
    addInstToCurrentSourceAtom(I, I->getValueOperand());
  } else {
    // Store the return value according to its evaluation kind.
    switch (getEvaluationKind(RV->getType())) {
    case TEK_Scalar: {
      llvm::Value *Ret = EmitScalarExpr(RV);
      if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect) {
                         /*isInit*/ true);
      } else {
        auto *I = Builder.CreateStore(Ret, ReturnValue);
        addInstToCurrentSourceAtom(I, I->getValueOperand());
      }
      break;
    }
    case TEK_Complex:
                                /*isInit*/ true);
      break;
    case TEK_Aggregate:
      break;
    }
  }

  // Bookkeeping for -stats: count total return expressions and those that
  // are trivially evaluable.
  ++NumReturnExprs;
  if (!RV || RV->isEvaluatable(getContext()))
    ++NumSimpleReturnExprs;

  cleanupScope.ForceCleanup();
}
1727
  // As long as debug info is modeled with instructions, we have to ensure we
  // have a place to insert here and write the stop point here.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  // Emit every declaration in the statement; EvaluateConditionDecl=true also
  // evaluates any associated condition declaration (see EmitDecl).
  for (const auto *I : S.decls())
    EmitDecl(*I, /*EvaluateConditionDecl=*/true);
}
1737
    -> const BreakContinue * {
  // An unlabeled break/continue binds to the innermost enclosing loop or
  // switch, i.e. the top of the BreakContinueStack.
  if (!S.hasLabelTarget())
    return &BreakContinueStack.back();

  // Labeled form: search from the innermost scope outward for the entry
  // whose loop/switch statement matches the named target.
  const Stmt *LoopOrSwitch = S.getNamedLoopOrSwitch();
  assert(LoopOrSwitch && "break/continue target not set?");
  for (const BreakContinue &BC : llvm::reverse(BreakContinueStack))
    if (BC.LoopOrSwitch == LoopOrSwitch)
      return &BC;

  // Sema guarantees the named target is on the stack.
  llvm_unreachable("break/continue target not found");
}
1751
  assert(!BreakContinueStack.empty() && "break stmt not in a loop or switch!");

  // If this code is reachable then emit a stop point (if generating
  // debug info). We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

}
1764
  assert(!BreakContinueStack.empty() && "continue stmt not in a loop!");

  // If this code is reachable then emit a stop point (if generating
  // debug info). We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

}
1777
/// EmitCaseStmtRange - If case statement range is not too big then
/// add multiple cases to switch instruction, one for each value within
/// the range. If range is too big then emit "if" condition check.
                                        ArrayRef<const Attr *> Attrs) {
  assert(S.getRHS() && "Expected RHS value in CaseStmt");

  llvm::APSInt LHS = S.getLHS()->EvaluateKnownConstInt(getContext());
  llvm::APSInt RHS = S.getRHS()->EvaluateKnownConstInt(getContext());

  // Emit the code for this case. We do this first to make sure it is
  // properly chained from our predecessor before generating the
  // switch machinery to enter this block.
  llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
  EmitBlockWithFallThrough(CaseDest, &S);
  EmitStmt(S.getSubStmt());

  // If range is empty, do nothing.
  if (LHS.isSigned() ? RHS.slt(LHS) : RHS.ult(LHS))
    return;

  llvm::APInt Range = RHS - LHS;
  // FIXME: parameters such as this should not be hardcoded.
  if (Range.ult(llvm::APInt(Range.getBitWidth(), 64))) {
    // Range is small enough to add multiple switch instruction cases.
    uint64_t Total = getProfileCount(&S);
    unsigned NCases = Range.getZExtValue() + 1;
    // We only have one region counter for the entire set of cases here, so we
    // need to divide the weights evenly between the generated cases, ensuring
    // that the total weight is preserved. E.g., a weight of 5 over three cases
    // will be distributed as weights of 2, 2, and 1.
    uint64_t Weight = Total / NCases, Rem = Total % NCases;
    for (unsigned I = 0; I != NCases; ++I) {
      if (SwitchWeights)
        SwitchWeights->push_back(Weight + (Rem ? 1 : 0));
      else if (SwitchLikelihood)
        SwitchLikelihood->push_back(LH);

      if (Rem)
        Rem--;
      SwitchInsn->addCase(Builder.getInt(LHS), CaseDest);
      ++LHS;
    }
    return;
  }

  // The range is too big. Emit "if" condition into a new block,
  // making sure to save and restore the current insertion point.
  llvm::BasicBlock *RestoreBB = Builder.GetInsertBlock();

  // Push this test onto the chain of range checks (which terminates
  // in the default basic block). The switch's default will be changed
  // to the top of this chain after switch emission is complete.
  llvm::BasicBlock *FalseDest = CaseRangeBlock;
  CaseRangeBlock = createBasicBlock("sw.caserange");

  CurFn->insert(CurFn->end(), CaseRangeBlock);
  Builder.SetInsertPoint(CaseRangeBlock);

  // Emit range check. (x - LHS) <=u (RHS - LHS) tests LHS <= x <= RHS with
  // a single unsigned comparison.
  llvm::Value *Diff =
      Builder.CreateSub(SwitchInsn->getCondition(), Builder.getInt(LHS));
  llvm::Value *Cond =
      Builder.CreateICmpULE(Diff, Builder.getInt(Range), "inbounds");

  llvm::MDNode *Weights = nullptr;
  if (SwitchWeights) {
    uint64_t ThisCount = getProfileCount(&S);
    uint64_t DefaultCount = (*SwitchWeights)[0];
    Weights = createProfileWeights(ThisCount, DefaultCount);

    // Since we're chaining the switch default through each large case range, we
    // need to update the weight for the default, ie, the first case, to include
    // this case.
    (*SwitchWeights)[0] += ThisCount;
  } else if (SwitchLikelihood)
    Cond = emitCondLikelihoodViaExpectIntrinsic(Cond, LH);

  Builder.CreateCondBr(Cond, CaseDest, FalseDest, Weights);

  // Restore the appropriate insertion point.
  if (RestoreBB)
    Builder.SetInsertPoint(RestoreBB);
  else
    Builder.ClearInsertionPoint();
}
1865
                                  ArrayRef<const Attr *> Attrs) {
  // If there is no enclosing switch instance that we're aware of, then this
  // case statement and its block can be elided. This situation only happens
  // when we've constant-folded the switch, are emitting the constant case,
  // and part of the constant case includes another case statement. For
  // instance: switch (4) { case 4: do { case 5: } while (1); }
  if (!SwitchInsn) {
    EmitStmt(S.getSubStmt());
    return;
  }

  // Handle case ranges.
  if (S.getRHS()) {
    EmitCaseStmtRange(S, Attrs);
    return;
  }

  llvm::ConstantInt *CaseVal =

  // Emit debuginfo for the case value if it is an enum value.
  const ConstantExpr *CE;
  if (auto ICE = dyn_cast<ImplicitCastExpr>(S.getLHS()))
    CE = dyn_cast<ConstantExpr>(ICE->getSubExpr());
  else
    CE = dyn_cast<ConstantExpr>(S.getLHS());
  if (CE) {
    if (auto DE = dyn_cast<DeclRefExpr>(CE->getSubExpr()))
      if (CGDebugInfo *Dbg = getDebugInfo())
        if (CGM.getCodeGenOpts().hasReducedDebugInfo())
          Dbg->EmitGlobalVariable(DE->getDecl(),
                                  APValue(llvm::APSInt(CaseVal->getValue())));
  }

  if (SwitchLikelihood)
    SwitchLikelihood->push_back(Stmt::getLikelihood(Attrs));

  // If the body of the case is just a 'break', try to not emit an empty block.
  // If we're profiling or we're not optimizing, leave the block in for better
  // debug and coverage analysis.
  if (!CGM.getCodeGenOpts().hasProfileClangInstr() &&
      CGM.getCodeGenOpts().OptimizationLevel > 0 &&
    JumpDest Block = BreakContinueStack.back().BreakBlock;

    // Only do this optimization if there are no cleanups that need emitting.
      if (SwitchWeights)
        SwitchWeights->push_back(getProfileCount(&S));
      SwitchInsn->addCase(CaseVal, Block.getBlock());

      // If there was a fallthrough into this case, make sure to redirect it to
      // the end of the switch as well.
      if (Builder.GetInsertBlock()) {
        Builder.CreateBr(Block.getBlock());
        Builder.ClearInsertionPoint();
      }
      return;
    }
  }

  llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
  EmitBlockWithFallThrough(CaseDest, &S);
  if (SwitchWeights)
    SwitchWeights->push_back(getProfileCount(&S));
  SwitchInsn->addCase(CaseVal, CaseDest);

  // Recursively emitting the statement is acceptable, but is not wonderful for
  // code where we have many case statements nested together, i.e.:
  //  case 1:
  //    case 2:
  //      case 3: etc.
  // Handling this recursively will create a new block for each case statement
  // that falls through to the next case which is IR intensive. It also causes
  // deep recursion which can run into stack depth limitations. Handle
  // sequential non-range case statements specially.
  //
  // TODO When the next case has a likelihood attribute the code returns to the
  // recursive algorithm. Maybe improve this case if it becomes common practice
  // to use a lot of attributes.
  const CaseStmt *CurCase = &S;
  const CaseStmt *NextCase = dyn_cast<CaseStmt>(S.getSubStmt());

  // Otherwise, iteratively add consecutive cases to this switch stmt.
  while (NextCase && NextCase->getRHS() == nullptr) {
    CurCase = NextCase;
    llvm::ConstantInt *CaseVal =
        Builder.getInt(CurCase->getLHS()->EvaluateKnownConstInt(getContext()));

    if (SwitchWeights)
      SwitchWeights->push_back(getProfileCount(NextCase));
    if (CGM.getCodeGenOpts().hasProfileClangInstr()) {
      CaseDest = createBasicBlock("sw.bb");
      EmitBlockWithFallThrough(CaseDest, CurCase);
    }
    // Since this loop is only executed when the CaseStmt has no attributes
    // use a hard-coded value.
    if (SwitchLikelihood)
      SwitchLikelihood->push_back(Stmt::LH_None);

    SwitchInsn->addCase(CaseVal, CaseDest);
    NextCase = dyn_cast<CaseStmt>(CurCase->getSubStmt());
  }

  // Generate a stop point for debug info if the case statement is
  // followed by a default statement. A fallthrough case before a
  // default case gets its own branch target.
  if (CurCase->getSubStmt()->getStmtClass() == Stmt::DefaultStmtClass)
    EmitStopPoint(CurCase);

  // Normal default recursion for non-cases.
  EmitStmt(CurCase->getSubStmt());
}
1980
                                     ArrayRef<const Attr *> Attrs) {
  // If there is no enclosing switch instance that we're aware of, then this
  // default statement can be elided. This situation only happens when we've
  // constant-folded the switch.
  if (!SwitchInsn) {
    EmitStmt(S.getSubStmt());
    return;
  }

  // The switch's default destination must still be empty here: only one
  // DefaultStmt may populate it (asserted below).
  llvm::BasicBlock *DefaultBlock = SwitchInsn->getDefaultDest();
  assert(DefaultBlock->empty() &&
         "EmitDefaultStmt: Default block already defined?");

  // The front slot of SwitchLikelihood holds the default destination's
  // likelihood (cf. (*SwitchWeights)[0] in EmitCaseStmtRange).
  if (SwitchLikelihood)
    SwitchLikelihood->front() = Stmt::getLikelihood(Attrs);

  EmitBlockWithFallThrough(DefaultBlock, &S);

  EmitStmt(S.getSubStmt());
}
2002
2003/// CollectStatementsForCase - Given the body of a 'switch' statement and a
2004/// constant value that is being switched on, see if we can dead code eliminate
2005/// the body of the switch to a simple series of statements to emit. Basically,
2006/// on a switch (5) we want to find these statements:
2007/// case 5:
2008/// printf(...); <--
2009/// ++i; <--
2010/// break;
2011///
2012/// and add them to the ResultStmts vector. If it is unsafe to do this
2013/// transformation (for example, one of the elided statements contains a label
2014/// that might be jumped to), return CSFC_Failure. If we handled it and 'S'
2015/// should include statements after it (e.g. the printf() line is a substmt of
2016/// the case) then return CSFC_FallThrough. If we handled it and found a break
2017/// statement, then return CSFC_Success.
2018///
2019/// If Case is non-null, then we are looking for the specified case, checking
2020/// that nothing we jump over contains labels. If Case is null, then we found
2021/// the case and are looking for the break.
2022///
2023/// If the recursive walk actually finds our Case, then we set FoundCase to
2024/// true.
2025///
// NOTE(review): the opening line of this definition (its return type and
// name, upstream line 2027) is missing from this extraction; the parameter
// list and body below are otherwise intact.
2028 const SwitchCase *Case,
2029 bool &FoundCase,
2030 SmallVectorImpl<const Stmt*> &ResultStmts) {
2031 // If this is a null statement, just succeed.
2032 if (!S)
2033 return Case ? CSFC_Success : CSFC_FallThrough;
2034
2035 // If this is the switchcase (case 4: or default) that we're looking for, then
2036 // we're in business. Just add the substatement.
2037 if (const SwitchCase *SC = dyn_cast<SwitchCase>(S)) {
2038 if (S == Case) {
2039 FoundCase = true;
2040 return CollectStatementsForCase(SC->getSubStmt(), nullptr, FoundCase,
2041 ResultStmts);
2042 }
2043
2044 // Otherwise, this is some other case or default statement, just ignore it.
2045 return CollectStatementsForCase(SC->getSubStmt(), Case, FoundCase,
2046 ResultStmts);
2047 }
2048
2049 // If we are in the live part of the code and we found our break statement,
2050 // return a success!
2051 if (!Case && isa<BreakStmt>(S))
2052 return CSFC_Success;
2053
2054 // If this is a switch statement, then it might contain the SwitchCase, the
2055 // break, or neither.
2056 if (const CompoundStmt *CS = dyn_cast<CompoundStmt>(S)) {
2057 // Handle this as two cases: we might be looking for the SwitchCase (if so
2058 // the skipped statements must be skippable) or we might already have it.
2059 CompoundStmt::const_body_iterator I = CS->body_begin(), E = CS->body_end();
2060 bool StartedInLiveCode = FoundCase;
2061 unsigned StartSize = ResultStmts.size();
2062
2063 // If we've not found the case yet, scan through looking for it.
2064 if (Case) {
2065 // Keep track of whether we see a skipped declaration. The code could be
2066 // using the declaration even if it is skipped, so we can't optimize out
2067 // the decl if the kept statements might refer to it.
2068 bool HadSkippedDecl = false;
2069
2070 // If we're looking for the case, just see if we can skip each of the
2071 // substatements.
2072 for (; Case && I != E; ++I) {
2073 HadSkippedDecl |= CodeGenFunction::mightAddDeclToScope(*I);
2074
2075 switch (CollectStatementsForCase(*I, Case, FoundCase, ResultStmts)) {
2076 case CSFC_Failure: return CSFC_Failure;
2077 case CSFC_Success:
2078 // A successful result means that either 1) that the statement doesn't
2079 // have the case and is skippable, or 2) does contain the case value
2080 // and also contains the break to exit the switch. In the later case,
2081 // we just verify the rest of the statements are elidable.
2082 if (FoundCase) {
2083 // If we found the case and skipped declarations, we can't do the
2084 // optimization.
2085 if (HadSkippedDecl)
2086 return CSFC_Failure;
2087
2088 for (++I; I != E; ++I)
2089 if (CodeGenFunction::ContainsLabel(*I, true))
2090 return CSFC_Failure;
2091 return CSFC_Success;
2092 }
2093 break;
2094 case CSFC_FallThrough:
2095 // If we have a fallthrough condition, then we must have found the
2096 // case started to include statements. Consider the rest of the
2097 // statements in the compound statement as candidates for inclusion.
2098 assert(FoundCase && "Didn't find case but returned fallthrough?");
2099 // We recursively found Case, so we're not looking for it anymore.
2100 Case = nullptr;
2101
2102 // If we found the case and skipped declarations, we can't do the
2103 // optimization.
2104 if (HadSkippedDecl)
2105 return CSFC_Failure;
2106 break;
2107 }
2108 }
2109
2110 if (!FoundCase)
2111 return CSFC_Success;
2112
2113 assert(!HadSkippedDecl && "fallthrough after skipping decl");
2114 }
2115
2116 // If we have statements in our range, then we know that the statements are
2117 // live and need to be added to the set of statements we're tracking.
2118 bool AnyDecls = false;
2119 for (; I != E; ++I) {
      // NOTE(review): upstream line 2120 is missing from this extraction; it
      // presumably updates AnyDecls for this statement (AnyDecls is read
      // below) -- verify against upstream CGStmt.cpp.
2121
2122 switch (CollectStatementsForCase(*I, nullptr, FoundCase, ResultStmts)) {
2123 case CSFC_Failure: return CSFC_Failure;
2124 case CSFC_FallThrough:
2125 // A fallthrough result means that the statement was simple and just
2126 // included in ResultStmt, keep adding them afterwards.
2127 break;
2128 case CSFC_Success:
2129 // A successful result means that we found the break statement and
2130 // stopped statement inclusion. We just ensure that any leftover stmts
2131 // are skippable and return success ourselves.
2132 for (++I; I != E; ++I)
2133 if (CodeGenFunction::ContainsLabel(*I, true))
2134 return CSFC_Failure;
2135 return CSFC_Success;
2136 }
2137 }
2138
2139 // If we're about to fall out of a scope without hitting a 'break;', we
2140 // can't perform the optimization if there were any decls in that scope
2141 // (we'd lose their end-of-lifetime).
2142 if (AnyDecls) {
2143 // If the entire compound statement was live, there's one more thing we
2144 // can try before giving up: emit the whole thing as a single statement.
2145 // We can do that unless the statement contains a 'break;'.
2146 // FIXME: Such a break must be at the end of a construct within this one.
2147 // We could emit this by just ignoring the BreakStmts entirely.
2148 if (StartedInLiveCode && !CodeGenFunction::containsBreak(S)) {
2149 ResultStmts.resize(StartSize);
2150 ResultStmts.push_back(S);
2151 } else {
2152 return CSFC_Failure;
2153 }
2154 }
2155
2156 return CSFC_FallThrough;
2157 }
2158
2159 // Okay, this is some other statement that we don't handle explicitly, like a
2160 // for statement or increment etc. If we are skipping over this statement,
2161 // just verify it doesn't have labels, which would make it invalid to elide.
2162 if (Case) {
2163 if (CodeGenFunction::ContainsLabel(S, true))
2164 return CSFC_Failure;
2165 return CSFC_Success;
2166 }
2167
2168 // Otherwise, we want to include this statement. Everything is cool with that
2169 // so long as it doesn't contain a break out of the switch we're in.
  // NOTE(review): upstream line 2170 is missing from this extraction; per the
  // comment above it presumably fails when the statement contains a 'break'
  // out of this switch -- verify against upstream CGStmt.cpp.
2171
2172 // Otherwise, everything is great. Include the statement and tell the caller
2173 // that we fall through and include the next statement as well.
2174 ResultStmts.push_back(S);
2175 return CSFC_FallThrough;
2176}
2177
2178/// FindCaseStatementsForValue - Find the case statement being jumped to and
2179/// then invoke CollectStatementsForCase to find the list of statements to emit
2180/// for a switch on constant. See the comment above CollectStatementsForCase
2181/// for more details.
///
/// Returns true when the statements to emit were fully collected into
/// ResultStmts (with ResultCase set to the case being jumped to); false when
/// the dead-code-elimination optimization must be abandoned.
// NOTE(review): the opening line of this definition (upstream 2182, carrying
// its return type, name, and first parameter) is missing from this
// extraction; the remaining parameters and the body are intact.
2183 const llvm::APSInt &ConstantCondValue,
2184 SmallVectorImpl<const Stmt*> &ResultStmts,
2185 ASTContext &C,
2186 const SwitchCase *&ResultCase) {
2187 // First step, find the switch case that is being branched to. We can do this
2188 // efficiently by scanning the SwitchCase list.
2189 const SwitchCase *Case = S.getSwitchCaseList();
2190 const DefaultStmt *DefaultCase = nullptr;
2191
2192 for (; Case; Case = Case->getNextSwitchCase()) {
2193 // It's either a default or case. Just remember the default statement in
2194 // case we're not jumping to any numbered cases.
2195 if (const DefaultStmt *DS = dyn_cast<DefaultStmt>(Case)) {
2196 DefaultCase = DS;
2197 continue;
2198 }
2199
2200 // Check to see if this case is the one we're looking for.
2201 const CaseStmt *CS = cast<CaseStmt>(Case);
2202 // Don't handle case ranges yet.
2203 if (CS->getRHS()) return false;
2204
2205 // If we found our case, remember it as 'case'.
2206 if (CS->getLHS()->EvaluateKnownConstInt(C) == ConstantCondValue)
2207 break;
2208 }
2209
2210 // If we didn't find a matching case, we use a default if it exists, or we
2211 // elide the whole switch body!
2212 if (!Case) {
2213 // It is safe to elide the body of the switch if it doesn't contain labels
2214 // etc. If it is safe, return successfully with an empty ResultStmts list.
2215 if (!DefaultCase)
      // NOTE(review): the statement guarded by this 'if' (upstream line 2216)
      // is missing from this extraction; per the comment above it presumably
      // returns whether the whole body is safe to elide (no labels) --
      // verify against upstream CGStmt.cpp.
2217 Case = DefaultCase;
2218 }
2219
2220 // Ok, we know which case is being jumped to, try to collect all the
2221 // statements that follow it. This can fail for a variety of reasons. Also,
2222 // check to see that the recursive walk actually found our case statement.
2223 // Insane cases like this can fail to find it in the recursive walk since we
2224 // don't handle every stmt kind:
2225 // switch (4) {
2226 // while (1) {
2227 // case 4: ...
2228 bool FoundCase = false;
2229 ResultCase = Case;
2230 return CollectStatementsForCase(S.getBody(), Case, FoundCase,
2231 ResultStmts) != CSFC_Failure &&
2232 FoundCase;
2233}
2234
/// Compute branch-weight values for a switch from [[likely]]/[[unlikely]]
/// annotations on its cases. Returns std::nullopt when there are fewer than
/// two branches, or when no likelihood attribute is used at all.
2235static std::optional<SmallVector<uint64_t, 16>>
// NOTE(review): the continuation line of this signature (upstream 2236,
// naming the function and its ArrayRef<Stmt::Likelihood> parameter, used
// below as 'Likelihoods') is missing from this extraction.
2237 // Are there enough branches to weight them?
2238 if (Likelihoods.size() <= 1)
2239 return std::nullopt;
2240
2241 uint64_t NumUnlikely = 0;
2242 uint64_t NumNone = 0;
2243 uint64_t NumLikely = 0;
2244 for (const auto LH : Likelihoods) {
2245 switch (LH) {
2246 case Stmt::LH_Unlikely:
2247 ++NumUnlikely;
2248 break;
2249 case Stmt::LH_None:
2250 ++NumNone;
2251 break;
2252 case Stmt::LH_Likely:
2253 ++NumLikely;
2254 break;
2255 }
2256 }
2257
2258 // Is there a likelihood attribute used?
2259 if (NumUnlikely == 0 && NumLikely == 0)
2260 return std::nullopt;
2261
2262 // When multiple cases share the same code they can be combined during
2263 // optimization. In that case the weights of the branch will be the sum of
2264 // the individual weights. Make sure the combined sum of all neutral cases
2265 // doesn't exceed the value of a single likely attribute.
2266 // The additions both avoid divisions by 0 and make sure the weights of None
2267 // don't exceed the weight of Likely.
2268 const uint64_t Likely = INT32_MAX / (NumLikely + 2);
2269 const uint64_t None = Likely / (NumNone + 1);
2270 const uint64_t Unlikely = 0;
// NOTE(review): the declaration of 'Result' (presumably a
// SmallVector<uint64_t, 16>, cf. the return type) is missing from this
// extraction here -- verify against upstream CGStmt.cpp.
2272
2273 Result.reserve(Likelihoods.size());
2274 for (const auto LH : Likelihoods) {
2275 switch (LH) {
2276 case Stmt::LH_Unlikely:
2277 Result.push_back(Unlikely);
2278 break;
2279 case Stmt::LH_None:
2280 Result.push_back(None);
2281 break;
2282 case Stmt::LH_Likely:
2283 Result.push_back(Likely);
2284 break;
2285 }
2286 }
2287
2288 return Result;
2289}
2290
// Emit LLVM IR for a switch statement: tries constant folding first so only
// the live case is emitted; otherwise builds a SwitchInst, emits the body,
// and attaches PGO- or likelihood-based branch weights.
// NOTE(review): the opening line of this definition (upstream 2291, the
// CodeGenFunction member signature taking the SwitchStmt 'S') is missing
// from this extraction; the body below is otherwise intact except where
// noted.
2292 // Handle nested switch statements.
2293 llvm::SwitchInst *SavedSwitchInsn = SwitchInsn;
2294 SmallVector<uint64_t, 16> *SavedSwitchWeights = SwitchWeights;
2295 SmallVector<Stmt::Likelihood, 16> *SavedSwitchLikelihood = SwitchLikelihood;
2296 llvm::BasicBlock *SavedCRBlock = CaseRangeBlock;
2297
2298 // See if we can constant fold the condition of the switch and therefore only
2299 // emit the live case statement (if any) of the switch.
2300 llvm::APSInt ConstantCondValue;
2301 if (ConstantFoldsToSimpleInteger(S.getCond(), ConstantCondValue)) {
    // NOTE(review): upstream line 2302 is missing from this extraction; it
    // presumably declares the 'CaseStmts' collection populated below --
    // verify against upstream CGStmt.cpp.
2303 const SwitchCase *Case = nullptr;
2304 if (FindCaseStatementsForValue(S, ConstantCondValue, CaseStmts,
2305 getContext(), Case)) {
2306 if (Case)
        // NOTE(review): the statement guarded by this 'if' (upstream line
        // 2307) is missing from this extraction -- verify against upstream.
2308 RunCleanupsScope ExecutedScope(*this);
2309
2310 if (S.getInit())
2311 EmitStmt(S.getInit());
2312
2313 // Emit the condition variable if needed inside the entire cleanup scope
2314 // used by this special case for constant folded switches.
2315 if (S.getConditionVariable())
2316 EmitDecl(*S.getConditionVariable(), /*EvaluateConditionDecl=*/true);
2317
2318 // At this point, we are no longer "within" a switch instance, so
2319 // we can temporarily enforce this to ensure that any embedded case
2320 // statements are not emitted.
2321 SwitchInsn = nullptr;
2322
2323 // Okay, we can dead code eliminate everything except this case. Emit the
2324 // specified series of statements and we're good.
2325 for (const Stmt *CaseStmt : CaseStmts)
        // NOTE(review): the loop body (upstream lines 2326-2327) is missing
        // from this extraction; it presumably emits each collected statement
        // -- verify against upstream CGStmt.cpp.
2328 PGO->markStmtMaybeUsed(S.getBody());
2329
2330 // Now we want to restore the saved switch instance so that nested
2331 // switches continue to function properly
2332 SwitchInsn = SavedSwitchInsn;
2333
2334 return;
2335 }
2336 }
2337
2338 JumpDest SwitchExit = getJumpDestInCurrentScope("sw.epilog");
2339
2340 RunCleanupsScope ConditionScope(*this);
2341
2342 if (S.getInit())
2343 EmitStmt(S.getInit());
2344
2345 if (S.getConditionVariable())
    // NOTE(review): the statement guarded by this 'if' (upstream line 2346)
    // is missing from this extraction; it presumably emits the condition
    // variable declaration -- verify against upstream CGStmt.cpp.
2347 llvm::Value *CondV = EmitScalarExpr(S.getCond());
  // NOTE(review): upstream line 2348 is also missing here -- verify.
2349
2350 // Create basic block to hold stuff that comes after switch
2351 // statement. We also need to create a default block now so that
2352 // explicit case ranges tests can have a place to jump to on
2353 // failure.
2354 llvm::BasicBlock *DefaultBlock = createBasicBlock("sw.default");
2355 SwitchInsn = Builder.CreateSwitch(CondV, DefaultBlock);
2356 addInstToNewSourceAtom(SwitchInsn, CondV);
2357
2358 if (HLSLControlFlowAttr != HLSLControlFlowHintAttr::SpellingNotCalculated) {
2359 llvm::MDBuilder MDHelper(CGM.getLLVMContext());
2360 llvm::ConstantInt *BranchHintConstant =
      // NOTE(review): the condition feeding this ternary (upstream line
      // 2361, comparing the hint against Microsoft_branch) is missing from
      // this extraction -- verify against upstream CGStmt.cpp.
2362 HLSLControlFlowHintAttr::Spelling::Microsoft_branch
2363 ? llvm::ConstantInt::get(CGM.Int32Ty, 1)
2364 : llvm::ConstantInt::get(CGM.Int32Ty, 2);
2365 llvm::Metadata *Vals[] = {MDHelper.createString("hlsl.controlflow.hint"),
2366 MDHelper.createConstant(BranchHintConstant)};
2367 SwitchInsn->setMetadata("hlsl.controlflow.hint",
2368 llvm::MDNode::get(CGM.getLLVMContext(), Vals));
2369 }
2370
2371 if (PGO->haveRegionCounts()) {
2372 // Walk the SwitchCase list to find how many there are.
2373 uint64_t DefaultCount = 0;
2374 unsigned NumCases = 0;
2375 for (const SwitchCase *Case = S.getSwitchCaseList();
2376 Case;
2377 Case = Case->getNextSwitchCase()) {
2378 if (isa<DefaultStmt>(Case))
2379 DefaultCount = getProfileCount(Case);
2380 NumCases += 1;
2381 }
2382 SwitchWeights = new SmallVector<uint64_t, 16>();
2383 SwitchWeights->reserve(NumCases);
2384 // The default needs to be first. We store the edge count, so we already
2385 // know the right weight.
2386 SwitchWeights->push_back(DefaultCount);
2387 } else if (CGM.getCodeGenOpts().OptimizationLevel) {
2388 SwitchLikelihood = new SmallVector<Stmt::Likelihood, 16>();
2389 // Initialize the default case.
2390 SwitchLikelihood->push_back(Stmt::LH_None);
2391 }
2392
2393 CaseRangeBlock = DefaultBlock;
2394
2395 // Clear the insertion point to indicate we are in unreachable code.
2396 Builder.ClearInsertionPoint();
2397
2398 // All break statements jump to NextBlock. If BreakContinueStack is non-empty
2399 // then reuse last ContinueBlock.
2400 JumpDest OuterContinue;
2401 if (!BreakContinueStack.empty())
2402 OuterContinue = BreakContinueStack.back().ContinueBlock;
2403
2404 BreakContinueStack.push_back(BreakContinue(S, SwitchExit, OuterContinue));
2405
2406 // Emit switch body.
2407 EmitStmt(S.getBody());
2408
2409 BreakContinueStack.pop_back();
2410
2411 // Update the default block in case explicit case range tests have
2412 // been chained on top.
2413 SwitchInsn->setDefaultDest(CaseRangeBlock);
2414
2415 // If a default was never emitted:
2416 if (!DefaultBlock->getParent()) {
2417 // If we have cleanups, emit the default block so that there's a
2418 // place to jump through the cleanups from.
2419 if (ConditionScope.requiresCleanups()) {
2420 EmitBlock(DefaultBlock);
2421
2422 // Otherwise, just forward the default block to the switch end.
2423 } else {
2424 DefaultBlock->replaceAllUsesWith(SwitchExit.getBlock());
2425 delete DefaultBlock;
2426 }
2427 }
2428
2429 ConditionScope.ForceCleanup();
2430
2431 // Emit continuation.
2432 EmitBlock(SwitchExit.getBlock(), true);
  // NOTE(review): upstream line 2433 is missing from this extraction --
  // verify against upstream CGStmt.cpp.
2434
2435 // If the switch has a condition wrapped by __builtin_unpredictable,
2436 // create metadata that specifies that the switch is unpredictable.
2437 // Don't bother if not optimizing because that metadata would not be used.
2438 auto *Call = dyn_cast<CallExpr>(S.getCond());
2439 if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) {
2440 auto *FD = dyn_cast_or_null<FunctionDecl>(Call->getCalleeDecl());
2441 if (FD && FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) {
2442 llvm::MDBuilder MDHelper(getLLVMContext());
2443 SwitchInsn->setMetadata(llvm::LLVMContext::MD_unpredictable,
2444 MDHelper.createUnpredictable());
2445 }
2446 }
2447
  // Attach branch-weight metadata: PGO counts take precedence; otherwise
  // fall back to [[likely]]/[[unlikely]]-derived weights (if any).
2448 if (SwitchWeights) {
2449 assert(SwitchWeights->size() == 1 + SwitchInsn->getNumCases() &&
2450 "switch weights do not match switch cases");
2451 // If there's only one jump destination there's no sense weighting it.
2452 if (SwitchWeights->size() > 1)
2453 SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof,
2454 createProfileWeights(*SwitchWeights));
2455 delete SwitchWeights;
2456 } else if (SwitchLikelihood) {
2457 assert(SwitchLikelihood->size() == 1 + SwitchInsn->getNumCases() &&
2458 "switch likelihoods do not match switch cases");
2459 std::optional<SmallVector<uint64_t, 16>> LHW =
2460 getLikelihoodWeights(*SwitchLikelihood);
2461 if (LHW) {
2462 llvm::MDBuilder MDHelper(CGM.getLLVMContext());
2463 SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof,
2464 createProfileWeights(*LHW));
2465 }
2466 delete SwitchLikelihood;
2467 }
  // Restore the enclosing switch's state (nested-switch support).
2468 SwitchInsn = SavedSwitchInsn;
2469 SwitchWeights = SavedSwitchWeights;
2470 SwitchLikelihood = SavedSwitchLikelihood;
2471 CaseRangeBlock = SavedCRBlock;
2472}
2473
/// SimplifyConstraint - Translate a GCC inline-asm constraint string into the
/// form LLVM expects: alternatives are separated with '|' instead of ',',
/// 'g' is expanded to "imr", modifier characters handled elsewhere by the
/// frontend ('*', '?', '!', '=', '+') are dropped, '#'-prefixed alternative
/// text is skipped, repeated '&'/'%' markers are collapsed to one, symbolic
/// operand names ("[name]") are resolved to operand indices, and any other
/// character is translated via TargetInfo::convertConstraint.
2474static std::string
2475SimplifyConstraint(const char *Constraint, const TargetInfo &Target,
// NOTE(review): the continuation line of this signature (upstream 2476,
// presumably the output-constraint list parameter named 'OutCons', cf. its
// use in the '[' case below) is missing from this extraction.
2477 std::string Result;
2478
2479 while (*Constraint) {
2480 switch (*Constraint) {
2481 default:
2482 Result += Target.convertConstraint(Constraint);
2483 break;
2484 // Ignore these
2485 case '*':
2486 case '?':
2487 case '!':
2488 case '=': // Will see this and the following in mult-alt constraints.
2489 case '+':
2490 break;
2491 case '#': // Ignore the rest of the constraint alternative.
2492 while (Constraint[1] && Constraint[1] != ',')
2493 Constraint++;
2494 break;
2495 case '&':
2496 case '%':
      // Keep a single copy of the marker; collapse consecutive repeats.
2497 Result += *Constraint;
2498 while (Constraint[1] && Constraint[1] == *Constraint)
2499 Constraint++;
2500 break;
2501 case ',':
2502 Result += "|";
2503 break;
2504 case 'g':
2505 Result += "imr";
2506 break;
2507 case '[': {
2508 assert(OutCons &&
2509 "Must pass output names to constraints with a symbolic name");
2510 unsigned Index;
2511 bool result = Target.resolveSymbolicName(Constraint, *OutCons, Index);
2512 assert(result && "Could not resolve symbolic name"); (void)result;
2513 Result += llvm::utostr(Index);
2514 break;
2515 }
2516 }
2517
2518 Constraint++;
2519 }
2520
2521 return Result;
2522}
2523
2524/// AddVariableConstraints - Look at AsmExpr and if it is a variable declared
2525/// as using a particular register add that as a constraint that will be used
2526/// in this asm stmt.
///
/// Returns the original constraint unchanged unless AsmExpr is a reference to
/// a 'register' variable with an asm label; in that case returns
/// "{regname}" ("&{regname}" for early-clobber) with the register name
/// canonicalized, and stores the name into *GCCReg when provided.
2527static std::string
2528AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr,
// NOTE(review): a parameter line of this signature (upstream 2529) is
// missing from this extraction; it presumably declares the 'Target' and
// 'CGM' parameters used in the body below -- verify against upstream.
2530 const AsmStmt &Stmt, const bool EarlyClobber,
2531 std::string *GCCReg = nullptr) {
2532 const DeclRefExpr *AsmDeclRef = dyn_cast<DeclRefExpr>(&AsmExpr);
2533 if (!AsmDeclRef)
2534 return Constraint;
2535 const ValueDecl &Value = *AsmDeclRef->getDecl();
2536 const VarDecl *Variable = dyn_cast<VarDecl>(&Value);
2537 if (!Variable)
2538 return Constraint;
2539 if (Variable->getStorageClass() != SC_Register)
2540 return Constraint;
2541 AsmLabelAttr *Attr = Variable->getAttr<AsmLabelAttr>();
2542 if (!Attr)
2543 return Constraint;
2544 StringRef Register = Attr->getLabel();
2545 assert(Target.isValidGCCRegisterName(Register));
2546 // We're using validateOutputConstraint here because we only care if
2547 // this is a register constraint.
2548 TargetInfo::ConstraintInfo Info(Constraint, "");
2549 if (Target.validateOutputConstraint(Info) &&
2550 !Info.allowsRegister()) {
2551 CGM.ErrorUnsupported(&Stmt, "__asm__");
2552 return Constraint;
2553 }
2554 // Canonicalize the register here before returning it.
2555 Register = Target.getNormalizedGCCRegisterName(Register);
2556 if (GCCReg != nullptr)
2557 *GCCReg = Register.str();
2558 return (EarlyClobber ? "&{" : "{") + Register.str() + "}";
2560
// Lower an lvalue used as an inline-asm input operand. Register operands are
// loaded and passed by value; small power-of-two-sized (or target-
// scalarizable) types are coerced to an integer of the same bit width.
// Anything else is passed indirectly: the constraint string gets a '*'
// marker and the operand is the address. The returned type is the element
// type for indirect operands, nullptr when passed by value.
2561std::pair<llvm::Value*, llvm::Type *> CodeGenFunction::EmitAsmInputLValue(
2562 const TargetInfo::ConstraintInfo &Info, LValue InputValue,
2563 QualType InputType, std::string &ConstraintStr, SourceLocation Loc) {
2564 if (Info.allowsRegister() || !Info.allowsMemory()) {
    // NOTE(review): upstream line 2565 is missing from this extraction; it
    // presumably guards the direct scalar-load return below to types with
    // scalar evaluation kind -- verify against upstream CGStmt.cpp.
2566 return {EmitLoadOfLValue(InputValue, Loc).getScalarVal(), nullptr};
2567
2568 llvm::Type *Ty = ConvertType(InputType);
2569 uint64_t Size = CGM.getDataLayout().getTypeSizeInBits(Ty);
2570 if ((Size <= 64 && llvm::isPowerOf2_64(Size)) ||
2571 getTargetHooks().isScalarizableAsmOperand(*this, Ty)) {
2572 Ty = llvm::IntegerType::get(getLLVMContext(), Size);
2573
2574 return {Builder.CreateLoad(InputValue.getAddress().withElementType(Ty)),
2575 nullptr};
2576 }
2577 }
2578
2579 Address Addr = InputValue.getAddress();
2580 ConstraintStr += '*';
2581 return {InputValue.getPointer(*this), Addr.getElementType()};
2582}
2583
// Lower an inline-asm input operand expression. Immediate-only constraints
// are evaluated to a constant integer; register operands are emitted as
// scalars; everything else is routed through EmitAsmInputLValue, which may
// turn the operand into an indirect ('*') operand.
2584std::pair<llvm::Value *, llvm::Type *>
2585CodeGenFunction::EmitAsmInput(const TargetInfo::ConstraintInfo &Info,
2586 const Expr *InputExpr,
2587 std::string &ConstraintStr) {
2588 // If this can't be a register or memory, i.e., has to be a constant
2589 // (immediate or symbolic), try to emit it as such.
2590 if (!Info.allowsRegister() && !Info.allowsMemory()) {
2591 if (Info.requiresImmediateConstant()) {
2592 Expr::EvalResult EVResult;
2593 InputExpr->EvaluateAsRValue(EVResult, getContext(), true);
2594
2595 llvm::APSInt IntResult;
2596 if (EVResult.Val.toIntegralConstant(IntResult, InputExpr->getType(),
2597 getContext()))
2598 return {llvm::ConstantInt::get(getLLVMContext(), IntResult), nullptr};
2599 }
2600
2601 Expr::EvalResult Result;
2602 if (InputExpr->EvaluateAsInt(Result, getContext()))
2603 return {llvm::ConstantInt::get(getLLVMContext(), Result.Val.getInt()),
2604 nullptr};
2605 }
2606
2607 if (Info.allowsRegister() || !Info.allowsMemory())
    // NOTE(review): upstream line 2608 is missing from this extraction; it
    // presumably adds a scalar-evaluation-kind condition guarding the
    // return below -- verify against upstream CGStmt.cpp.
2609 return {EmitScalarExpr(InputExpr), nullptr};
2610 if (InputExpr->getStmtClass() == Expr::CXXThisExprClass)
2611 return {EmitScalarExpr(InputExpr), nullptr};
2612 InputExpr = InputExpr->IgnoreParenNoopCasts(getContext());
2613 LValue Dest = EmitLValue(InputExpr);
2614 return EmitAsmInputLValue(Info, Dest, InputExpr->getType(), ConstraintStr,
2615 InputExpr->getExprLoc());
2616}
2617
2618/// getAsmSrcLocInfo - Return the !srcloc metadata node to attach to an inline
2619/// asm call instruction. The !srcloc MDNode contains a list of constant
2620/// integers which are the source locations of the start of each line in the
2621/// asm.
2622static llvm::MDNode *getAsmSrcLocInfo(const StringLiteral *Str,
2623 CodeGenFunction &CGF) {
// NOTE(review): upstream line 2624 is missing from this extraction; it
// presumably declares 'Locs' (a small vector of llvm::Metadata*, cf. the
// push_back calls below) -- verify against upstream CGStmt.cpp.
2625 // Add the location of the first line to the MDNode.
2626 Locs.push_back(llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
2627 CGF.Int64Ty, Str->getBeginLoc().getRawEncoding())));
2628 StringRef StrVal = Str->getString();
2629 if (!StrVal.empty()) {
    // NOTE(review): upstream line 2630 is missing from this extraction; it
    // presumably declares the SourceManager 'SM' used below -- verify.
2631 const LangOptions &LangOpts = CGF.CGM.getLangOpts();
2632 unsigned StartToken = 0;
2633 unsigned ByteOffset = 0;
2634
2635 // Add the location of the start of each subsequent line of the asm to the
2636 // MDNode.
    // The bound deliberately stops before the final byte: a trailing newline
    // starts no new line of asm text.
2637 for (unsigned i = 0, e = StrVal.size() - 1; i != e; ++i) {
2638 if (StrVal[i] != '\n') continue;
2639 SourceLocation LineLoc = Str->getLocationOfByte(
2640 i + 1, SM, LangOpts, CGF.getTarget(), &StartToken, &ByteOffset);
2641 Locs.push_back(llvm::ConstantAsMetadata::get(
2642 llvm::ConstantInt::get(CGF.Int64Ty, LineLoc.getRawEncoding())));
2643 }
2644 }
2645
2646 return llvm::MDNode::get(CGF.getLLVMContext(), Locs);
2647}
2648
2649static void UpdateAsmCallInst(llvm::CallBase &Result, bool HasSideEffect,
2650 bool HasUnwindClobber, bool ReadOnly,
2651 bool ReadNone, bool NoMerge, bool NoConvergent,
2652 const AsmStmt &S,
2653 const std::vector<llvm::Type *> &ResultRegTypes,
2654 const std::vector<llvm::Type *> &ArgElemTypes,
2655 CodeGenFunction &CGF,
2656 std::vector<llvm::Value *> &RegResults) {
2657 if (!HasUnwindClobber)
2658 Result.addFnAttr(llvm::Attribute::NoUnwind);
2659
2660 if (NoMerge)
2661 Result.addFnAttr(llvm::Attribute::NoMerge);
2662 // Attach readnone and readonly attributes.
2663 if (!HasSideEffect) {
2664 if (ReadNone)
2665 Result.setDoesNotAccessMemory();
2666 else if (ReadOnly)
2667 Result.setOnlyReadsMemory();
2668 }
2669
2670 // Add elementtype attribute for indirect constraints.
2671 for (auto Pair : llvm::enumerate(ArgElemTypes)) {
2672 if (Pair.value()) {
2673 auto Attr = llvm::Attribute::get(
2674 CGF.getLLVMContext(), llvm::Attribute::ElementType, Pair.value());
2675 Result.addParamAttr(Pair.index(), Attr);
2676 }
2677 }
2678
2679 // Slap the source location of the inline asm into a !srcloc metadata on the
2680 // call.
2681 const StringLiteral *SL;
2682 if (const auto *gccAsmStmt = dyn_cast<GCCAsmStmt>(&S);
2683 gccAsmStmt &&
2684 (SL = dyn_cast<StringLiteral>(gccAsmStmt->getAsmStringExpr()))) {
2685 Result.setMetadata("srcloc", getAsmSrcLocInfo(SL, CGF));
2686 } else {
2687 // At least put the line number on MS inline asm blobs and GCC asm constexpr
2688 // strings.
2689 llvm::Constant *Loc =
2690 llvm::ConstantInt::get(CGF.Int64Ty, S.getAsmLoc().getRawEncoding());
2691 Result.setMetadata("srcloc",
2692 llvm::MDNode::get(CGF.getLLVMContext(),
2693 llvm::ConstantAsMetadata::get(Loc)));
2694 }
2695
2696 // Make inline-asm calls Key for the debug info feature Key Instructions.
2697 CGF.addInstToNewSourceAtom(&Result, nullptr);
2698
2699 if (!NoConvergent && CGF.getLangOpts().assumeFunctionsAreConvergent())
2700 // Conservatively, mark all inline asm blocks in CUDA or OpenCL as
2701 // convergent (meaning, they may call an intrinsically convergent op, such
2702 // as bar.sync, and so can't have certain optimizations applied around
2703 // them) unless it's explicitly marked 'noconvergent'.
2704 Result.addFnAttr(llvm::Attribute::Convergent);
2705 // Extract all of the register value results from the asm.
2706 if (ResultRegTypes.size() == 1) {
2707 RegResults.push_back(&Result);
2708 } else {
2709 for (unsigned i = 0, e = ResultRegTypes.size(); i != e; ++i) {
2710 llvm::Value *Tmp = CGF.Builder.CreateExtractValue(&Result, i, "asmresult");
2711 RegResults.push_back(Tmp);
2712 }
2713 }
2714}
2715
// Store the register results of an inline asm back into their destination
// lvalues, converting between the asm's IR result types and the destination
// types (truncation, int<->pointer, bitcast) as needed, and emitting
// llvm.assume for flag-register outputs.
2716static void
// NOTE(review): the continuation line of this signature (upstream 2717,
// naming the function and its leading parameters -- a CodeGenFunction 'CGF'
// and the AsmStmt 'S' referenced in the body) is missing from this
// extraction; the remaining parameters and the body are intact.
2718 const llvm::ArrayRef<llvm::Value *> RegResults,
2719 const llvm::ArrayRef<llvm::Type *> ResultRegTypes,
2720 const llvm::ArrayRef<llvm::Type *> ResultTruncRegTypes,
2721 const llvm::ArrayRef<LValue> ResultRegDests,
2722 const llvm::ArrayRef<QualType> ResultRegQualTys,
2723 const llvm::BitVector &ResultTypeRequiresCast,
2724 const llvm::BitVector &ResultRegIsFlagReg) {
2725 CGBuilderTy &Builder = CGF.Builder;
2726 CodeGenModule &CGM = CGF.CGM;
2727 llvm::LLVMContext &CTX = CGF.getLLVMContext();
2728
2729 assert(RegResults.size() == ResultRegTypes.size());
2730 assert(RegResults.size() == ResultTruncRegTypes.size());
2731 assert(RegResults.size() == ResultRegDests.size());
2732 // ResultRegDests can be also populated by addReturnRegisterOutputs() above,
2733 // in which case its size may grow.
2734 assert(ResultTypeRequiresCast.size() <= ResultRegDests.size());
2735 assert(ResultRegIsFlagReg.size() <= ResultRegDests.size());
2736
2737 for (unsigned i = 0, e = RegResults.size(); i != e; ++i) {
2738 llvm::Value *Tmp = RegResults[i];
2739 llvm::Type *TruncTy = ResultTruncRegTypes[i];
2740
2741 if ((i < ResultRegIsFlagReg.size()) && ResultRegIsFlagReg[i]) {
2742 // Target must guarantee the Value `Tmp` here is lowered to a boolean
2743 // value.
      // The assume(Tmp < 2) below tells the optimizer the flag output is
      // always 0 or 1.
2744 llvm::Constant *Two = llvm::ConstantInt::get(Tmp->getType(), 2);
2745 llvm::Value *IsBooleanValue =
2746 Builder.CreateCmp(llvm::CmpInst::ICMP_ULT, Tmp, Two);
2747 llvm::Function *FnAssume = CGM.getIntrinsic(llvm::Intrinsic::assume);
2748 Builder.CreateCall(FnAssume, IsBooleanValue);
2749 }
2750
2751 // If the result type of the LLVM IR asm doesn't match the result type of
2752 // the expression, do the conversion.
2753 if (ResultRegTypes[i] != TruncTy) {
2754
2755 // Truncate the integer result to the right size, note that TruncTy can be
2756 // a pointer.
2757 if (TruncTy->isFloatingPointTy())
2758 Tmp = Builder.CreateFPTrunc(Tmp, TruncTy);
2759 else if (TruncTy->isPointerTy() && Tmp->getType()->isIntegerTy()) {
2760 uint64_t ResSize = CGM.getDataLayout().getTypeSizeInBits(TruncTy);
2761 Tmp = Builder.CreateTrunc(
2762 Tmp, llvm::IntegerType::get(CTX, (unsigned)ResSize));
2763 Tmp = Builder.CreateIntToPtr(Tmp, TruncTy);
2764 } else if (Tmp->getType()->isPointerTy() && TruncTy->isIntegerTy()) {
2765 uint64_t TmpSize =
2766 CGM.getDataLayout().getTypeSizeInBits(Tmp->getType());
2767 Tmp = Builder.CreatePtrToInt(
2768 Tmp, llvm::IntegerType::get(CTX, (unsigned)TmpSize));
2769 Tmp = Builder.CreateTrunc(Tmp, TruncTy);
2770 } else if (Tmp->getType()->isIntegerTy() && TruncTy->isIntegerTy()) {
2771 Tmp = Builder.CreateZExtOrTrunc(Tmp, TruncTy);
2772 } else if (Tmp->getType()->isVectorTy() || TruncTy->isVectorTy()) {
2773 Tmp = Builder.CreateBitCast(Tmp, TruncTy);
2774 }
2775 }
2776
2777 ApplyAtomGroup Grp(CGF.getDebugInfo());
2778 LValue Dest = ResultRegDests[i];
2779 // ResultTypeRequiresCast elements correspond to the first
2780 // ResultTypeRequiresCast.size() elements of RegResults.
2781 if ((i < ResultTypeRequiresCast.size()) && ResultTypeRequiresCast[i]) {
2782 unsigned Size = CGF.getContext().getTypeSize(ResultRegQualTys[i]);
2783 Address A = Dest.getAddress().withElementType(ResultRegTypes[i]);
2784 if (CGF.getTargetHooks().isScalarizableAsmOperand(CGF, TruncTy)) {
2785 llvm::StoreInst *S = Builder.CreateStore(Tmp, A);
2786 CGF.addInstToCurrentSourceAtom(S, S->getValueOperand());
2787 continue;
2788 }
2789
2790 QualType Ty =
2791 CGF.getContext().getIntTypeForBitwidth(Size, /*Signed=*/false);
2792 if (Ty.isNull()) {
2793 const Expr *OutExpr = S.getOutputExpr(i);
2794 CGM.getDiags().Report(OutExpr->getExprLoc(),
2795 diag::err_store_value_to_reg);
2796 return;
2797 }
2798 Dest = CGF.MakeAddrLValue(A, Ty);
2799 }
2800 CGF.EmitStoreThroughLValue(RValue::get(Tmp), Dest);
2801 }
2802}
2803
// Emit a call to the '__ASM__hipstdpar_unsupported' stub, passing the asm
// text, in place of an inline asm that is not valid for the target.
// NOTE(review): this extraction is missing the opening signature line
// (upstream 2804); the body below indicates it takes a CodeGenFunction*
// 'CGF' and the AsmStmt 'S' -- verify against upstream CGStmt.cpp. The
// function is invoked from EmitAsmStmt when constraint parsing fails under
// HIPStdPar device compilation.
2805 const AsmStmt &S) {
2806 constexpr auto Name = "__ASM__hipstdpar_unsupported";
2807
2808 std::string Asm;
2809 if (auto GCCAsm = dyn_cast<GCCAsmStmt>(&S))
2810 Asm = GCCAsm->getAsmString();
2811
2812 auto &Ctx = CGF->CGM.getLLVMContext();
2813
2814 auto StrTy = llvm::ConstantDataArray::getString(Ctx, Asm);
2815 auto FnTy = llvm::FunctionType::get(llvm::Type::getVoidTy(Ctx),
2816 {StrTy->getType()}, false);
2817 auto UBF = CGF->CGM.getModule().getOrInsertFunction(Name, FnTy);
2818
2819 CGF->Builder.CreateCall(UBF, {StrTy});
2820}
2821
2823 // Pop all cleanup blocks at the end of the asm statement.
2824 CodeGenFunction::RunCleanupsScope Cleanups(*this);
2825
2826 // Assemble the final asm string.
2827 std::string AsmString = S.generateAsmString(getContext());
2828
2829 // Get all the output and input constraints together.
2830 SmallVector<TargetInfo::ConstraintInfo, 4> OutputConstraintInfos;
2831 SmallVector<TargetInfo::ConstraintInfo, 4> InputConstraintInfos;
2832
2833 bool IsHipStdPar = getLangOpts().HIPStdPar && getLangOpts().CUDAIsDevice;
2834 bool IsValidTargetAsm = true;
2835 for (unsigned i = 0, e = S.getNumOutputs(); i != e && IsValidTargetAsm; i++) {
2836 StringRef Name;
2837 if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
2838 Name = GAS->getOutputName(i);
2840 bool IsValid = getTarget().validateOutputConstraint(Info); (void)IsValid;
2841 if (IsHipStdPar && !IsValid)
2842 IsValidTargetAsm = false;
2843 else
2844 assert(IsValid && "Failed to parse output constraint");
2845 OutputConstraintInfos.push_back(Info);
2846 }
2847
2848 for (unsigned i = 0, e = S.getNumInputs(); i != e && IsValidTargetAsm; i++) {
2849 StringRef Name;
2850 if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
2851 Name = GAS->getInputName(i);
2853 bool IsValid =
2854 getTarget().validateInputConstraint(OutputConstraintInfos, Info);
2855 if (IsHipStdPar && !IsValid)
2856 IsValidTargetAsm = false;
2857 else
2858 assert(IsValid && "Failed to parse input constraint");
2859 InputConstraintInfos.push_back(Info);
2860 }
2861
2862 if (!IsValidTargetAsm)
2863 return EmitHipStdParUnsupportedAsm(this, S);
2864
2865 std::string Constraints;
2866
2867 std::vector<LValue> ResultRegDests;
2868 std::vector<QualType> ResultRegQualTys;
2869 std::vector<llvm::Type *> ResultRegTypes;
2870 std::vector<llvm::Type *> ResultTruncRegTypes;
2871 std::vector<llvm::Type *> ArgTypes;
2872 std::vector<llvm::Type *> ArgElemTypes;
2873 std::vector<llvm::Value*> Args;
2874 llvm::BitVector ResultTypeRequiresCast;
2875 llvm::BitVector ResultRegIsFlagReg;
2876
2877 // Keep track of inout constraints.
2878 std::string InOutConstraints;
2879 std::vector<llvm::Value*> InOutArgs;
2880 std::vector<llvm::Type*> InOutArgTypes;
2881 std::vector<llvm::Type*> InOutArgElemTypes;
2882
2883 // Keep track of out constraints for tied input operand.
2884 std::vector<std::string> OutputConstraints;
2885
2886 // Keep track of defined physregs.
2887 llvm::SmallSet<std::string, 8> PhysRegOutputs;
2888
2889 // An inline asm can be marked readonly if it meets the following conditions:
2890 // - it doesn't have any sideeffects
2891 // - it doesn't clobber memory
2892 // - it doesn't return a value by-reference
2893 // It can be marked readnone if it doesn't have any input memory constraints
2894 // in addition to meeting the conditions listed above.
2895 bool ReadOnly = true, ReadNone = true;
2896
2897 for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
2898 TargetInfo::ConstraintInfo &Info = OutputConstraintInfos[i];
2899
2900 // Simplify the output constraint.
2901 std::string OutputConstraint(S.getOutputConstraint(i));
2902 OutputConstraint = SimplifyConstraint(OutputConstraint.c_str() + 1,
2903 getTarget(), &OutputConstraintInfos);
2904
2905 const Expr *OutExpr = S.getOutputExpr(i);
2906 OutExpr = OutExpr->IgnoreParenNoopCasts(getContext());
2907
2908 std::string GCCReg;
2909 OutputConstraint = AddVariableConstraints(OutputConstraint, *OutExpr,
2910 getTarget(), CGM, S,
2911 Info.earlyClobber(),
2912 &GCCReg);
2913 // Give an error on multiple outputs to same physreg.
2914 if (!GCCReg.empty() && !PhysRegOutputs.insert(GCCReg).second)
2915 CGM.Error(S.getAsmLoc(), "multiple outputs to hard register: " + GCCReg);
2916
2917 OutputConstraints.push_back(OutputConstraint);
2918 LValue Dest = EmitLValue(OutExpr);
2919 if (!Constraints.empty())
2920 Constraints += ',';
2921
2922 // If this is a register output, then make the inline asm return it
2923 // by-value. If this is a memory result, return the value by-reference.
2924 QualType QTy = OutExpr->getType();
2925 const bool IsScalarOrAggregate = hasScalarEvaluationKind(QTy) ||
2927 if (!Info.allowsMemory() && IsScalarOrAggregate) {
2928
2929 Constraints += "=" + OutputConstraint;
2930 ResultRegQualTys.push_back(QTy);
2931 ResultRegDests.push_back(Dest);
2932
2933 bool IsFlagReg = llvm::StringRef(OutputConstraint).starts_with("{@cc");
2934 ResultRegIsFlagReg.push_back(IsFlagReg);
2935
2936 llvm::Type *Ty = ConvertTypeForMem(QTy);
2937 const bool RequiresCast = Info.allowsRegister() &&
2939 Ty->isAggregateType());
2940
2941 ResultTruncRegTypes.push_back(Ty);
2942 ResultTypeRequiresCast.push_back(RequiresCast);
2943
2944 if (RequiresCast) {
2945 unsigned Size = getContext().getTypeSize(QTy);
2946 if (Size)
2947 Ty = llvm::IntegerType::get(getLLVMContext(), Size);
2948 else
2949 CGM.Error(OutExpr->getExprLoc(), "output size should not be zero");
2950 }
2951 ResultRegTypes.push_back(Ty);
2952 // If this output is tied to an input, and if the input is larger, then
2953 // we need to set the actual result type of the inline asm node to be the
2954 // same as the input type.
2955 if (Info.hasMatchingInput()) {
2956 unsigned InputNo;
2957 for (InputNo = 0; InputNo != S.getNumInputs(); ++InputNo) {
2958 TargetInfo::ConstraintInfo &Input = InputConstraintInfos[InputNo];
2959 if (Input.hasTiedOperand() && Input.getTiedOperand() == i)
2960 break;
2961 }
2962 assert(InputNo != S.getNumInputs() && "Didn't find matching input!");
2963
2964 QualType InputTy = S.getInputExpr(InputNo)->getType();
2965 QualType OutputType = OutExpr->getType();
2966
2967 uint64_t InputSize = getContext().getTypeSize(InputTy);
2968 if (getContext().getTypeSize(OutputType) < InputSize) {
2969 // Form the asm to return the value as a larger integer or fp type.
2970 ResultRegTypes.back() = ConvertType(InputTy);
2971 }
2972 }
2973 if (llvm::Type* AdjTy =
2974 getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
2975 ResultRegTypes.back()))
2976 ResultRegTypes.back() = AdjTy;
2977 else {
2978 CGM.getDiags().Report(S.getAsmLoc(),
2979 diag::err_asm_invalid_type_in_input)
2980 << OutExpr->getType() << OutputConstraint;
2981 }
2982
2983 // Update largest vector width for any vector types.
2984 if (auto *VT = dyn_cast<llvm::VectorType>(ResultRegTypes.back()))
2985 LargestVectorWidth =
2986 std::max((uint64_t)LargestVectorWidth,
2987 VT->getPrimitiveSizeInBits().getKnownMinValue());
2988 } else {
2989 Address DestAddr = Dest.getAddress();
2990 // Matrix types in memory are represented by arrays, but accessed through
2991 // vector pointers, with the alignment specified on the access operation.
2992 // For inline assembly, update pointer arguments to use vector pointers.
2993 // Otherwise there will be a mis-match if the matrix is also an
2994 // input-argument which is represented as vector.
2995 if (isa<MatrixType>(OutExpr->getType().getCanonicalType()))
2996 DestAddr = DestAddr.withElementType(ConvertType(OutExpr->getType()));
2997
2998 ArgTypes.push_back(DestAddr.getType());
2999 ArgElemTypes.push_back(DestAddr.getElementType());
3000 Args.push_back(DestAddr.emitRawPointer(*this));
3001 Constraints += "=*";
3002 Constraints += OutputConstraint;
3003 ReadOnly = ReadNone = false;
3004 }
3005
3006 if (Info.isReadWrite()) {
3007 InOutConstraints += ',';
3008
3009 const Expr *InputExpr = S.getOutputExpr(i);
3010 llvm::Value *Arg;
3011 llvm::Type *ArgElemType;
3012 std::tie(Arg, ArgElemType) = EmitAsmInputLValue(
3013 Info, Dest, InputExpr->getType(), InOutConstraints,
3014 InputExpr->getExprLoc());
3015
3016 if (llvm::Type* AdjTy =
3017 getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
3018 Arg->getType()))
3019 Arg = Builder.CreateBitCast(Arg, AdjTy);
3020
3021 // Update largest vector width for any vector types.
3022 if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
3023 LargestVectorWidth =
3024 std::max((uint64_t)LargestVectorWidth,
3025 VT->getPrimitiveSizeInBits().getKnownMinValue());
3026 // Only tie earlyclobber physregs.
3027 if (Info.allowsRegister() && (GCCReg.empty() || Info.earlyClobber()))
3028 InOutConstraints += llvm::utostr(i);
3029 else
3030 InOutConstraints += OutputConstraint;
3031
3032 InOutArgTypes.push_back(Arg->getType());
3033 InOutArgElemTypes.push_back(ArgElemType);
3034 InOutArgs.push_back(Arg);
3035 }
3036 }
3037
3038 // If this is a Microsoft-style asm blob, store the return registers (EAX:EDX)
3039 // to the return value slot. Only do this when returning in registers.
3040 if (isa<MSAsmStmt>(&S)) {
3041 const ABIArgInfo &RetAI = CurFnInfo->getReturnInfo();
3042 if (RetAI.isDirect() || RetAI.isExtend()) {
3043 // Make a fake lvalue for the return value slot.
3045 CGM.getTargetCodeGenInfo().addReturnRegisterOutputs(
3046 *this, ReturnSlot, Constraints, ResultRegTypes, ResultTruncRegTypes,
3047 ResultRegDests, AsmString, S.getNumOutputs());
3048 SawAsmBlock = true;
3049 }
3050 }
3051
3052 for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
3053 const Expr *InputExpr = S.getInputExpr(i);
3054
3055 TargetInfo::ConstraintInfo &Info = InputConstraintInfos[i];
3056
3057 if (Info.allowsMemory())
3058 ReadNone = false;
3059
3060 if (!Constraints.empty())
3061 Constraints += ',';
3062
3063 // Simplify the input constraint.
3064 std::string InputConstraint(S.getInputConstraint(i));
3065 InputConstraint = SimplifyConstraint(InputConstraint.c_str(), getTarget(),
3066 &OutputConstraintInfos);
3067
3068 InputConstraint = AddVariableConstraints(
3069 InputConstraint, *InputExpr->IgnoreParenNoopCasts(getContext()),
3070 getTarget(), CGM, S, false /* No EarlyClobber */);
3071
3072 std::string ReplaceConstraint (InputConstraint);
3073 llvm::Value *Arg;
3074 llvm::Type *ArgElemType;
3075 std::tie(Arg, ArgElemType) = EmitAsmInput(Info, InputExpr, Constraints);
3076
3077 // If this input argument is tied to a larger output result, extend the
3078 // input to be the same size as the output. The LLVM backend wants to see
3079 // the input and output of a matching constraint be the same size. Note
3080 // that GCC does not define what the top bits are here. We use zext because
3081 // that is usually cheaper, but LLVM IR should really get an anyext someday.
3082 if (Info.hasTiedOperand()) {
3083 unsigned Output = Info.getTiedOperand();
3084 QualType OutputType = S.getOutputExpr(Output)->getType();
3085 QualType InputTy = InputExpr->getType();
3086
3087 if (getContext().getTypeSize(OutputType) >
3088 getContext().getTypeSize(InputTy)) {
3089 // Use ptrtoint as appropriate so that we can do our extension.
3090 if (isa<llvm::PointerType>(Arg->getType()))
3091 Arg = Builder.CreatePtrToInt(Arg, IntPtrTy);
3092 llvm::Type *OutputTy = ConvertType(OutputType);
3093 if (isa<llvm::IntegerType>(OutputTy))
3094 Arg = Builder.CreateZExt(Arg, OutputTy);
3095 else if (isa<llvm::PointerType>(OutputTy))
3096 Arg = Builder.CreateZExt(Arg, IntPtrTy);
3097 else if (OutputTy->isFloatingPointTy())
3098 Arg = Builder.CreateFPExt(Arg, OutputTy);
3099 }
3100 // Deal with the tied operands' constraint code in adjustInlineAsmType.
3101 ReplaceConstraint = OutputConstraints[Output];
3102 }
3103 if (llvm::Type* AdjTy =
3104 getTargetHooks().adjustInlineAsmType(*this, ReplaceConstraint,
3105 Arg->getType()))
3106 Arg = Builder.CreateBitCast(Arg, AdjTy);
3107 else
3108 CGM.getDiags().Report(S.getAsmLoc(), diag::err_asm_invalid_type_in_input)
3109 << InputExpr->getType() << InputConstraint;
3110
3111 // Update largest vector width for any vector types.
3112 if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
3113 LargestVectorWidth =
3114 std::max((uint64_t)LargestVectorWidth,
3115 VT->getPrimitiveSizeInBits().getKnownMinValue());
3116
3117 ArgTypes.push_back(Arg->getType());
3118 ArgElemTypes.push_back(ArgElemType);
3119 Args.push_back(Arg);
3120 Constraints += InputConstraint;
3121 }
3122
3123 // Append the "input" part of inout constraints.
3124 for (unsigned i = 0, e = InOutArgs.size(); i != e; i++) {
3125 ArgTypes.push_back(InOutArgTypes[i]);
3126 ArgElemTypes.push_back(InOutArgElemTypes[i]);
3127 Args.push_back(InOutArgs[i]);
3128 }
3129 Constraints += InOutConstraints;
3130
3131 // Labels
3133 llvm::BasicBlock *Fallthrough = nullptr;
3134 bool IsGCCAsmGoto = false;
3135 if (const auto *GS = dyn_cast<GCCAsmStmt>(&S)) {
3136 IsGCCAsmGoto = GS->isAsmGoto();
3137 if (IsGCCAsmGoto) {
3138 for (const auto *E : GS->labels()) {
3139 JumpDest Dest = getJumpDestForLabel(E->getLabel());
3140 Transfer.push_back(Dest.getBlock());
3141 if (!Constraints.empty())
3142 Constraints += ',';
3143 Constraints += "!i";
3144 }
3145 Fallthrough = createBasicBlock("asm.fallthrough");
3146 }
3147 }
3148
3149 bool HasUnwindClobber = false;
3150
3151 // Clobbers
3152 for (unsigned i = 0, e = S.getNumClobbers(); i != e; i++) {
3153 std::string Clobber = S.getClobber(i);
3154
3155 if (Clobber == "memory")
3156 ReadOnly = ReadNone = false;
3157 else if (Clobber == "unwind") {
3158 HasUnwindClobber = true;
3159 continue;
3160 } else if (Clobber != "cc") {
3161 Clobber = getTarget().getNormalizedGCCRegisterName(Clobber);
3162 if (CGM.getCodeGenOpts().StackClashProtector &&
3163 getTarget().isSPRegName(Clobber)) {
3164 CGM.getDiags().Report(S.getAsmLoc(),
3165 diag::warn_stack_clash_protection_inline_asm);
3166 }
3167 }
3168
3169 if (isa<MSAsmStmt>(&S)) {
3170 if (Clobber == "eax" || Clobber == "edx") {
3171 if (Constraints.find("=&A") != std::string::npos)
3172 continue;
3173 std::string::size_type position1 =
3174 Constraints.find("={" + Clobber + "}");
3175 if (position1 != std::string::npos) {
3176 Constraints.insert(position1 + 1, "&");
3177 continue;
3178 }
3179 std::string::size_type position2 = Constraints.find("=A");
3180 if (position2 != std::string::npos) {
3181 Constraints.insert(position2 + 1, "&");
3182 continue;
3183 }
3184 }
3185 }
3186 if (!Constraints.empty())
3187 Constraints += ',';
3188
3189 Constraints += "~{";
3190 Constraints += Clobber;
3191 Constraints += '}';
3192 }
3193
3194 assert(!(HasUnwindClobber && IsGCCAsmGoto) &&
3195 "unwind clobber can't be used with asm goto");
3196
3197 // Add machine specific clobbers
3198 std::string_view MachineClobbers = getTarget().getClobbers();
3199 if (!MachineClobbers.empty()) {
3200 if (!Constraints.empty())
3201 Constraints += ',';
3202 Constraints += MachineClobbers;
3203 }
3204
3205 llvm::Type *ResultType;
3206 if (ResultRegTypes.empty())
3207 ResultType = VoidTy;
3208 else if (ResultRegTypes.size() == 1)
3209 ResultType = ResultRegTypes[0];
3210 else
3211 ResultType = llvm::StructType::get(getLLVMContext(), ResultRegTypes);
3212
3213 llvm::FunctionType *FTy =
3214 llvm::FunctionType::get(ResultType, ArgTypes, false);
3215
3216 bool HasSideEffect = S.isVolatile() || S.getNumOutputs() == 0;
3217
3218 llvm::InlineAsm::AsmDialect GnuAsmDialect =
3219 CGM.getCodeGenOpts().getInlineAsmDialect() == CodeGenOptions::IAD_ATT
3220 ? llvm::InlineAsm::AD_ATT
3221 : llvm::InlineAsm::AD_Intel;
3222 llvm::InlineAsm::AsmDialect AsmDialect = isa<MSAsmStmt>(&S) ?
3223 llvm::InlineAsm::AD_Intel : GnuAsmDialect;
3224
3225 llvm::InlineAsm *IA = llvm::InlineAsm::get(
3226 FTy, AsmString, Constraints, HasSideEffect,
3227 /* IsAlignStack */ false, AsmDialect, HasUnwindClobber);
3228 std::vector<llvm::Value*> RegResults;
3229 llvm::CallBrInst *CBR;
3230 llvm::DenseMap<llvm::BasicBlock *, SmallVector<llvm::Value *, 4>>
3231 CBRRegResults;
3232 if (IsGCCAsmGoto) {
3233 CBR = Builder.CreateCallBr(IA, Fallthrough, Transfer, Args);
3234 EmitBlock(Fallthrough);
3235 UpdateAsmCallInst(*CBR, HasSideEffect, /*HasUnwindClobber=*/false, ReadOnly,
3236 ReadNone, InNoMergeAttributedStmt,
3237 InNoConvergentAttributedStmt, S, ResultRegTypes,
3238 ArgElemTypes, *this, RegResults);
3239 // Because we are emitting code top to bottom, we don't have enough
3240 // information at this point to know precisely whether we have a critical
3241 // edge. If we have outputs, split all indirect destinations.
3242 if (!RegResults.empty()) {
3243 unsigned i = 0;
3244 for (llvm::BasicBlock *Dest : CBR->getIndirectDests()) {
3245 llvm::Twine SynthName = Dest->getName() + ".split";
3246 llvm::BasicBlock *SynthBB = createBasicBlock(SynthName);
3247 llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
3248 Builder.SetInsertPoint(SynthBB);
3249
3250 if (ResultRegTypes.size() == 1) {
3251 CBRRegResults[SynthBB].push_back(CBR);
3252 } else {
3253 for (unsigned j = 0, e = ResultRegTypes.size(); j != e; ++j) {
3254 llvm::Value *Tmp = Builder.CreateExtractValue(CBR, j, "asmresult");
3255 CBRRegResults[SynthBB].push_back(Tmp);
3256 }
3257 }
3258
3259 EmitBranch(Dest);
3260 EmitBlock(SynthBB);
3261 CBR->setIndirectDest(i++, SynthBB);
3262 }
3263 }
3264 } else if (HasUnwindClobber) {
3265 llvm::CallBase *Result = EmitCallOrInvoke(IA, Args, "");
3266 UpdateAsmCallInst(*Result, HasSideEffect, /*HasUnwindClobber=*/true,
3267 ReadOnly, ReadNone, InNoMergeAttributedStmt,
3268 InNoConvergentAttributedStmt, S, ResultRegTypes,
3269 ArgElemTypes, *this, RegResults);
3270 } else {
3271 llvm::CallInst *Result =
3272 Builder.CreateCall(IA, Args, getBundlesForFunclet(IA));
3273 UpdateAsmCallInst(*Result, HasSideEffect, /*HasUnwindClobber=*/false,
3274 ReadOnly, ReadNone, InNoMergeAttributedStmt,
3275 InNoConvergentAttributedStmt, S, ResultRegTypes,
3276 ArgElemTypes, *this, RegResults);
3277 }
3278
3279 EmitAsmStores(*this, S, RegResults, ResultRegTypes, ResultTruncRegTypes,
3280 ResultRegDests, ResultRegQualTys, ResultTypeRequiresCast,
3281 ResultRegIsFlagReg);
3282
3283 // If this is an asm goto with outputs, repeat EmitAsmStores, but with a
3284 // different insertion point; one for each indirect destination and with
3285 // CBRRegResults rather than RegResults.
3286 if (IsGCCAsmGoto && !CBRRegResults.empty()) {
3287 for (llvm::BasicBlock *Succ : CBR->getIndirectDests()) {
3288 llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
3289 Builder.SetInsertPoint(Succ, --(Succ->end()));
3290 EmitAsmStores(*this, S, CBRRegResults[Succ], ResultRegTypes,
3291 ResultTruncRegTypes, ResultRegDests, ResultRegQualTys,
3292 ResultTypeRequiresCast, ResultRegIsFlagReg);
3293 }
3294 }
3295}
3296
// CodeGenFunction::InitCapturedStruct (orig. line 3297; the header line is
// missing from this listing): materialize the "agg.captured" temporary record
// that packages a CapturedStmt's captures, initializing one field per capture,
// and return it as an lvalue.
// NOTE(review): doxygen scrape artifact — the leading numbers are original
// source line numbers; hyperlinked lines 3297 (function header), 3299
// (presumably the declaration of RecordTy) and 3306 (first half of the
// for-loop header, presumably the capture-init iterator) were dropped.
3298 const RecordDecl *RD = S.getCapturedRecordDecl();
3300
3301 // Initialize the captured struct.
3302 LValue SlotLV =
3303 MakeAddrLValue(CreateMemTemp(RecordTy, "agg.captured"), RecordTy);
3304
// Walk the record's fields in lock-step with the capture initializers.
3305 RecordDecl::field_iterator CurField = RD->field_begin();
3307 E = S.capture_init_end();
3308 I != E; ++I, ++CurField) {
3309 LValue LV = EmitLValueForFieldInitialization(SlotLV, *CurField);
3310 if (CurField->hasCapturedVLAType()) {
// Captured VLA types are emitted through the lambda VLA-capture path.
3311 EmitLambdaVLACapture(CurField->getCapturedVLAType(), LV);
3312 } else {
3313 EmitInitializerForField(*CurField, LV, *I);
3314 }
3315 }
3316
3317 return SlotLV;
3318}
3319
3320/// Generate an outlined function for the body of a CapturedStmt, store any
3321/// captured variables into the captured struct, and call the outlined function.
// NOTE(review): doxygen scrape artifact — the leading numbers are original
// source line numbers; the hyperlinked header line 3323 (presumably
// `CodeGenFunction::EmitCapturedStmt(const CapturedStmt &S,
// CapturedRegionKind K)`) was dropped.
3322llvm::Function *
// Pack the captures into the "agg.captured" temporary first.
3324 LValue CapStruct = InitCapturedStruct(S);
3325
3326 // Emit the CapturedDecl
// A nested CodeGenFunction emits the helper; its CapturedStmtInfo is
// allocated here and freed manually once the helper has been generated.
3327 CodeGenFunction CGF(CGM, true);
3328 CGCapturedStmtRAII CapInfoRAII(CGF, new CGCapturedStmtInfo(S, K));
3329 llvm::Function *F = CGF.GenerateCapturedStmtFunction(S);
3330 delete CGF.CapturedStmtInfo;
3331
3332 // Emit call to the helper function.
3333 EmitCallOrInvoke(F, CapStruct.getPointer(*this));
3334
3335 return F;
3336}
3337
// NOTE(review): doxygen scrape artifact — the hyperlinked header line 3338 was
// dropped. Judging by the body, this is the helper that builds the captured
// struct for a CapturedStmt and returns its address (presumably
// GenerateCapturedStmtArgument — confirm against the full source).
3339 LValue CapStruct = InitCapturedStruct(S);
3340 return CapStruct.getAddress();
3341}
3342
3343/// Creates the outlined function for a CapturedStmt.
// NOTE(review): doxygen scrape artifact — the leading numbers are original
// source line numbers; hyperlinked lines 3345 (function header — presumably
// `CodeGenFunction::GenerateCapturedStmtFunction(const CapturedStmt &S)`,
// given the call at orig line 3329), 3378 (presumably the declaration of
// `Base`, an lvalue over the context struct), 3383 (first half of the
// `ExprArg` initializer) and 3399 (presumably the FinishFunction call) were
// dropped.
3344llvm::Function *
3346 assert(CapturedStmtInfo &&
3347 "CapturedStmtInfo should be set when generating the captured function");
3348 const CapturedDecl *CD = S.getCapturedDecl();
3349 const RecordDecl *RD = S.getCapturedRecordDecl();
3350 SourceLocation Loc = S.getBeginLoc();
3351 assert(CD->hasBody() && "missing CapturedDecl body");
3352
3353 // Build the argument list.
3354 ASTContext &Ctx = CGM.getContext();
3355 FunctionArgList Args;
3356 Args.append(CD->param_begin(), CD->param_end());
3357
3358 // Create the function declaration.
// The helper returns void and takes the CapturedDecl's parameters.
3359 const CGFunctionInfo &FuncInfo =
3360 CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, Args);
3361 llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo);
3362
// Internal linkage: the outlined helper is only referenced from this module.
3363 llvm::Function *F =
3364 llvm::Function::Create(FuncLLVMTy, llvm::GlobalValue::InternalLinkage,
3365 CapturedStmtInfo->getHelperName(), &CGM.getModule());
3366 CGM.SetInternalFunctionAttributes(CD, F, FuncInfo);
3367 if (CD->isNothrow())
3368 F->addFnAttr(llvm::Attribute::NoUnwind);
3369
3370 // Generate the function.
3371 StartFunction(CD, Ctx.VoidTy, F, FuncInfo, Args, CD->getLocation(),
3372 CD->getBody()->getBeginLoc());
3373 // Set the context parameter in CapturedStmtInfo.
3374 Address DeclPtr = GetAddrOfLocalVar(CD->getContextParam());
3375 CapturedStmtInfo->setContextValue(Builder.CreateLoad(DeclPtr));
3376
3377 // Initialize variable-length arrays.
3379 CapturedStmtInfo->getContextValue(), Ctx.getCanonicalTagType(RD));
3380 for (auto *FD : RD->fields()) {
3381 if (FD->hasCapturedVLAType()) {
// Record each captured VLA's size value so later VLA-size queries hit.
3382 auto *ExprArg =
3384 .getScalarVal();
3385 auto VAT = FD->getCapturedVLAType();
3386 VLASizeMap[VAT->getSizeExpr()] = ExprArg;
3387 }
3388 }
3389
3390 // If 'this' is captured, load it into CXXThisValue.
3391 if (CapturedStmtInfo->isCXXThisExprCaptured()) {
3392 FieldDecl *FD = CapturedStmtInfo->getThisFieldDecl();
3393 LValue ThisLValue = EmitLValueForField(Base, FD);
3394 CXXThisValue = EmitLoadOfLValue(ThisLValue, Loc).getScalarVal();
3395 }
3396
// Hook up PGO counters for the outlined body, then emit it.
3397 PGO->assignRegionCounters(GlobalDecl(CD), F);
3398 CapturedStmtInfo->EmitBody(*this, CD->getBody());
3400
3401 return F;
3402}
3403
3404// Returns the first convergence entry/loop/anchor instruction found in |BB|.
3405// std::nullptr otherwise.
3406static llvm::ConvergenceControlInst *getConvergenceToken(llvm::BasicBlock *BB) {
3407 for (auto &I : *BB) {
3408 if (auto *CI = dyn_cast<llvm::ConvergenceControlInst>(&I))
3409 return CI;
3410 }
3411 return nullptr;
3412}
3413
3414llvm::CallBase *
3415CodeGenFunction::addConvergenceControlToken(llvm::CallBase *Input) {
3416 llvm::ConvergenceControlInst *ParentToken = ConvergenceTokenStack.back();
3417 assert(ParentToken);
3418
3419 llvm::Value *bundleArgs[] = {ParentToken};
3420 llvm::OperandBundleDef OB("convergencectrl", bundleArgs);
3421 auto *Output = llvm::CallBase::addOperandBundle(
3422 Input, llvm::LLVMContext::OB_convergencectrl, OB, Input->getIterator());
3423 Input->replaceAllUsesWith(Output);
3424 Input->eraseFromParent();
3425 return Output;
3426}
3427
3428llvm::ConvergenceControlInst *
3429CodeGenFunction::emitConvergenceLoopToken(llvm::BasicBlock *BB) {
3430 llvm::ConvergenceControlInst *ParentToken = ConvergenceTokenStack.back();
3431 assert(ParentToken);
3432 return llvm::ConvergenceControlInst::CreateLoop(*BB, ParentToken);
3433}
3434
3435llvm::ConvergenceControlInst *
3436CodeGenFunction::getOrEmitConvergenceEntryToken(llvm::Function *F) {
3437 llvm::BasicBlock *BB = &F->getEntryBlock();
3438 llvm::ConvergenceControlInst *Token = getConvergenceToken(BB);
3439 if (Token)
3440 return Token;
3441
3442 // Adding a convergence token requires the function to be marked as
3443 // convergent.
3444 F->setConvergent();
3445 return llvm::ConvergenceControlInst::CreateEntry(*BB);
3446}
#define V(N, I)
Defines enum values for all the target-independent builtin functions.
static std::string AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr, const TargetInfo &Target, CodeGenModule &CGM, const AsmStmt &Stmt, const bool EarlyClobber, std::string *GCCReg=nullptr)
AddVariableConstraints - Look at AsmExpr and if it is a variable declared as using a particular regis...
Definition CGStmt.cpp:2528
static bool FindCaseStatementsForValue(const SwitchStmt &S, const llvm::APSInt &ConstantCondValue, SmallVectorImpl< const Stmt * > &ResultStmts, ASTContext &C, const SwitchCase *&ResultCase)
FindCaseStatementsForValue - Find the case statement being jumped to and then invoke CollectStatement...
Definition CGStmt.cpp:2182
static llvm::ConvergenceControlInst * getConvergenceToken(llvm::BasicBlock *BB)
Definition CGStmt.cpp:3406
static void EmitHipStdParUnsupportedAsm(CodeGenFunction *CGF, const AsmStmt &S)
Definition CGStmt.cpp:2804
static std::optional< SmallVector< uint64_t, 16 > > getLikelihoodWeights(ArrayRef< Stmt::Likelihood > Likelihoods)
Definition CGStmt.cpp:2236
static llvm::MDNode * getAsmSrcLocInfo(const StringLiteral *Str, CodeGenFunction &CGF)
getAsmSrcLocInfo - Return the !srcloc metadata node to attach to an inline asm call instruction.
Definition CGStmt.cpp:2622
static std::string SimplifyConstraint(const char *Constraint, const TargetInfo &Target, SmallVectorImpl< TargetInfo::ConstraintInfo > *OutCons=nullptr)
Definition CGStmt.cpp:2475
static bool isSwiftAsyncCallee(const CallExpr *CE)
Determine if the given call uses the swiftasync calling convention.
Definition CGStmt.cpp:1587
static CSFC_Result CollectStatementsForCase(const Stmt *S, const SwitchCase *Case, bool &FoundCase, SmallVectorImpl< const Stmt * > &ResultStmts)
Definition CGStmt.cpp:2027
static void EmitAsmStores(CodeGenFunction &CGF, const AsmStmt &S, const llvm::ArrayRef< llvm::Value * > RegResults, const llvm::ArrayRef< llvm::Type * > ResultRegTypes, const llvm::ArrayRef< llvm::Type * > ResultTruncRegTypes, const llvm::ArrayRef< LValue > ResultRegDests, const llvm::ArrayRef< QualType > ResultRegQualTys, const llvm::BitVector &ResultTypeRequiresCast, const llvm::BitVector &ResultRegIsFlagReg)
Definition CGStmt.cpp:2717
static bool hasEmptyLoopBody(const LoopStmt &S)
Definition CGStmt.cpp:1062
CSFC_Result
CollectStatementsForCase - Given the body of a 'switch' statement and a constant value that is being ...
Definition CGStmt.cpp:2026
@ CSFC_Failure
Definition CGStmt.cpp:2026
@ CSFC_Success
Definition CGStmt.cpp:2026
@ CSFC_FallThrough
Definition CGStmt.cpp:2026
static void UpdateAsmCallInst(llvm::CallBase &Result, bool HasSideEffect, bool HasUnwindClobber, bool ReadOnly, bool ReadNone, bool NoMerge, bool NoConvergent, const AsmStmt &S, const std::vector< llvm::Type * > &ResultRegTypes, const std::vector< llvm::Type * > &ArgElemTypes, CodeGenFunction &CGF, std::vector< llvm::Value * > &RegResults)
Definition CGStmt.cpp:2649
llvm::MachO::Target Target
Definition MachO.h:51
#define SM(sm)
Defines the PrettyStackTraceEntry class, which is used to make crashes give more contextual informati...
Defines the SourceManager interface.
APValue - This class implements a discriminated union of [uninitialized] [APSInt] [APFloat],...
Definition APValue.h:122
bool toIntegralConstant(APSInt &Result, QualType SrcTy, const ASTContext &Ctx) const
Try to convert this value to an integral constant.
Definition APValue.cpp:963
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition ASTContext.h:188
SourceManager & getSourceManager()
Definition ASTContext.h:798
QualType getIntTypeForBitwidth(unsigned DestWidth, unsigned Signed) const
getIntTypeForBitwidth - sets integer QualTy according to specified details: bitwidth,...
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
CanQualType VoidTy
CanQualType getCanonicalTagType(const TagDecl *TD) const
AsmStmt is the base class for GCCAsmStmt and MSAsmStmt.
Definition Stmt.h:3236
std::string getInputConstraint(unsigned i) const
getInputConstraint - Return the specified input constraint.
Definition Stmt.cpp:473
bool isVolatile() const
Definition Stmt.h:3272
std::string getOutputConstraint(unsigned i) const
getOutputConstraint - Return the constraint string for the specified output operand.
Definition Stmt.cpp:457
SourceLocation getAsmLoc() const
Definition Stmt.h:3266
const Expr * getInputExpr(unsigned i) const
Definition Stmt.cpp:481
unsigned getNumClobbers() const
Definition Stmt.h:3317
const Expr * getOutputExpr(unsigned i) const
Definition Stmt.cpp:465
unsigned getNumOutputs() const
Definition Stmt.h:3285
std::string generateAsmString(const ASTContext &C) const
Assemble final IR asm string.
Definition Stmt.cpp:449
unsigned getNumInputs() const
Definition Stmt.h:3307
std::string getClobber(unsigned i) const
Definition Stmt.cpp:489
Attr - This represents one attribute.
Definition Attr.h:44
Represents an attribute applied to a statement.
Definition Stmt.h:2203
Stmt * getSubStmt()
Definition Stmt.h:2239
ArrayRef< const Attr * > getAttrs() const
Definition Stmt.h:2235
BreakStmt - This represents a break.
Definition Stmt.h:3135
CXXForRangeStmt - This represents C++0x [stmt.ranged]'s ranged for statement, represented as 'for (ra...
Definition StmtCXX.h:135
DeclStmt * getBeginStmt()
Definition StmtCXX.h:163
DeclStmt * getLoopVarStmt()
Definition StmtCXX.h:169
DeclStmt * getEndStmt()
Definition StmtCXX.h:166
DeclStmt * getRangeStmt()
Definition StmtCXX.h:162
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition Expr.h:2877
Expr * getCallee()
Definition Expr.h:3024
Represents the body of a CapturedStmt, and serves as its DeclContext.
Definition Decl.h:4923
ImplicitParamDecl * getContextParam() const
Retrieve the parameter containing captured variables.
Definition Decl.h:4981
bool isNothrow() const
Definition Decl.cpp:5573
param_iterator param_end() const
Retrieve an iterator one past the last parameter decl.
Definition Decl.h:4998
param_iterator param_begin() const
Retrieve an iterator pointing to the first parameter decl.
Definition Decl.h:4996
Stmt * getBody() const override
getBody - If this Decl represents a declaration for a body of code, such as a function or method defi...
Definition Decl.cpp:5570
This captures a statement into a function.
Definition Stmt.h:3886
CapturedDecl * getCapturedDecl()
Retrieve the outlined function declaration.
Definition Stmt.cpp:1451
const RecordDecl * getCapturedRecordDecl() const
Retrieve the record declaration for captured variables.
Definition Stmt.h:4007
capture_init_iterator capture_init_begin()
Retrieve the first initialization argument.
Definition Stmt.h:4063
SourceLocation getBeginLoc() const LLVM_READONLY
Definition Stmt.h:4081
capture_init_iterator capture_init_end()
Retrieve the iterator pointing one past the last initialization argument.
Definition Stmt.h:4073
Expr *const * const_capture_init_iterator
Const iterator that walks over the capture initialization arguments.
Definition Stmt.h:4050
CapturedRegionKind getCapturedRegionKind() const
Retrieve the captured region kind.
Definition Stmt.cpp:1466
CaseStmt - Represent a case statement.
Definition Stmt.h:1920
Stmt * getSubStmt()
Definition Stmt.h:2033
Expr * getLHS()
Definition Stmt.h:2003
Expr * getRHS()
Definition Stmt.h:2015
ABIArgInfo - Helper class to encapsulate information about how a specific C type should be passed to ...
@ Indirect
Indirect - Pass the argument indirectly via a hidden pointer with the specified alignment (0 indicate...
Like RawAddress, an abstract representation of an aligned address, but the pointer contained in this ...
Definition Address.h:128
static Address invalid()
Definition Address.h:176
llvm::Value * emitRawPointer(CodeGenFunction &CGF) const
Return the pointer contained in this class after authenticating it and adding offset to it if necessa...
Definition Address.h:253
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Definition Address.h:209
Address withElementType(llvm::Type *ElemTy) const
Return address with different element type, but same pointer and alignment.
Definition Address.h:276
llvm::PointerType * getType() const
Return the type of the pointer value.
Definition Address.h:204
An aggregate value slot.
Definition CGValue.h:504
static AggValueSlot forAddr(Address addr, Qualifiers quals, IsDestructed_t isDestructed, NeedsGCBarriers_t needsGC, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed, IsSanitizerChecked_t isChecked=IsNotSanitizerChecked)
forAddr - Make a slot for an aggregate value.
Definition CGValue.h:587
A scoped helper to set the current source atom group for CGDebugInfo::addInstToCurrentSourceAtom.
A scoped helper to set the current debug location to the specified location or preferred location of ...
static ApplyDebugLocation CreateEmpty(CodeGenFunction &CGF)
Set the IRBuilder to not attach debug locations.
This class gathers all debug information during compilation and is responsible for emitting to llvm g...
Definition CGDebugInfo.h:59
CGFunctionInfo - Class to encapsulate the information about a function definition.
API for captured statement code generation.
RAII for correct setting/restoring of CapturedStmtInfo.
void rescopeLabels()
Change the cleanup scope of the labels in this lexical scope to match the scope of the enclosing cont...
Definition CGStmt.cpp:754
void ForceCleanup()
Force the emission of cleanups now, instead of waiting until this object is destroyed.
Enters a new scope for capturing cleanups, all of which will be executed once the scope is exited.
void ForceCleanup(std::initializer_list< llvm::Value ** > ValuesToReload={})
Force the emission of cleanups now, instead of waiting until this object is destroyed.
bool requiresCleanups() const
Determine whether this scope requires any cleanups.
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
void EmitOMPParallelMaskedTaskLoopDirective(const OMPParallelMaskedTaskLoopDirective &S)
void EmitOMPParallelMaskedDirective(const OMPParallelMaskedDirective &S)
void EmitOMPTaskyieldDirective(const OMPTaskyieldDirective &S)
void EmitCXXTryStmt(const CXXTryStmt &S)
void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock, llvm::BasicBlock *FalseBlock, uint64_t TrueCount, Stmt::Likelihood LH=Stmt::LH_None, const Expr *ConditionalOp=nullptr, const VarDecl *ConditionalDecl=nullptr)
EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g.
JumpDest getJumpDestInCurrentScope(llvm::BasicBlock *Target)
The given basic block lies in the current EH scope, but may be a target of a potentially scope-crossi...
llvm::DenseMap< const VarDecl *, llvm::Value * > NRVOFlags
A mapping from NRVO variables to the flags used to indicate when the NRVO has been applied to this va...
bool IsOutlinedSEHHelper
True if the current function is an outlined SEH helper.
void EmitOMPCanonicalLoop(const OMPCanonicalLoop *S)
Emit an OMPCanonicalLoop using the OpenMPIRBuilder.
static bool ContainsLabel(const Stmt *S, bool IgnoreCaseStmts=false)
ContainsLabel - Return true if the statement contains a label in it.
void EmitCXXForRangeStmt(const CXXForRangeStmt &S, ArrayRef< const Attr * > Attrs={})
Definition CGStmt.cpp:1448
void EmitOMPGenericLoopDirective(const OMPGenericLoopDirective &S)
void EmitOMPScanDirective(const OMPScanDirective &S)
void EmitComplexExprIntoLValue(const Expr *E, LValue dest, bool isInit)
EmitComplexExprIntoLValue - Emit the given expression of complex type and place its result into the s...
static bool hasScalarEvaluationKind(QualType T)
llvm::Type * ConvertType(QualType T)
void EmitOpenACCExitDataConstruct(const OpenACCExitDataConstruct &S)
LValue InitCapturedStruct(const CapturedStmt &S)
Definition CGStmt.cpp:3297
void addInstToNewSourceAtom(llvm::Instruction *KeyInstruction, llvm::Value *Backup)
Add KeyInstruction and an optional Backup instruction to a new atom group (See ApplyAtomGroup for mor...
CGCapturedStmtInfo * CapturedStmtInfo
void EmitOMPDistributeDirective(const OMPDistributeDirective &S)
void EmitOMPParallelForDirective(const OMPParallelForDirective &S)
llvm::CallBase * EmitCallOrInvoke(llvm::FunctionCallee Callee, ArrayRef< llvm::Value * > Args, const Twine &Name="")
Emits a call or invoke instruction to the given function, depending on the current state of the EH st...
Definition CGCall.cpp:5095
void EmitOMPMasterDirective(const OMPMasterDirective &S)
void EmitOMPParallelMasterTaskLoopSimdDirective(const OMPParallelMasterTaskLoopSimdDirective &S)
void EmitOpenACCInitConstruct(const OpenACCInitConstruct &S)
void EmitOMPFlushDirective(const OMPFlushDirective &S)
void EmitOMPTaskgroupDirective(const OMPTaskgroupDirective &S)
JumpDest getJumpDestForLabel(const LabelDecl *S)
getBasicBlockForLabel - Return the LLVM basicblock that the specified label maps to.
Definition CGStmt.cpp:706
void EmitCoreturnStmt(const CoreturnStmt &S)
void EmitOMPTargetTeamsDistributeParallelForSimdDirective(const OMPTargetTeamsDistributeParallelForSimdDirective &S)
SmallVector< llvm::ConvergenceControlInst *, 4 > ConvergenceTokenStack
Stack to track the controlled convergence tokens.
void EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S)
Definition CGObjC.cpp:2129
void EmitOMPDistributeSimdDirective(const OMPDistributeSimdDirective &S)
llvm::Constant * EmitCheckSourceLocation(SourceLocation Loc)
Emit a description of a source location in a format suitable for passing to a runtime sanitizer handl...
Definition CGExpr.cpp:3648
bool EmitSimpleStmt(const Stmt *S, ArrayRef< const Attr * > Attrs)
EmitSimpleStmt - Try to emit a "simple" statement which does not necessarily require an insertion poi...
Definition CGStmt.cpp:505
void EmitOMPTaskwaitDirective(const OMPTaskwaitDirective &S)
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
void EmitOMPTargetParallelForDirective(const OMPTargetParallelForDirective &S)
void addInstToCurrentSourceAtom(llvm::Instruction *KeyInstruction, llvm::Value *Backup)
See CGDebugInfo::addInstToCurrentSourceAtom.
const LangOptions & getLangOpts() const
RValue EmitReferenceBindingToExpr(const Expr *E)
Emits a reference binding to the passed in expression.
Definition CGExpr.cpp:684
void EmitOpenACCShutdownConstruct(const OpenACCShutdownConstruct &S)
bool InNoConvergentAttributedStmt
True if the current statement has noconvergent attribute.
void EmitOpenACCWaitConstruct(const OpenACCWaitConstruct &S)
void EmitBlockAfterUses(llvm::BasicBlock *BB)
EmitBlockAfterUses - Emit the given block somewhere hopefully near its uses, and leave the insertion ...
Definition CGStmt.cpp:689
void SimplifyForwardingBlocks(llvm::BasicBlock *BB)
SimplifyForwardingBlocks - If the given basic block is only a branch to another basic block,...
Definition CGStmt.cpp:630
void EmitBranchThroughCleanup(JumpDest Dest)
EmitBranchThroughCleanup - Emit a branch from the current insert block through the normal cleanup han...
bool InNoMergeAttributedStmt
True if the current statement has nomerge attribute.
void EmitOMPScopeDirective(const OMPScopeDirective &S)
LValue MakeAddrLValueWithoutTBAA(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
JumpDest ReturnBlock
ReturnBlock - Unified return block.
void EmitOMPTargetTeamsDistributeSimdDirective(const OMPTargetTeamsDistributeSimdDirective &S)
llvm::DebugLoc SourceLocToDebugLoc(SourceLocation Location)
Converts Location to a DebugLoc, if debug information is enabled.
void EmitOMPInterchangeDirective(const OMPInterchangeDirective &S)
void EmitAggregateCopy(LValue Dest, LValue Src, QualType EltTy, AggValueSlot::Overlap_t MayOverlap, bool isVolatile=false)
EmitAggregateCopy - Emit an aggregate copy.
LValue EmitLValueForField(LValue Base, const FieldDecl *Field, bool IsInBounds=true)
Definition CGExpr.cpp:5252
const TargetInfo & getTarget() const
Address EmitCompoundStmtWithoutScope(const CompoundStmt &S, bool GetLast=false, AggValueSlot AVS=AggValueSlot::ignored())
Definition CGStmt.cpp:578
void EmitGotoStmt(const GotoStmt &S)
Definition CGStmt.cpp:842
void EmitOMPDepobjDirective(const OMPDepobjDirective &S)
void EmitOMPMetaDirective(const OMPMetaDirective &S)
void EmitOMPCriticalDirective(const OMPCriticalDirective &S)
void EmitIgnoredExpr(const Expr *E)
EmitIgnoredExpr - Emit an expression in a context which ignores the result.
Definition CGExpr.cpp:242
void EmitOMPTaskLoopDirective(const OMPTaskLoopDirective &S)
RValue EmitLoadOfLValue(LValue V, SourceLocation Loc)
EmitLoadOfLValue - Given an expression that represents a value lvalue, this method emits the address ...
Definition CGExpr.cpp:2336
void EmitOMPCancelDirective(const OMPCancelDirective &S)
const Expr * RetExpr
If a return statement is being visited, this holds the return statement's result expression.
void EmitOMPBarrierDirective(const OMPBarrierDirective &S)
void EmitOMPOrderedDirective(const OMPOrderedDirective &S)
void EmitForStmt(const ForStmt &S, ArrayRef< const Attr * > Attrs={})
Definition CGStmt.cpp:1290
void EmitOMPSectionsDirective(const OMPSectionsDirective &S)
void EmitObjCAutoreleasePoolStmt(const ObjCAutoreleasePoolStmt &S)
Definition CGObjC.cpp:3688
void StartFunction(GlobalDecl GD, QualType RetTy, llvm::Function *Fn, const CGFunctionInfo &FnInfo, const FunctionArgList &Args, SourceLocation Loc=SourceLocation(), SourceLocation StartLoc=SourceLocation())
Emit code for the start of a function.
void EmitOMPInteropDirective(const OMPInteropDirective &S)
void EmitOMPParallelSectionsDirective(const OMPParallelSectionsDirective &S)
void EmitOMPTargetParallelDirective(const OMPTargetParallelDirective &S)
llvm::Value * EvaluateExprAsBool(const Expr *E)
EvaluateExprAsBool - Perform the usual unary conversions on the specified expression and compare the ...
Definition CGExpr.cpp:223
void EmitWhileStmt(const WhileStmt &S, ArrayRef< const Attr * > Attrs={})
Definition CGStmt.cpp:1075
void EmitOMPTargetParallelForSimdDirective(const OMPTargetParallelForSimdDirective &S)
void EmitOMPTargetParallelGenericLoopDirective(const OMPTargetParallelGenericLoopDirective &S)
Emit combined directive 'target parallel loop' as if its constituent constructs are 'target',...
void EmitOpenACCCombinedConstruct(const OpenACCCombinedConstruct &S)
void ResolveBranchFixups(llvm::BasicBlock *Target)
void EmitOMPTeamsDistributeParallelForSimdDirective(const OMPTeamsDistributeParallelForSimdDirective &S)
void EmitOMPMaskedDirective(const OMPMaskedDirective &S)
bool checkIfLoopMustProgress(const Expr *, bool HasEmptyBody)
Returns true if a loop must make progress, which means the mustprogress attribute can be added.
Definition CGStmt.cpp:1012
bool HaveInsertPoint() const
HaveInsertPoint - True if an insertion point is defined.
void EmitOMPTeamsDistributeSimdDirective(const OMPTeamsDistributeSimdDirective &S)
void EmitOMPMasterTaskLoopDirective(const OMPMasterTaskLoopDirective &S)
void EmitOMPReverseDirective(const OMPReverseDirective &S)
llvm::Value * getTypeSize(QualType Ty)
Returns calculated size of the specified type.
void EmitOMPCancellationPointDirective(const OMPCancellationPointDirective &S)
void EmitOpenACCDataConstruct(const OpenACCDataConstruct &S)
LValue EmitLValueForFieldInitialization(LValue Base, const FieldDecl *Field)
EmitLValueForFieldInitialization - Like EmitLValueForField, except that if the Field is a reference,...
Definition CGExpr.cpp:5426
void EmitOMPTargetTeamsDistributeParallelForDirective(const OMPTargetTeamsDistributeParallelForDirective &S)
void EmitOMPMaskedTaskLoopDirective(const OMPMaskedTaskLoopDirective &S)
void EmitObjCAtTryStmt(const ObjCAtTryStmt &S)
Definition CGObjC.cpp:2125
const TargetCodeGenInfo & getTargetHooks() const
void EmitOMPTargetExitDataDirective(const OMPTargetExitDataDirective &S)
void EmitSEHLeaveStmt(const SEHLeaveStmt &S)
void incrementProfileCounter(const Stmt *S, llvm::Value *StepV=nullptr)
Increment the profiler's counter for the given statement by StepV.
void EmitOMPTargetEnterDataDirective(const OMPTargetEnterDataDirective &S)
bool InNoInlineAttributedStmt
True if the current statement has noinline attribute.
SmallVector< llvm::OperandBundleDef, 1 > getBundlesForFunclet(llvm::Value *Callee)
Definition CGCall.cpp:5023
void EmitOMPMaskedTaskLoopSimdDirective(const OMPMaskedTaskLoopSimdDirective &S)
void EmitCoroutineBody(const CoroutineBodyStmt &S)
void EmitOMPParallelDirective(const OMPParallelDirective &S)
void EmitOMPTaskDirective(const OMPTaskDirective &S)
void EmitOMPMasterTaskLoopSimdDirective(const OMPMasterTaskLoopSimdDirective &S)
void EmitOMPDistributeParallelForDirective(const OMPDistributeParallelForDirective &S)
void EmitOMPAssumeDirective(const OMPAssumeDirective &S)
void EmitOMPTeamsDistributeDirective(const OMPTeamsDistributeDirective &S)
void EmitStopPoint(const Stmt *S)
EmitStopPoint - Emit a debug stoppoint if we are emitting debug info.
Definition CGStmt.cpp:51
void EmitOMPTargetUpdateDirective(const OMPTargetUpdateDirective &S)
void EmitOMPTargetTeamsGenericLoopDirective(const OMPTargetTeamsGenericLoopDirective &S)
void EmitIfStmt(const IfStmt &S)
Definition CGStmt.cpp:878
void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit)
EmitStoreOfComplex - Store a complex number into the specified l-value.
void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false)
EmitStoreThroughLValue - Store the specified rvalue into the specified lvalue, where both are guarant...
Definition CGExpr.cpp:2533
void EmitOpenACCAtomicConstruct(const OpenACCAtomicConstruct &S)
void EmitObjCAtSynchronizedStmt(const ObjCAtSynchronizedStmt &S)
Definition CGObjC.cpp:2133
Address EmitCompoundStmt(const CompoundStmt &S, bool GetLast=false, AggValueSlot AVS=AggValueSlot::ignored())
EmitCompoundStmt - Emit a compound statement {..} node.
Definition CGStmt.cpp:566
void EmitOpenACCCacheConstruct(const OpenACCCacheConstruct &S)
void EmitOpenACCLoopConstruct(const OpenACCLoopConstruct &S)
void EmitOMPTeamsDistributeParallelForDirective(const OMPTeamsDistributeParallelForDirective &S)
void EmitInitializerForField(FieldDecl *Field, LValue LHS, Expr *Init)
Definition CGClass.cpp:682
void EmitAsmStmt(const AsmStmt &S)
Definition CGStmt.cpp:2822
void EmitDefaultStmt(const DefaultStmt &S, ArrayRef< const Attr * > Attrs)
Definition CGStmt.cpp:1981
void EmitOMPTargetTeamsDistributeDirective(const OMPTargetTeamsDistributeDirective &S)
void EmitSwitchStmt(const SwitchStmt &S)
Definition CGStmt.cpp:2291
static bool mightAddDeclToScope(const Stmt *S)
Determine if the given statement might introduce a declaration into the current scope,...
void EmitAnyExprToMem(const Expr *E, Address Location, Qualifiers Quals, bool IsInitializer)
EmitAnyExprToMem - Emits the code necessary to evaluate an arbitrary expression into the given memory...
Definition CGExpr.cpp:293
RValue EmitAnyExpr(const Expr *E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
EmitAnyExpr - Emit code to compute the specified expression which can have any type.
Definition CGExpr.cpp:264
void EmitStmt(const Stmt *S, ArrayRef< const Attr * > Attrs={})
EmitStmt - Emit the code for the statement.
Definition CGStmt.cpp:61
void EmitOMPParallelForSimdDirective(const OMPParallelForSimdDirective &S)
uint64_t getCurrentProfileCount()
Get the profiler's current count.
llvm::Type * ConvertTypeForMem(QualType T)
void EmitOMPTaskLoopSimdDirective(const OMPTaskLoopSimdDirective &S)
void EmitOMPTargetDirective(const OMPTargetDirective &S)
void EmitOpenACCEnterDataConstruct(const OpenACCEnterDataConstruct &S)
llvm::Function * EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K)
Generate an outlined function for the body of a CapturedStmt, store any captured variables into the c...
Definition CGStmt.cpp:3323
static TypeEvaluationKind getEvaluationKind(QualType T)
getEvaluationKind - Return the TypeEvaluationKind of QualType T.
void EmitOMPTeamsDirective(const OMPTeamsDirective &S)
static bool containsBreak(const Stmt *S)
containsBreak - Return true if the statement contains a break out of it.
void EmitSimpleOMPExecutableDirective(const OMPExecutableDirective &D)
Emit simple code for OpenMP directives in Simd-only mode.
HLSLControlFlowHintAttr::Spelling HLSLControlFlowAttr
HLSL Branch attribute.
bool InAlwaysInlineAttributedStmt
True if the current statement has always_inline attribute.
void EmitCaseStmt(const CaseStmt &S, ArrayRef< const Attr * > Attrs)
Definition CGStmt.cpp:1866
void EmitOMPErrorDirective(const OMPErrorDirective &S)
void EmitBreakStmt(const BreakStmt &S)
Definition CGStmt.cpp:1752
void EmitOMPParallelMaskedTaskLoopSimdDirective(const OMPParallelMaskedTaskLoopSimdDirective &S)
void EmitOMPTargetTeamsDirective(const OMPTargetTeamsDirective &S)
void EmitOpenACCComputeConstruct(const OpenACCComputeConstruct &S)
void EmitDoStmt(const DoStmt &S, ArrayRef< const Attr * > Attrs={})
Definition CGStmt.cpp:1202
void EmitOMPTargetDataDirective(const OMPTargetDataDirective &S)
Address GenerateCapturedStmtArgument(const CapturedStmt &S)
Definition CGStmt.cpp:3338
void EmitBranch(llvm::BasicBlock *Block)
EmitBranch - Emit a branch to the specified basic block from the current insert block,...
Definition CGStmt.cpp:672
void EmitOMPSimdDirective(const OMPSimdDirective &S)
RawAddress CreateMemTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment and cas...
Definition CGExpr.cpp:186
void EmitOMPParallelGenericLoopDirective(const OMPLoopDirective &S)
void EmitOMPTargetSimdDirective(const OMPTargetSimdDirective &S)
void EmitOMPTeamsGenericLoopDirective(const OMPTeamsGenericLoopDirective &S)
AggValueSlot::Overlap_t getOverlapForReturnValue()
Determine whether a return value slot may overlap some other object.
const BreakContinue * GetDestForLoopControlStmt(const LoopControlStmt &S)
Definition CGStmt.cpp:1738
void EmitAggExpr(const Expr *E, AggValueSlot AS)
EmitAggExpr - Emit the computation of the specified expression of aggregate type.
llvm::BasicBlock * GetIndirectGotoBlock()
void EmitOpenACCHostDataConstruct(const OpenACCHostDataConstruct &S)
void EmitOpenACCUpdateConstruct(const OpenACCUpdateConstruct &S)
void EmitOMPUnrollDirective(const OMPUnrollDirective &S)
void EmitOMPStripeDirective(const OMPStripeDirective &S)
llvm::Value * EmitScalarExpr(const Expr *E, bool IgnoreResultAssign=false)
EmitScalarExpr - Emit the computation of the specified expression of LLVM scalar type,...
static bool hasAggregateEvaluationKind(QualType T)
void EmitCaseStmtRange(const CaseStmt &S, ArrayRef< const Attr * > Attrs)
EmitCaseStmtRange - If case statement range is not too big then add multiple cases to switch instruct...
Definition CGStmt.cpp:1781
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
void EmitOMPSingleDirective(const OMPSingleDirective &S)
void EmitReturnStmt(const ReturnStmt &S)
EmitReturnStmt - Note that due to GCC extensions, this can have an operand if the function returns vo...
Definition CGStmt.cpp:1613
void EmitLambdaVLACapture(const VariableArrayType *VAT, LValue LV)
void FinishFunction(SourceLocation EndLoc=SourceLocation())
FinishFunction - Complete IR generation of the current function.
llvm::Value * EmitCheckedArgForAssume(const Expr *E)
Emits an argument for a call to a __builtin_assume.
llvm::Function * GenerateCapturedStmtFunction(const CapturedStmt &S)
Creates the outlined function for a CapturedStmt.
Definition CGStmt.cpp:3345
const CGFunctionInfo * CurFnInfo
uint64_t getProfileCount(const Stmt *S)
Get the profiler's count for the given statement.
Address GetAddrOfLocalVar(const VarDecl *VD)
GetAddrOfLocalVar - Return the address of a local variable.
void EmitDeclStmt(const DeclStmt &S)
Definition CGStmt.cpp:1728
void EmitLabelStmt(const LabelStmt &S)
Definition CGStmt.cpp:775
bool ConstantFoldsToSimpleInteger(const Expr *Cond, bool &Result, bool AllowLabels=false)
ConstantFoldsToSimpleInteger - If the specified expression does not fold to a constant,...
void EmitOMPTileDirective(const OMPTileDirective &S)
void EmitDecl(const Decl &D, bool EvaluateConditionDecl=false)
EmitDecl - Emit a declaration.
Definition CGDecl.cpp:52
void EmitOMPAtomicDirective(const OMPAtomicDirective &S)
void EmitOpenACCSetConstruct(const OpenACCSetConstruct &S)
Address ReturnValue
ReturnValue - The temporary alloca to hold the return value.
LValue EmitLValue(const Expr *E, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitLValue - Emit code to compute a designator that specifies the location of the expression.
Definition CGExpr.cpp:1631
void EmitAttributedStmt(const AttributedStmt &S)
Definition CGStmt.cpp:785
void EmitOMPParallelMasterTaskLoopDirective(const OMPParallelMasterTaskLoopDirective &S)
void EmitOMPDistributeParallelForSimdDirective(const OMPDistributeParallelForSimdDirective &S)
void EmitOMPSectionDirective(const OMPSectionDirective &S)
void EnsureInsertPoint()
EnsureInsertPoint - Ensure that an insertion point is defined so that emitted IR has a place to go.
void EmitOMPForSimdDirective(const OMPForSimdDirective &S)
llvm::LLVMContext & getLLVMContext()
bool SawAsmBlock
Whether we processed a Microsoft-style asm block during CodeGen.
void EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S)
Definition CGObjC.cpp:1804
void EmitIndirectGotoStmt(const IndirectGotoStmt &S)
Definition CGStmt.cpp:854
void MaybeEmitDeferredVarDeclInit(const VarDecl *var)
Definition CGDecl.cpp:2074
bool isObviouslyBranchWithoutCleanups(JumpDest Dest) const
isObviouslyBranchWithoutCleanups - Return true if a branch to the specified destination obviously has...
void EmitSEHTryStmt(const SEHTryStmt &S)
void EmitOMPParallelMasterDirective(const OMPParallelMasterDirective &S)
void EmitBlockWithFallThrough(llvm::BasicBlock *BB, const Stmt *S)
When instrumenting to collect profile data, the counts for some blocks such as switch cases need to n...
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource Source=AlignmentSource::Type, bool isInit=false, bool isNontemporal=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
void EmitOMPForDirective(const OMPForDirective &S)
void EmitLabel(const LabelDecl *D)
EmitLabel - Emit the block for the given label.
Definition CGStmt.cpp:717
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
Definition CGStmt.cpp:652
LValue MakeNaturalAlignRawAddrLValue(llvm::Value *V, QualType T)
void EmitContinueStmt(const ContinueStmt &S)
Definition CGStmt.cpp:1765
This class organizes the cross-function state that is used while generating LLVM code.
llvm::Module & getModule() const
DiagnosticsEngine & getDiags() const
void ErrorUnsupported(const Stmt *S, const char *Type)
Print out an error that codegen doesn't support the specified stmt yet.
const LangOptions & getLangOpts() const
const llvm::DataLayout & getDataLayout() const
ASTContext & getContext() const
llvm::LLVMContext & getLLVMContext()
llvm::Function * getIntrinsic(unsigned IID, ArrayRef< llvm::Type * > Tys={})
A saved depth on the scope stack.
bool encloses(stable_iterator I) const
Returns true if this scope encloses I.
static stable_iterator stable_end()
Create a stable reference to the bottom of the EH stack.
FunctionArgList - Type for representing both the decl and type of parameters to a function.
Definition CGCall.h:375
LValue - This represents an lvalue reference.
Definition CGValue.h:182
llvm::Value * getPointer(CodeGenFunction &CGF) const
Address getAddress() const
Definition CGValue.h:361
RValue - This trivial value class is used to represent the result of an expression that is evaluated.
Definition CGValue.h:42
bool isScalar() const
Definition CGValue.h:64
static RValue get(llvm::Value *V)
Definition CGValue.h:98
bool isAggregate() const
Definition CGValue.h:66
Address getAggregateAddress() const
getAggregateAddr() - Return the Value* of the address of the aggregate.
Definition CGValue.h:83
llvm::Value * getScalarVal() const
getScalarVal() - Return the Value* of this scalar value.
Definition CGValue.h:71
std::pair< llvm::Value *, llvm::Value * > getComplexVal() const
getComplexVal - Return the real/imag components of this complex value.
Definition CGValue.h:78
virtual bool isScalarizableAsmOperand(CodeGen::CodeGenFunction &CGF, llvm::Type *Ty) const
Target hook to decide whether an inline asm operand can be passed by value.
Definition TargetInfo.h:202
CompoundStmt - This represents a group of statements like { stmt stmt }.
Definition Stmt.h:1720
Stmt *const * const_body_iterator
Definition Stmt.h:1792
body_range body()
Definition Stmt.h:1783
SourceLocation getLBracLoc() const
Definition Stmt.h:1857
Stmt * getStmtExprResult()
Definition Stmt.h:1842
ConstantExpr - An expression that occurs in a constant context and optionally the result of evaluatin...
Definition Expr.h:1082
ContinueStmt - This represents a continue.
Definition Stmt.h:3119
A reference to a declared variable, function, enum, etc.
Definition Expr.h:1270
ValueDecl * getDecl()
Definition Expr.h:1338
DeclStmt - Adaptor class for mixing declarations with statements and expressions.
Definition Stmt.h:1611
decl_range decls()
Definition Stmt.h:1659
SourceLocation getBodyRBrace() const
getBodyRBrace - Gets the right brace of the body, if a body exists.
virtual bool hasBody() const
Returns true if this Decl represents a declaration for a body of code, such as a function or method d...
Definition DeclBase.h:1093
SourceLocation getLocation() const
Definition DeclBase.h:439
Stmt * getSubStmt()
Definition Stmt.h:2081
DiagnosticBuilder Report(SourceLocation Loc, unsigned DiagID)
Issue the message to the client.
DoStmt - This represents a 'do/while' stmt.
Definition Stmt.h:2832
Stmt * getBody()
Definition Stmt.h:2857
Expr * getCond()
Definition Stmt.h:2850
This represents one expression.
Definition Expr.h:112
bool EvaluateAsInt(EvalResult &Result, const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects, bool InConstantContext=false) const
EvaluateAsInt - Return true if this is a constant which we can fold and convert to an integer,...
Expr * IgnoreParenNoopCasts(const ASTContext &Ctx) LLVM_READONLY
Skip past any parentheses and casts which do not change the value (including ptr->int casts of the sa...
Definition Expr.cpp:3112
llvm::APSInt EvaluateKnownConstInt(const ASTContext &Ctx) const
EvaluateKnownConstInt - Call EvaluateAsRValue and return the folded integer.
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3081
bool isEvaluatable(const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects) const
isEvaluatable - Call EvaluateAsRValue to see if this expression can be constant folded without side-e...
bool EvaluateAsRValue(EvalResult &Result, const ASTContext &Ctx, bool InConstantContext=false) const
EvaluateAsRValue - Return true if this is a constant which we can fold to an rvalue using any crazy t...
bool HasSideEffects(const ASTContext &Ctx, bool IncludePossibleEffects=true) const
HasSideEffects - This routine returns true for all those expressions which have any effect other than...
Definition Expr.cpp:3665
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition Expr.cpp:273
QualType getType() const
Definition Expr.h:144
Represents a member of a struct/union/class.
Definition Decl.h:3157
ForStmt - This represents a 'for (init;cond;inc)' stmt.
Definition Stmt.h:2888
Stmt * getInit()
Definition Stmt.h:2903
VarDecl * getConditionVariable() const
Retrieve the variable declared in this "for" statement, if any.
Definition Stmt.cpp:1078
Stmt * getBody()
Definition Stmt.h:2932
Expr * getInc()
Definition Stmt.h:2931
Expr * getCond()
Definition Stmt.h:2930
const Expr * getSubExpr() const
Definition Expr.h:1062
FunctionType - C99 6.7.5.3 - Function Declarators.
Definition TypeBase.h:4460
CallingConv getCallConv() const
Definition TypeBase.h:4815
This represents a GCC inline-assembly statement extension.
Definition Stmt.h:3395
GlobalDecl - represents a global declaration.
Definition GlobalDecl.h:57
GotoStmt - This represents a direct goto.
Definition Stmt.h:2969
LabelDecl * getLabel() const
Definition Stmt.h:2982
IfStmt - This represents an if/then/else.
Definition Stmt.h:2259
Stmt * getThen()
Definition Stmt.h:2348
Stmt * getInit()
Definition Stmt.h:2409
Expr * getCond()
Definition Stmt.h:2336
bool isConstexpr() const
Definition Stmt.h:2452
bool isNegatedConsteval() const
Definition Stmt.h:2448
Stmt * getElse()
Definition Stmt.h:2357
bool isConsteval() const
Definition Stmt.h:2439
VarDecl * getConditionVariable()
Retrieve the variable declared in this "if" statement, if any.
Definition Stmt.cpp:1026
IndirectGotoStmt - This represents an indirect goto.
Definition Stmt.h:3008
LabelDecl * getConstantTarget()
getConstantTarget - Returns the fixed target of this indirect goto, if one exists.
Definition Stmt.cpp:1227
Represents the declaration of a label.
Definition Decl.h:523
LabelStmt * getStmt() const
Definition Decl.h:547
LabelStmt - Represents a label, which has a substatement.
Definition Stmt.h:2146
LabelDecl * getDecl() const
Definition Stmt.h:2164
bool isSideEntry() const
Definition Stmt.h:2193
Stmt * getSubStmt()
Definition Stmt.h:2168
Keeps track of the various options that can be enabled, which controls the dialect of C or C++ that i...
bool assumeFunctionsAreConvergent() const
Base class for BreakStmt and ContinueStmt.
Definition Stmt.h:3057
Represents a point when we exit a loop.
StringRef getName() const
Get the name of identifier for this declaration as a StringRef.
Definition Decl.h:300
If a crash happens while one of these objects are live, the message is printed out along with the spe...
A (possibly-)qualified type.
Definition TypeBase.h:937
bool isNull() const
Return true if this QualType doesn't point to a type yet.
Definition TypeBase.h:1004
QualType getCanonicalType() const
Definition TypeBase.h:8337
The collection of all-type qualifiers we support.
Definition TypeBase.h:331
Represents a struct/union/class.
Definition Decl.h:4309
field_range fields() const
Definition Decl.h:4512
specific_decl_iterator< FieldDecl > field_iterator
Definition Decl.h:4509
field_iterator field_begin() const
Definition Decl.cpp:5154
ReturnStmt - This represents a return, optionally of an expression: return; return 4;.
Definition Stmt.h:3160
SourceLocation getBeginLoc() const
Definition Stmt.h:3212
const VarDecl * getNRVOCandidate() const
Retrieve the variable that might be used for the named return value optimization.
Definition Stmt.h:3196
Expr * getRetValue()
Definition Stmt.h:3187
Scope - A scope is a transient data structure that is used while parsing the program.
Definition Scope.h:41
Encodes a location in the source.
UIntTy getRawEncoding() const
When a SourceLocation itself cannot be used, this returns an (opaque) 32-bit integer encoding for it.
This class handles loading and caching of source files into memory.
A trivial tuple used to represent a source range.
SourceLocation getEnd() const
SourceLocation getBegin() const
Stmt - This represents one statement.
Definition Stmt.h:85
@ NoStmtClass
Definition Stmt.h:88
StmtClass getStmtClass() const
Definition Stmt.h:1472
SourceRange getSourceRange() const LLVM_READONLY
SourceLocation tokens are not useful in isolation - they are low level value objects created/interpre...
Definition Stmt.cpp:334
Likelihood
The likelihood of a branch being taken.
Definition Stmt.h:1415
@ LH_Unlikely
Branch has the [[unlikely]] attribute.
Definition Stmt.h:1416
@ LH_None
No attribute set or branches of the IfStmt have the same attribute.
Definition Stmt.h:1417
@ LH_Likely
Branch has the [[likely]] attribute.
Definition Stmt.h:1419
static const Attr * getLikelihoodAttr(const Stmt *S)
Definition Stmt.cpp:171
SourceLocation getBeginLoc() const LLVM_READONLY
Definition Stmt.cpp:346
static Likelihood getLikelihood(ArrayRef< const Attr * > Attrs)
Definition Stmt.cpp:163
StringLiteral - This represents a string literal expression, e.g.
Definition Expr.h:1799
SourceLocation getBeginLoc() const LLVM_READONLY
Definition Expr.h:1973
SourceLocation getLocationOfByte(unsigned ByteNo, const SourceManager &SM, const LangOptions &Features, const TargetInfo &Target, unsigned *StartToken=nullptr, unsigned *StartTokenByteOffset=nullptr) const
getLocationOfByte - Return a source location that points to the specified byte of this string literal...
Definition Expr.cpp:1322
StringRef getString() const
Definition Expr.h:1867
const SwitchCase * getNextSwitchCase() const
Definition Stmt.h:1893
SwitchStmt - This represents a 'switch' stmt.
Definition Stmt.h:2509
Expr * getCond()
Definition Stmt.h:2572
Stmt * getBody()
Definition Stmt.h:2584
VarDecl * getConditionVariable()
Retrieve the variable declared in this "switch" statement, if any.
Definition Stmt.cpp:1144
Stmt * getInit()
Definition Stmt.h:2589
SwitchCase * getSwitchCaseList()
Definition Stmt.h:2640
Exposes information about the current target.
Definition TargetInfo.h:226
bool validateInputConstraint(MutableArrayRef< ConstraintInfo > OutputConstraints, ConstraintInfo &info) const
StringRef getNormalizedGCCRegisterName(StringRef Name, bool ReturnCanonical=false) const
Returns the "normalized" GCC register name.
bool validateOutputConstraint(ConstraintInfo &Info) const
virtual std::string_view getClobbers() const =0
Returns a string of target-specific clobbers, in LLVM format.
bool isVoidType() const
Definition TypeBase.h:8878
const T * castAs() const
Member-template castAs<specific type>.
Definition TypeBase.h:9165
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition Type.cpp:752
Represent the declaration of a variable (in which case it is an lvalue) a function (in which case it ...
Definition Decl.h:711
Represents a variable declaration or definition.
Definition Decl.h:925
bool isNRVOVariable() const
Determine whether this local variable can be used with the named return value optimization (NRVO).
Definition Decl.h:1511
WhileStmt - This represents a 'while' stmt.
Definition Stmt.h:2697
Expr * getCond()
Definition Stmt.h:2749
SourceLocation getWhileLoc() const
Definition Stmt.h:2802
SourceLocation getRParenLoc() const
Definition Stmt.h:2807
VarDecl * getConditionVariable()
Retrieve the variable declared in this "while" statement, if any.
Definition Stmt.cpp:1205
Stmt * getBody()
Definition Stmt.h:2761
Defines the clang::TargetInfo interface.
The JSON file list parser is used to communicate input to InstallAPI.
CanQual< Type > CanQualType
Represents a canonical, potentially-qualified type.
bool isa(CodeGen::Address addr)
Definition Address.h:330
@ CPlusPlus
@ CPlusPlus11
CapturedRegionKind
The different kinds of captured statement.
@ SC_Register
Definition Specifiers.h:257
Expr * Cond
};
@ Asm
Assembly: we accept this only so that we can preprocess it.
@ Result
The result type of a method or function.
Definition TypeBase.h:905
@ CC_SwiftAsync
Definition Specifiers.h:294
U cast(CodeGen::Address addr)
Definition Address.h:327
ActionResult< Expr * > ExprResult
Definition Ownership.h:249
unsigned long uint64_t
Diagnostic wrappers for TextAPI types for error reporting.
Definition Dominators.h:30
cl::opt< bool > EnableSingleByteCoverage
A jump destination is an abstract label, branching to which may require a jump out through normal cle...
void setScopeDepth(EHScopeStack::stable_iterator depth)
EHScopeStack::stable_iterator getScopeDepth() const
EvalResult is a struct with detailed info about an evaluated expression.
Definition Expr.h:645
APValue Val
Val - This is the value the expression can be folded to.
Definition Expr.h:647
bool hasMatchingInput() const
Return true if this output operand has a matching (tied) input operand.
bool hasTiedOperand() const
Return true if this input operand is a matching constraint that ties it to an output operand.