Thanks for visiting codestin.com
Credit goes to clang.llvm.org

clang 22.0.0git
CIRGenFunction.cpp
Go to the documentation of this file.
1//===----------------------------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Internal per-function state used for AST-to-ClangIR code gen
10//
11//===----------------------------------------------------------------------===//
12
13#include "CIRGenFunction.h"
14
15#include "CIRGenCXXABI.h"
16#include "CIRGenCall.h"
17#include "CIRGenValue.h"
18#include "mlir/IR/Location.h"
19#include "clang/AST/ExprCXX.h"
22
23#include <cassert>
24
25namespace clang::CIRGen {
26
28 bool suppressNewContext)
29 : CIRGenTypeCache(cgm), cgm{cgm}, builder(builder) {
30 ehStack.setCGF(this);
31}
32
34
35// This is copied from clang/lib/CodeGen/CodeGenFunction.cpp
37 type = type.getCanonicalType();
38 while (true) {
39 switch (type->getTypeClass()) {
40#define TYPE(name, parent)
41#define ABSTRACT_TYPE(name, parent)
42#define NON_CANONICAL_TYPE(name, parent) case Type::name:
43#define DEPENDENT_TYPE(name, parent) case Type::name:
44#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(name, parent) case Type::name:
45#include "clang/AST/TypeNodes.inc"
46 llvm_unreachable("non-canonical or dependent type in IR-generation");
47
48 case Type::Auto:
49 case Type::DeducedTemplateSpecialization:
50 llvm_unreachable("undeduced type in IR-generation");
51
52 // Various scalar types.
53 case Type::Builtin:
54 case Type::Pointer:
55 case Type::BlockPointer:
56 case Type::LValueReference:
57 case Type::RValueReference:
58 case Type::MemberPointer:
59 case Type::Vector:
60 case Type::ExtVector:
61 case Type::ConstantMatrix:
62 case Type::FunctionProto:
63 case Type::FunctionNoProto:
64 case Type::Enum:
65 case Type::ObjCObjectPointer:
66 case Type::Pipe:
67 case Type::BitInt:
68 case Type::HLSLAttributedResource:
69 case Type::HLSLInlineSpirv:
70 return cir::TEK_Scalar;
71
72 // Complexes.
73 case Type::Complex:
74 return cir::TEK_Complex;
75
76 // Arrays, records, and Objective-C objects.
77 case Type::ConstantArray:
78 case Type::IncompleteArray:
79 case Type::VariableArray:
80 case Type::Record:
81 case Type::ObjCObject:
82 case Type::ObjCInterface:
83 case Type::ArrayParameter:
84 return cir::TEK_Aggregate;
85
86 // We operate on atomic values according to their underlying type.
87 case Type::Atomic:
88 type = cast<AtomicType>(type)->getValueType();
89 continue;
90 }
91 llvm_unreachable("unknown type kind!");
92 }
93}
94
96 return cgm.getTypes().convertTypeForMem(t);
97}
98
100 return cgm.getTypes().convertType(t);
101}
102
104 // Some AST nodes might contain invalid source locations (e.g.
105 // CXXDefaultArgExpr), workaround that to still get something out.
106 if (srcLoc.isValid()) {
108 PresumedLoc pLoc = sm.getPresumedLoc(srcLoc);
109 StringRef filename = pLoc.getFilename();
110 return mlir::FileLineColLoc::get(builder.getStringAttr(filename),
111 pLoc.getLine(), pLoc.getColumn());
112 }
113 // Do our best...
114 assert(currSrcLoc && "expected to inherit some source location");
115 return *currSrcLoc;
116}
117
118mlir::Location CIRGenFunction::getLoc(SourceRange srcLoc) {
119 // Some AST nodes might contain invalid source locations (e.g.
120 // CXXDefaultArgExpr), workaround that to still get something out.
121 if (srcLoc.isValid()) {
122 mlir::Location beg = getLoc(srcLoc.getBegin());
123 mlir::Location end = getLoc(srcLoc.getEnd());
124 SmallVector<mlir::Location, 2> locs = {beg, end};
125 mlir::Attribute metadata;
126 return mlir::FusedLoc::get(locs, metadata, &getMLIRContext());
127 }
128 if (currSrcLoc) {
129 return *currSrcLoc;
130 }
131 // We're brave, but time to give up.
132 return builder.getUnknownLoc();
133}
134
135mlir::Location CIRGenFunction::getLoc(mlir::Location lhs, mlir::Location rhs) {
136 SmallVector<mlir::Location, 2> locs = {lhs, rhs};
137 mlir::Attribute metadata;
138 return mlir::FusedLoc::get(locs, metadata, &getMLIRContext());
139}
140
141bool CIRGenFunction::containsLabel(const Stmt *s, bool ignoreCaseStmts) {
142 // Null statement, not a label!
143 if (!s)
144 return false;
145
146 // If this is a label, we have to emit the code, consider something like:
147 // if (0) { ... foo: bar(); } goto foo;
148 //
149 // TODO: If anyone cared, we could track __label__'s, since we know that you
150 // can't jump to one from outside their declared region.
151 if (isa<LabelStmt>(s))
152 return true;
153
154 // If this is a case/default statement, and we haven't seen a switch, we
155 // have to emit the code.
156 if (isa<SwitchCase>(s) && !ignoreCaseStmts)
157 return true;
158
159 // If this is a switch statement, we want to ignore case statements when we
160 // recursively process the sub-statements of the switch. If we haven't
161 // encountered a switch statement, we treat case statements like labels, but
162 // if we are processing a switch statement, case statements are expected.
163 if (isa<SwitchStmt>(s))
164 ignoreCaseStmts = true;
165
166 // Scan subexpressions for verboten labels.
167 return std::any_of(s->child_begin(), s->child_end(),
168 [=](const Stmt *subStmt) {
169 return containsLabel(subStmt, ignoreCaseStmts);
170 });
171}
172
173/// If the specified expression does not fold to a constant, or if it does but
174/// contains a label, return false. If it constant folds return true and set
175/// the boolean result in Result.
176bool CIRGenFunction::constantFoldsToBool(const Expr *cond, bool &resultBool,
177 bool allowLabels) {
178 llvm::APSInt resultInt;
179 if (!constantFoldsToSimpleInteger(cond, resultInt, allowLabels))
180 return false;
181
182 resultBool = resultInt.getBoolValue();
183 return true;
184}
185
186/// If the specified expression does not fold to a constant, or if it does
187/// fold but contains a label, return false. If it constant folds, return
188/// true and set the folded value.
190 llvm::APSInt &resultInt,
191 bool allowLabels) {
192 // FIXME: Rename and handle conversion of other evaluatable things
193 // to bool.
194 Expr::EvalResult result;
195 if (!cond->EvaluateAsInt(result, getContext()))
196 return false; // Not foldable, not integer or not fully evaluatable.
197
198 llvm::APSInt intValue = result.Val.getInt();
199 if (!allowLabels && containsLabel(cond))
200 return false; // Contains a label.
201
202 resultInt = intValue;
203 return true;
204}
205
206void CIRGenFunction::emitAndUpdateRetAlloca(QualType type, mlir::Location loc,
207 CharUnits alignment) {
208 if (!type->isVoidType()) {
209 mlir::Value addr = emitAlloca("__retval", convertType(type), loc, alignment,
210 /*insertIntoFnEntryBlock=*/false);
211 fnRetAlloca = addr;
212 returnValue = Address(addr, alignment);
213 }
214}
215
216void CIRGenFunction::declare(mlir::Value addrVal, const Decl *var, QualType ty,
217 mlir::Location loc, CharUnits alignment,
218 bool isParam) {
219 assert(isa<NamedDecl>(var) && "Needs a named decl");
220 assert(!symbolTable.count(var) && "not supposed to be available just yet");
221
222 auto allocaOp = addrVal.getDefiningOp<cir::AllocaOp>();
223 assert(allocaOp && "expected cir::AllocaOp");
224
225 if (isParam)
226 allocaOp.setInitAttr(mlir::UnitAttr::get(&getMLIRContext()));
227 if (ty->isReferenceType() || ty.isConstQualified())
228 allocaOp.setConstantAttr(mlir::UnitAttr::get(&getMLIRContext()));
229
230 symbolTable.insert(var, allocaOp);
231}
232
234 CIRGenBuilderTy &builder = cgf.builder;
235 LexicalScope *localScope = cgf.curLexScope;
236
237 auto applyCleanup = [&]() {
238 if (performCleanup) {
239 // ApplyDebugLocation
241 forceCleanup();
242 }
243 };
244
245 if (returnBlock != nullptr) {
246 // Write out the return block, which loads the value from `__retval` and
247 // issues the `cir.return`.
248 mlir::OpBuilder::InsertionGuard guard(builder);
249 builder.setInsertionPointToEnd(returnBlock);
250 (void)emitReturn(*returnLoc);
251 }
252
253 auto insertCleanupAndLeave = [&](mlir::Block *insPt) {
254 mlir::OpBuilder::InsertionGuard guard(builder);
255 builder.setInsertionPointToEnd(insPt);
256
257 // If we still don't have a cleanup block, it means that `applyCleanup`
258 // below might be able to get us one.
259 mlir::Block *cleanupBlock = localScope->getCleanupBlock(builder);
260
261 // Leverage and defers to RunCleanupsScope's dtor and scope handling.
262 applyCleanup();
263
264 // If we now have one after `applyCleanup`, hook it up properly.
265 if (!cleanupBlock && localScope->getCleanupBlock(builder)) {
266 cleanupBlock = localScope->getCleanupBlock(builder);
267 builder.create<cir::BrOp>(insPt->back().getLoc(), cleanupBlock);
268 if (!cleanupBlock->mightHaveTerminator()) {
269 mlir::OpBuilder::InsertionGuard guard(builder);
270 builder.setInsertionPointToEnd(cleanupBlock);
271 builder.create<cir::YieldOp>(localScope->endLoc);
272 }
273 }
274
275 if (localScope->depth == 0) {
276 // Reached the end of the function.
277 if (returnBlock != nullptr) {
278 if (returnBlock->getUses().empty()) {
279 returnBlock->erase();
280 } else {
281 // Thread return block via cleanup block.
282 if (cleanupBlock) {
283 for (mlir::BlockOperand &blockUse : returnBlock->getUses()) {
284 cir::BrOp brOp = mlir::cast<cir::BrOp>(blockUse.getOwner());
285 brOp.setSuccessor(cleanupBlock);
286 }
287 }
288
289 builder.create<cir::BrOp>(*returnLoc, returnBlock);
290 return;
291 }
292 }
293 emitImplicitReturn();
294 return;
295 }
296
297 // End of any local scope != function
298 // Ternary ops have to deal with matching arms for yielding types
299 // and do return a value, it must do its own cir.yield insertion.
300 if (!localScope->isTernary() && !insPt->mightHaveTerminator()) {
301 !retVal ? builder.create<cir::YieldOp>(localScope->endLoc)
302 : builder.create<cir::YieldOp>(localScope->endLoc, retVal);
303 }
304 };
305
306 // If a cleanup block has been created at some point, branch to it
307 // and set the insertion point to continue at the cleanup block.
308 // Terminators are then inserted either in the cleanup block or
309 // inline in this current block.
310 mlir::Block *cleanupBlock = localScope->getCleanupBlock(builder);
311 if (cleanupBlock)
312 insertCleanupAndLeave(cleanupBlock);
313
314 // Now deal with any pending block wrap up like implicit end of
315 // scope.
316
317 mlir::Block *curBlock = builder.getBlock();
318 if (isGlobalInit() && !curBlock)
319 return;
320 if (curBlock->mightHaveTerminator() && curBlock->getTerminator())
321 return;
322
323 // Get rid of any empty block at the end of the scope.
324 bool entryBlock = builder.getInsertionBlock()->isEntryBlock();
325 if (!entryBlock && curBlock->empty()) {
326 curBlock->erase();
327 if (returnBlock != nullptr && returnBlock->getUses().empty())
328 returnBlock->erase();
329 return;
330 }
331
332 // If there's a cleanup block, branch to it, nothing else to do.
333 if (cleanupBlock) {
334 builder.create<cir::BrOp>(curBlock->back().getLoc(), cleanupBlock);
335 return;
336 }
337
338 // No pre-existent cleanup block, emit cleanup code and yield/return.
339 insertCleanupAndLeave(curBlock);
340}
341
342cir::ReturnOp CIRGenFunction::LexicalScope::emitReturn(mlir::Location loc) {
343 CIRGenBuilderTy &builder = cgf.getBuilder();
344
345 auto fn = dyn_cast<cir::FuncOp>(cgf.curFn);
346 assert(fn && "emitReturn from non-function");
347 if (!fn.getFunctionType().hasVoidReturn()) {
348 // Load the value from `__retval` and return it via the `cir.return` op.
349 auto value = builder.create<cir::LoadOp>(
350 loc, fn.getFunctionType().getReturnType(), *cgf.fnRetAlloca);
351 return builder.create<cir::ReturnOp>(loc,
352 llvm::ArrayRef(value.getResult()));
353 }
354 return builder.create<cir::ReturnOp>(loc);
355}
356
357// This is copied from CodeGenModule::MayDropFunctionReturn. This is a
358// candidate for sharing between CIRGen and CodeGen.
359static bool mayDropFunctionReturn(const ASTContext &astContext,
360 QualType returnType) {
361 // We can't just discard the return value for a record type with a complex
362 // destructor or a non-trivially copyable type.
363 if (const auto *classDecl = returnType->getAsCXXRecordDecl())
364 return classDecl->hasTrivialDestructor();
365 return returnType.isTriviallyCopyableType(astContext);
366}
367
/// Emit the implicit terminator for a function whose body falls off the end:
/// either a trap/unreachable (C++ non-void, where that is UB) or an ordinary
/// return of the scope's end location.
void CIRGenFunction::LexicalScope::emitImplicitReturn() {
  CIRGenBuilderTy &builder = cgf.getBuilder();
  LexicalScope *localScope = cgf.curLexScope;

  const auto *fd = cast<clang::FunctionDecl>(cgf.curGD.getDecl());

  // In C++, flowing off the end of a non-void function is always undefined
  // behavior. In C, flowing off the end of a non-void function is undefined
  // behavior only if the non-existent return value is used by the caller.
  // That influences whether the terminating op is trap, unreachable, or
  // return.
  if (cgf.getLangOpts().CPlusPlus && !fd->hasImplicitReturnZero() &&
      !cgf.sawAsmBlock && !fd->getReturnType()->isVoidType() &&
      builder.getInsertionBlock()) {
    bool shouldEmitUnreachable =
        cgf.cgm.getCodeGenOpts().StrictReturn ||
        !mayDropFunctionReturn(fd->getASTContext(), fd->getReturnType());

    if (shouldEmitUnreachable) {
      // NOTE(review): one source line is missing here in this copy of the
      // file (numbering jumps 386 -> 388) — restore from upstream.
      // At -O0 emit a trap for friendlier debugging; at higher optimization
      // levels mark the path unreachable so the optimizer can exploit it.
      if (cgf.cgm.getCodeGenOpts().OptimizationLevel == 0)
        builder.create<cir::TrapOp>(localScope->endLoc);
      else
        builder.create<cir::UnreachableOp>(localScope->endLoc);
      // No further code may be emitted on this dead path.
      builder.clearInsertionPoint();
      return;
    }
  }

  // Otherwise fall back to an ordinary return at the scope's end location.
  (void)emitReturn(localScope->endLoc);
}
399
401 cir::FuncOp fn, cir::FuncType funcType,
403 SourceLocation startLoc) {
404 assert(!curFn &&
405 "CIRGenFunction can only be used for one function at a time");
406
407 curFn = fn;
408
409 const Decl *d = gd.getDecl();
410 curCodeDecl = d;
411 const auto *fd = dyn_cast_or_null<FunctionDecl>(d);
413
414 prologueCleanupDepth = ehStack.stable_begin();
415
416 mlir::Block *entryBB = &fn.getBlocks().front();
417 builder.setInsertionPointToStart(entryBB);
418
419 // TODO(cir): this should live in `emitFunctionProlog
420 // Declare all the function arguments in the symbol table.
421 for (const auto nameValue : llvm::zip(args, entryBB->getArguments())) {
422 const VarDecl *paramVar = std::get<0>(nameValue);
423 mlir::Value paramVal = std::get<1>(nameValue);
424 CharUnits alignment = getContext().getDeclAlign(paramVar);
425 mlir::Location paramLoc = getLoc(paramVar->getSourceRange());
426 paramVal.setLoc(paramLoc);
427
428 mlir::Value addrVal =
429 emitAlloca(cast<NamedDecl>(paramVar)->getName(),
430 convertType(paramVar->getType()), paramLoc, alignment,
431 /*insertIntoFnEntryBlock=*/true);
432
433 declare(addrVal, paramVar, paramVar->getType(), paramLoc, alignment,
434 /*isParam=*/true);
435
436 setAddrOfLocalVar(paramVar, Address(addrVal, alignment));
437
438 bool isPromoted = isa<ParmVarDecl>(paramVar) &&
439 cast<ParmVarDecl>(paramVar)->isKNRPromoted();
441 if (isPromoted)
442 cgm.errorNYI(fd->getSourceRange(), "Function argument demotion");
443
444 // Location of the store to the param storage tracked as beginning of
445 // the function body.
446 mlir::Location fnBodyBegin = getLoc(fd->getBody()->getBeginLoc());
447 builder.CIRBaseBuilderTy::createStore(fnBodyBegin, paramVal, addrVal);
448 }
449 assert(builder.getInsertionBlock() && "Should be valid");
450
451 // When the current function is not void, create an address to store the
452 // result value.
453 if (!returnType->isVoidType())
454 emitAndUpdateRetAlloca(returnType, getLoc(fd->getBody()->getEndLoc()),
455 getContext().getTypeAlignInChars(returnType));
456
457 if (isa_and_nonnull<CXXMethodDecl>(d) &&
458 cast<CXXMethodDecl>(d)->isInstance()) {
459 cgm.getCXXABI().emitInstanceFunctionProlog(loc, *this);
460
461 const auto *md = cast<CXXMethodDecl>(d);
462 if (md->getParent()->isLambda() && md->getOverloadedOperator() == OO_Call) {
463 // We're in a lambda.
464 auto fn = dyn_cast<cir::FuncOp>(curFn);
465 assert(fn && "lambda in non-function region");
466 fn.setLambda(true);
467
468 // Figure out the captures.
469 md->getParent()->getCaptureFields(lambdaCaptureFields,
472 // If the lambda captures the object referred to by '*this' - either by
473 // value or by reference, make sure CXXThisValue points to the correct
474 // object.
475
476 // Get the lvalue for the field (which is a copy of the enclosing object
477 // or contains the address of the enclosing object).
478 LValue thisFieldLValue =
480 if (!lambdaThisCaptureField->getType()->isPointerType()) {
481 // If the enclosing object was captured by value, just use its
482 // address. Sign this pointer.
483 cxxThisValue = thisFieldLValue.getPointer();
484 } else {
485 // Load the lvalue pointed to by the field, since '*this' was captured
486 // by reference.
488 emitLoadOfLValue(thisFieldLValue, SourceLocation()).getValue();
489 }
490 }
491 for (auto *fd : md->getParent()->fields()) {
492 if (fd->hasCapturedVLAType())
493 cgm.errorNYI(loc, "lambda captured VLA type");
494 }
495 } else {
496 // Not in a lambda; just use 'this' from the method.
497 // FIXME: Should we generate a new load for each use of 'this'? The fast
498 // register allocator would be happier...
500 }
501
504 }
505}
506
508 // Pop any cleanups that might have been associated with the
509 // parameters. Do this in whatever block we're currently in; it's
510 // important to do this before we enter the return block or return
511 // edges will be *really* confused.
512 // TODO(cir): Use prologueCleanupDepth here.
513 bool hasCleanups = ehStack.stable_begin() != prologueCleanupDepth;
514 if (hasCleanups) {
516 // FIXME(cir): should we clearInsertionPoint? breaks many testcases
518 }
519}
520
/// Emit the statements of a function body into the current insertion point.
mlir::LogicalResult CIRGenFunction::emitFunctionBody(const clang::Stmt *body) {
  // We start with function level scope for variables.
  // NOTE(review): one source line is missing here in this copy of the file
  // (numbering jumps 522 -> 524) — restore from upstream.

  // A top-level compound statement shares the function-level scope rather
  // than opening a fresh one.
  if (const CompoundStmt *block = dyn_cast<CompoundStmt>(body))
    return emitCompoundStmtWithoutScope(*block);

  return emitStmt(body, /*useCurrentScope=*/true);
}
530
531static void eraseEmptyAndUnusedBlocks(cir::FuncOp func) {
532 // Remove any leftover blocks that are unreachable and empty, since they do
533 // not represent unreachable code useful for warnings nor anything deemed
534 // useful in general.
535 SmallVector<mlir::Block *> blocksToDelete;
536 for (mlir::Block &block : func.getBlocks()) {
537 if (block.empty() && block.getUses().empty())
538 blocksToDelete.push_back(&block);
539 }
540 for (mlir::Block *block : blocksToDelete)
541 block->erase();
542}
543
/// Drive code generation for one function: set up locations and the entry
/// block, build the argument list, emit the appropriate body kind, verify,
/// and finish the function.
cir::FuncOp CIRGenFunction::generateCode(clang::GlobalDecl gd, cir::FuncOp fn,
                                         cir::FuncType funcType) {
  const auto funcDecl = cast<FunctionDecl>(gd.getDecl());
  curGD = gd;

  SourceLocation loc = funcDecl->getLocation();
  Stmt *body = funcDecl->getBody();
  // Bodyless declarations fall back to the declaration's own location.
  SourceRange bodyRange =
      body ? body->getSourceRange() : funcDecl->getLocation();

  // Establish the current source location for the duration of this function.
  SourceLocRAIIObject fnLoc{*this, loc.isValid() ? getLoc(loc)
                                                 : builder.getUnknownLoc()};

  auto validMLIRLoc = [&](clang::SourceLocation clangLoc) {
    return clangLoc.isValid() ? getLoc(clangLoc) : builder.getUnknownLoc();
  };
  const mlir::Location fusedLoc = mlir::FusedLoc::get(
      // NOTE(review): one source line is missing inside this call in this
      // copy of the file (numbering jumps 560 -> 562) — restore from
      // upstream.
      {validMLIRLoc(bodyRange.getBegin()), validMLIRLoc(bodyRange.getEnd())});
  mlir::Block *entryBB = fn.addEntryBlock();

  FunctionArgList args;
  QualType retTy = buildFunctionArgList(gd, args);

  // Create a scope in the symbol table to hold variable declarations.
  // NOTE(review): one source line is missing here in this copy of the file
  // (numbering jumps 568 -> 570) — restore from upstream.
  {
    LexicalScope lexScope(*this, fusedLoc, entryBB);

    startFunction(gd, retTy, fn, funcType, args, loc, bodyRange.getBegin());

    if (isa<CXXDestructorDecl>(funcDecl)) {
      emitDestructorBody(args);
    } else if (isa<CXXConstructorDecl>(funcDecl)) {
      // NOTE(review): the constructor-body emission line is missing in this
      // copy of the file (numbering jumps 577 -> 579) — restore from
      // upstream.
    } else if (getLangOpts().CUDA && !getLangOpts().CUDAIsDevice &&
               funcDecl->hasAttr<CUDAGlobalAttr>()) {
      getCIRGenModule().errorNYI(bodyRange, "CUDA kernel");
    } else if (isa<CXXMethodDecl>(funcDecl) &&
               cast<CXXMethodDecl>(funcDecl)->isLambdaStaticInvoker()) {
      // The lambda static invoker function is special, because it forwards or
      // clones the body of the function call operator (but is actually
      // static).
      // NOTE(review): the emission line for this branch is missing in this
      // copy of the file (numbering jumps 586 -> 588) — restore from
      // upstream.
    } else if (funcDecl->isDefaulted() && isa<CXXMethodDecl>(funcDecl) &&
               (cast<CXXMethodDecl>(funcDecl)->isCopyAssignmentOperator() ||
                cast<CXXMethodDecl>(funcDecl)->isMoveAssignmentOperator())) {
      // Implicit copy-assignment gets the same special treatment as implicit
      // copy-constructors.
      // NOTE(review): the emission line for this branch is missing in this
      // copy of the file (numbering jumps 592 -> 594) — restore from
      // upstream.
    } else if (body) {
      if (mlir::failed(emitFunctionBody(body))) {
        return nullptr;
      }
    } else {
      // Anything without a body should have been handled above.
      llvm_unreachable("no definition for normal function");
    }

    if (mlir::failed(fn.verifyBody()))
      return nullptr;

    finishFunction(bodyRange.getEnd());
  }

  // NOTE(review): one source line is missing here in this copy of the file
  // (numbering jumps 608 -> 610) — restore from upstream.
  return fn;
}
612
615 const auto *ctor = cast<CXXConstructorDecl>(curGD.getDecl());
616 CXXCtorType ctorType = curGD.getCtorType();
617
618 assert((cgm.getTarget().getCXXABI().hasConstructorVariants() ||
619 ctorType == Ctor_Complete) &&
620 "can only generate complete ctor for this ABI");
621
622 if (ctorType == Ctor_Complete && isConstructorDelegationValid(ctor) &&
623 cgm.getTarget().getCXXABI().hasConstructorVariants()) {
624 emitDelegateCXXConstructorCall(ctor, Ctor_Base, args, ctor->getEndLoc());
625 return;
626 }
627
628 const FunctionDecl *definition = nullptr;
629 Stmt *body = ctor->getBody(definition);
630 assert(definition == ctor && "emitting wrong constructor body");
631
632 if (isa_and_nonnull<CXXTryStmt>(body)) {
633 cgm.errorNYI(ctor->getSourceRange(), "emitConstructorBody: try body");
634 return;
635 }
636
639
640 // TODO: in restricted cases, we can emit the vbase initializers of a
641 // complete ctor and then delegate to the base ctor.
642
643 // Emit the constructor prologue, i.e. the base and member initializers.
644 emitCtorPrologue(ctor, ctorType, args);
645
646 // TODO(cir): propagate this result via mlir::logical result. Just unreachable
647 // now just to have it handled.
648 if (mlir::failed(emitStmt(body, true))) {
649 cgm.errorNYI(ctor->getSourceRange(),
650 "emitConstructorBody: emit body statement failed.");
651 return;
652 }
653}
654
655/// Emits the body of the current destructor.
657 const CXXDestructorDecl *dtor = cast<CXXDestructorDecl>(curGD.getDecl());
658 CXXDtorType dtorType = curGD.getDtorType();
659
660 // For an abstract class, non-base destructors are never used (and can't
661 // be emitted in general, because vbase dtors may not have been validated
662 // by Sema), but the Itanium ABI doesn't make them optional and Clang may
663 // in fact emit references to them from other compilations, so emit them
664 // as functions containing a trap instruction.
665 if (dtorType != Dtor_Base && dtor->getParent()->isAbstract()) {
666 cgm.errorNYI(dtor->getSourceRange(), "abstract base class destructors");
667 return;
668 }
669
670 Stmt *body = dtor->getBody();
672
673 // The call to operator delete in a deleting destructor happens
674 // outside of the function-try-block, which means it's always
675 // possible to delegate the destructor body to the complete
676 // destructor. Do so.
677 if (dtorType == Dtor_Deleting) {
678 cgm.errorNYI(dtor->getSourceRange(), "deleting destructor");
679 return;
680 }
681
682 // If the body is a function-try-block, enter the try before
683 // anything else.
684 const bool isTryBody = isa_and_nonnull<CXXTryStmt>(body);
685 if (isTryBody)
686 cgm.errorNYI(dtor->getSourceRange(), "function-try-block destructor");
687
690
691 // If this is the complete variant, just invoke the base variant;
692 // the epilogue will destruct the virtual bases. But we can't do
693 // this optimization if the body is a function-try-block, because
694 // we'd introduce *two* handler blocks. In the Microsoft ABI, we
695 // always delegate because we might not have a definition in this TU.
696 switch (dtorType) {
697 case Dtor_Unified:
698 llvm_unreachable("not expecting a unified dtor");
699 case Dtor_Comdat:
700 llvm_unreachable("not expecting a COMDAT");
701 case Dtor_Deleting:
702 llvm_unreachable("already handled deleting case");
703
704 case Dtor_Complete:
705 assert((body || getTarget().getCXXABI().isMicrosoft()) &&
706 "can't emit a dtor without a body for non-Microsoft ABIs");
707
709
710 if (!isTryBody) {
712 emitCXXDestructorCall(dtor, Dtor_Base, /*forVirtualBase=*/false,
713 /*delegating=*/false, loadCXXThisAddress(), thisTy);
714 break;
715 }
716
717 // Fallthrough: act like we're in the base variant.
718 [[fallthrough]];
719
720 case Dtor_Base:
721 assert(body);
722
725
726 if (isTryBody) {
727 cgm.errorNYI(dtor->getSourceRange(), "function-try-block destructor");
728 } else if (body) {
729 (void)emitStmt(body, /*useCurrentScope=*/true);
730 } else {
731 assert(dtor->isImplicit() && "bodyless dtor not implicit");
732 // nothing to do besides what's in the epilogue
733 }
734 // -fapple-kext must inline any call to this dtor into
735 // the caller's body.
737
738 break;
739 }
740
742
743 // Exit the try if applicable.
744 if (isTryBody)
745 cgm.errorNYI(dtor->getSourceRange(), "function-try-block destructor");
746}
747
748/// Given a value of type T* that may not be to a complete object, construct
749/// an l-vlaue withi the natural pointee alignment of T.
751 QualType ty) {
752 // FIXME(cir): is it safe to assume Op->getResult(0) is valid? Perhaps
753 // assert on the result type first.
754 LValueBaseInfo baseInfo;
756 CharUnits align = cgm.getNaturalTypeAlignment(ty, &baseInfo);
757 return makeAddrLValue(Address(val, align), ty, baseInfo);
758}
759
761 QualType ty) {
762 LValueBaseInfo baseInfo;
763 CharUnits alignment = cgm.getNaturalTypeAlignment(ty, &baseInfo);
764 Address addr(val, convertTypeForMem(ty), alignment);
766 return makeAddrLValue(addr, ty, baseInfo);
767}
768
770 FunctionArgList &args) {
771 const auto *fd = cast<FunctionDecl>(gd.getDecl());
772 QualType retTy = fd->getReturnType();
773
774 const auto *md = dyn_cast<CXXMethodDecl>(fd);
775 if (md && md->isInstance()) {
776 if (cgm.getCXXABI().hasThisReturn(gd))
777 cgm.errorNYI(fd->getSourceRange(), "this return");
778 else if (cgm.getCXXABI().hasMostDerivedReturn(gd))
779 cgm.errorNYI(fd->getSourceRange(), "most derived return");
780 cgm.getCXXABI().buildThisParam(*this, args);
781 }
782
783 if (const auto *cd = dyn_cast<CXXConstructorDecl>(fd))
784 if (cd->getInheritedConstructor())
785 cgm.errorNYI(fd->getSourceRange(),
786 "buildFunctionArgList: inherited constructor");
787
788 for (auto *param : fd->parameters())
789 args.push_back(param);
790
791 if (md && (isa<CXXConstructorDecl>(md) || isa<CXXDestructorDecl>(md)))
792 cgm.getCXXABI().addImplicitStructorParams(*this, retTy, args);
793
794 return retTy;
795}
796
797/// Emit code to compute a designator that specifies the location
798/// of the expression.
799/// FIXME: document this function better.
801 // FIXME: ApplyDebugLocation DL(*this, e);
802 switch (e->getStmtClass()) {
803 default:
805 std::string("l-value not implemented for '") +
806 e->getStmtClassName() + "'");
807 return LValue();
808 case Expr::ArraySubscriptExprClass:
810 case Expr::UnaryOperatorClass:
812 case Expr::StringLiteralClass:
814 case Expr::MemberExprClass:
816 case Expr::CompoundLiteralExprClass:
818 case Expr::BinaryOperatorClass:
820 case Expr::CompoundAssignOperatorClass: {
821 QualType ty = e->getType();
822 if (ty->getAs<AtomicType>()) {
823 cgm.errorNYI(e->getSourceRange(),
824 "CompoundAssignOperator with AtomicType");
825 return LValue();
826 }
827 if (!ty->isAnyComplexType())
829
831 }
832 case Expr::CallExprClass:
833 case Expr::CXXMemberCallExprClass:
834 case Expr::CXXOperatorCallExprClass:
835 case Expr::UserDefinedLiteralClass:
837 case Expr::ParenExprClass:
838 return emitLValue(cast<ParenExpr>(e)->getSubExpr());
839 case Expr::GenericSelectionExprClass:
840 return emitLValue(cast<GenericSelectionExpr>(e)->getResultExpr());
841 case Expr::DeclRefExprClass:
843 case Expr::CStyleCastExprClass:
844 case Expr::CXXStaticCastExprClass:
845 case Expr::CXXDynamicCastExprClass:
846 case Expr::ImplicitCastExprClass:
848 case Expr::MaterializeTemporaryExprClass:
850 case Expr::ChooseExprClass:
851 return emitLValue(cast<ChooseExpr>(e)->getChosenSubExpr());
852 }
853}
854
855static std::string getVersionedTmpName(llvm::StringRef name, unsigned cnt) {
856 SmallString<256> buffer;
857 llvm::raw_svector_ostream out(buffer);
858 out << name << cnt;
859 return std::string(out.str());
860}
861
865
869
/// Zero-initialize the object at \p destPtr of type \p ty by storing a
/// #cir.zero value (the CIR analogue of LLVM codegen's memset-to-zero).
void CIRGenFunction::emitNullInitialization(mlir::Location loc, Address destPtr,
                                            QualType ty) {
  // Ignore empty classes in C++.
  // NOTE(review): one source line is missing here in this copy of the file
  // (numbering jumps 872 -> 874) — restore from upstream.
  if (const auto *rd = ty->getAsCXXRecordDecl(); rd && rd->isEmpty())
    return;

  // Cast the dest ptr to the appropriate i8 pointer type.
  if (builder.isInt8Ty(destPtr.getElementType())) {
    cgm.errorNYI(loc, "Cast the dest ptr to the appropriate i8 pointer type");
  }

  // Get size and alignment info for this aggregate.
  const CharUnits size = getContext().getTypeSizeInChars(ty);
  if (size.isZero()) {
    // But note that getTypeInfo returns 0 for a VLA.
    if (isa<VariableArrayType>(getContext().getAsArrayType(ty))) {
      cgm.errorNYI(loc,
                   "emitNullInitialization for zero size VariableArrayType");
    } else {
      // Genuinely zero-sized: nothing to initialize.
      return;
    }
  }

  // If the type contains a pointer to data member we can't memset it to zero.
  // Instead, create a null constant and copy it to the destination.
  // TODO: there are other patterns besides zero that we can usefully memset,
  // like -1, which happens to be the pattern used by member-pointers.
  if (!cgm.getTypes().isZeroInitializable(ty)) {
    cgm.errorNYI(loc, "type is not zero initializable");
  }

  // In LLVM Codegen: otherwise, just memset the whole thing to zero using
  // Builder.CreateMemSet. In CIR just emit a store of #cir.zero to the
  // respective address.
  // Builder.CreateMemSet(DestPtr, Builder.getInt8(0), SizeVal, false);
  const mlir::Value zeroValue = builder.getNullValue(convertType(ty), loc);
  builder.createStore(loc, zeroValue, destPtr);
}
909
910// TODO(cir): should be shared with LLVM codegen.
912 const Expr *e = ce->getSubExpr();
913
914 if (ce->getCastKind() == CK_UncheckedDerivedToBase)
915 return false;
916
917 if (isa<CXXThisExpr>(e->IgnoreParens())) {
918 // We always assume that 'this' is never null.
919 return false;
920 }
921
922 if (const ImplicitCastExpr *ice = dyn_cast<ImplicitCastExpr>(ce)) {
923 // And that glvalue casts are never null.
924 if (ice->isGLValue())
925 return false;
926 }
927
928 return true;
929}
930
931/// Computes the length of an array in elements, as well as the base
932/// element type and a properly-typed first element pointer.
933mlir::Value
935 QualType &baseType, Address &addr) {
936 const clang::ArrayType *arrayType = origArrayType;
937
938 // If it's a VLA, we have to load the stored size. Note that
939 // this is the size of the VLA in bytes, not its size in elements.
942 cgm.errorNYI(*currSrcLoc, "VLAs");
943 return builder.getConstInt(*currSrcLoc, SizeTy, 0);
944 }
945
946 uint64_t countFromCLAs = 1;
947 QualType eltType;
948
949 auto cirArrayType = mlir::dyn_cast<cir::ArrayType>(addr.getElementType());
950
951 while (cirArrayType) {
953 countFromCLAs *= cirArrayType.getSize();
954 eltType = arrayType->getElementType();
955
956 cirArrayType =
957 mlir::dyn_cast<cir::ArrayType>(cirArrayType.getElementType());
958
959 arrayType = getContext().getAsArrayType(arrayType->getElementType());
960 assert((!cirArrayType || arrayType) &&
961 "CIR and Clang types are out-of-sync");
962 }
963
964 if (arrayType) {
965 // From this point onwards, the Clang array type has been emitted
966 // as some other type (probably a packed struct). Compute the array
967 // size, and just emit the 'begin' expression as a bitcast.
968 cgm.errorNYI(*currSrcLoc, "length for non-array underlying types");
969 }
970
971 baseType = eltType;
972 return builder.getConstInt(*currSrcLoc, SizeTy, countFromCLAs);
973}
974
976 mlir::Value ptrValue, QualType ty, SourceLocation loc,
977 SourceLocation assumptionLoc, int64_t alignment, mlir::Value offsetValue) {
979 return cir::AssumeAlignedOp::create(builder, getLoc(assumptionLoc), ptrValue,
980 alignment, offsetValue);
981}
982
984 mlir::Value ptrValue, const Expr *expr, SourceLocation assumptionLoc,
985 int64_t alignment, mlir::Value offsetValue) {
986 QualType ty = expr->getType();
987 SourceLocation loc = expr->getExprLoc();
988 return emitAlignmentAssumption(ptrValue, ty, loc, assumptionLoc, alignment,
989 offsetValue);
990}
991
992// TODO(cir): Most of this function can be shared between CIRGen
993// and traditional LLVM codegen
995 assert(type->isVariablyModifiedType() &&
996 "Must pass variably modified type to EmitVLASizes!");
997
998 // We're going to walk down into the type and look for VLA
999 // expressions.
1000 do {
1001 assert(type->isVariablyModifiedType());
1002
1003 const Type *ty = type.getTypePtr();
1004 switch (ty->getTypeClass()) {
1005 case Type::CountAttributed:
1006 case Type::PackIndexing:
1007 case Type::ArrayParameter:
1008 case Type::HLSLAttributedResource:
1009 case Type::HLSLInlineSpirv:
1010 case Type::PredefinedSugar:
1011 cgm.errorNYI("CIRGenFunction::emitVariablyModifiedType");
1012 break;
1013
1014#define TYPE(Class, Base)
1015#define ABSTRACT_TYPE(Class, Base)
1016#define NON_CANONICAL_TYPE(Class, Base)
1017#define DEPENDENT_TYPE(Class, Base) case Type::Class:
1018#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base)
1019#include "clang/AST/TypeNodes.inc"
1020 llvm_unreachable(
1021 "dependent type must be resolved before the CIR codegen");
1022
1023 // These types are never variably-modified.
1024 case Type::Builtin:
1025 case Type::Complex:
1026 case Type::Vector:
1027 case Type::ExtVector:
1028 case Type::ConstantMatrix:
1029 case Type::Record:
1030 case Type::Enum:
1031 case Type::Using:
1032 case Type::TemplateSpecialization:
1033 case Type::ObjCTypeParam:
1034 case Type::ObjCObject:
1035 case Type::ObjCInterface:
1036 case Type::ObjCObjectPointer:
1037 case Type::BitInt:
1038 llvm_unreachable("type class is never variably-modified!");
1039
1040 case Type::Adjusted:
1041 type = cast<clang::AdjustedType>(ty)->getAdjustedType();
1042 break;
1043
1044 case Type::Decayed:
1045 type = cast<clang::DecayedType>(ty)->getPointeeType();
1046 break;
1047
1048 case Type::Pointer:
1049 type = cast<clang::PointerType>(ty)->getPointeeType();
1050 break;
1051
1052 case Type::BlockPointer:
1053 type = cast<clang::BlockPointerType>(ty)->getPointeeType();
1054 break;
1055
1056 case Type::LValueReference:
1057 case Type::RValueReference:
1058 type = cast<clang::ReferenceType>(ty)->getPointeeType();
1059 break;
1060
1061 case Type::MemberPointer:
1062 type = cast<clang::MemberPointerType>(ty)->getPointeeType();
1063 break;
1064
1065 case Type::ConstantArray:
1066 case Type::IncompleteArray:
1067 // Losing element qualification here is fine.
1068 type = cast<clang::ArrayType>(ty)->getElementType();
1069 break;
1070
1071 case Type::VariableArray: {
1072 cgm.errorNYI("CIRGenFunction::emitVariablyModifiedType VLA");
1073 break;
1074 }
1075
1076 case Type::FunctionProto:
1077 case Type::FunctionNoProto:
1078 type = cast<clang::FunctionType>(ty)->getReturnType();
1079 break;
1080
1081 case Type::Paren:
1082 case Type::TypeOf:
1083 case Type::UnaryTransform:
1084 case Type::Attributed:
1085 case Type::BTFTagAttributed:
1086 case Type::SubstTemplateTypeParm:
1087 case Type::MacroQualified:
1088 // Keep walking after single level desugaring.
1089 type = type.getSingleStepDesugaredType(getContext());
1090 break;
1091
1092 case Type::Typedef:
1093 case Type::Decltype:
1094 case Type::Auto:
1095 case Type::DeducedTemplateSpecialization:
1096 // Stop walking: nothing to do.
1097 return;
1098
1099 case Type::TypeOfExpr:
1100 // Stop walking: emit typeof expression.
1101 emitIgnoredExpr(cast<clang::TypeOfExprType>(ty)->getUnderlyingExpr());
1102 return;
1103
1104 case Type::Atomic:
1105 type = cast<clang::AtomicType>(ty)->getValueType();
1106 break;
1107
1108 case Type::Pipe:
1109 type = cast<clang::PipeType>(ty)->getElementType();
1110 break;
1111 }
1112 } while (type->isVariablyModifiedType());
1113}
1114
1116 if (getContext().getBuiltinVaListType()->isArrayType())
1117 return emitPointerWithAlignment(e);
1118 return emitLValue(e).getAddress();
1119}
1120
1121} // namespace clang::CIRGen
Defines the clang::Expr interface and subclasses for C++ expressions.
__device__ __2f16 float __ockl_bool s
APSInt & getInt()
Definition APValue.h:489
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition ASTContext.h:220
SourceManager & getSourceManager()
Definition ASTContext.h:833
CharUnits getDeclAlign(const Decl *D, bool ForAlignof=false) const
Return a conservative estimate of the alignment of the specified decl D.
const ArrayType * getAsArrayType(QualType T) const
Type Query functions.
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
Represents an array type, per C99 6.7.5.2 - Array Declarators.
Definition TypeBase.h:3722
mlir::Type getElementType() const
Definition Address.h:101
void forceCleanup()
Force the emission of cleanups now, instead of waiting until this object is destroyed.
static bool isConstructorDelegationValid(const clang::CXXConstructorDecl *ctor)
Checks whether the given constructor is a valid subject for the complete-to-base constructor delegati...
mlir::Type convertType(clang::QualType t)
static cir::TypeEvaluationKind getEvaluationKind(clang::QualType type)
Return the cir::TypeEvaluationKind of QualType type.
clang::GlobalDecl curGD
The GlobalDecl for the current function being compiled or the global variable currently being initial...
EHScopeStack::stable_iterator prologueCleanupDepth
The cleanup depth enclosing all the cleanups associated with the parameters.
cir::FuncOp generateCode(clang::GlobalDecl gd, cir::FuncOp fn, cir::FuncType funcType)
Address emitPointerWithAlignment(const clang::Expr *expr, LValueBaseInfo *baseInfo=nullptr)
Given an expression with a pointer type, emit the value and compute our best estimate of the alignmen...
void emitVariablyModifiedType(QualType ty)
RValue emitLoadOfLValue(LValue lv, SourceLocation loc)
Given an expression that represents a value lvalue, this method emits the address of the lvalue,...
const clang::LangOptions & getLangOpts() const
LValue makeNaturalAlignPointeeAddrLValue(mlir::Value v, clang::QualType t)
Given a value of type T* that may not point to a complete object, construct an l-value with the natural...
LValue emitMemberExpr(const MemberExpr *e)
const TargetInfo & getTarget() const
LValue emitLValue(const clang::Expr *e)
Emit code to compute a designator that specifies the location of the expression.
const clang::Decl * curFuncDecl
LValue emitLValueForLambdaField(const FieldDecl *field)
LValue makeNaturalAlignAddrLValue(mlir::Value val, QualType ty)
bool constantFoldsToSimpleInteger(const clang::Expr *cond, llvm::APSInt &resultInt, bool allowLabels=false)
If the specified expression does not fold to a constant, or if it does fold but contains a label,...
LValue emitComplexCompoundAssignmentLValue(const CompoundAssignOperator *e)
mlir::Location getLoc(clang::SourceLocation srcLoc)
Helpers to convert Clang's SourceLocation to a MLIR Location.
bool constantFoldsToBool(const clang::Expr *cond, bool &resultBool, bool allowLabels=false)
If the specified expression does not fold to a constant, or if it does but contains a label,...
LValue emitStringLiteralLValue(const StringLiteral *e)
void emitDelegateCXXConstructorCall(const clang::CXXConstructorDecl *ctor, clang::CXXCtorType ctorType, const FunctionArgList &args, clang::SourceLocation loc)
mlir::Value emitArrayLength(const clang::ArrayType *arrayType, QualType &baseType, Address &addr)
Computes the length of an array in elements, as well as the base element type and a properly-typed fi...
void emitNullInitialization(mlir::Location loc, Address destPtr, QualType ty)
LValue emitArraySubscriptExpr(const clang::ArraySubscriptExpr *e)
llvm::ScopedHashTableScope< const clang::Decl *, mlir::Value > SymTableScopeTy
mlir::Operation * curFn
The current function or global initializer that is generated code for.
EHScopeStack ehStack
Tracks function scope overall cleanup handling.
std::optional< mlir::Value > fnRetAlloca
The compiler-generated variable that holds the return value.
void emitImplicitAssignmentOperatorBody(FunctionArgList &args)
mlir::Type convertTypeForMem(QualType t)
clang::QualType buildFunctionArgList(clang::GlobalDecl gd, FunctionArgList &args)
void emitCtorPrologue(const clang::CXXConstructorDecl *ctor, clang::CXXCtorType ctorType, FunctionArgList &args)
This routine generates necessary code to initialize base classes and non-static data members belongin...
mlir::Value emitAlloca(llvm::StringRef name, mlir::Type ty, mlir::Location loc, clang::CharUnits alignment, bool insertIntoFnEntryBlock, mlir::Value arraySize=nullptr)
LValue emitCompoundAssignmentLValue(const clang::CompoundAssignOperator *e)
Address returnValue
The temporary alloca to hold the return value.
void finishFunction(SourceLocation endLoc)
mlir::LogicalResult emitFunctionBody(const clang::Stmt *body)
LValue emitUnaryOpLValue(const clang::UnaryOperator *e)
clang::FieldDecl * lambdaThisCaptureField
const clang::Decl * curCodeDecl
This is the inner-most code context, which includes blocks.
void emitConstructorBody(FunctionArgList &args)
LValue emitCallExprLValue(const clang::CallExpr *e)
bool shouldNullCheckClassCastValue(const CastExpr *ce)
LValue emitBinaryOperatorLValue(const BinaryOperator *e)
void startFunction(clang::GlobalDecl gd, clang::QualType returnType, cir::FuncOp fn, cir::FuncType funcType, FunctionArgList args, clang::SourceLocation loc, clang::SourceLocation startLoc)
Emit code for the start of a function.
unsigned counterRefTmp
Hold counters for incrementally naming temporaries.
mlir::MLIRContext & getMLIRContext()
void emitDestructorBody(FunctionArgList &args)
Emits the body of the current destructor.
LValue emitCastLValue(const CastExpr *e)
Casts are never lvalues unless that cast is to a reference type.
bool containsLabel(const clang::Stmt *s, bool ignoreCaseStmts=false)
Return true if the statement contains a label in it.
LValue emitDeclRefLValue(const clang::DeclRefExpr *e)
llvm::DenseMap< const clang::ValueDecl *, clang::FieldDecl * > lambdaCaptureFields
mlir::Value emitAlignmentAssumption(mlir::Value ptrValue, QualType ty, SourceLocation loc, SourceLocation assumptionLoc, int64_t alignment, mlir::Value offsetValue=nullptr)
LValue makeAddrLValue(Address addr, QualType ty, AlignmentSource source=AlignmentSource::Type)
void emitCXXDestructorCall(const CXXDestructorDecl *dd, CXXDtorType type, bool forVirtualBase, bool delegating, Address thisAddr, QualType thisTy)
void emitLambdaStaticInvokeBody(const CXXMethodDecl *md)
CIRGenFunction(CIRGenModule &cgm, CIRGenBuilderTy &builder, bool suppressNewContext=false)
std::optional< mlir::Location > currSrcLoc
Use to track source locations across nested visitor traversals.
LValue emitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *e)
clang::ASTContext & getContext() const
void setAddrOfLocalVar(const clang::VarDecl *vd, Address addr)
Set the address of a local variable.
mlir::LogicalResult emitStmt(const clang::Stmt *s, bool useCurrentScope, llvm::ArrayRef< const Attr * > attrs={})
void popCleanupBlocks(EHScopeStack::stable_iterator oldCleanupStackDepth)
Takes the old cleanup stack size and emits the cleanup blocks that have been added.
Address emitVAListRef(const Expr *e)
Build a "reference" to a va_list; this is either the address or the value of the expression,...
mlir::LogicalResult emitCompoundStmtWithoutScope(const clang::CompoundStmt &s, Address *lastValue=nullptr, AggValueSlot slot=AggValueSlot::ignored())
void emitIgnoredExpr(const clang::Expr *e)
Emit code to compute the specified expression, ignoring the result.
LValue emitCompoundLiteralLValue(const CompoundLiteralExpr *e)
This class organizes the cross-function state that is used while generating CIR code.
DiagnosticBuilder errorNYI(SourceLocation, llvm::StringRef)
Helpers to emit "not yet implemented" error diagnostics.
Type for representing both the decl and type of parameters to a function.
Definition CIRGenCall.h:191
Address getAddress() const
mlir::Value getPointer() const
mlir::Value getValue() const
Return the value of this scalar value.
Definition CIRGenValue.h:56
Represents a C++ destructor within a class.
Definition DeclCXX.h:2869
const CXXRecordDecl * getParent() const
Return the parent of this method declaration, which is the class in which this method is defined.
Definition DeclCXX.h:2255
QualType getFunctionObjectParameterType() const
Definition DeclCXX.h:2279
bool isAbstract() const
Determine whether this class has a pure virtual function.
Definition DeclCXX.h:1221
bool isEmpty() const
Determine whether this is an empty class in the sense of (C++11 [meta.unary.prop]).
Definition DeclCXX.h:1186
CastExpr - Base class for type casts, including both implicit casts (ImplicitCastExpr) and explicit c...
Definition Expr.h:3610
CastKind getCastKind() const
Definition Expr.h:3654
Expr * getSubExpr()
Definition Expr.h:3660
CharUnits - This is an opaque type for sizes expressed in character units.
Definition CharUnits.h:38
bool isZero() const
isZero - Test whether the quantity equals zero.
Definition CharUnits.h:122
CompoundStmt - This represents a group of statements like { stmt stmt }.
Definition Stmt.h:1720
bool isImplicit() const
isImplicit - Indicates whether the declaration was implicitly generated by the implementation.
Definition DeclBase.h:593
Decl * getNonClosureContext()
Find the innermost non-closure ancestor of this declaration, walking up through blocks,...
This represents one expression.
Definition Expr.h:112
bool EvaluateAsInt(EvalResult &Result, const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects, bool InConstantContext=false) const
EvaluateAsInt - Return true if this is a constant which we can fold and convert to an integer,...
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3081
QualType getType() const
Definition Expr.h:144
Represents a function declaration or definition.
Definition Decl.h:2000
Stmt * getBody(const FunctionDecl *&Definition) const
Retrieve the body (definition) of the function.
Definition Decl.cpp:3271
SourceRange getSourceRange() const override LLVM_READONLY
Source range that this declaration covers.
Definition Decl.cpp:4537
GlobalDecl - represents a global declaration.
Definition GlobalDecl.h:57
const Decl * getDecl() const
Definition GlobalDecl.h:106
ImplicitCastExpr - Allows us to explicitly represent implicit type conversions, which have no direct ...
Definition Expr.h:3787
Represents an unpacked "presumed" location which can be presented to the user.
unsigned getColumn() const
Return the presumed column number of this location.
const char * getFilename() const
Return the presumed filename of this location.
unsigned getLine() const
Return the presumed line number of this location.
A (possibly-)qualified type.
Definition TypeBase.h:937
bool isTriviallyCopyableType(const ASTContext &Context) const
Return true if this is a trivially copyable type (C++0x [basic.types]p9)
Definition Type.cpp:2867
Encodes a location in the source.
bool isValid() const
Return true if this is a valid SourceLocation object.
This class handles loading and caching of source files into memory.
PresumedLoc getPresumedLoc(SourceLocation Loc, bool UseLineDirectives=true) const
Returns the "presumed" location of a SourceLocation specifies.
A trivial tuple used to represent a source range.
SourceLocation getEnd() const
SourceLocation getBegin() const
Stmt - This represents one statement.
Definition Stmt.h:85
StmtClass getStmtClass() const
Definition Stmt.h:1472
SourceRange getSourceRange() const LLVM_READONLY
SourceLocation tokens are not useful in isolation - they are low level value objects created/interpre...
Definition Stmt.cpp:334
const char * getStmtClassName() const
Definition Stmt.cpp:87
bool isVoidType() const
Definition TypeBase.h:8880
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
Definition Type.h:26
bool isAnyComplexType() const
Definition TypeBase.h:8659
TypeClass getTypeClass() const
Definition TypeBase.h:2385
const T * getAs() const
Member-template getAs<specific type>'.
Definition TypeBase.h:9100
QualType getType() const
Definition Decl.h:723
Represents a variable declaration or definition.
Definition Decl.h:926
SourceRange getSourceRange() const override LLVM_READONLY
Source range that this declaration covers.
Definition Decl.cpp:2190
@ Type
The l-value was considered opaque, so the alignment was determined from a type.
@ Decl
The l-value was an access to a declared entity or something equivalently strong, like the address of ...
static std::string getVersionedTmpName(llvm::StringRef name, unsigned cnt)
static bool mayDropFunctionReturn(const ASTContext &astContext, QualType returnType)
static void eraseEmptyAndUnusedBlocks(cir::FuncOp func)
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
const AstTypeMatcher< ArrayType > arrayType
const internal::VariadicDynCastAllOfMatcher< Stmt, Expr > expr
Matches expressions.
CXXCtorType
C++ constructor types.
Definition ABI.h:24
@ Ctor_Base
Base object ctor.
Definition ABI.h:26
@ Ctor_Complete
Complete object ctor.
Definition ABI.h:25
bool isa(CodeGen::Address addr)
Definition Address.h:330
@ CPlusPlus
CXXDtorType
C++ destructor types.
Definition ABI.h:34
@ Dtor_Comdat
The COMDAT used for dtors.
Definition ABI.h:38
@ Dtor_Unified
GCC-style unified dtor.
Definition ABI.h:39
@ Dtor_Base
Base object dtor.
Definition ABI.h:37
@ Dtor_Complete
Complete object dtor.
Definition ABI.h:36
@ Dtor_Deleting
Deleting dtor.
Definition ABI.h:35
U cast(CodeGen::Address addr)
Definition Address.h:327
static bool vtableInitialization()
static bool constructABIArgDirectExtend()
static bool dtorCleanups()
static bool runCleanupsScope()
static bool emitTypeCheck()
static bool generateDebugInfo()
static bool incrementProfileCounter()
Represents a scope, including function bodies, compound statements, and the substatements of if/while...
LexicalScope(CIRGenFunction &cgf, mlir::Location loc, mlir::Block *eb)
mlir::Block * getCleanupBlock(mlir::OpBuilder &builder)
EvalResult is a struct with detailed info about an evaluated expression.
Definition Expr.h:645
APValue Val
Val - This is the value the expression can be folded to.
Definition Expr.h:647