
clang 22.0.0git
CIRGenExprScalar.cpp
1//===----------------------------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Emit Expr nodes with scalar CIR types as CIR code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CIRGenFunction.h"
14#include "CIRGenValue.h"
15
16#include "clang/AST/Expr.h"
19
20#include "mlir/IR/Location.h"
21#include "mlir/IR/Value.h"
22
23#include <cassert>
24#include <utility>
25
26using namespace clang;
27using namespace clang::CIRGen;
28
29namespace {
30
31struct BinOpInfo {
32 mlir::Value lhs;
33 mlir::Value rhs;
34 SourceRange loc;
35 QualType fullType; // Type of operands and result
36 QualType compType; // Type used for computations. Element type
37 // for vectors, otherwise same as FullType.
38 BinaryOperator::Opcode opcode; // Opcode of BinOp to perform
39 FPOptions fpfeatures;
40  const Expr *e; // Entire expr, used to report unsupported operations. May not be a binop.
41
42 /// Check if the binop computes a division or a remainder.
43 bool isDivRemOp() const {
44 return opcode == BO_Div || opcode == BO_Rem || opcode == BO_DivAssign ||
45 opcode == BO_RemAssign;
46 }
47
48 /// Check if the binop can result in integer overflow.
49 bool mayHaveIntegerOverflow() const {
50 // Without constant input, we can't rule out overflow.
51 auto lhsci = lhs.getDefiningOp<cir::ConstantOp>();
52 auto rhsci = rhs.getDefiningOp<cir::ConstantOp>();
53 if (!lhsci || !rhsci)
54 return true;
55
57 // TODO(cir): For now we just assume that we might overflow
58 return true;
59 }
60
61 /// Check if at least one operand is a fixed point type. In such cases,
62  /// this operation did not follow the usual arithmetic conversions, and the
63  /// operands might not be of the same type.
64 bool isFixedPointOp() const {
65 // We cannot simply check the result type since comparison operations
66 // return an int.
67 if (const auto *binOp = llvm::dyn_cast<BinaryOperator>(e)) {
68 QualType lhstype = binOp->getLHS()->getType();
69 QualType rhstype = binOp->getRHS()->getType();
70 return lhstype->isFixedPointType() || rhstype->isFixedPointType();
71 }
72 if (const auto *unop = llvm::dyn_cast<UnaryOperator>(e))
73 return unop->getSubExpr()->getType()->isFixedPointType();
74 return false;
75 }
76};
77
78class ScalarExprEmitter : public StmtVisitor<ScalarExprEmitter, mlir::Value> {
79 CIRGenFunction &cgf;
80 CIRGenBuilderTy &builder;
81  bool ignoreResultAssign = false;
82
83public:
84 ScalarExprEmitter(CIRGenFunction &cgf, CIRGenBuilderTy &builder)
85 : cgf(cgf), builder(builder) {}
86
87 //===--------------------------------------------------------------------===//
88 // Utilities
89 //===--------------------------------------------------------------------===//
90
91 mlir::Value emitComplexToScalarConversion(mlir::Location loc,
92 mlir::Value value, CastKind kind,
93 QualType destTy);
94
95 mlir::Value emitNullValue(QualType ty, mlir::Location loc) {
96 return cgf.cgm.emitNullConstant(ty, loc);
97 }
98
99 mlir::Value emitPromotedValue(mlir::Value result, QualType promotionType) {
100 return builder.createFloatingCast(result, cgf.convertType(promotionType));
101 }
102
103 mlir::Value emitUnPromotedValue(mlir::Value result, QualType exprType) {
104 return builder.createFloatingCast(result, cgf.convertType(exprType));
105 }
106
107 mlir::Value emitPromoted(const Expr *e, QualType promotionType);
108
109 mlir::Value maybePromoteBoolResult(mlir::Value value,
110 mlir::Type dstTy) const {
111 if (mlir::isa<cir::IntType>(dstTy))
112 return builder.createBoolToInt(value, dstTy);
113 if (mlir::isa<cir::BoolType>(dstTy))
114 return value;
115 llvm_unreachable("Can only promote integer or boolean types");
116 }
117
118 //===--------------------------------------------------------------------===//
119 // Visitor Methods
120 //===--------------------------------------------------------------------===//
121
122 mlir::Value Visit(Expr *e) {
123 return StmtVisitor<ScalarExprEmitter, mlir::Value>::Visit(e);
124 }
125
126 mlir::Value VisitStmt(Stmt *s) {
127 llvm_unreachable("Statement passed to ScalarExprEmitter");
128 }
129
130 mlir::Value VisitExpr(Expr *e) {
131 cgf.getCIRGenModule().errorNYI(
132 e->getSourceRange(), "scalar expression kind: ", e->getStmtClassName());
133 return {};
134 }
135
136 mlir::Value VisitPackIndexingExpr(PackIndexingExpr *e) {
137 return Visit(e->getSelectedExpr());
138 }
139
140 mlir::Value VisitParenExpr(ParenExpr *pe) { return Visit(pe->getSubExpr()); }
141
142 mlir::Value VisitGenericSelectionExpr(GenericSelectionExpr *ge) {
143 return Visit(ge->getResultExpr());
144 }
145
146 /// Emits the address of the l-value, then loads and returns the result.
147 mlir::Value emitLoadOfLValue(const Expr *e) {
148 LValue lv = cgf.emitLValue(e);
149    // FIXME: add something akin to EmitLValueAlignmentAssumption(E, V);
150 return cgf.emitLoadOfLValue(lv, e->getExprLoc()).getValue();
151 }
152
153 mlir::Value emitLoadOfLValue(LValue lv, SourceLocation loc) {
154 return cgf.emitLoadOfLValue(lv, loc).getValue();
155 }
156
157 // l-values
158 mlir::Value VisitDeclRefExpr(DeclRefExpr *e) {
159 if (CIRGenFunction::ConstantEmission constant = cgf.tryEmitAsConstant(e))
160 return cgf.emitScalarConstant(constant, e);
161
162 return emitLoadOfLValue(e);
163 }
164
165 mlir::Value VisitIntegerLiteral(const IntegerLiteral *e) {
166 mlir::Type type = cgf.convertType(e->getType());
167 return builder.create<cir::ConstantOp>(
168 cgf.getLoc(e->getExprLoc()), cir::IntAttr::get(type, e->getValue()));
169 }
170
171 mlir::Value VisitFloatingLiteral(const FloatingLiteral *e) {
172 mlir::Type type = cgf.convertType(e->getType());
173 assert(mlir::isa<cir::FPTypeInterface>(type) &&
174 "expect floating-point type");
175 return builder.create<cir::ConstantOp>(
176 cgf.getLoc(e->getExprLoc()), cir::FPAttr::get(type, e->getValue()));
177 }
178
179 mlir::Value VisitCharacterLiteral(const CharacterLiteral *e) {
180 mlir::Type ty = cgf.convertType(e->getType());
181 auto init = cir::IntAttr::get(ty, e->getValue());
182 return builder.create<cir::ConstantOp>(cgf.getLoc(e->getExprLoc()), init);
183 }
184
185 mlir::Value VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *e) {
186 return builder.getBool(e->getValue(), cgf.getLoc(e->getExprLoc()));
187 }
188
189 mlir::Value VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *e) {
190 if (e->getType()->isVoidType())
191 return {};
192
193 return emitNullValue(e->getType(), cgf.getLoc(e->getSourceRange()));
194 }
195
196 mlir::Value VisitOpaqueValueExpr(OpaqueValueExpr *e) {
197 if (e->isGLValue())
198 return emitLoadOfLValue(cgf.getOrCreateOpaqueLValueMapping(e),
199 e->getExprLoc());
200
201 // Otherwise, assume the mapping is the scalar directly.
202 return cgf.getOrCreateOpaqueRValueMapping(e).getValue();
203 }
204
205 mlir::Value VisitCastExpr(CastExpr *e);
206 mlir::Value VisitCallExpr(const CallExpr *e);
207
208 mlir::Value VisitStmtExpr(StmtExpr *e) {
209 CIRGenFunction::StmtExprEvaluation eval(cgf);
210 if (e->getType()->isVoidType()) {
211 (void)cgf.emitCompoundStmt(*e->getSubStmt());
212 return {};
213 }
214
215 Address retAlloca =
216 cgf.createMemTemp(e->getType(), cgf.getLoc(e->getSourceRange()));
217 (void)cgf.emitCompoundStmt(*e->getSubStmt(), &retAlloca);
218
219 return cgf.emitLoadOfScalar(cgf.makeAddrLValue(retAlloca, e->getType()),
220 e->getExprLoc());
221 }
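  // Illustrative example (added for exposition): VisitStmtExpr covers GNU
  // statement expressions, e.g. (assuming some function f()):
  //   int y = ({ int x = f(); x + 1; });
  // The value of the last sub-statement is stored into the temporary created
  // above and then loaded back as the scalar result.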
222
223 mlir::Value VisitArraySubscriptExpr(ArraySubscriptExpr *e) {
224 if (e->getBase()->getType()->isVectorType()) {
226
227 const mlir::Location loc = cgf.getLoc(e->getSourceRange());
228 const mlir::Value vecValue = Visit(e->getBase());
229 const mlir::Value indexValue = Visit(e->getIdx());
230 return cgf.builder.create<cir::VecExtractOp>(loc, vecValue, indexValue);
231 }
232 // Just load the lvalue formed by the subscript expression.
233 return emitLoadOfLValue(e);
234 }
235
236 mlir::Value VisitShuffleVectorExpr(ShuffleVectorExpr *e) {
237 if (e->getNumSubExprs() == 2) {
238 // The undocumented form of __builtin_shufflevector.
239 mlir::Value inputVec = Visit(e->getExpr(0));
240 mlir::Value indexVec = Visit(e->getExpr(1));
241 return cgf.builder.create<cir::VecShuffleDynamicOp>(
242 cgf.getLoc(e->getSourceRange()), inputVec, indexVec);
243 }
244
245 mlir::Value vec1 = Visit(e->getExpr(0));
246 mlir::Value vec2 = Visit(e->getExpr(1));
247
248 // The documented form of __builtin_shufflevector, where the indices are
249 // a variable number of integer constants. The constants will be stored
250 // in an ArrayAttr.
251 SmallVector<mlir::Attribute, 8> indices;
252 for (unsigned i = 2; i < e->getNumSubExprs(); ++i) {
253 indices.push_back(
254 cir::IntAttr::get(cgf.builder.getSInt64Ty(),
255 e->getExpr(i)
256 ->EvaluateKnownConstInt(cgf.getContext())
257 .getSExtValue()));
258 }
259
260 return cgf.builder.create<cir::VecShuffleOp>(
261 cgf.getLoc(e->getSourceRange()), cgf.convertType(e->getType()), vec1,
262 vec2, cgf.builder.getArrayAttr(indices));
263 }
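  // Illustrative example (added for exposition): the two forms handled above
  // look roughly like this at the source level:
  //   __builtin_shufflevector(vec, idxVec);       // two operands, dynamic
  //                                               // indices -> VecShuffleDynamicOp
  //   __builtin_shufflevector(a, b, 0, 4, 1, 5);  // constant indices ->
  //                                               // VecShuffleOp + ArrayAttr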
264
265 mlir::Value VisitConvertVectorExpr(ConvertVectorExpr *e) {
266 // __builtin_convertvector is an element-wise cast, and is implemented as a
267 // regular cast. The back end handles casts of vectors correctly.
268 return emitScalarConversion(Visit(e->getSrcExpr()),
269 e->getSrcExpr()->getType(), e->getType(),
270 e->getSourceRange().getBegin());
271 }
272
273 mlir::Value VisitMemberExpr(MemberExpr *e);
274
275 mlir::Value VisitCompoundLiteralExpr(CompoundLiteralExpr *e) {
276 return emitLoadOfLValue(e);
277 }
278
279 mlir::Value VisitInitListExpr(InitListExpr *e);
280
281 mlir::Value VisitExplicitCastExpr(ExplicitCastExpr *e) {
282 return VisitCastExpr(e);
283 }
284
285 mlir::Value VisitCXXNullPtrLiteralExpr(CXXNullPtrLiteralExpr *e) {
286 return cgf.cgm.emitNullConstant(e->getType(),
287 cgf.getLoc(e->getSourceRange()));
288 }
289
290 /// Perform a pointer to boolean conversion.
291 mlir::Value emitPointerToBoolConversion(mlir::Value v, QualType qt) {
292 // TODO(cir): comparing the ptr to null is done when lowering CIR to LLVM.
293 // We might want to have a separate pass for these types of conversions.
294 return cgf.getBuilder().createPtrToBoolCast(v);
295 }
296
297 mlir::Value emitFloatToBoolConversion(mlir::Value src, mlir::Location loc) {
298 cir::BoolType boolTy = builder.getBoolTy();
299 return builder.create<cir::CastOp>(loc, boolTy,
300 cir::CastKind::float_to_bool, src);
301 }
302
303 mlir::Value emitIntToBoolConversion(mlir::Value srcVal, mlir::Location loc) {
304 // Because of the type rules of C, we often end up computing a
305 // logical value, then zero extending it to int, then wanting it
306 // as a logical value again.
307 // TODO: optimize this common case here or leave it for later
308 // CIR passes?
309 cir::BoolType boolTy = builder.getBoolTy();
310 return builder.create<cir::CastOp>(loc, boolTy, cir::CastKind::int_to_bool,
311 srcVal);
312 }
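  // Illustrative example (added for exposition): the round trip mentioned in
  // the comment above commonly arises from C code such as:
  //   int ok = (a != 0);  // logical value widened to int
  //   if (ok) { ... }     // ...and converted right back to a bool here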
313
314 /// Convert the specified expression value to a boolean (!cir.bool) truth
315 /// value. This is equivalent to "Val != 0".
316 mlir::Value emitConversionToBool(mlir::Value src, QualType srcType,
317 mlir::Location loc) {
318 assert(srcType.isCanonical() && "EmitScalarConversion strips typedefs");
319
320 if (srcType->isRealFloatingType())
321 return emitFloatToBoolConversion(src, loc);
322
323 if (llvm::isa<MemberPointerType>(srcType)) {
324 cgf.getCIRGenModule().errorNYI(loc, "member pointer to bool conversion");
325 return builder.getFalse(loc);
326 }
327
328 if (srcType->isIntegerType())
329 return emitIntToBoolConversion(src, loc);
330
331 assert(::mlir::isa<cir::PointerType>(src.getType()));
332 return emitPointerToBoolConversion(src, srcType);
333 }
334
335 // Emit a conversion from the specified type to the specified destination
336 // type, both of which are CIR scalar types.
337 struct ScalarConversionOpts {
338 bool treatBooleanAsSigned;
339 bool emitImplicitIntegerTruncationChecks;
340 bool emitImplicitIntegerSignChangeChecks;
341
342 ScalarConversionOpts()
343 : treatBooleanAsSigned(false),
344 emitImplicitIntegerTruncationChecks(false),
345 emitImplicitIntegerSignChangeChecks(false) {}
346
347 ScalarConversionOpts(clang::SanitizerSet sanOpts)
348 : treatBooleanAsSigned(false),
349 emitImplicitIntegerTruncationChecks(
350 sanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation)),
351 emitImplicitIntegerSignChangeChecks(
352 sanOpts.has(SanitizerKind::ImplicitIntegerSignChange)) {}
353 };
354
355 // Conversion from bool, integral, or floating-point to integral or
356 // floating-point. Conversions involving other types are handled elsewhere.
357 // Conversion to bool is handled elsewhere because that's a comparison against
358 // zero, not a simple cast. This handles both individual scalars and vectors.
359 mlir::Value emitScalarCast(mlir::Value src, QualType srcType,
360 QualType dstType, mlir::Type srcTy,
361 mlir::Type dstTy, ScalarConversionOpts opts) {
362 assert(!srcType->isMatrixType() && !dstType->isMatrixType() &&
363 "Internal error: matrix types not handled by this function.");
364 assert(!(mlir::isa<mlir::IntegerType>(srcTy) ||
365 mlir::isa<mlir::IntegerType>(dstTy)) &&
366 "Obsolete code. Don't use mlir::IntegerType with CIR.");
367
368 mlir::Type fullDstTy = dstTy;
369 if (mlir::isa<cir::VectorType>(srcTy) &&
370 mlir::isa<cir::VectorType>(dstTy)) {
371 // Use the element types of the vectors to figure out the CastKind.
372 srcTy = mlir::dyn_cast<cir::VectorType>(srcTy).getElementType();
373 dstTy = mlir::dyn_cast<cir::VectorType>(dstTy).getElementType();
374 }
375
376 std::optional<cir::CastKind> castKind;
377
378 if (mlir::isa<cir::BoolType>(srcTy)) {
379 if (opts.treatBooleanAsSigned)
380 cgf.getCIRGenModule().errorNYI("signed bool");
381 if (cgf.getBuilder().isInt(dstTy))
382 castKind = cir::CastKind::bool_to_int;
383 else if (mlir::isa<cir::FPTypeInterface>(dstTy))
384 castKind = cir::CastKind::bool_to_float;
385 else
386 llvm_unreachable("Internal error: Cast to unexpected type");
387 } else if (cgf.getBuilder().isInt(srcTy)) {
388 if (cgf.getBuilder().isInt(dstTy))
389 castKind = cir::CastKind::integral;
390 else if (mlir::isa<cir::FPTypeInterface>(dstTy))
391 castKind = cir::CastKind::int_to_float;
392 else
393 llvm_unreachable("Internal error: Cast to unexpected type");
394 } else if (mlir::isa<cir::FPTypeInterface>(srcTy)) {
395 if (cgf.getBuilder().isInt(dstTy)) {
396 // If we can't recognize overflow as undefined behavior, assume that
397 // overflow saturates. This protects against normal optimizations if we
398 // are compiling with non-standard FP semantics.
399 if (!cgf.cgm.getCodeGenOpts().StrictFloatCastOverflow)
400 cgf.getCIRGenModule().errorNYI("strict float cast overflow");
402 castKind = cir::CastKind::float_to_int;
403 } else if (mlir::isa<cir::FPTypeInterface>(dstTy)) {
404 // TODO: split this to createFPExt/createFPTrunc
405 return builder.createFloatingCast(src, fullDstTy);
406 } else {
407 llvm_unreachable("Internal error: Cast to unexpected type");
408 }
409 } else {
410 llvm_unreachable("Internal error: Cast from unexpected type");
411 }
412
413 assert(castKind.has_value() && "Internal error: CastKind not set.");
414 return builder.create<cir::CastOp>(src.getLoc(), fullDstTy, *castKind, src);
415 }
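  // Illustrative summary (added for exposition) of the CastKind selection
  // above for ordinary C conversions:
  //   bool -> int   : bool_to_int      int   -> float : int_to_float
  //   bool -> float : bool_to_float    float -> int   : float_to_int
  //   int  -> int   : integral         float -> double: createFloatingCast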
416
417 mlir::Value
418 VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *e) {
419 return Visit(e->getReplacement());
420 }
421
422 mlir::Value VisitVAArgExpr(VAArgExpr *ve) {
423 QualType ty = ve->getType();
424
425 if (ty->isVariablyModifiedType()) {
426 cgf.cgm.errorNYI(ve->getSourceRange(),
427 "variably modified types in varargs");
428 }
429
430 return cgf.emitVAArg(ve);
431 }
432
433 mlir::Value VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *e);
434 mlir::Value
435 VisitAbstractConditionalOperator(const AbstractConditionalOperator *e);
436
437 // Unary Operators.
438 mlir::Value VisitUnaryPostDec(const UnaryOperator *e) {
439 LValue lv = cgf.emitLValue(e->getSubExpr());
440 return emitScalarPrePostIncDec(e, lv, cir::UnaryOpKind::Dec, false);
441 }
442 mlir::Value VisitUnaryPostInc(const UnaryOperator *e) {
443 LValue lv = cgf.emitLValue(e->getSubExpr());
444 return emitScalarPrePostIncDec(e, lv, cir::UnaryOpKind::Inc, false);
445 }
446 mlir::Value VisitUnaryPreDec(const UnaryOperator *e) {
447 LValue lv = cgf.emitLValue(e->getSubExpr());
448 return emitScalarPrePostIncDec(e, lv, cir::UnaryOpKind::Dec, true);
449 }
450 mlir::Value VisitUnaryPreInc(const UnaryOperator *e) {
451 LValue lv = cgf.emitLValue(e->getSubExpr());
452 return emitScalarPrePostIncDec(e, lv, cir::UnaryOpKind::Inc, true);
453 }
454 mlir::Value emitScalarPrePostIncDec(const UnaryOperator *e, LValue lv,
455 cir::UnaryOpKind kind, bool isPre) {
456 if (cgf.getLangOpts().OpenMP)
457 cgf.cgm.errorNYI(e->getSourceRange(), "inc/dec OpenMP");
458
459 QualType type = e->getSubExpr()->getType();
460
461 mlir::Value value;
462 mlir::Value input;
463
464 if (type->getAs<AtomicType>()) {
465 cgf.cgm.errorNYI(e->getSourceRange(), "Atomic inc/dec");
466 // TODO(cir): This is not correct, but it will produce reasonable code
467 // until atomic operations are implemented.
468 value = cgf.emitLoadOfLValue(lv, e->getExprLoc()).getValue();
469 input = value;
470 } else {
471 value = cgf.emitLoadOfLValue(lv, e->getExprLoc()).getValue();
472 input = value;
473 }
474
475 // NOTE: When possible, more frequent cases are handled first.
476
477 // Special case of integer increment that we have to check first: bool++.
478 // Due to promotion rules, we get:
479 // bool++ -> bool = bool + 1
480 // -> bool = (int)bool + 1
481 // -> bool = ((int)bool + 1 != 0)
482 // An interesting aspect of this is that increment is always true.
483 // Decrement does not have this property.
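    // For illustration (added for exposition):
    //   bool b = false; b++;  // b becomes true
    //   bool c = true;  c++;  // c stays true: ((int)c + 1) != 0
    //   bool d = true;  d--;  // d becomes false, so decrement is computed below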
484 if (kind == cir::UnaryOpKind::Inc && type->isBooleanType()) {
485 value = builder.getTrue(cgf.getLoc(e->getExprLoc()));
486 } else if (type->isIntegerType()) {
487 QualType promotedType;
488 [[maybe_unused]] bool canPerformLossyDemotionCheck = false;
489 if (cgf.getContext().isPromotableIntegerType(type)) {
490 promotedType = cgf.getContext().getPromotedIntegerType(type);
491 assert(promotedType != type && "Shouldn't promote to the same type.");
492 canPerformLossyDemotionCheck = true;
493 canPerformLossyDemotionCheck &=
494 cgf.getContext().getCanonicalType(type) !=
495 cgf.getContext().getCanonicalType(promotedType);
496 canPerformLossyDemotionCheck &=
497 type->isIntegerType() && promotedType->isIntegerType();
498
499 // TODO(cir): Currently, we store bitwidths in CIR types only for
500 // integers. This might also be required for other types.
501
502 assert(
503 (!canPerformLossyDemotionCheck ||
504 type->isSignedIntegerOrEnumerationType() ||
505 promotedType->isSignedIntegerOrEnumerationType() ||
506 mlir::cast<cir::IntType>(cgf.convertType(type)).getWidth() ==
507                 mlir::cast<cir::IntType>(cgf.convertType(promotedType)).getWidth()) &&
508 "The following check expects that if we do promotion to different "
509 "underlying canonical type, at least one of the types (either "
510 "base or promoted) will be signed, or the bitwidths will match.");
511 }
512
514 if (e->canOverflow() && type->isSignedIntegerOrEnumerationType()) {
515 value = emitIncDecConsiderOverflowBehavior(e, value, kind);
516 } else {
517 cir::UnaryOpKind kind =
518 e->isIncrementOp() ? cir::UnaryOpKind::Inc : cir::UnaryOpKind::Dec;
519 // NOTE(CIR): clang calls CreateAdd but folds this to a unary op
520 value = emitUnaryOp(e, kind, input, /*nsw=*/false);
521 }
522 } else if (const PointerType *ptr = type->getAs<PointerType>()) {
523 QualType type = ptr->getPointeeType();
524 if (cgf.getContext().getAsVariableArrayType(type)) {
525 // VLA types don't have constant size.
526 cgf.cgm.errorNYI(e->getSourceRange(), "Pointer arithmetic on VLA");
527 return {};
528 } else if (type->isFunctionType()) {
529 // Arithmetic on function pointers (!) is just +-1.
530 cgf.cgm.errorNYI(e->getSourceRange(),
531 "Pointer arithmetic on function pointer");
532 return {};
533 } else {
534 // For everything else, we can just do a simple increment.
535 mlir::Location loc = cgf.getLoc(e->getSourceRange());
536 CIRGenBuilderTy &builder = cgf.getBuilder();
537 int amount = kind == cir::UnaryOpKind::Inc ? 1 : -1;
538 mlir::Value amt = builder.getSInt32(amount, loc);
540 value = builder.createPtrStride(loc, value, amt);
541 }
542 } else if (type->isVectorType()) {
543 cgf.cgm.errorNYI(e->getSourceRange(), "Unary inc/dec vector");
544 return {};
545 } else if (type->isRealFloatingType()) {
547
548 if (type->isHalfType() &&
549 !cgf.getContext().getLangOpts().NativeHalfType) {
550 cgf.cgm.errorNYI(e->getSourceRange(), "Unary inc/dec half");
551 return {};
552 }
553
554 if (mlir::isa<cir::SingleType, cir::DoubleType>(value.getType())) {
555 // Create the inc/dec operation.
556 // NOTE(CIR): clang calls CreateAdd but folds this to a unary op
557        assert((kind == cir::UnaryOpKind::Inc ||
558                kind == cir::UnaryOpKind::Dec) && "Invalid UnaryOp kind");
559 value = emitUnaryOp(e, kind, value);
560 } else {
561 cgf.cgm.errorNYI(e->getSourceRange(), "Unary inc/dec other fp type");
562 return {};
563 }
564 } else if (type->isFixedPointType()) {
565 cgf.cgm.errorNYI(e->getSourceRange(), "Unary inc/dec other fixed point");
566 return {};
567 } else {
568 assert(type->castAs<ObjCObjectPointerType>());
569 cgf.cgm.errorNYI(e->getSourceRange(), "Unary inc/dec ObjectiveC pointer");
570 return {};
571 }
572
573 CIRGenFunction::SourceLocRAIIObject sourceloc{
574 cgf, cgf.getLoc(e->getSourceRange())};
575
576 // Store the updated result through the lvalue
577 if (lv.isBitField())
578 return cgf.emitStoreThroughBitfieldLValue(RValue::get(value), lv);
579 else
580 cgf.emitStoreThroughLValue(RValue::get(value), lv);
581
582 // If this is a postinc, return the value read from memory, otherwise use
583 // the updated value.
584 return isPre ? value : input;
585 }
586
587 mlir::Value emitIncDecConsiderOverflowBehavior(const UnaryOperator *e,
588 mlir::Value inVal,
589 cir::UnaryOpKind kind) {
590    assert((kind == cir::UnaryOpKind::Inc ||
591            kind == cir::UnaryOpKind::Dec) && "Invalid UnaryOp kind");
592 switch (cgf.getLangOpts().getSignedOverflowBehavior()) {
593 case LangOptions::SOB_Defined:
594 return emitUnaryOp(e, kind, inVal, /*nsw=*/false);
595 case LangOptions::SOB_Undefined:
597 return emitUnaryOp(e, kind, inVal, /*nsw=*/true);
598 case LangOptions::SOB_Trapping:
599 if (!e->canOverflow())
600 return emitUnaryOp(e, kind, inVal, /*nsw=*/true);
601      cgf.cgm.errorNYI(e->getSourceRange(), "inc/dec overflow SOB_Trapping");
602 return {};
603 }
604 llvm_unreachable("Unexpected signed overflow behavior kind");
605 }
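  // Illustrative note (added for exposition): the signed-overflow behavior
  // switched on above normally corresponds to the clang driver flags
  //   -fwrapv  -> SOB_Defined    (wrapping arithmetic, no nsw flag)
  //   default  -> SOB_Undefined  (unary op emitted with nsw)
  //   -ftrapv  -> SOB_Trapping   (would require an overflow check; NYI here)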
606
607 mlir::Value VisitUnaryAddrOf(const UnaryOperator *e) {
608 if (llvm::isa<MemberPointerType>(e->getType())) {
609 cgf.cgm.errorNYI(e->getSourceRange(), "Address of member pointer");
610 return builder.getNullPtr(cgf.convertType(e->getType()),
611 cgf.getLoc(e->getExprLoc()));
612 }
613
614 return cgf.emitLValue(e->getSubExpr()).getPointer();
615 }
616
617 mlir::Value VisitUnaryDeref(const UnaryOperator *e) {
618 if (e->getType()->isVoidType())
619 return Visit(e->getSubExpr()); // the actual value should be unused
620 return emitLoadOfLValue(e);
621 }
622
623 mlir::Value VisitUnaryPlus(const UnaryOperator *e) {
624 QualType promotionType = getPromotionType(e->getSubExpr()->getType());
625 mlir::Value result =
626 emitUnaryPlusOrMinus(e, cir::UnaryOpKind::Plus, promotionType);
627 if (result && !promotionType.isNull())
628 return emitUnPromotedValue(result, e->getType());
629 return result;
630 }
631
632 mlir::Value VisitUnaryMinus(const UnaryOperator *e) {
633 QualType promotionType = getPromotionType(e->getSubExpr()->getType());
634 mlir::Value result =
635 emitUnaryPlusOrMinus(e, cir::UnaryOpKind::Minus, promotionType);
636 if (result && !promotionType.isNull())
637 return emitUnPromotedValue(result, e->getType());
638 return result;
639 }
640
641 mlir::Value emitUnaryPlusOrMinus(const UnaryOperator *e,
642 cir::UnaryOpKind kind,
643 QualType promotionType) {
644 ignoreResultAssign = false;
645 mlir::Value operand;
646 if (!promotionType.isNull())
647 operand = cgf.emitPromotedScalarExpr(e->getSubExpr(), promotionType);
648 else
649 operand = Visit(e->getSubExpr());
650
651 bool nsw =
652 kind == cir::UnaryOpKind::Minus && e->getType()->isSignedIntegerType();
653
654 // NOTE: LLVM codegen will lower this directly to either a FNeg
655 // or a Sub instruction. In CIR this will be handled later in LowerToLLVM.
656 return emitUnaryOp(e, kind, operand, nsw);
657 }
658
659 mlir::Value emitUnaryOp(const UnaryOperator *e, cir::UnaryOpKind kind,
660 mlir::Value input, bool nsw = false) {
661 return builder.create<cir::UnaryOp>(
662 cgf.getLoc(e->getSourceRange().getBegin()), input.getType(), kind,
663 input, nsw);
664 }
665
666 mlir::Value VisitUnaryNot(const UnaryOperator *e) {
667 ignoreResultAssign = false;
668 mlir::Value op = Visit(e->getSubExpr());
669 return emitUnaryOp(e, cir::UnaryOpKind::Not, op);
670 }
671
672 mlir::Value VisitUnaryLNot(const UnaryOperator *e);
673
674 mlir::Value VisitUnaryReal(const UnaryOperator *e);
675 mlir::Value VisitUnaryImag(const UnaryOperator *e);
676 mlir::Value VisitRealImag(const UnaryOperator *e,
677 QualType promotionType = QualType());
678
679 mlir::Value VisitUnaryExtension(const UnaryOperator *e) {
680 return Visit(e->getSubExpr());
681 }
682
683 mlir::Value VisitCXXDefaultInitExpr(CXXDefaultInitExpr *die) {
684 CIRGenFunction::CXXDefaultInitExprScope scope(cgf, die);
685 return Visit(die->getExpr());
686 }
687
688 mlir::Value VisitCXXThisExpr(CXXThisExpr *te) { return cgf.loadCXXThis(); }
689
690 mlir::Value VisitExprWithCleanups(ExprWithCleanups *e);
691 mlir::Value VisitCXXNewExpr(const CXXNewExpr *e) {
692 return cgf.emitCXXNewExpr(e);
693 }
694 mlir::Value VisitCXXDeleteExpr(const CXXDeleteExpr *e) {
695 cgf.emitCXXDeleteExpr(e);
696 return {};
697 }
698
699 mlir::Value VisitCXXThrowExpr(const CXXThrowExpr *e) {
700 cgf.emitCXXThrowExpr(e);
701 return {};
702 }
703
704 /// Emit a conversion from the specified type to the specified destination
705 /// type, both of which are CIR scalar types.
706 /// TODO: do we need ScalarConversionOpts here? Should be done in another
707 /// pass.
708 mlir::Value
709 emitScalarConversion(mlir::Value src, QualType srcType, QualType dstType,
710 SourceLocation loc,
711 ScalarConversionOpts opts = ScalarConversionOpts()) {
712 // All conversions involving fixed point types should be handled by the
713 // emitFixedPoint family functions. This is done to prevent bloating up
714 // this function more, and although fixed point numbers are represented by
715 // integers, we do not want to follow any logic that assumes they should be
716 // treated as integers.
717 // TODO(leonardchan): When necessary, add another if statement checking for
718 // conversions to fixed point types from other types.
720 if (srcType->isFixedPointType() || dstType->isFixedPointType()) {
721 cgf.getCIRGenModule().errorNYI(loc, "fixed point conversions");
722 return {};
723 }
724
725 srcType = srcType.getCanonicalType();
726 dstType = dstType.getCanonicalType();
727 if (srcType == dstType) {
728 if (opts.emitImplicitIntegerSignChangeChecks)
729 cgf.getCIRGenModule().errorNYI(loc,
730 "implicit integer sign change checks");
731 return src;
732 }
733
734 if (dstType->isVoidType())
735 return {};
736
737 mlir::Type mlirSrcType = src.getType();
738
739 // Handle conversions to bool first, they are special: comparisons against
740 // 0.
741 if (dstType->isBooleanType())
742 return emitConversionToBool(src, srcType, cgf.getLoc(loc));
743
744 mlir::Type mlirDstType = cgf.convertType(dstType);
745
746 if (srcType->isHalfType() &&
747 !cgf.getContext().getLangOpts().NativeHalfType) {
748 // Cast to FP using the intrinsic if the half type itself isn't supported.
749 if (mlir::isa<cir::FPTypeInterface>(mlirDstType)) {
750 if (cgf.getContext().getTargetInfo().useFP16ConversionIntrinsics())
751 cgf.getCIRGenModule().errorNYI(loc,
752 "cast via llvm.convert.from.fp16");
753 } else {
754 // Cast to other types through float, using either the intrinsic or
755 // FPExt, depending on whether the half type itself is supported (as
756 // opposed to operations on half, available with NativeHalfType).
757 if (cgf.getContext().getTargetInfo().useFP16ConversionIntrinsics())
758 cgf.getCIRGenModule().errorNYI(loc,
759 "cast via llvm.convert.from.fp16");
760 // FIXME(cir): For now lets pretend we shouldn't use the conversion
761 // intrinsics and insert a cast here unconditionally.
762 src = builder.createCast(cgf.getLoc(loc), cir::CastKind::floating, src,
763 cgf.FloatTy);
764 srcType = cgf.getContext().FloatTy;
765 mlirSrcType = cgf.FloatTy;
766 }
767 }
768
769    // TODO(cir): LLVM codegen ignores conversions like int -> uint,
770 // is there anything to be done for CIR here?
771 if (mlirSrcType == mlirDstType) {
772 if (opts.emitImplicitIntegerSignChangeChecks)
773 cgf.getCIRGenModule().errorNYI(loc,
774 "implicit integer sign change checks");
775 return src;
776 }
777
778 // Handle pointer conversions next: pointers can only be converted to/from
779 // other pointers and integers. Check for pointer types in terms of LLVM, as
780 // some native types (like Obj-C id) may map to a pointer type.
781 if (auto dstPT = dyn_cast<cir::PointerType>(mlirDstType)) {
782 cgf.getCIRGenModule().errorNYI(loc, "pointer casts");
783 return builder.getNullPtr(dstPT, src.getLoc());
784 }
785
786 if (isa<cir::PointerType>(mlirSrcType)) {
787      // Must be a ptr-to-int cast.
788 assert(isa<cir::IntType>(mlirDstType) && "not ptr->int?");
789 return builder.createPtrToInt(src, mlirDstType);
790 }
791
792 // A scalar can be splatted to an extended vector of the same element type
793 if (dstType->isExtVectorType() && !srcType->isVectorType()) {
794 // Sema should add casts to make sure that the source expression's type
795 // is the same as the vector's element type (sans qualifiers)
796 assert(dstType->castAs<ExtVectorType>()->getElementType().getTypePtr() ==
797 srcType.getTypePtr() &&
798 "Splatted expr doesn't match with vector element type?");
799
800 cgf.getCIRGenModule().errorNYI(loc, "vector splatting");
801 return {};
802 }
803
804 if (srcType->isMatrixType() && dstType->isMatrixType()) {
805 cgf.getCIRGenModule().errorNYI(loc,
806 "matrix type to matrix type conversion");
807 return {};
808 }
809 assert(!srcType->isMatrixType() && !dstType->isMatrixType() &&
810 "Internal error: conversion between matrix type and scalar type");
811
812 // Finally, we have the arithmetic types or vectors of arithmetic types.
813 mlir::Value res = nullptr;
814 mlir::Type resTy = mlirDstType;
815
816 res = emitScalarCast(src, srcType, dstType, mlirSrcType, mlirDstType, opts);
817
818 if (mlirDstType != resTy) {
819 if (cgf.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
820 cgf.getCIRGenModule().errorNYI(loc, "cast via llvm.convert.to.fp16");
821 }
822 // FIXME(cir): For now we never use FP16 conversion intrinsics even if
823 // required by the target. Change that once this is implemented
824 res = builder.createCast(cgf.getLoc(loc), cir::CastKind::floating, res,
825 resTy);
826 }
827
828 if (opts.emitImplicitIntegerTruncationChecks)
829 cgf.getCIRGenModule().errorNYI(loc, "implicit integer truncation checks");
830
831 if (opts.emitImplicitIntegerSignChangeChecks)
832 cgf.getCIRGenModule().errorNYI(loc,
833 "implicit integer sign change checks");
834
835 return res;
836 }
837
838 BinOpInfo emitBinOps(const BinaryOperator *e,
839 QualType promotionType = QualType()) {
840 BinOpInfo result;
841 result.lhs = cgf.emitPromotedScalarExpr(e->getLHS(), promotionType);
842 result.rhs = cgf.emitPromotedScalarExpr(e->getRHS(), promotionType);
843 if (!promotionType.isNull())
844 result.fullType = promotionType;
845 else
846 result.fullType = e->getType();
847 result.compType = result.fullType;
848 if (const auto *vecType = dyn_cast_or_null<VectorType>(result.fullType)) {
849 result.compType = vecType->getElementType();
850 }
851 result.opcode = e->getOpcode();
852 result.loc = e->getSourceRange();
853 // TODO(cir): Result.FPFeatures
855 result.e = e;
856 return result;
857 }
858
859 mlir::Value emitMul(const BinOpInfo &ops);
860 mlir::Value emitDiv(const BinOpInfo &ops);
861 mlir::Value emitRem(const BinOpInfo &ops);
862 mlir::Value emitAdd(const BinOpInfo &ops);
863 mlir::Value emitSub(const BinOpInfo &ops);
864 mlir::Value emitShl(const BinOpInfo &ops);
865 mlir::Value emitShr(const BinOpInfo &ops);
866 mlir::Value emitAnd(const BinOpInfo &ops);
867 mlir::Value emitXor(const BinOpInfo &ops);
868 mlir::Value emitOr(const BinOpInfo &ops);
869
870 LValue emitCompoundAssignLValue(
871 const CompoundAssignOperator *e,
872 mlir::Value (ScalarExprEmitter::*f)(const BinOpInfo &),
873 mlir::Value &result);
874 mlir::Value
875 emitCompoundAssign(const CompoundAssignOperator *e,
876 mlir::Value (ScalarExprEmitter::*f)(const BinOpInfo &));
877
878 // TODO(cir): Candidate to be in a common AST helper between CIR and LLVM
879 // codegen.
880 QualType getPromotionType(QualType ty) {
881 const clang::ASTContext &ctx = cgf.getContext();
882 if (auto *complexTy = ty->getAs<ComplexType>()) {
883 QualType elementTy = complexTy->getElementType();
884 if (elementTy.UseExcessPrecision(ctx))
885 return ctx.getComplexType(ctx.FloatTy);
886 }
887
888 if (ty.UseExcessPrecision(cgf.getContext())) {
889 if (auto *vt = ty->getAs<VectorType>()) {
890 unsigned numElements = vt->getNumElements();
891 return ctx.getVectorType(ctx.FloatTy, numElements, vt->getVectorKind());
892 }
893 return cgf.getContext().FloatTy;
894 }
895
896 return QualType();
897 }
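  // Illustrative example (added for exposition): excess-precision promotion
  // mainly affects small floating-point types. On a target where _Float16
  // arithmetic uses excess precision:
  //   _Float16 a, b, c;
  //   c = a + b;  // operands promoted to float for the add, then the result
  //               // is narrowed back via emitUnPromotedValue()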
898
899// Binary operators and binary compound assignment operators.
900#define HANDLEBINOP(OP) \
901 mlir::Value VisitBin##OP(const BinaryOperator *e) { \
902 QualType promotionTy = getPromotionType(e->getType()); \
903 auto result = emit##OP(emitBinOps(e, promotionTy)); \
904 if (result && !promotionTy.isNull()) \
905 result = emitUnPromotedValue(result, e->getType()); \
906 return result; \
907 } \
908 mlir::Value VisitBin##OP##Assign(const CompoundAssignOperator *e) { \
909 return emitCompoundAssign(e, &ScalarExprEmitter::emit##OP); \
910 }
911
912 HANDLEBINOP(Mul)
913 HANDLEBINOP(Div)
914 HANDLEBINOP(Rem)
915 HANDLEBINOP(Add)
916 HANDLEBINOP(Sub)
917 HANDLEBINOP(Shl)
918 HANDLEBINOP(Shr)
919  HANDLEBINOP(And)
920  HANDLEBINOP(Xor)
921  HANDLEBINOP(Or)
922#undef HANDLEBINOP
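  // Illustrative expansion (added for exposition): HANDLEBINOP(Add) defines
  // roughly:
  //   mlir::Value VisitBinAdd(const BinaryOperator *e) {
  //     ... emitAdd(emitBinOps(e, promotionTy)) ... // plus un-promotion
  //   }
  //   mlir::Value VisitBinAddAssign(const CompoundAssignOperator *e) {
  //     return emitCompoundAssign(e, &ScalarExprEmitter::emitAdd);
  //   }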
923
924 mlir::Value emitCmp(const BinaryOperator *e) {
925 const mlir::Location loc = cgf.getLoc(e->getExprLoc());
926 mlir::Value result;
927 QualType lhsTy = e->getLHS()->getType();
928 QualType rhsTy = e->getRHS()->getType();
929
930 auto clangCmpToCIRCmp =
931 [](clang::BinaryOperatorKind clangCmp) -> cir::CmpOpKind {
932 switch (clangCmp) {
933 case BO_LT:
934 return cir::CmpOpKind::lt;
935 case BO_GT:
936 return cir::CmpOpKind::gt;
937 case BO_LE:
938 return cir::CmpOpKind::le;
939 case BO_GE:
940 return cir::CmpOpKind::ge;
941 case BO_EQ:
942 return cir::CmpOpKind::eq;
943 case BO_NE:
944 return cir::CmpOpKind::ne;
945 default:
946 llvm_unreachable("unsupported comparison kind for cir.cmp");
947 }
948 };
949
950 cir::CmpOpKind kind = clangCmpToCIRCmp(e->getOpcode());
951 if (lhsTy->getAs<MemberPointerType>()) {
953 assert(e->getOpcode() == BO_EQ || e->getOpcode() == BO_NE);
954 mlir::Value lhs = cgf.emitScalarExpr(e->getLHS());
955 mlir::Value rhs = cgf.emitScalarExpr(e->getRHS());
956 result = builder.createCompare(loc, kind, lhs, rhs);
957 } else if (!lhsTy->isAnyComplexType() && !rhsTy->isAnyComplexType()) {
958 BinOpInfo boInfo = emitBinOps(e);
959 mlir::Value lhs = boInfo.lhs;
960 mlir::Value rhs = boInfo.rhs;
961
962 if (lhsTy->isVectorType()) {
963 if (!e->getType()->isVectorType()) {
964 // If AltiVec, the comparison results in a numeric type, so we use
965 // intrinsics comparing vectors and giving 0 or 1 as a result
966 cgf.cgm.errorNYI(loc, "AltiVec comparison");
967 } else {
968 // Other kinds of vectors. Element-wise comparison returning
969 // a vector.
970 result = builder.create<cir::VecCmpOp>(
971 cgf.getLoc(boInfo.loc), cgf.convertType(boInfo.fullType), kind,
972 boInfo.lhs, boInfo.rhs);
973 }
974 } else if (boInfo.isFixedPointOp()) {
976 cgf.cgm.errorNYI(loc, "fixed point comparisons");
977 result = builder.getBool(false, loc);
978 } else {
979 // integers and pointers
980 if (cgf.cgm.getCodeGenOpts().StrictVTablePointers &&
981 mlir::isa<cir::PointerType>(lhs.getType()) &&
982 mlir::isa<cir::PointerType>(rhs.getType())) {
983 cgf.cgm.errorNYI(loc, "strict vtable pointer comparisons");
984 }
985
986 cir::CmpOpKind kind = clangCmpToCIRCmp(e->getOpcode());
987 result = builder.createCompare(loc, kind, lhs, rhs);
988 }
989 } else {
990 // Complex Comparison: can only be an equality comparison.
991 assert(e->getOpcode() == BO_EQ || e->getOpcode() == BO_NE);
992
993 BinOpInfo boInfo = emitBinOps(e);
994 result = builder.create<cir::CmpOp>(loc, kind, boInfo.lhs, boInfo.rhs);
995 }
996
997 return emitScalarConversion(result, cgf.getContext().BoolTy, e->getType(),
998 e->getExprLoc());
999 }
1000
1001// Comparisons.
1002#define VISITCOMP(CODE) \
1003 mlir::Value VisitBin##CODE(const BinaryOperator *E) { return emitCmp(E); }
1004 VISITCOMP(LT)
1005 VISITCOMP(GT)
1006 VISITCOMP(LE)
1007 VISITCOMP(GE)
1008 VISITCOMP(EQ)
1009 VISITCOMP(NE)
1010#undef VISITCOMP
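  // Illustrative note (added for exposition): an expression such as
  //   int r = (a < b);
  // is routed through emitCmp(), which builds a comparison yielding !cir.bool
  // and then converts that result to the expression's C type (int).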
1011
1012 mlir::Value VisitBinAssign(const BinaryOperator *e) {
1013 const bool ignore = std::exchange(ignoreResultAssign, false);
1014
1015 mlir::Value rhs;
1016 LValue lhs;
1017
1018 switch (e->getLHS()->getType().getObjCLifetime()) {
1024 break;
1026 // __block variables need to have the rhs evaluated first, plus this
1027 // should improve codegen just a little.
1028 rhs = Visit(e->getRHS());
1030 // TODO(cir): This needs to be emitCheckedLValue() once we support
1031 // sanitizers
1032 lhs = cgf.emitLValue(e->getLHS());
1033
1034 // Store the value into the LHS. Bit-fields are handled specially because
1035 // the result is altered by the store, i.e., [C99 6.5.16p1]
1036 // 'An assignment expression has the value of the left operand after the
1037 // assignment...'.
1038 if (lhs.isBitField()) {
1039 rhs = cgf.emitStoreThroughBitfieldLValue(RValue::get(rhs), lhs);
1040 } else {
1041 cgf.emitNullabilityCheck(lhs, rhs, e->getExprLoc());
1043 cgf, cgf.getLoc(e->getSourceRange())};
1044 cgf.emitStoreThroughLValue(RValue::get(rhs), lhs);
1045 }
1046 }
1047
1048 // If the result is clearly ignored, return now.
1049 if (ignore)
1050 return nullptr;
1051
1052 // The result of an assignment in C is the assigned r-value.
1053 if (!cgf.getLangOpts().CPlusPlus)
1054 return rhs;
1055
1056 // If the lvalue is non-volatile, return the computed value of the
1057 // assignment.
1058 if (!lhs.isVolatile())
1059 return rhs;
1060
1061 // Otherwise, reload the value.
1062 return emitLoadOfLValue(lhs, e->getExprLoc());
1063 }
1064
1065 mlir::Value VisitBinComma(const BinaryOperator *e) {
1066 cgf.emitIgnoredExpr(e->getLHS());
1067 // NOTE: We don't need to EnsureInsertPoint() like LLVM codegen.
1068 return Visit(e->getRHS());
1069 }
1070
1071 mlir::Value VisitBinLAnd(const clang::BinaryOperator *e) {
1072 if (e->getType()->isVectorType()) {
1073 mlir::Location loc = cgf.getLoc(e->getExprLoc());
1074 auto vecTy = mlir::cast<cir::VectorType>(cgf.convertType(e->getType()));
1075 mlir::Value zeroValue = builder.getNullValue(vecTy.getElementType(), loc);
1076 SmallVector<mlir::Value, 16> elements(vecTy.getSize(), zeroValue);
1077 auto zeroVec = cir::VecCreateOp::create(builder, loc, vecTy, elements);
1078
1079 mlir::Value lhs = Visit(e->getLHS());
1080 mlir::Value rhs = Visit(e->getRHS());
1081
1082 auto cmpOpKind = cir::CmpOpKind::ne;
1083 lhs = cir::VecCmpOp::create(builder, loc, vecTy, cmpOpKind, lhs, zeroVec);
1084 rhs = cir::VecCmpOp::create(builder, loc, vecTy, cmpOpKind, rhs, zeroVec);
1085      mlir::Value vecAnd = builder.createAnd(loc, lhs, rhs);
1086      return builder.createIntCast(vecAnd, vecTy);
1087 }
1088
1090 mlir::Type resTy = cgf.convertType(e->getType());
1091 mlir::Location loc = cgf.getLoc(e->getExprLoc());
1092
1093 CIRGenFunction::ConditionalEvaluation eval(cgf);
1094
1095 mlir::Value lhsCondV = cgf.evaluateExprAsBool(e->getLHS());
1096 auto resOp = builder.create<cir::TernaryOp>(
1097 loc, lhsCondV, /*trueBuilder=*/
1098 [&](mlir::OpBuilder &b, mlir::Location loc) {
1099 CIRGenFunction::LexicalScope lexScope{cgf, loc,
1100 b.getInsertionBlock()};
1101 cgf.curLexScope->setAsTernary();
1102 b.create<cir::YieldOp>(loc, cgf.evaluateExprAsBool(e->getRHS()));
1103 },
1104 /*falseBuilder*/
1105 [&](mlir::OpBuilder &b, mlir::Location loc) {
1106 CIRGenFunction::LexicalScope lexScope{cgf, loc,
1107 b.getInsertionBlock()};
1108 cgf.curLexScope->setAsTernary();
1109 auto res = b.create<cir::ConstantOp>(loc, builder.getFalseAttr());
1110 b.create<cir::YieldOp>(loc, res.getRes());
1111 });
1112 return maybePromoteBoolResult(resOp.getResult(), resTy);
1113 }
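  // Illustrative note (added for exposition): for scalar operands, `a && b`
  // becomes a cir::TernaryOp that evaluates `b` only when `a` is true,
  // preserving C's short-circuit semantics. In the vector branch above, both
  // operands are evaluated and combined element-wise, so no short-circuiting
  // occurs.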
1114
1115 mlir::Value VisitBinLOr(const clang::BinaryOperator *e) {
1116 if (e->getType()->isVectorType()) {
1117 mlir::Location loc = cgf.getLoc(e->getExprLoc());
1118 auto vecTy = mlir::cast<cir::VectorType>(cgf.convertType(e->getType()));
1119 mlir::Value zeroValue = builder.getNullValue(vecTy.getElementType(), loc);
1120 SmallVector<mlir::Value, 16> elements(vecTy.getSize(), zeroValue);
1121 auto zeroVec = cir::VecCreateOp::create(builder, loc, vecTy, elements);
1122
1123 mlir::Value lhs = Visit(e->getLHS());
1124 mlir::Value rhs = Visit(e->getRHS());
1125
1126 auto cmpOpKind = cir::CmpOpKind::ne;
1127 lhs = cir::VecCmpOp::create(builder, loc, vecTy, cmpOpKind, lhs, zeroVec);
1128 rhs = cir::VecCmpOp::create(builder, loc, vecTy, cmpOpKind, rhs, zeroVec);
1129 mlir::Value vecOr = builder.createOr(loc, lhs, rhs);
1130 return builder.createIntCast(vecOr, vecTy);
1131 }
1132
1134 mlir::Type resTy = cgf.convertType(e->getType());
1135 mlir::Location loc = cgf.getLoc(e->getExprLoc());
1136
1137 CIRGenFunction::ConditionalEvaluation eval(cgf);
1138
1139 mlir::Value lhsCondV = cgf.evaluateExprAsBool(e->getLHS());
1140 auto resOp = builder.create<cir::TernaryOp>(
1141 loc, lhsCondV, /*trueBuilder=*/
1142 [&](mlir::OpBuilder &b, mlir::Location loc) {
1143 CIRGenFunction::LexicalScope lexScope{cgf, loc,
1144 b.getInsertionBlock()};
1145 cgf.curLexScope->setAsTernary();
1146 auto res = b.create<cir::ConstantOp>(loc, builder.getTrueAttr());
1147 b.create<cir::YieldOp>(loc, res.getRes());
1148 },
1149 /*falseBuilder*/
1150 [&](mlir::OpBuilder &b, mlir::Location loc) {
1151 CIRGenFunction::LexicalScope lexScope{cgf, loc,
1152 b.getInsertionBlock()};
1153 cgf.curLexScope->setAsTernary();
1154 b.create<cir::YieldOp>(loc, cgf.evaluateExprAsBool(e->getRHS()));
1155 });
1156
1157 return maybePromoteBoolResult(resOp.getResult(), resTy);
1158 }
1159
1160 mlir::Value VisitAtomicExpr(AtomicExpr *e) {
1161 return cgf.emitAtomicExpr(e).getValue();
1162 }
1163};
1164
1165LValue ScalarExprEmitter::emitCompoundAssignLValue(
1166 const CompoundAssignOperator *e,
1167 mlir::Value (ScalarExprEmitter::*func)(const BinOpInfo &),
1168 mlir::Value &result) {
1169  if (e->getComputationResultType()->isAnyComplexType())
1170    return cgf.emitScalarCompoundAssignWithComplex(e, result);
1171
1172 QualType lhsTy = e->getLHS()->getType();
1173 BinOpInfo opInfo;
1174
1175 // Emit the RHS first. __block variables need to have the rhs evaluated
1176 // first, plus this should improve codegen a little.
1177
1178 QualType promotionTypeCR = getPromotionType(e->getComputationResultType());
1179 if (promotionTypeCR.isNull())
1180 promotionTypeCR = e->getComputationResultType();
1181
1182 QualType promotionTypeLHS = getPromotionType(e->getComputationLHSType());
1183 QualType promotionTypeRHS = getPromotionType(e->getRHS()->getType());
1184
1185 if (!promotionTypeRHS.isNull())
1186 opInfo.rhs = cgf.emitPromotedScalarExpr(e->getRHS(), promotionTypeRHS);
1187 else
1188 opInfo.rhs = Visit(e->getRHS());
1189
1190 opInfo.fullType = promotionTypeCR;
1191 opInfo.compType = opInfo.fullType;
1192 if (const auto *vecType = dyn_cast_or_null<VectorType>(opInfo.fullType))
1193 opInfo.compType = vecType->getElementType();
1194 opInfo.opcode = e->getOpcode();
1195 opInfo.fpfeatures = e->getFPFeaturesInEffect(cgf.getLangOpts());
1196 opInfo.e = e;
1197 opInfo.loc = e->getSourceRange();
1198
1199 // Load/convert the LHS
1200 LValue lhsLV = cgf.emitLValue(e->getLHS());
1201
1202 if (lhsTy->getAs<AtomicType>()) {
1203 cgf.cgm.errorNYI(result.getLoc(), "atomic lvalue assign");
1204 return LValue();
1205 }
1206
1207 opInfo.lhs = emitLoadOfLValue(lhsLV, e->getExprLoc());
1208
1209 CIRGenFunction::SourceLocRAIIObject sourceloc{
1210 cgf, cgf.getLoc(e->getSourceRange())};
1211 SourceLocation loc = e->getExprLoc();
1212 if (!promotionTypeLHS.isNull())
1213 opInfo.lhs = emitScalarConversion(opInfo.lhs, lhsTy, promotionTypeLHS, loc);
1214 else
1215 opInfo.lhs = emitScalarConversion(opInfo.lhs, lhsTy,
1216 e->getComputationLHSType(), loc);
1217
1218 // Expand the binary operator.
1219 result = (this->*func)(opInfo);
1220
1221 // Convert the result back to the LHS type,
1222 // potentially with Implicit Conversion sanitizer check.
1223 result = emitScalarConversion(result, promotionTypeCR, lhsTy, loc,
1224 ScalarConversionOpts(cgf.sanOpts));
1225
1226 // Store the result value into the LHS lvalue. Bit-fields are handled
1227 // specially because the result is altered by the store, i.e., [C99 6.5.16p1]
1228 // 'An assignment expression has the value of the left operand after the
1229 // assignment...'.
1230 if (lhsLV.isBitField())
1231 cgf.emitStoreThroughBitfieldLValue(RValue::get(result), lhsLV);
1232 else
1233 cgf.emitStoreThroughLValue(RValue::get(result), lhsLV);
1234
1235 if (cgf.getLangOpts().OpenMP)
1236 cgf.cgm.errorNYI(e->getSourceRange(), "openmp");
1237
1238 return lhsLV;
1239}
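// Illustrative example (added for exposition): for a compound assignment with
// integer promotion, e.g.
//   short s; int i;
//   s += i;  // LHS loaded and converted to the computation type (int), the
//            // add is performed, then the result is converted back to short
// the two conversions correspond to the emitScalarConversion() calls above.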
1240
1241mlir::Value ScalarExprEmitter::emitComplexToScalarConversion(mlir::Location loc,
1242 mlir::Value value,
1243 CastKind kind,
1244 QualType destTy) {
1245 cir::CastKind castOpKind;
1246 switch (kind) {
1247 case CK_FloatingComplexToReal:
1248 castOpKind = cir::CastKind::float_complex_to_real;
1249 break;
1250 case CK_IntegralComplexToReal:
1251 castOpKind = cir::CastKind::int_complex_to_real;
1252 break;
1253 case CK_FloatingComplexToBoolean:
1254 castOpKind = cir::CastKind::float_complex_to_bool;
1255 break;
1256 case CK_IntegralComplexToBoolean:
1257 castOpKind = cir::CastKind::int_complex_to_bool;
1258 break;
1259 default:
1260 llvm_unreachable("invalid complex-to-scalar cast kind");
1261 }
1262
1263  return builder.createCast(loc, castOpKind, value, cgf.convertType(destTy));
1264}
1265
1266mlir::Value ScalarExprEmitter::emitPromoted(const Expr *e,
1267 QualType promotionType) {
1268 e = e->IgnoreParens();
1269 if (const auto *bo = dyn_cast<BinaryOperator>(e)) {
1270 switch (bo->getOpcode()) {
1271#define HANDLE_BINOP(OP) \
1272 case BO_##OP: \
1273 return emit##OP(emitBinOps(bo, promotionType));
1274 HANDLE_BINOP(Add)
1275 HANDLE_BINOP(Sub)
1276 HANDLE_BINOP(Mul)
1277 HANDLE_BINOP(Div)
1278#undef HANDLE_BINOP
1279 default:
1280 break;
1281 }
1282 } else if (const auto *uo = dyn_cast<UnaryOperator>(e)) {
1283 switch (uo->getOpcode()) {
1284 case UO_Imag:
1285 case UO_Real:
1286 return VisitRealImag(uo, promotionType);
1287 case UO_Minus:
1288 return emitUnaryPlusOrMinus(uo, cir::UnaryOpKind::Minus, promotionType);
1289 case UO_Plus:
1290 return emitUnaryPlusOrMinus(uo, cir::UnaryOpKind::Plus, promotionType);
1291 default:
1292 break;
1293 }
1294 }
1295 mlir::Value result = Visit(const_cast<Expr *>(e));
1296 if (result) {
1297 if (!promotionType.isNull())
1298 return emitPromotedValue(result, promotionType);
1299 return emitUnPromotedValue(result, e->getType());
1300 }
1301 return result;
1302}
1303
1304mlir::Value ScalarExprEmitter::emitCompoundAssign(
1305 const CompoundAssignOperator *e,
1306 mlir::Value (ScalarExprEmitter::*func)(const BinOpInfo &)) {
1307
1308 bool ignore = std::exchange(ignoreResultAssign, false);
1309 mlir::Value rhs;
1310 LValue lhs = emitCompoundAssignLValue(e, func, rhs);
1311
1312 // If the result is clearly ignored, return now.
1313 if (ignore)
1314 return {};
1315
1316 // The result of an assignment in C is the assigned r-value.
1317 if (!cgf.getLangOpts().CPlusPlus)
1318 return rhs;
1319
1320 // If the lvalue is non-volatile, return the computed value of the assignment.
1321 if (!lhs.isVolatile())
1322 return rhs;
1323
1324 // Otherwise, reload the value.
1325 return emitLoadOfLValue(lhs, e->getExprLoc());
1326}
1327
1328mlir::Value ScalarExprEmitter::VisitExprWithCleanups(ExprWithCleanups *e) {
1329 mlir::Location scopeLoc = cgf.getLoc(e->getSourceRange());
1330 mlir::OpBuilder &builder = cgf.builder;
1331
1332 auto scope = cir::ScopeOp::create(
1333 builder, scopeLoc,
1334 /*scopeBuilder=*/
1335 [&](mlir::OpBuilder &b, mlir::Type &yieldTy, mlir::Location loc) {
1336 CIRGenFunction::LexicalScope lexScope{cgf, loc,
1337 builder.getInsertionBlock()};
1338 mlir::Value scopeYieldVal = Visit(e->getSubExpr());
1339 if (scopeYieldVal) {
1340 // Defend against dominance problems caused by jumps out of expression
1341 // evaluation through the shared cleanup block.
1342 lexScope.forceCleanup();
1343 cir::YieldOp::create(builder, loc, scopeYieldVal);
1344 yieldTy = scopeYieldVal.getType();
1345 }
1346 });
1347
1348 return scope.getNumResults() > 0 ? scope->getResult(0) : nullptr;
1349}
1350
1351} // namespace
1352
1353LValue
1355 ScalarExprEmitter emitter(*this, builder);
1356 mlir::Value result;
1357 switch (e->getOpcode()) {
1358#define COMPOUND_OP(Op) \
1359 case BO_##Op##Assign: \
1360 return emitter.emitCompoundAssignLValue(e, &ScalarExprEmitter::emit##Op, \
1361 result)
1362 COMPOUND_OP(Mul);
1363 COMPOUND_OP(Div);
1364 COMPOUND_OP(Rem);
1365 COMPOUND_OP(Add);
1366 COMPOUND_OP(Sub);
1367 COMPOUND_OP(Shl);
1368 COMPOUND_OP(Shr);
1369    COMPOUND_OP(And);
1370    COMPOUND_OP(Xor);
1371 COMPOUND_OP(Or);
1372#undef COMPOUND_OP
1373
1374 case BO_PtrMemD:
1375 case BO_PtrMemI:
1376 case BO_Mul:
1377 case BO_Div:
1378 case BO_Rem:
1379 case BO_Add:
1380 case BO_Sub:
1381 case BO_Shl:
1382 case BO_Shr:
1383 case BO_LT:
1384 case BO_GT:
1385 case BO_LE:
1386 case BO_GE:
1387 case BO_EQ:
1388 case BO_NE:
1389 case BO_Cmp:
1390 case BO_And:
1391 case BO_Xor:
1392 case BO_Or:
1393 case BO_LAnd:
1394 case BO_LOr:
1395 case BO_Assign:
1396 case BO_Comma:
1397 llvm_unreachable("Not valid compound assignment operators");
1398 }
1399 llvm_unreachable("Unhandled compound assignment operator");
1400}
1401
1402/// Emit the computation of the specified expression of scalar type.
1403mlir::Value CIRGenFunction::emitScalarExpr(const Expr *e) {
1404  assert(e && hasScalarEvaluationKind(e->getType()) &&
1405 "Invalid scalar expression to emit");
1406
1407 return ScalarExprEmitter(*this, builder).Visit(const_cast<Expr *>(e));
1408}
1409
1410mlir::Value CIRGenFunction::emitPromotedScalarExpr(const Expr *e,
1411                                                  QualType promotionType) {
1412 if (!promotionType.isNull())
1413 return ScalarExprEmitter(*this, builder).emitPromoted(e, promotionType);
1414 return ScalarExprEmitter(*this, builder).Visit(const_cast<Expr *>(e));
1415}
1416
1417[[maybe_unused]] static bool mustVisitNullValue(const Expr *e) {
1418 // If a null pointer expression's type is the C++0x nullptr_t and
1419 // the expression is not a simple literal, it must be evaluated
1420 // for its potential side effects.
1421  if (isa<CXXNullPtrLiteralExpr>(e))
1422    return false;
1423 return e->getType()->isNullPtrType();
1424}
1425
1426/// If \p e is a widened promoted integer, get its base (unpromoted) type.
1427static std::optional<QualType>
1428getUnwidenedIntegerType(const ASTContext &astContext, const Expr *e) {
1429 const Expr *base = e->IgnoreImpCasts();
1430 if (e == base)
1431 return std::nullopt;
1432
1433 QualType baseTy = base->getType();
1434 if (!astContext.isPromotableIntegerType(baseTy) ||
1435 astContext.getTypeSize(baseTy) >= astContext.getTypeSize(e->getType()))
1436 return std::nullopt;
1437
1438 return baseTy;
1439}
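// Illustrative example (added for exposition): in
//   short a, b;  int r = a + b;
// each operand reaches the binop wrapped in an implicit widening cast to int;
// stripping the implicit casts recovers the unpromoted base type `short`,
// which is what this helper returns.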
1440
1441/// Check if \p e is a widened promoted integer.
1442[[maybe_unused]] static bool isWidenedIntegerOp(const ASTContext &astContext,
1443 const Expr *e) {
1444 return getUnwidenedIntegerType(astContext, e).has_value();
1445}
1446
1447/// Check if we can skip the overflow check for \p Op.
1448[[maybe_unused]] static bool canElideOverflowCheck(const ASTContext &astContext,
1449 const BinOpInfo &op) {
1450 assert((isa<UnaryOperator>(op.e) || isa<BinaryOperator>(op.e)) &&
1451 "Expected a unary or binary operator");
1452
1453 // If the binop has constant inputs and we can prove there is no overflow,
1454 // we can elide the overflow check.
1455 if (!op.mayHaveIntegerOverflow())
1456 return true;
1457
1458 // If a unary op has a widened operand, the op cannot overflow.
1459 if (const auto *uo = dyn_cast<UnaryOperator>(op.e))
1460 return !uo->canOverflow();
1461
1462 // We usually don't need overflow checks for binops with widened operands.
1463 // Multiplication with promoted unsigned operands is a special case.
1464 const auto *bo = cast<BinaryOperator>(op.e);
1465 std::optional<QualType> optionalLHSTy =
1466 getUnwidenedIntegerType(astContext, bo->getLHS());
1467 if (!optionalLHSTy)
1468 return false;
1469
1470 std::optional<QualType> optionalRHSTy =
1471 getUnwidenedIntegerType(astContext, bo->getRHS());
1472 if (!optionalRHSTy)
1473 return false;
1474
1475 QualType lhsTy = *optionalLHSTy;
1476 QualType rhsTy = *optionalRHSTy;
1477
1478 // This is the simple case: binops without unsigned multiplication, and with
1479 // widened operands. No overflow check is needed here.
1480 if ((op.opcode != BO_Mul && op.opcode != BO_MulAssign) ||
1481 !lhsTy->isUnsignedIntegerType() || !rhsTy->isUnsignedIntegerType())
1482 return true;
1483
1484 // For unsigned multiplication the overflow check can be elided if either one
1485  // of the unpromoted types is less than half the size of the promoted type.
1486 unsigned promotedSize = astContext.getTypeSize(op.e->getType());
1487 return (2 * astContext.getTypeSize(lhsTy)) < promotedSize ||
1488 (2 * astContext.getTypeSize(rhsTy)) < promotedSize;
1489}
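// Illustrative example (added for exposition), assuming 16-bit short and
// 32-bit int: for `unsigned short x, y;` the product `x * y` is computed in
// int, and 2 * 16 is not smaller than 32, so 65535 * 65535 can exceed INT_MAX
// and the check cannot be elided. For 8-bit operands (2 * 8 < 32) it can.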
1490
1491/// Emit pointer + index arithmetic.
1492static mlir::Value emitPointerArithmetic(CIRGenFunction &cgf,
1493                                         const BinOpInfo &op,
1494 bool isSubtraction) {
1495 // Must have binary (not unary) expr here. Unary pointer
1496 // increment/decrement doesn't use this path.
1497  const BinaryOperator *expr = cast<BinaryOperator>(op.e);
1498
1499 mlir::Value pointer = op.lhs;
1500 Expr *pointerOperand = expr->getLHS();
1501 mlir::Value index = op.rhs;
1502 Expr *indexOperand = expr->getRHS();
1503
1504 // In the case of subtraction, the FE has ensured that the LHS is always the
1505 // pointer. However, addition can have the pointer on either side. We will
1506 // always have a pointer operand and an integer operand, so if the LHS wasn't
1507 // a pointer, we need to swap our values.
1508 if (!isSubtraction && !mlir::isa<cir::PointerType>(pointer.getType())) {
1509 std::swap(pointer, index);
1510 std::swap(pointerOperand, indexOperand);
1511 }
1512 assert(mlir::isa<cir::PointerType>(pointer.getType()) &&
1513 "Need a pointer operand");
1514 assert(mlir::isa<cir::IntType>(index.getType()) && "Need an integer operand");
1515
1516 // Some versions of glibc and gcc use idioms (particularly in their malloc
1517 // routines) that add a pointer-sized integer (known to be a pointer value)
1518 // to a null pointer in order to cast the value back to an integer or as
1519 // part of a pointer alignment algorithm. This is undefined behavior, but
1520 // we'd like to be able to compile programs that use it.
1521 //
1522 // Normally, we'd generate a GEP with a null-pointer base here in response
1523 // to that code, but it's also UB to dereference a pointer created that
1524 // way. Instead (as an acknowledged hack to tolerate the idiom) we will
1525 // generate a direct cast of the integer value to a pointer.
1526 //
1527 // The idiom (p = nullptr + N) is not met if any of the following are true:
1528 //
1529 // The operation is subtraction.
1530 // The index is not pointer-sized.
1531 // The pointer type is not byte-sized.
1532 //
1533  if (BinaryOperator::isNullPointerArithmeticExtension(
1534          cgf.getContext(), op.opcode, expr->getLHS(), expr->getRHS()))
1535 return cgf.getBuilder().createIntToPtr(index, pointer.getType());
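  // Illustrative example (added for exposition): the tolerated idiom looks like
  //   p = (char *)0 + n;  // formally UB, but emitted here as a plain
  //                       // int-to-pointer cast rather than null-based
  //                       // pointer arithmetic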
1536
1537  // Unlike LLVM codegen, the handling of ABI bits for index sizes is deferred
1538  // to LLVM lowering.
1539
1540 // If this is subtraction, negate the index.
1541 if (isSubtraction)
1543
1545
1546 const PointerType *pointerType =
1547 pointerOperand->getType()->getAs<PointerType>();
1548 if (!pointerType) {
1549 cgf.cgm.errorNYI("Objective-C:pointer arithmetic with non-pointer type");
1550 return nullptr;
1551 }
1552
1553 QualType elementType = pointerType->getPointeeType();
1554 if (cgf.getContext().getAsVariableArrayType(elementType)) {
1555 cgf.cgm.errorNYI("variable array type");
1556 return nullptr;
1557 }
1558
1559 if (elementType->isVoidType() || elementType->isFunctionType()) {
1560 cgf.cgm.errorNYI("void* or function pointer arithmetic");
1561 return nullptr;
1562 }
1563
1565 return cgf.getBuilder().create<cir::PtrStrideOp>(
1566 cgf.getLoc(op.e->getExprLoc()), pointer.getType(), pointer, index);
1567}
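// Illustrative note (hypothetical example inputs, assuming default options):
//
//   char *p, *q; long n;
//   q = p + n;           // pointer + index: lowered via cir::PtrStrideOp
//   q = n + p;           // operands are swapped so the pointer is the base
//   q = p - n;           // subtraction: the index is negated first
//   q = (char *)0 + n;   // null-base idiom described above: emitted as a
//                        // direct int-to-pointer cast, not a stride off null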
1568
1569mlir::Value ScalarExprEmitter::emitMul(const BinOpInfo &ops) {
1570 const mlir::Location loc = cgf.getLoc(ops.loc);
1571 if (ops.compType->isSignedIntegerOrEnumerationType()) {
1572 switch (cgf.getLangOpts().getSignedOverflowBehavior()) {
1573 case LangOptions::SOB_Defined:
1574 if (!cgf.sanOpts.has(SanitizerKind::SignedIntegerOverflow))
1575 return builder.createMul(loc, ops.lhs, ops.rhs);
1576 [[fallthrough]];
1577 case LangOptions::SOB_Undefined:
1578 if (!cgf.sanOpts.has(SanitizerKind::SignedIntegerOverflow))
1579 return builder.createNSWMul(loc, ops.lhs, ops.rhs);
1580 [[fallthrough]];
1581 case LangOptions::SOB_Trapping:
1582 if (canElideOverflowCheck(cgf.getContext(), ops))
1583 return builder.createNSWMul(loc, ops.lhs, ops.rhs);
1584 cgf.cgm.errorNYI("sanitizers");
1585 }
1586 }
1587 if (ops.fullType->isConstantMatrixType()) {
1589 cgf.cgm.errorNYI("matrix types");
1590 return nullptr;
1591 }
1592 if (ops.compType->isUnsignedIntegerType() &&
1593 cgf.sanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
1594 !canElideOverflowCheck(cgf.getContext(), ops))
1595 cgf.cgm.errorNYI("unsigned int overflow sanitizer");
1596
1597 if (cir::isFPOrVectorOfFPType(ops.lhs.getType())) {
1599 return builder.createFMul(loc, ops.lhs, ops.rhs);
1600 }
1601
1602 if (ops.isFixedPointOp()) {
1604 cgf.cgm.errorNYI("fixed point");
1605 return nullptr;
1606 }
1607
1608 return builder.create<cir::BinOp>(cgf.getLoc(ops.loc),
1609 cgf.convertType(ops.fullType),
1610 cir::BinOpKind::Mul, ops.lhs, ops.rhs);
1611}
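// Illustrative note: for a signed multiplication such as the hypothetical
//
//   int a, b;
//   int r = a * b;
//
// the builder call chosen above depends on the signed-overflow mode (a sketch,
// assuming no sanitizers are enabled):
//   -fwrapv  (SOB_Defined)   -> createMul     (wrapping multiplication)
//   default  (SOB_Undefined) -> createNSWMul  (overflow is undefined)
//   -ftrapv  (SOB_Trapping)  -> createNSWMul only when canElideOverflowCheck()
//                               holds; otherwise the check is still errorNYI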
1612mlir::Value ScalarExprEmitter::emitDiv(const BinOpInfo &ops) {
1613 return builder.create<cir::BinOp>(cgf.getLoc(ops.loc),
1614 cgf.convertType(ops.fullType),
1615 cir::BinOpKind::Div, ops.lhs, ops.rhs);
1616}
1617mlir::Value ScalarExprEmitter::emitRem(const BinOpInfo &ops) {
1618 return builder.create<cir::BinOp>(cgf.getLoc(ops.loc),
1619 cgf.convertType(ops.fullType),
1620 cir::BinOpKind::Rem, ops.lhs, ops.rhs);
1621}
1622
1623mlir::Value ScalarExprEmitter::emitAdd(const BinOpInfo &ops) {
1624 if (mlir::isa<cir::PointerType>(ops.lhs.getType()) ||
1625 mlir::isa<cir::PointerType>(ops.rhs.getType()))
1626 return emitPointerArithmetic(cgf, ops, /*isSubtraction=*/false);
1627
1628 const mlir::Location loc = cgf.getLoc(ops.loc);
1629 if (ops.compType->isSignedIntegerOrEnumerationType()) {
1630 switch (cgf.getLangOpts().getSignedOverflowBehavior()) {
1631 case LangOptions::SOB_Defined:
1632 if (!cgf.sanOpts.has(SanitizerKind::SignedIntegerOverflow))
1633 return builder.createAdd(loc, ops.lhs, ops.rhs);
1634 [[fallthrough]];
1635 case LangOptions::SOB_Undefined:
1636 if (!cgf.sanOpts.has(SanitizerKind::SignedIntegerOverflow))
1637 return builder.createNSWAdd(loc, ops.lhs, ops.rhs);
1638 [[fallthrough]];
1639 case LangOptions::SOB_Trapping:
1640 if (canElideOverflowCheck(cgf.getContext(), ops))
1641 return builder.createNSWAdd(loc, ops.lhs, ops.rhs);
1642 cgf.cgm.errorNYI("sanitizers");
1643 }
1644 }
1645 if (ops.fullType->isConstantMatrixType()) {
1647 cgf.cgm.errorNYI("matrix types");
1648 return nullptr;
1649 }
1650
1651 if (ops.compType->isUnsignedIntegerType() &&
1652 cgf.sanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
1653 !canElideOverflowCheck(cgf.getContext(), ops))
1654 cgf.cgm.errorNYI("unsigned int overflow sanitizer");
1655
1656 if (cir::isFPOrVectorOfFPType(ops.lhs.getType())) {
1658 return builder.createFAdd(loc, ops.lhs, ops.rhs);
1659 }
1660
1661 if (ops.isFixedPointOp()) {
1663 cgf.cgm.errorNYI("fixed point");
1664 return {};
1665 }
1666
1667 return builder.create<cir::BinOp>(loc, cgf.convertType(ops.fullType),
1668 cir::BinOpKind::Add, ops.lhs, ops.rhs);
1669}
1670
1671mlir::Value ScalarExprEmitter::emitSub(const BinOpInfo &ops) {
1672 const mlir::Location loc = cgf.getLoc(ops.loc);
1673 // The LHS is always a pointer if either side is.
1674 if (!mlir::isa<cir::PointerType>(ops.lhs.getType())) {
1675 if (ops.compType->isSignedIntegerOrEnumerationType()) {
1676 switch (cgf.getLangOpts().getSignedOverflowBehavior()) {
1677 case LangOptions::SOB_Defined: {
1678 if (!cgf.sanOpts.has(SanitizerKind::SignedIntegerOverflow))
1679 return builder.createSub(loc, ops.lhs, ops.rhs);
1680 [[fallthrough]];
1681 }
1682 case LangOptions::SOB_Undefined:
1683 if (!cgf.sanOpts.has(SanitizerKind::SignedIntegerOverflow))
1684 return builder.createNSWSub(loc, ops.lhs, ops.rhs);
1685 [[fallthrough]];
1686 case LangOptions::SOB_Trapping:
1687 if (canElideOverflowCheck(cgf.getContext(), ops))
1688 return builder.createNSWSub(loc, ops.lhs, ops.rhs);
1689 cgf.cgm.errorNYI("sanitizers");
1690 }
1691 }
1692
1693 if (ops.fullType->isConstantMatrixType()) {
1695 cgf.cgm.errorNYI("matrix types");
1696 return nullptr;
1697 }
1698
1699 if (ops.compType->isUnsignedIntegerType() &&
1700 cgf.sanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
1701 !canElideOverflowCheck(cgf.getContext(), ops))
1702 cgf.cgm.errorNYI("unsigned int overflow sanitizer");
1703
1704 if (cir::isFPOrVectorOfFPType(ops.lhs.getType())) {
1706 return builder.createFSub(loc, ops.lhs, ops.rhs);
1707 }
1708
1709 if (ops.isFixedPointOp()) {
1711 cgf.cgm.errorNYI("fixed point");
1712 return {};
1713 }
1714
1715 return builder.create<cir::BinOp>(cgf.getLoc(ops.loc),
1716 cgf.convertType(ops.fullType),
1717 cir::BinOpKind::Sub, ops.lhs, ops.rhs);
1718 }
1719
1720 // If the RHS is not a pointer, then we have normal pointer
1721 // arithmetic.
1722 if (!mlir::isa<cir::PointerType>(ops.rhs.getType()))
1723 return emitPointerArithmetic(cgf, ops, /*isSubtraction=*/true);
1724
1725 // Otherwise, this is a pointer subtraction.
1726
1727 // Do the raw subtraction part.
1728 //
1729 // TODO(cir): note for LLVM lowering out of this; when expanding this into
1730 // LLVM we shall take VLA's, division by element size, etc.
1731 //
1732 // See more in `EmitSub` in CGExprScalar.cpp.
1734 cgf.cgm.errorNYI("ptrdiff");
1735 return {};
1736}
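// Illustrative note (hypothetical inputs):
//
//   int *p, *q; int n;
//   n - 1;   // integer - integer: same signed-overflow handling as emitMul
//   p - n;   // pointer - integer: emitPointerArithmetic(isSubtraction=true)
//   p - q;   // pointer - pointer: ptrdiff, currently reported as errorNYI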
1737
1738mlir::Value ScalarExprEmitter::emitShl(const BinOpInfo &ops) {
1739 // TODO: This misses out on the sanitizer check below.
1740 if (ops.isFixedPointOp()) {
1742 cgf.cgm.errorNYI("fixed point");
1743 return {};
1744 }
1745
1746 // CIR accepts shift between different types, meaning nothing special
1747 // to be done here. OTOH, LLVM requires the LHS and RHS to be the same type:
1748 // promote or truncate the RHS to the same size as the LHS.
1749
1750 bool sanitizeSignedBase = cgf.sanOpts.has(SanitizerKind::ShiftBase) &&
1751 ops.compType->hasSignedIntegerRepresentation() &&
1752 !cgf.getLangOpts().isSignedOverflowDefined() &&
1753 !cgf.getLangOpts().CPlusPlus20;
1754 bool sanitizeUnsignedBase =
1755 cgf.sanOpts.has(SanitizerKind::UnsignedShiftBase) &&
1756 ops.compType->hasUnsignedIntegerRepresentation();
1757 bool sanitizeBase = sanitizeSignedBase || sanitizeUnsignedBase;
1758 bool sanitizeExponent = cgf.sanOpts.has(SanitizerKind::ShiftExponent);
1759
1760 // OpenCL 6.3j: shift values are effectively % word size of LHS.
1761 if (cgf.getLangOpts().OpenCL)
1762 cgf.cgm.errorNYI("opencl");
1763 else if ((sanitizeBase || sanitizeExponent) &&
1764 mlir::isa<cir::IntType>(ops.lhs.getType()))
1765 cgf.cgm.errorNYI("sanitizers");
1766
1767 return builder.createShiftLeft(cgf.getLoc(ops.loc), ops.lhs, ops.rhs);
1768}
1769
1770mlir::Value ScalarExprEmitter::emitShr(const BinOpInfo &ops) {
1771 // TODO: This misses out on the sanitizer check below.
1772 if (ops.isFixedPointOp()) {
1774 cgf.cgm.errorNYI("fixed point");
1775 return {};
1776 }
1777
1778 // CIR accepts shift between different types, meaning nothing special
1779 // to be done here. OTOH, LLVM requires the LHS and RHS to be the same type:
1780 // promote or truncate the RHS to the same size as the LHS.
1781
1782 // OpenCL 6.3j: shift values are effectively % word size of LHS.
1783 if (cgf.getLangOpts().OpenCL)
1784 cgf.cgm.errorNYI("opencl");
1785 else if (cgf.sanOpts.has(SanitizerKind::ShiftExponent) &&
1786 mlir::isa<cir::IntType>(ops.lhs.getType()))
1787 cgf.cgm.errorNYI("sanitizers");
1788
1789 // Note that we don't need to distinguish unsigned treatment at this
1790 // point since it will be handled later by LLVM lowering.
1791 return builder.createShiftRight(cgf.getLoc(ops.loc), ops.lhs, ops.rhs);
1792}
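// Illustrative note: CIR keeps mixed-width shifts as written, e.g. for the
// hypothetical
//
//   unsigned long v; int s;
//   v << s;   // LHS and RHS keep their own widths at the CIR level
//   v >> s;   // arithmetic vs. logical right shift is decided at LLVM
//             // lowering from the operand's signedness
//
// whereas LLVM IR requires the RHS to be promoted or truncated to the LHS
// width first, as the comments above describe.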
1793
1794mlir::Value ScalarExprEmitter::emitAnd(const BinOpInfo &ops) {
1795 return builder.create<cir::BinOp>(cgf.getLoc(ops.loc),
1796 cgf.convertType(ops.fullType),
1797 cir::BinOpKind::And, ops.lhs, ops.rhs);
1798}
1799mlir::Value ScalarExprEmitter::emitXor(const BinOpInfo &ops) {
1800 return builder.create<cir::BinOp>(cgf.getLoc(ops.loc),
1801 cgf.convertType(ops.fullType),
1802 cir::BinOpKind::Xor, ops.lhs, ops.rhs);
1803}
1804mlir::Value ScalarExprEmitter::emitOr(const BinOpInfo &ops) {
1805 return builder.create<cir::BinOp>(cgf.getLoc(ops.loc),
1806 cgf.convertType(ops.fullType),
1807 cir::BinOpKind::Or, ops.lhs, ops.rhs);
1808}
1809
1810// Emit code for an explicit or implicit cast. Implicit
1811// casts have to handle a broader range of conversions than explicit
1812// casts, as they handle things like function to ptr-to-function decay
1813// etc.
1814mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *ce) {
1815 Expr *subExpr = ce->getSubExpr();
1816 QualType destTy = ce->getType();
1817 CastKind kind = ce->getCastKind();
1818
1819 // These cases are generally not written to ignore the result of evaluating
1820 // their sub-expressions, so we clear this now.
1821 ignoreResultAssign = false;
1822
1823 switch (kind) {
1824 case clang::CK_Dependent:
1825 llvm_unreachable("dependent cast kind in CIR gen!");
1826 case clang::CK_BuiltinFnToFnPtr:
1827 llvm_unreachable("builtin functions are handled elsewhere");
1828
1829 case CK_CPointerToObjCPointerCast:
1830 case CK_BlockPointerToObjCPointerCast:
1831 case CK_AnyPointerToBlockPointerCast:
1832 case CK_BitCast: {
1833 mlir::Value src = Visit(const_cast<Expr *>(subExpr));
1834 mlir::Type dstTy = cgf.convertType(destTy);
1835
1837
1838 if (cgf.sanOpts.has(SanitizerKind::CFIUnrelatedCast))
1839 cgf.getCIRGenModule().errorNYI(subExpr->getSourceRange(),
1840 "sanitizer support");
1841
1842 if (cgf.cgm.getCodeGenOpts().StrictVTablePointers)
1843 cgf.getCIRGenModule().errorNYI(subExpr->getSourceRange(),
1844 "strict vtable pointers");
1845
1846 // Update heapallocsite metadata when there is an explicit pointer cast.
1848
1849 // If Src is a fixed vector and Dst is a scalable vector, and both have the
1850 // same element type, use the llvm.vector.insert intrinsic to perform the
1851 // bitcast.
1853
1854 // If Src is a scalable vector and Dst is a fixed vector, and both have the
1855 // same element type, use the llvm.vector.extract intrinsic to perform the
1856 // bitcast.
1858
1859 // Perform VLAT <-> VLST bitcast through memory.
1860 // TODO: since the llvm.experimental.vector.{insert,extract} intrinsics
1861 // require the element types of the vectors to be the same, we
1862 // need to keep this around for bitcasts between VLAT <-> VLST where
1863 // the element types of the vectors are not the same, until we figure
1864 // out a better way of doing these casts.
1866
1867 return cgf.getBuilder().createBitcast(cgf.getLoc(subExpr->getSourceRange()),
1868 src, dstTy);
1869 }
1870
1871 case CK_AtomicToNonAtomic: {
1872 cgf.getCIRGenModule().errorNYI(subExpr->getSourceRange(),
1873 "CastExpr: ", ce->getCastKindName());
1874 mlir::Location loc = cgf.getLoc(subExpr->getSourceRange());
1875 return cgf.createDummyValue(loc, destTy);
1876 }
1877 case CK_NonAtomicToAtomic:
1878 case CK_UserDefinedConversion:
1879 return Visit(const_cast<Expr *>(subExpr));
1880 case CK_NoOp: {
1881 auto v = Visit(const_cast<Expr *>(subExpr));
1882 if (v) {
1883 // CK_NoOp can model a pointer qualification conversion, which can remove
1884 // an array bound and change the IR type.
1885 // FIXME: Once pointee types are removed from IR, remove this.
1886 mlir::Type t = cgf.convertType(destTy);
1887 if (t != v.getType())
1888 cgf.getCIRGenModule().errorNYI("pointer qualification conversion");
1889 }
1890 return v;
1891 }
1892
1893 case CK_ArrayToPointerDecay:
1894 return cgf.emitArrayToPointerDecay(subExpr).getPointer();
1895
1896 case CK_NullToPointer: {
1897 if (mustVisitNullValue(subExpr))
1898 cgf.emitIgnoredExpr(subExpr);
1899
1900 // Note that DestTy is used as the MLIR type instead of a custom
1901 // nullptr type.
1902 mlir::Type ty = cgf.convertType(destTy);
1903 return builder.getNullPtr(ty, cgf.getLoc(subExpr->getExprLoc()));
1904 }
1905
1906 case CK_LValueToRValue:
1907 assert(cgf.getContext().hasSameUnqualifiedType(subExpr->getType(), destTy));
1908 assert(subExpr->isGLValue() && "lvalue-to-rvalue applied to r-value!");
1909 return Visit(const_cast<Expr *>(subExpr));
1910
1911 case CK_IntegralCast: {
1912 ScalarConversionOpts opts;
1913 if (auto *ice = dyn_cast<ImplicitCastExpr>(ce)) {
1914 if (!ice->isPartOfExplicitCast())
1915 opts = ScalarConversionOpts(cgf.sanOpts);
1916 }
1917 return emitScalarConversion(Visit(subExpr), subExpr->getType(), destTy,
1918 ce->getExprLoc(), opts);
1919 }
1920
1921 case CK_FloatingComplexToReal:
1922 case CK_IntegralComplexToReal:
1923 case CK_FloatingComplexToBoolean:
1924 case CK_IntegralComplexToBoolean: {
1925 mlir::Value value = cgf.emitComplexExpr(subExpr);
1926 return emitComplexToScalarConversion(cgf.getLoc(ce->getExprLoc()), value,
1927 kind, destTy);
1928 }
1929
1930 case CK_FloatingRealToComplex:
1931 case CK_FloatingComplexCast:
1932 case CK_IntegralRealToComplex:
1933 case CK_IntegralComplexCast:
1934 case CK_IntegralComplexToFloatingComplex:
1935 case CK_FloatingComplexToIntegralComplex:
1936 llvm_unreachable("scalar cast to non-scalar value");
1937
1938 case CK_PointerToIntegral: {
1939 assert(!destTy->isBooleanType() && "bool should use PointerToBool");
1940 if (cgf.cgm.getCodeGenOpts().StrictVTablePointers)
1941 cgf.getCIRGenModule().errorNYI(subExpr->getSourceRange(),
1942 "strict vtable pointers");
1943 return builder.createPtrToInt(Visit(subExpr), cgf.convertType(destTy));
1944 }
1945 case CK_ToVoid:
1946 cgf.emitIgnoredExpr(subExpr);
1947 return {};
1948
1949 case CK_IntegralToFloating:
1950 case CK_FloatingToIntegral:
1951 case CK_FloatingCast:
1952 case CK_FixedPointToFloating:
1953 case CK_FloatingToFixedPoint: {
1954 if (kind == CK_FixedPointToFloating || kind == CK_FloatingToFixedPoint) {
1955 cgf.getCIRGenModule().errorNYI(subExpr->getSourceRange(),
1956 "fixed point casts");
1957 return {};
1958 }
1960 return emitScalarConversion(Visit(subExpr), subExpr->getType(), destTy,
1961 ce->getExprLoc());
1962 }
1963
1964 case CK_IntegralToBoolean:
1965 return emitIntToBoolConversion(Visit(subExpr),
1966 cgf.getLoc(ce->getSourceRange()));
1967
1968 case CK_PointerToBoolean:
1969 return emitPointerToBoolConversion(Visit(subExpr), subExpr->getType());
1970 case CK_FloatingToBoolean:
1971 return emitFloatToBoolConversion(Visit(subExpr),
1972 cgf.getLoc(subExpr->getExprLoc()));
1973 case CK_MemberPointerToBoolean: {
1974 mlir::Value memPtr = Visit(subExpr);
1975 return builder.createCast(cgf.getLoc(ce->getSourceRange()),
1976 cir::CastKind::member_ptr_to_bool, memPtr,
1977 cgf.convertType(destTy));
1978 }
1979
1980 case CK_VectorSplat: {
1981 // Create a vector object and fill all elements with the same scalar value.
1982 assert(destTy->isVectorType() && "CK_VectorSplat to non-vector type");
1983 return builder.create<cir::VecSplatOp>(
1984 cgf.getLoc(subExpr->getSourceRange()), cgf.convertType(destTy),
1985 Visit(subExpr));
1986 }
1987 case CK_FunctionToPointerDecay:
1988 return cgf.emitLValue(subExpr).getPointer();
1989
1990 default:
1991 cgf.getCIRGenModule().errorNYI(subExpr->getSourceRange(),
1992 "CastExpr: ", ce->getCastKindName());
1993 }
1994 return {};
1995}
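// Illustrative note: hypothetical C++ inputs that reach some of the cast kinds
// handled above (a sketch, not an exhaustive mapping):
//
//   int arr[4];
//   int *p = arr;        // CK_ArrayToPointerDecay
//   p = nullptr;         // CK_NullToPointer
//   long l = arr[0];     // CK_LValueToRValue, then CK_IntegralCast
//   bool b = p;          // CK_PointerToBoolean
//   (void)l;             // CK_ToVoid: the value is evaluated and discarded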
1996
1997mlir::Value ScalarExprEmitter::VisitCallExpr(const CallExpr *e) {
1998 if (e->getCallReturnType(cgf.getContext())->isReferenceType())
1999 return emitLoadOfLValue(e);
2000
2001 auto v = cgf.emitCallExpr(e).getValue();
2003 return v;
2004}
2005
2006mlir::Value ScalarExprEmitter::VisitMemberExpr(MemberExpr *e) {
2007 // TODO(cir): The classic codegen calls tryEmitAsConstant() here. Folding
2008 // constants sounds like work for MLIR optimizers, but we'll keep an assertion
2009 // for now.
2011 Expr::EvalResult result;
2012 if (e->EvaluateAsInt(result, cgf.getContext(), Expr::SE_AllowSideEffects)) {
2013 cgf.cgm.errorNYI(e->getSourceRange(), "Constant integer member expr");
2014 // Fall through to emit this as a non-constant access.
2015 }
2016 return emitLoadOfLValue(e);
2017}
2018
2019mlir::Value ScalarExprEmitter::VisitInitListExpr(InitListExpr *e) {
2020 const unsigned numInitElements = e->getNumInits();
2021
2022 if (e->hadArrayRangeDesignator()) {
2023 cgf.cgm.errorNYI(e->getSourceRange(), "ArrayRangeDesignator");
2024 return {};
2025 }
2026
2027 if (e->getType()->isVectorType()) {
2028 const auto vectorType =
2029 mlir::cast<cir::VectorType>(cgf.convertType(e->getType()));
2030
2031 SmallVector<mlir::Value, 16> elements;
2032 for (Expr *init : e->inits()) {
2033 elements.push_back(Visit(init));
2034 }
2035
2036 // Zero-initialize any remaining values.
2037 if (numInitElements < vectorType.getSize()) {
2038 const mlir::Value zeroValue = cgf.getBuilder().getNullValue(
2039 vectorType.getElementType(), cgf.getLoc(e->getSourceRange()));
2040 std::fill_n(std::back_inserter(elements),
2041 vectorType.getSize() - numInitElements, zeroValue);
2042 }
2043
2044 return cgf.getBuilder().create<cir::VecCreateOp>(
2045 cgf.getLoc(e->getSourceRange()), vectorType, elements);
2046 }
2047
2048 // C++11 value-initialization for the scalar.
2049 if (numInitElements == 0)
2050 return emitNullValue(e->getType(), cgf.getLoc(e->getExprLoc()));
2051
2052 return Visit(e->getInit(0));
2053}
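// Illustrative note (hypothetical input, using the GCC vector extension):
//
//   typedef int vec4 __attribute__((vector_size(16)));
//   vec4 v = {1, 2};   // elements 2 and 3 are filled with the null value
//   int  s = {};       // scalar value-initialization: emitNullValue()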
2054
2055mlir::Value CIRGenFunction::emitScalarConversion(mlir::Value src,
2056 QualType srcTy, QualType dstTy,
2057 SourceLocation loc) {
2058 assert(CIRGenFunction::hasScalarEvaluationKind(srcTy) &&
2059 CIRGenFunction::hasScalarEvaluationKind(dstTy) &&
2060 "Invalid scalar expression to emit");
2061 return ScalarExprEmitter(*this, builder)
2062 .emitScalarConversion(src, srcTy, dstTy, loc);
2063}
2064
2065mlir::Value CIRGenFunction::emitComplexToScalarConversion(mlir::Value src,
2066 QualType srcTy,
2067 QualType dstTy,
2068 SourceLocation loc) {
2069 assert(srcTy->isAnyComplexType() && hasScalarEvaluationKind(dstTy) &&
2070 "Invalid complex -> scalar conversion");
2071
2072 QualType complexElemTy = srcTy->castAs<ComplexType>()->getElementType();
2073 if (dstTy->isBooleanType()) {
2074 auto kind = complexElemTy->isFloatingType()
2075 ? cir::CastKind::float_complex_to_bool
2076 : cir::CastKind::int_complex_to_bool;
2077 return builder.createCast(getLoc(loc), kind, src, convertType(dstTy));
2078 }
2079
2080 auto kind = complexElemTy->isFloatingType()
2081 ? cir::CastKind::float_complex_to_real
2082 : cir::CastKind::int_complex_to_real;
2083 mlir::Value real =
2084 builder.createCast(getLoc(loc), kind, src, convertType(complexElemTy));
2085 return emitScalarConversion(real, complexElemTy, dstTy, loc);
2086}
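// Illustrative note (hypothetical inputs):
//
//   double _Complex z;
//   _Bool  b = z;   // float_complex_to_bool cast
//   double d = z;   // float_complex_to_real, then a scalar conversion
//   int    i = z;   // float_complex_to_real, then a float -> int conversion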
2087
2088mlir::Value ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *e) {
2089 // Perform vector logical not on comparison with zero vector.
2090 if (e->getType()->isVectorType() &&
2091 e->getType()->castAs<VectorType>()->getVectorKind() ==
2092 VectorKind::Generic) {
2093 mlir::Value oper = Visit(e->getSubExpr());
2094 mlir::Location loc = cgf.getLoc(e->getExprLoc());
2095 auto operVecTy = mlir::cast<cir::VectorType>(oper.getType());
2096 auto exprVecTy = mlir::cast<cir::VectorType>(cgf.convertType(e->getType()));
2097 mlir::Value zeroVec = builder.getNullValue(operVecTy, loc);
2098 return cir::VecCmpOp::create(builder, loc, exprVecTy, cir::CmpOpKind::eq,
2099 oper, zeroVec);
2100 }
2101
2102 // Compare operand to zero.
2103 mlir::Value boolVal = cgf.evaluateExprAsBool(e->getSubExpr());
2104
2105 // Invert value.
2106 boolVal = builder.createNot(boolVal);
2107
2108 // ZExt result to the expr type.
2109 return maybePromoteBoolResult(boolVal, cgf.convertType(e->getType()));
2110}
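// Illustrative note (hypothetical inputs):
//
//   typedef int v4i __attribute__((vector_size(16)));
//   v4i a;  int x;
//   !a;   // vector operand: element-wise comparison of 'a' against a zero
//         // vector via cir::VecCmpOp with CmpOpKind::eq
//   !x;   // scalar operand: compare against zero, invert, then widen the
//         // boolean back to the expression type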
2111
2112mlir::Value ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *e) {
2113 QualType promotionTy = getPromotionType(e->getSubExpr()->getType());
2114 mlir::Value result = VisitRealImag(e, promotionTy);
2115 if (result && !promotionTy.isNull())
2116 result = emitUnPromotedValue(result, e->getType());
2117 return result;
2118}
2119
2120mlir::Value ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *e) {
2121 QualType promotionTy = getPromotionType(e->getSubExpr()->getType());
2122 mlir::Value result = VisitRealImag(e, promotionTy);
2123 if (result && !promotionTy.isNull())
2124 result = emitUnPromotedValue(result, e->getType());
2125 return result;
2126}
2127
2128mlir::Value ScalarExprEmitter::VisitRealImag(const UnaryOperator *e,
2129 QualType promotionTy) {
2130 assert((e->getOpcode() == clang::UO_Real ||
2131 e->getOpcode() == clang::UO_Imag) &&
2132 "Invalid UnaryOp kind for ComplexType Real or Imag");
2133
2134 Expr *op = e->getSubExpr();
2135 mlir::Location loc = cgf.getLoc(e->getExprLoc());
2136 if (op->getType()->isAnyComplexType()) {
2137 // If it's an l-value, load through the appropriate subobject l-value.
2138 // Note that we have to ask `e` because `op` might be an l-value that
2139 // this won't work for, e.g. an Obj-C property
2140 mlir::Value complex = cgf.emitComplexExpr(op);
2141 if (e->isGLValue() && !promotionTy.isNull()) {
2142 promotionTy = promotionTy->isAnyComplexType()
2143 ? promotionTy
2144 : cgf.getContext().getComplexType(promotionTy);
2145 complex = cgf.emitPromotedValue(complex, promotionTy);
2146 }
2147
2148 return e->getOpcode() == clang::UO_Real
2149 ? builder.createComplexReal(loc, complex)
2150 : builder.createComplexImag(loc, complex);
2151 }
2152
2153 if (e->getOpcode() == UO_Real) {
2154 mlir::Value operand = promotionTy.isNull()
2155 ? Visit(op)
2156 : cgf.emitPromotedScalarExpr(op, promotionTy);
2157 return builder.createComplexReal(loc, operand);
2158 }
2159
2160 // __imag on a scalar returns zero. Emit the subexpr to ensure side
2161 // effects are evaluated, but not the actual value.
2162 if (op->isGLValue())
2163 cgf.emitLValue(op);
2164 else if (!promotionTy.isNull())
2165 cgf.emitPromotedScalarExpr(op, promotionTy);
2166 else
2167 cgf.emitScalarExpr(op);
2168
2169 mlir::Type valueTy =
2170 cgf.convertType(promotionTy.isNull() ? e->getType() : promotionTy);
2171 return builder.getNullValue(valueTy, loc);
2172}
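// Illustrative note (hypothetical inputs):
//
//   double _Complex z;  double d;
//   __real__ z;   // complex operand: extracts the real component
//   __imag__ z;   // complex operand: extracts the imaginary component
//   __real__ d;   // scalar operand: yields the (possibly promoted) value of d
//   __imag__ d;   // scalar operand: d is evaluated for side effects and a
//                 // null value of the result type is returned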
2173
2174/// Return the size or alignment of the type of argument of the sizeof
2175/// expression as an integer.
2176mlir::Value ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr(
2177 const UnaryExprOrTypeTraitExpr *e) {
2178 const QualType typeToSize = e->getTypeOfArgument();
2179 const mlir::Location loc = cgf.getLoc(e->getSourceRange());
2180 if (auto kind = e->getKind();
2181 kind == UETT_SizeOf || kind == UETT_DataSizeOf) {
2182 if (cgf.getContext().getAsVariableArrayType(typeToSize)) {
2183 cgf.getCIRGenModule().errorNYI(e->getSourceRange(),
2184 "sizeof operator for VariableArrayType",
2185 e->getStmtClassName());
2186 return builder.getConstant(
2187 loc, cir::IntAttr::get(cgf.cgm.UInt64Ty,
2188 llvm::APSInt(llvm::APInt(64, 1), true)));
2189 }
2190 } else if (e->getKind() == UETT_OpenMPRequiredSimdAlign) {
2191 cgf.getCIRGenModule().errorNYI(
2192 e->getSourceRange(), "sizeof operator for OpenMpRequiredSimdAlign",
2193 e->getStmtClassName());
2194 return builder.getConstant(
2195 loc, cir::IntAttr::get(cgf.cgm.UInt64Ty,
2196 llvm::APSInt(llvm::APInt(64, 1), true)));
2197 }
2198
2199 return builder.getConstant(
2200 loc, cir::IntAttr::get(cgf.cgm.UInt64Ty,
2201 e->EvaluateKnownConstInt(cgf.getContext())));
2202}
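// Illustrative note: for complete types, both forms below fold to a 64-bit
// integer constant via builder.getConstant (hypothetical inputs):
//
//   struct S { int a; double b; };
//   unsigned long n = sizeof(struct S);
//   unsigned long m = _Alignof(struct S);
//
// sizeof applied to a variable-length array, and the OpenMP required
// SIMD alignment query, are still reported as errorNYI above.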
2203
2204/// Return true if the specified expression is cheap enough and side-effect-free
2205/// enough to evaluate unconditionally instead of conditionally. This is used
2206/// to convert control flow into selects in some cases.
2207/// TODO(cir): can be shared with LLVM codegen.
2208static bool isCheapEnoughToEvaluateUnconditionally(const Expr *e,
2209 CIRGenFunction &cgf) {
2210 // Anything that is an integer or floating point constant is fine.
2211 return e->IgnoreParens()->isEvaluatable(cgf.getContext());
2212
2213 // Even non-volatile automatic variables can't be evaluated unconditionally.
2214 // Referencing a thread_local may cause non-trivial initialization work to
2215 // occur. If we're inside a lambda and one of the variables is from the scope
2216 // outside the lambda, that function may have returned already. Reading its
2217 // locals is a bad idea. Also, these reads may introduce races that didn't
2218 // exist in the source-level program.
2219}
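// Illustrative note: the helper above currently reduces to "is the expression
// a constant", so in VisitAbstractConditionalOperator below (hypothetical
// inputs):
//
//   int r = flag ? 4 : 5;     // both arms constant: emitted as a select
//   int s = flag ? f() : 5;   // f() may have side effects: emitted as a
//                             // cir::TernaryOp with one region per arm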
2220
2221mlir::Value ScalarExprEmitter::VisitAbstractConditionalOperator(
2222 const AbstractConditionalOperator *e) {
2223 CIRGenBuilderTy &builder = cgf.getBuilder();
2224 mlir::Location loc = cgf.getLoc(e->getSourceRange());
2225 ignoreResultAssign = false;
2226
2227 // Bind the common expression if necessary.
2228 CIRGenFunction::OpaqueValueMapping binding(cgf, e);
2229
2230 Expr *condExpr = e->getCond();
2231 Expr *lhsExpr = e->getTrueExpr();
2232 Expr *rhsExpr = e->getFalseExpr();
2233
2234 // If the condition constant folds and can be elided, try to avoid emitting
2235 // the condition and the dead arm.
2236 bool condExprBool;
2237 if (cgf.constantFoldsToBool(condExpr, condExprBool)) {
2238 Expr *live = lhsExpr, *dead = rhsExpr;
2239 if (!condExprBool)
2240 std::swap(live, dead);
2241
2242 // If the dead side doesn't have labels we need, just emit the Live part.
2243 if (!cgf.containsLabel(dead)) {
2244 if (condExprBool)
2245 assert(!cir::MissingFeatures::incrementProfileCounter());
2246 mlir::Value result = Visit(live);
2247
2248 // If the live part is a throw expression, it acts like it has a void
2249 // type, so evaluating it returns a null Value. However, a conditional
2250 // with non-void type must return a non-null Value.
2251 if (!result && !e->getType()->isVoidType()) {
2252 cgf.cgm.errorNYI(e->getSourceRange(),
2253 "throw expression in conditional operator");
2254 result = {};
2255 }
2256
2257 return result;
2258 }
2259 }
2260
2261 QualType condType = condExpr->getType();
2262
2263 // OpenCL: If the condition is a vector, we can treat this condition like
2264 // the select function.
2265 if ((cgf.getLangOpts().OpenCL && condType->isVectorType()) ||
2266 condType->isExtVectorType()) {
2268 cgf.cgm.errorNYI(e->getSourceRange(), "vector ternary op");
2269 }
2270
2271 if (condType->isVectorType() || condType->isSveVLSBuiltinType()) {
2272 if (!condType->isVectorType()) {
2274 cgf.cgm.errorNYI(loc, "TernaryOp for SVE vector");
2275 return {};
2276 }
2277
2278 mlir::Value condValue = Visit(condExpr);
2279 mlir::Value lhsValue = Visit(lhsExpr);
2280 mlir::Value rhsValue = Visit(rhsExpr);
2281 return builder.create<cir::VecTernaryOp>(loc, condValue, lhsValue,
2282 rhsValue);
2283 }
2284
2285 // If this is a really simple expression (like x ? 4 : 5), emit this as a
2286 // select instead of as control flow. We can only do this if it is cheap
2287 // and safe to evaluate the LHS and RHS unconditionally.
2288 if (isCheapEnoughToEvaluateUnconditionally(lhsExpr, cgf) &&
2289 isCheapEnoughToEvaluateUnconditionally(rhsExpr, cgf)) {
2290 bool lhsIsVoid = false;
2291 mlir::Value condV = cgf.evaluateExprAsBool(condExpr);
2293
2294 mlir::Value lhs = Visit(lhsExpr);
2295 if (!lhs) {
2296 lhs = builder.getNullValue(cgf.VoidTy, loc);
2297 lhsIsVoid = true;
2298 }
2299
2300 mlir::Value rhs = Visit(rhsExpr);
2301 if (lhsIsVoid) {
2302 assert(!rhs && "lhs and rhs types must match");
2303 rhs = builder.getNullValue(cgf.VoidTy, loc);
2304 }
2305
2306 return builder.createSelect(loc, condV, lhs, rhs);
2307 }
2308
2309 mlir::Value condV = cgf.emitOpOnBoolExpr(loc, condExpr);
2310 CIRGenFunction::ConditionalEvaluation eval(cgf);
2311 SmallVector<mlir::OpBuilder::InsertPoint, 2> insertPoints{};
2312 mlir::Type yieldTy{};
2313
2314 auto emitBranch = [&](mlir::OpBuilder &b, mlir::Location loc, Expr *expr) {
2315 CIRGenFunction::LexicalScope lexScope{cgf, loc, b.getInsertionBlock()};
2317
2319 eval.beginEvaluation();
2320 mlir::Value branch = Visit(expr);
2321 eval.endEvaluation();
2322
2323 if (branch) {
2324 yieldTy = branch.getType();
2325 b.create<cir::YieldOp>(loc, branch);
2326 } else {
2327 // If LHS or RHS is a throw or void expression we need to patch
2328 // arms so as to properly match yield types.
2329 insertPoints.push_back(b.saveInsertionPoint());
2330 }
2331 };
2332
2333 mlir::Value result = builder
2334 .create<cir::TernaryOp>(
2335 loc, condV,
2336 /*trueBuilder=*/
2337 [&](mlir::OpBuilder &b, mlir::Location loc) {
2338 emitBranch(b, loc, lhsExpr);
2339 },
2340 /*falseBuilder=*/
2341 [&](mlir::OpBuilder &b, mlir::Location loc) {
2342 emitBranch(b, loc, rhsExpr);
2343 })
2344 .getResult();
2345
2346 if (!insertPoints.empty()) {
2347 // If both arms are void, so be it.
2348 if (!yieldTy)
2349 yieldTy = cgf.VoidTy;
2350
2351 // Insert required yields.
2352 for (mlir::OpBuilder::InsertPoint &toInsert : insertPoints) {
2353 mlir::OpBuilder::InsertionGuard guard(builder);
2354 builder.restoreInsertionPoint(toInsert);
2355
2356 // Block does not return: build empty yield.
2357 if (mlir::isa<cir::VoidType>(yieldTy)) {
2358 builder.create<cir::YieldOp>(loc);
2359 } else { // Block returns: set null yield value.
2360 mlir::Value op0 = builder.getNullValue(yieldTy, loc);
2361 builder.create<cir::YieldOp>(loc, op0);
2362 }
2363 }
2364 }
2365
2366 return result;
2367}
2368
2369mlir::Value CIRGenFunction::emitScalarPrePostIncDec(const UnaryOperator *e,
2370 LValue lv,
2371 cir::UnaryOpKind kind,
2372 bool isPre) {
2373 return ScalarExprEmitter(*this, builder)
2374 .emitScalarPrePostIncDec(e, lv, kind, isPre);
2375}