Thanks for visiting codestin.com
Credit goes to clang.llvm.org

clang 22.0.0git
CGExprScalar.cpp
Go to the documentation of this file.
1//===--- CGExprScalar.cpp - Emit LLVM Code for Scalar Exprs ---------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Expr nodes with scalar LLVM types as LLVM code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CGCXXABI.h"
14#include "CGCleanup.h"
15#include "CGDebugInfo.h"
16#include "CGHLSLRuntime.h"
17#include "CGObjCRuntime.h"
18#include "CGOpenMPRuntime.h"
19#include "CGRecordLayout.h"
20#include "CodeGenFunction.h"
21#include "CodeGenModule.h"
22#include "ConstantEmitter.h"
23#include "TargetInfo.h"
24#include "TrapReasonBuilder.h"
26#include "clang/AST/Attr.h"
27#include "clang/AST/DeclObjC.h"
28#include "clang/AST/Expr.h"
35#include "llvm/ADT/APFixedPoint.h"
36#include "llvm/IR/Argument.h"
37#include "llvm/IR/CFG.h"
38#include "llvm/IR/Constants.h"
39#include "llvm/IR/DataLayout.h"
40#include "llvm/IR/DerivedTypes.h"
41#include "llvm/IR/FixedPointBuilder.h"
42#include "llvm/IR/Function.h"
43#include "llvm/IR/GEPNoWrapFlags.h"
44#include "llvm/IR/GetElementPtrTypeIterator.h"
45#include "llvm/IR/GlobalVariable.h"
46#include "llvm/IR/Intrinsics.h"
47#include "llvm/IR/IntrinsicsPowerPC.h"
48#include "llvm/IR/MatrixBuilder.h"
49#include "llvm/IR/Module.h"
50#include "llvm/Support/TypeSize.h"
51#include <cstdarg>
52#include <optional>
53
54using namespace clang;
55using namespace CodeGen;
56using llvm::Value;
57
58//===----------------------------------------------------------------------===//
59// Scalar Expression Emitter
60//===----------------------------------------------------------------------===//
61
namespace llvm {
// Command-line flag referenced by the coverage-related codegen below.
// Declared extern here; defined elsewhere in LLVM — NOTE(review): presumably
// by the coverage instrumentation code, confirm before relying on this.
extern cl::opt<bool> EnableSingleByteCoverage;
} // namespace llvm
65
66namespace {
67
68/// Determine whether the given binary operation may overflow.
69/// Sets \p Result to the value of the operation for BO_Add, BO_Sub, BO_Mul,
70/// and signed BO_{Div,Rem}. For these opcodes, and for unsigned BO_{Div,Rem},
71/// the returned overflow check is precise. The returned value is 'true' for
72/// all other opcodes, to be conservative.
73bool mayHaveIntegerOverflow(llvm::ConstantInt *LHS, llvm::ConstantInt *RHS,
74 BinaryOperator::Opcode Opcode, bool Signed,
75 llvm::APInt &Result) {
76 // Assume overflow is possible, unless we can prove otherwise.
77 bool Overflow = true;
78 const auto &LHSAP = LHS->getValue();
79 const auto &RHSAP = RHS->getValue();
80 if (Opcode == BO_Add) {
81 Result = Signed ? LHSAP.sadd_ov(RHSAP, Overflow)
82 : LHSAP.uadd_ov(RHSAP, Overflow);
83 } else if (Opcode == BO_Sub) {
84 Result = Signed ? LHSAP.ssub_ov(RHSAP, Overflow)
85 : LHSAP.usub_ov(RHSAP, Overflow);
86 } else if (Opcode == BO_Mul) {
87 Result = Signed ? LHSAP.smul_ov(RHSAP, Overflow)
88 : LHSAP.umul_ov(RHSAP, Overflow);
89 } else if (Opcode == BO_Div || Opcode == BO_Rem) {
90 if (Signed && !RHS->isZero())
91 Result = LHSAP.sdiv_ov(RHSAP, Overflow);
92 else
93 return false;
94 }
95 return Overflow;
96}
97
98struct BinOpInfo {
99 Value *LHS;
100 Value *RHS;
101 QualType Ty; // Computation Type.
102 BinaryOperator::Opcode Opcode; // Opcode of BinOp to perform
103 FPOptions FPFeatures;
104 const Expr *E; // Entire expr, for error unsupported. May not be binop.
105
106 /// Check if the binop can result in integer overflow.
107 bool mayHaveIntegerOverflow() const {
108 // Without constant input, we can't rule out overflow.
109 auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS);
110 auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS);
111 if (!LHSCI || !RHSCI)
112 return true;
113
114 llvm::APInt Result;
115 return ::mayHaveIntegerOverflow(
116 LHSCI, RHSCI, Opcode, Ty->hasSignedIntegerRepresentation(), Result);
117 }
118
119 /// Check if the binop computes a division or a remainder.
120 bool isDivremOp() const {
121 return Opcode == BO_Div || Opcode == BO_Rem || Opcode == BO_DivAssign ||
122 Opcode == BO_RemAssign;
123 }
124
125 /// Check if the binop can result in an integer division by zero.
126 bool mayHaveIntegerDivisionByZero() const {
127 if (isDivremOp())
128 if (auto *CI = dyn_cast<llvm::ConstantInt>(RHS))
129 return CI->isZero();
130 return true;
131 }
132
133 /// Check if the binop can result in a float division by zero.
134 bool mayHaveFloatDivisionByZero() const {
135 if (isDivremOp())
136 if (auto *CFP = dyn_cast<llvm::ConstantFP>(RHS))
137 return CFP->isZero();
138 return true;
139 }
140
141 /// Check if at least one operand is a fixed point type. In such cases, this
142 /// operation did not follow usual arithmetic conversion and both operands
143 /// might not be of the same type.
144 bool isFixedPointOp() const {
145 // We cannot simply check the result type since comparison operations return
146 // an int.
147 if (const auto *BinOp = dyn_cast<BinaryOperator>(E)) {
148 QualType LHSType = BinOp->getLHS()->getType();
149 QualType RHSType = BinOp->getRHS()->getType();
150 return LHSType->isFixedPointType() || RHSType->isFixedPointType();
151 }
152 if (const auto *UnOp = dyn_cast<UnaryOperator>(E))
153 return UnOp->getSubExpr()->getType()->isFixedPointType();
154 return false;
155 }
156
157 /// Check if the RHS has a signed integer representation.
158 bool rhsHasSignedIntegerRepresentation() const {
159 if (const auto *BinOp = dyn_cast<BinaryOperator>(E)) {
160 QualType RHSType = BinOp->getRHS()->getType();
161 return RHSType->hasSignedIntegerRepresentation();
162 }
163 return false;
164 }
165};
166
167static bool MustVisitNullValue(const Expr *E) {
168 // If a null pointer expression's type is the C++0x nullptr_t, then
169 // it's not necessarily a simple constant and it must be evaluated
170 // for its potential side effects.
171 return E->getType()->isNullPtrType();
172}
173
174/// If \p E is a widened promoted integer, get its base (unpromoted) type.
175static std::optional<QualType> getUnwidenedIntegerType(const ASTContext &Ctx,
176 const Expr *E) {
177 const Expr *Base = E->IgnoreImpCasts();
178 if (E == Base)
179 return std::nullopt;
180
181 QualType BaseTy = Base->getType();
182 if (!Ctx.isPromotableIntegerType(BaseTy) ||
183 Ctx.getTypeSize(BaseTy) >= Ctx.getTypeSize(E->getType()))
184 return std::nullopt;
185
186 return BaseTy;
187}
188
189/// Check if \p E is a widened promoted integer.
190static bool IsWidenedIntegerOp(const ASTContext &Ctx, const Expr *E) {
191 return getUnwidenedIntegerType(Ctx, E).has_value();
192}
193
194/// Check if we can skip the overflow check for \p Op.
195static bool CanElideOverflowCheck(const ASTContext &Ctx, const BinOpInfo &Op) {
196 assert((isa<UnaryOperator>(Op.E) || isa<BinaryOperator>(Op.E)) &&
197 "Expected a unary or binary operator");
198
199 // If the binop has constant inputs and we can prove there is no overflow,
200 // we can elide the overflow check.
201 if (!Op.mayHaveIntegerOverflow())
202 return true;
203
204 if (Op.Ty->isSignedIntegerType() &&
205 Ctx.isTypeIgnoredBySanitizer(SanitizerKind::SignedIntegerOverflow,
206 Op.Ty)) {
207 return true;
208 }
209
210 if (Op.Ty->isUnsignedIntegerType() &&
211 Ctx.isTypeIgnoredBySanitizer(SanitizerKind::UnsignedIntegerOverflow,
212 Op.Ty)) {
213 return true;
214 }
215
216 const UnaryOperator *UO = dyn_cast<UnaryOperator>(Op.E);
217
218 if (UO && UO->getOpcode() == UO_Minus &&
221 UO->isIntegerConstantExpr(Ctx))
222 return true;
223
224 // If a unary op has a widened operand, the op cannot overflow.
225 if (UO)
226 return !UO->canOverflow();
227
228 // We usually don't need overflow checks for binops with widened operands.
229 // Multiplication with promoted unsigned operands is a special case.
230 const auto *BO = cast<BinaryOperator>(Op.E);
231 if (BO->hasExcludedOverflowPattern())
232 return true;
233
234 auto OptionalLHSTy = getUnwidenedIntegerType(Ctx, BO->getLHS());
235 if (!OptionalLHSTy)
236 return false;
237
238 auto OptionalRHSTy = getUnwidenedIntegerType(Ctx, BO->getRHS());
239 if (!OptionalRHSTy)
240 return false;
241
242 QualType LHSTy = *OptionalLHSTy;
243 QualType RHSTy = *OptionalRHSTy;
244
245 // This is the simple case: binops without unsigned multiplication, and with
246 // widened operands. No overflow check is needed here.
247 if ((Op.Opcode != BO_Mul && Op.Opcode != BO_MulAssign) ||
248 !LHSTy->isUnsignedIntegerType() || !RHSTy->isUnsignedIntegerType())
249 return true;
250
251 // For unsigned multiplication the overflow check can be elided if either one
252 // of the unpromoted types are less than half the size of the promoted type.
253 unsigned PromotedSize = Ctx.getTypeSize(Op.E->getType());
254 return (2 * Ctx.getTypeSize(LHSTy)) < PromotedSize ||
255 (2 * Ctx.getTypeSize(RHSTy)) < PromotedSize;
256}
257
// Emits scalar (non-aggregate, non-complex) expressions as LLVM IR values.
class ScalarExprEmitter
  : public StmtVisitor<ScalarExprEmitter, Value*> {
  CodeGenFunction &CGF;
  CGBuilderTy &Builder;
  // When set, the result of the next assignment visited may be discarded;
  // cleared by TestAndClearIgnoreResultAssign() so it applies only once.
  bool IgnoreResultAssign;
  llvm::LLVMContext &VMContext;
public:

  ScalarExprEmitter(CodeGenFunction &cgf, bool ira=false)
    : CGF(cgf), Builder(CGF.Builder), IgnoreResultAssign(ira),
      VMContext(cgf.getLLVMContext()) {
  }

  //===--------------------------------------------------------------------===//
  //                               Utilities
  //===--------------------------------------------------------------------===//

  /// Return the current IgnoreResultAssign flag and reset it to false, so it
  /// only applies to the outermost expression it was requested for.
  bool TestAndClearIgnoreResultAssign() {
    bool I = IgnoreResultAssign;
    IgnoreResultAssign = false;
    return I;
  }

  // Thin forwarders into CodeGenFunction.
  llvm::Type *ConvertType(QualType T) { return CGF.ConvertType(T); }
  LValue EmitLValue(const Expr *E) { return CGF.EmitLValue(E); }
  LValue EmitCheckedLValue(const Expr *E, CodeGenFunction::TypeCheckKind TCK) {
    return CGF.EmitCheckedLValue(E, TCK);
  }

  /// Emit the sanitizer checks in \p Checks for the operation described by
  /// \p Info (defined later in this file).
  void EmitBinOpCheck(
      ArrayRef<std::pair<Value *, SanitizerKind::SanitizerOrdinal>> Checks,
      const BinOpInfo &Info);

  /// Load the scalar value of the given l-value.
  Value *EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
    return CGF.EmitLoadOfLValue(LV, Loc).getScalarVal();
  }
294
295 void EmitLValueAlignmentAssumption(const Expr *E, Value *V) {
296 const AlignValueAttr *AVAttr = nullptr;
297 if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
298 const ValueDecl *VD = DRE->getDecl();
299
300 if (VD->getType()->isReferenceType()) {
301 if (const auto *TTy =
302 VD->getType().getNonReferenceType()->getAs<TypedefType>())
303 AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();
304 } else {
305 // Assumptions for function parameters are emitted at the start of the
306 // function, so there is no need to repeat that here,
307 // unless the alignment-assumption sanitizer is enabled,
308 // then we prefer the assumption over alignment attribute
309 // on IR function param.
310 if (isa<ParmVarDecl>(VD) && !CGF.SanOpts.has(SanitizerKind::Alignment))
311 return;
312
313 AVAttr = VD->getAttr<AlignValueAttr>();
314 }
315 }
316
317 if (!AVAttr)
318 if (const auto *TTy = E->getType()->getAs<TypedefType>())
319 AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();
320
321 if (!AVAttr)
322 return;
323
324 Value *AlignmentValue = CGF.EmitScalarExpr(AVAttr->getAlignment());
325 llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(AlignmentValue);
326 CGF.emitAlignmentAssumption(V, E, AVAttr->getLocation(), AlignmentCI);
327 }
328
  /// EmitLoadOfLValue - Given an expression with complex type that represents a
  /// value l-value, this method emits the address of the l-value, then loads
  /// and returns the result.
  Value *EmitLoadOfLValue(const Expr *E) {
    Value *V = EmitLoadOfLValue(EmitCheckedLValue(E, CodeGenFunction::TCK_Load),
                                E->getExprLoc());

    // Propagate any align_value attribute information to the optimizer.
    EmitLValueAlignmentAssumption(E, V);
    return V;
  }

  /// EmitConversionToBool - Convert the specified expression value to a
  /// boolean (i1) truth value. This is equivalent to "Val != 0".
  Value *EmitConversionToBool(Value *Src, QualType DstTy);

  /// Emit a check that a conversion from a floating-point type does not
  /// overflow.
  void EmitFloatConversionCheck(Value *OrigSrc, QualType OrigSrcType,
                                Value *Src, QualType SrcType, QualType DstType,
                                llvm::Type *DstTy, SourceLocation Loc);

  /// Known implicit conversion check kinds.
  /// This is used for bitfield conversion checks as well.
  /// Keep in sync with the enum of the same name in ubsan_handlers.h
  enum ImplicitConversionCheckKind : unsigned char {
    ICCK_IntegerTruncation = 0, // Legacy, was only used by clang 7.
    ICCK_UnsignedIntegerTruncation = 1,
    ICCK_SignedIntegerTruncation = 2,
    ICCK_IntegerSignChange = 3,
    ICCK_SignedIntegerTruncationOrSignChange = 4,
  };

  /// Emit a check that an [implicit] truncation of an integer does not
  /// discard any bits. It is not UB, so we use the value after truncation.
  void EmitIntegerTruncationCheck(Value *Src, QualType SrcType, Value *Dst,
                                  QualType DstType, SourceLocation Loc);

  /// Emit a check that an [implicit] conversion of an integer does not change
  /// the sign of the value. It is not UB, so we use the value after conversion.
  /// NOTE: Src and Dst may be the exact same value! (point to the same thing)
  void EmitIntegerSignChangeCheck(Value *Src, QualType SrcType, Value *Dst,
                                  QualType DstType, SourceLocation Loc);

  /// Emit a conversion from the specified type to the specified destination
  /// type, both of which are LLVM scalar types.
  /// Options controlling which implicit-conversion sanitizer checks
  /// EmitScalarConversion emits, and how booleans are treated.
  struct ScalarConversionOpts {
    bool TreatBooleanAsSigned;
    bool EmitImplicitIntegerTruncationChecks;
    bool EmitImplicitIntegerSignChangeChecks;

    // Default: no checks, booleans unsigned.
    ScalarConversionOpts()
        : TreatBooleanAsSigned(false),
          EmitImplicitIntegerTruncationChecks(false),
          EmitImplicitIntegerSignChangeChecks(false) {}

    // Derive the check flags from the active sanitizer set.
    ScalarConversionOpts(clang::SanitizerSet SanOpts)
        : TreatBooleanAsSigned(false),
          EmitImplicitIntegerTruncationChecks(
              SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation)),
          EmitImplicitIntegerSignChangeChecks(
              SanOpts.has(SanitizerKind::ImplicitIntegerSignChange)) {}
  };
  Value *EmitScalarCast(Value *Src, QualType SrcType, QualType DstType,
                        llvm::Type *SrcTy, llvm::Type *DstTy,
                        ScalarConversionOpts Opts);
  Value *
  EmitScalarConversion(Value *Src, QualType SrcTy, QualType DstTy,
                       SourceLocation Loc,
                       ScalarConversionOpts Opts = ScalarConversionOpts());

  /// Convert between either a fixed point and other fixed point or fixed point
  /// and an integer.
  Value *EmitFixedPointConversion(Value *Src, QualType SrcTy, QualType DstTy,
                                  SourceLocation Loc);

  /// Emit a conversion from the specified complex type to the specified
  /// destination type, where the destination type is an LLVM scalar type.
  Value *EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src,
                                       QualType SrcTy, QualType DstTy,
                                       SourceLocation Loc);

  /// EmitNullValue - Emit a value that corresponds to null for the given type.
  Value *EmitNullValue(QualType Ty);
412
  /// EmitFloatToBoolConversion - Perform an FP to boolean conversion.
  Value *EmitFloatToBoolConversion(Value *V) {
    // Compare against 0.0 for fp scalars. The unordered predicate (UNE)
    // makes NaN convert to true, matching C's "value != 0" semantics.
    llvm::Value *Zero = llvm::Constant::getNullValue(V->getType());
    return Builder.CreateFCmpUNE(V, Zero, "tobool");
  }

  /// EmitPointerToBoolConversion - Perform a pointer to boolean conversion.
  Value *EmitPointerToBoolConversion(Value *V, QualType QT) {
    // Compare against the null pointer appropriate for this pointer type
    // (which may be a non-zero bit pattern in some address spaces).
    Value *Zero = CGF.CGM.getNullPointer(cast<llvm::PointerType>(V->getType()), QT);

    return Builder.CreateICmpNE(V, Zero, "tobool");
  }
426
427 Value *EmitIntToBoolConversion(Value *V) {
428 // Because of the type rules of C, we often end up computing a
429 // logical value, then zero extending it to int, then wanting it
430 // as a logical value again. Optimize this common case.
431 if (llvm::ZExtInst *ZI = dyn_cast<llvm::ZExtInst>(V)) {
432 if (ZI->getOperand(0)->getType() == Builder.getInt1Ty()) {
433 Value *Result = ZI->getOperand(0);
434 // If there aren't any more uses, zap the instruction to save space.
435 // Note that there can be more uses, for example if this
436 // is the result of an assignment.
437 if (ZI->use_empty())
438 ZI->eraseFromParent();
439 return Result;
440 }
441 }
442
443 return Builder.CreateIsNotNull(V, "tobool");
444 }
445
  //===--------------------------------------------------------------------===//
  //                            Visitor Methods
  //===--------------------------------------------------------------------===//

  Value *Visit(Expr *E) {
    // Attach this expression's source location to all IR emitted for it.
    ApplyDebugLocation DL(CGF, E);
    return StmtVisitor<ScalarExprEmitter, Value*>::Visit(E);
  }

  Value *VisitStmt(Stmt *S) {
    // Only expressions can yield scalar values; reaching here with any other
    // statement kind is a caller bug, so dump it and abort.
    S->dump(llvm::errs(), CGF.getContext());
    llvm_unreachable("Stmt can't have complex result type!");
  }
  Value *VisitExpr(Expr *S);
460
461 Value *VisitConstantExpr(ConstantExpr *E) {
462 // A constant expression of type 'void' generates no code and produces no
463 // value.
464 if (E->getType()->isVoidType())
465 return nullptr;
466
467 if (Value *Result = ConstantEmitter(CGF).tryEmitConstantExpr(E)) {
468 if (E->isGLValue())
469 return CGF.EmitLoadOfScalar(
470 Address(Result, CGF.convertTypeForLoadStore(E->getType()),
472 /*Volatile*/ false, E->getType(), E->getExprLoc());
473 return Result;
474 }
475 return Visit(E->getSubExpr());
476 }
  // Transparent wrappers: emit the wrapped/selected subexpression directly.
  Value *VisitParenExpr(ParenExpr *PE) {
    return Visit(PE->getSubExpr());
  }
  Value *VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
    return Visit(E->getReplacement());
  }
  Value *VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
    return Visit(GE->getResultExpr());
  }
  Value *VisitCoawaitExpr(CoawaitExpr *S) {
    return CGF.EmitCoawaitExpr(*S).getScalarVal();
  }
  Value *VisitCoyieldExpr(CoyieldExpr *S) {
    return CGF.EmitCoyieldExpr(*S).getScalarVal();
  }
  Value *VisitUnaryCoawait(const UnaryOperator *E) {
    return Visit(E->getSubExpr());
  }

  // Leaves: literals lower directly to LLVM constants.
  Value *VisitIntegerLiteral(const IntegerLiteral *E) {
    return Builder.getInt(E->getValue());
  }
  Value *VisitFixedPointLiteral(const FixedPointLiteral *E) {
    // Fixed-point literals are stored as their integer representation.
    return Builder.getInt(E->getValue());
  }
  Value *VisitFloatingLiteral(const FloatingLiteral *E) {
    return llvm::ConstantFP::get(VMContext, E->getValue());
  }
  Value *VisitCharacterLiteral(const CharacterLiteral *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }
  Value *VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }
  Value *VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }
  Value *VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *E) {
    // T() of void type produces no value.
    if (E->getType()->isVoidType())
      return nullptr;

    return EmitNullValue(E->getType());
  }
  Value *VisitGNUNullExpr(const GNUNullExpr *E) {
    return EmitNullValue(E->getType());
  }
  Value *VisitOffsetOfExpr(OffsetOfExpr *E);
  Value *VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *E);
  Value *VisitAddrLabelExpr(const AddrLabelExpr *E) {
    // GNU address-of-label: take the label's block address.
    llvm::Value *V = CGF.GetAddrOfLabel(E->getLabel());
    return Builder.CreateBitCast(V, ConvertType(E->getType()));
  }

  Value *VisitSizeOfPackExpr(SizeOfPackExpr *E) {
    // sizeof...(pack) is a compile-time constant.
    return llvm::ConstantInt::get(ConvertType(E->getType()),E->getPackLength());
  }

  Value *VisitPseudoObjectExpr(PseudoObjectExpr *E) {
    return CGF.EmitPseudoObjectRValue(E).getScalarVal();
  }

  Value *VisitSYCLUniqueStableNameExpr(SYCLUniqueStableNameExpr *E);
  Value *VisitEmbedExpr(EmbedExpr *E);
541
542 Value *VisitOpaqueValueExpr(OpaqueValueExpr *E) {
543 if (E->isGLValue())
544 return EmitLoadOfLValue(CGF.getOrCreateOpaqueLValueMapping(E),
545 E->getExprLoc());
546
547 // Otherwise, assume the mapping is the scalar directly.
549 }
550
  Value *VisitOpenACCAsteriskSizeExpr(OpenACCAsteriskSizeExpr *E) {
    llvm_unreachable("Codegen for this isn't defined/implemented");
  }

  // l-values.
  Value *VisitDeclRefExpr(DeclRefExpr *E) {
    // Try to fold the reference to a constant first (e.g. enumerators);
    // otherwise emit an ordinary checked load.
    if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E))
      return CGF.emitScalarConstant(Constant, E);
    return EmitLoadOfLValue(E);
  }

  Value *VisitObjCSelectorExpr(ObjCSelectorExpr *E) {
    return CGF.EmitObjCSelectorExpr(E);
  }
  Value *VisitObjCProtocolExpr(ObjCProtocolExpr *E) {
    return CGF.EmitObjCProtocolExpr(E);
  }
  Value *VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
    return EmitLoadOfLValue(E);
  }
571 Value *VisitObjCMessageExpr(ObjCMessageExpr *E) {
572 if (E->getMethodDecl() &&
574 return EmitLoadOfLValue(E);
575 return CGF.EmitObjCMessageExpr(E).getScalarVal();
576 }
577
578 Value *VisitObjCIsaExpr(ObjCIsaExpr *E) {
579 LValue LV = CGF.EmitObjCIsaExpr(E);
581 return V;
582 }
583
584 Value *VisitObjCAvailabilityCheckExpr(ObjCAvailabilityCheckExpr *E) {
585 VersionTuple Version = E->getVersion();
586
587 // If we're checking for a platform older than our minimum deployment
588 // target, we can fold the check away.
589 if (Version <= CGF.CGM.getTarget().getPlatformMinVersion())
590 return llvm::ConstantInt::get(Builder.getInt1Ty(), 1);
591
592 return CGF.EmitBuiltinAvailable(Version);
593 }
594
  Value *VisitArraySubscriptExpr(ArraySubscriptExpr *E);
  Value *VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E);
  Value *VisitShuffleVectorExpr(ShuffleVectorExpr *E);
  Value *VisitConvertVectorExpr(ConvertVectorExpr *E);
  Value *VisitMemberExpr(MemberExpr *E);
  Value *VisitExtVectorElementExpr(Expr *E) { return EmitLoadOfLValue(E); }
  Value *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
    // Strictly speaking, we shouldn't be calling EmitLoadOfLValue, which
    // transitively calls EmitCompoundLiteralLValue, here in C++ since compound
    // literals aren't l-values in C++. We do so simply because that's the
    // cleanest way to handle compound literals in C++.
    // See the discussion here: https://reviews.llvm.org/D64464
    return EmitLoadOfLValue(E);
  }

  Value *VisitInitListExpr(InitListExpr *E);

  Value *VisitArrayInitIndexExpr(ArrayInitIndexExpr *E) {
    // Only valid while an enclosing ArrayInitLoopExpr is being emitted.
    assert(CGF.getArrayInitIndex() &&
           "ArrayInitIndexExpr not inside an ArrayInitLoopExpr?");
    return CGF.getArrayInitIndex();
  }

  Value *VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) {
    return EmitNullValue(E->getType());
  }
  Value *VisitExplicitCastExpr(ExplicitCastExpr *E) {
    // Let the module record the explicitly-cast-to type, then lower the cast
    // like any other.
    CGF.CGM.EmitExplicitCastExprType(E, &CGF);
    return VisitCastExpr(E);
  }
  Value *VisitCastExpr(CastExpr *E);
626
627 Value *VisitCallExpr(const CallExpr *E) {
629 return EmitLoadOfLValue(E);
630
631 Value *V = CGF.EmitCallExpr(E).getScalarVal();
632
633 EmitLValueAlignmentAssumption(E, V);
634 return V;
635 }
636
  Value *VisitStmtExpr(const StmtExpr *E);

  // Unary Operators. All four inc/dec forms funnel into
  // EmitScalarPrePostIncDec with (isInc, isPre) flags.
  Value *VisitUnaryPostDec(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, false, false);
  }
  Value *VisitUnaryPostInc(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, true, false);
  }
  Value *VisitUnaryPreDec(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, false, true);
  }
  Value *VisitUnaryPreInc(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, true, true);
  }

  // Helper for inc/dec emission; presumably accounts for the configured
  // signed-overflow behavior (definition not in this excerpt — confirm).
  llvm::Value *EmitIncDecConsiderOverflowBehavior(const UnaryOperator *E,
                                                  llvm::Value *InVal,
                                                  bool IsInc);

  // Shared implementation behind the four Visit* inc/dec methods above.
  llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
                                       bool isInc, bool isPre);


  Value *VisitUnaryAddrOf(const UnaryOperator *E) {
    if (isa<MemberPointerType>(E->getType())) // never sugared
      return CGF.CGM.getMemberPointerConstant(E);

    return EmitLValue(E->getSubExpr()).getPointer(CGF);
  }
  Value *VisitUnaryDeref(const UnaryOperator *E) {
    if (E->getType()->isVoidType())
      return Visit(E->getSubExpr()); // the actual value should be unused
    return EmitLoadOfLValue(E);
  }

  // The PromotionType parameters support excess-precision arithmetic:
  // getPromotionType() below chooses a wider type (e.g. float) when
  // QualType::UseExcessPrecision says the operand type needs it.
  Value *VisitUnaryPlus(const UnaryOperator *E,
                        QualType PromotionType = QualType());
  Value *VisitPlus(const UnaryOperator *E, QualType PromotionType);
  Value *VisitUnaryMinus(const UnaryOperator *E,
                         QualType PromotionType = QualType());
  Value *VisitMinus(const UnaryOperator *E, QualType PromotionType);

  Value *VisitUnaryNot (const UnaryOperator *E);
  Value *VisitUnaryLNot (const UnaryOperator *E);
  Value *VisitUnaryReal(const UnaryOperator *E,
                        QualType PromotionType = QualType());
  Value *VisitReal(const UnaryOperator *E, QualType PromotionType);
  Value *VisitUnaryImag(const UnaryOperator *E,
                        QualType PromotionType = QualType());
  Value *VisitImag(const UnaryOperator *E, QualType PromotionType);
  Value *VisitUnaryExtension(const UnaryOperator *E) {
    // __extension__ has no codegen effect; emit the operand unchanged.
    return Visit(E->getSubExpr());
  }

  // C++
  Value *VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E) {
    return EmitLoadOfLValue(E);
  }
700 Value *VisitSourceLocExpr(SourceLocExpr *SLE) {
701 auto &Ctx = CGF.getContext();
704 return ConstantEmitter(CGF).emitAbstract(SLE->getLocation(), Evaluated,
705 SLE->getType());
706 }
707
  Value *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
    // RAII scope marking that we are emitting a default argument.
    CodeGenFunction::CXXDefaultArgExprScope Scope(CGF, DAE);
    return Visit(DAE->getExpr());
  }
  Value *VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
    // RAII scope marking that we are emitting a default member initializer.
    CodeGenFunction::CXXDefaultInitExprScope Scope(CGF, DIE);
    return Visit(DIE->getExpr());
  }
  Value *VisitCXXThisExpr(CXXThisExpr *TE) {
    return CGF.LoadCXXThis();
  }

  Value *VisitExprWithCleanups(ExprWithCleanups *E);
  Value *VisitCXXNewExpr(const CXXNewExpr *E) {
    return CGF.EmitCXXNewExpr(E);
  }
  Value *VisitCXXDeleteExpr(const CXXDeleteExpr *E) {
    // 'delete' produces no value.
    CGF.EmitCXXDeleteExpr(E);
    return nullptr;
  }

  Value *VisitTypeTraitExpr(const TypeTraitExpr *E) {
    // Type traits are evaluated by Sema; emit the stored result, which is
    // either a boolean or a general APValue (integer supported here).
    if (E->isStoredAsBoolean())
      return llvm::ConstantInt::get(ConvertType(E->getType()),
                                    E->getBoolValue());
    assert(E->getAPValue().isInt() && "APValue type not supported");
    return llvm::ConstantInt::get(ConvertType(E->getType()),
                                  E->getAPValue().getInt());
  }

  Value *VisitConceptSpecializationExpr(const ConceptSpecializationExpr *E) {
    // Satisfaction is computed at compile time.
    return Builder.getInt1(E->isSatisfied());
  }

  Value *VisitRequiresExpr(const RequiresExpr *E) {
    return Builder.getInt1(E->isSatisfied());
  }

  Value *VisitArrayTypeTraitExpr(const ArrayTypeTraitExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }

  Value *VisitExpressionTraitExpr(const ExpressionTraitExpr *E) {
    return llvm::ConstantInt::get(Builder.getInt1Ty(), E->getValue());
  }

  Value *VisitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E) {
    // C++ [expr.pseudo]p1:
    //   The result shall only be used as the operand for the function call
    //   operator (), and the result of such a call has type void. The only
    //   effect is the evaluation of the postfix-expression before the dot or
    //   arrow.
    CGF.EmitScalarExpr(E->getBase());
    return nullptr;
  }

  Value *VisitCXXNullPtrLiteralExpr(const CXXNullPtrLiteralExpr *E) {
    return EmitNullValue(E->getType());
  }

  Value *VisitCXXThrowExpr(const CXXThrowExpr *E) {
    // 'throw' never produces a value.
    CGF.EmitCXXThrowExpr(E);
    return nullptr;
  }

  Value *VisitCXXNoexceptExpr(const CXXNoexceptExpr *E) {
    // noexcept(expr) is resolved at compile time.
    return Builder.getInt1(E->getValue());
  }
776
  // Binary Operators.
  /// Emit a multiplication, selecting wrapping/NSW/checked forms based on the
  /// signed-overflow mode, sanitizers, and the operand category (matrix,
  /// floating point, fixed point, or plain integer).
  Value *EmitMul(const BinOpInfo &Ops) {
    if (Ops.Ty->isSignedIntegerOrEnumerationType()) {
      // The switch falls through so that enabling -fsanitize=signed-integer-
      // overflow escalates Defined -> Undefined -> Trapping handling.
      switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
      case LangOptions::SOB_Defined:
        // -fwrapv: plain wrapping multiply unless the sanitizer wants checks.
        if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
          return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
        [[fallthrough]];
      case LangOptions::SOB_Undefined:
        // Default: overflow is UB, so emit a no-signed-wrap multiply.
        if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
          return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
        [[fallthrough]];
      case LangOptions::SOB_Trapping:
        // Checked multiply, unless analysis proves overflow is impossible.
        if (CanElideOverflowCheck(CGF.getContext(), Ops))
          return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
        return EmitOverflowCheckedBinOp(Ops);
      }
    }

    if (Ops.Ty->isConstantMatrixType()) {
      llvm::MatrixBuilder MB(Builder);
      // We need to check the types of the operands of the operator to get the
      // correct matrix dimensions.
      auto *BO = cast<BinaryOperator>(Ops.E);
      auto *LHSMatTy = dyn_cast<ConstantMatrixType>(
          BO->getLHS()->getType().getCanonicalType());
      auto *RHSMatTy = dyn_cast<ConstantMatrixType>(
          BO->getRHS()->getType().getCanonicalType());
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
      // Matrix * matrix is a true matrix multiply; otherwise one side is a
      // scalar and we scale the matrix elementwise.
      if (LHSMatTy && RHSMatTy)
        return MB.CreateMatrixMultiply(Ops.LHS, Ops.RHS, LHSMatTy->getNumRows(),
                                       LHSMatTy->getNumColumns(),
                                       RHSMatTy->getNumColumns());
      return MB.CreateScalarMultiply(Ops.LHS, Ops.RHS);
    }

    if (Ops.Ty->isUnsignedIntegerType() &&
        CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
        !CanElideOverflowCheck(CGF.getContext(), Ops))
      return EmitOverflowCheckedBinOp(Ops);

    if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
      // Preserve the old values
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
      return Builder.CreateFMul(Ops.LHS, Ops.RHS, "mul");
    }
    if (Ops.isFixedPointOp())
      return EmitFixedPointBinOp(Ops);
    return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
  }
  /// Create a binary op that checks for overflow.
  /// Currently only supports +, - and *.
  Value *EmitOverflowCheckedBinOp(const BinOpInfo &Ops);

  // Check for undefined division and modulus behaviors.
  void EmitUndefinedBehaviorIntegerDivAndRemCheck(const BinOpInfo &Ops,
                                                  llvm::Value *Zero,bool isDiv);
  // Common helper for getting how wide LHS of shift is.
  static Value *GetMaximumShiftAmount(Value *LHS, Value *RHS, bool RHSIsSigned);

  // Used for shifting constraints for OpenCL, do mask for powers of 2, URem for
  // non powers of two.
  Value *ConstrainShiftValue(Value *LHS, Value *RHS, const Twine &Name);

  // Per-opcode emitters, used directly and via the HANDLEBINOP-generated
  // visitors and the member-pointer argument of EmitCompoundAssign.
  Value *EmitDiv(const BinOpInfo &Ops);
  Value *EmitRem(const BinOpInfo &Ops);
  Value *EmitAdd(const BinOpInfo &Ops);
  Value *EmitSub(const BinOpInfo &Ops);
  Value *EmitShl(const BinOpInfo &Ops);
  Value *EmitShr(const BinOpInfo &Ops);
  Value *EmitAnd(const BinOpInfo &Ops) {
    return Builder.CreateAnd(Ops.LHS, Ops.RHS, "and");
  }
  Value *EmitXor(const BinOpInfo &Ops) {
    return Builder.CreateXor(Ops.LHS, Ops.RHS, "xor");
  }
  Value *EmitOr (const BinOpInfo &Ops) {
    return Builder.CreateOr(Ops.LHS, Ops.RHS, "or");
  }

  // Helper functions for fixed point binary operations.
  Value *EmitFixedPointBinOp(const BinOpInfo &Ops);

  // Evaluate both operands of \p E (optionally at a promoted type) into a
  // BinOpInfo for one of the Emit* helpers above.
  BinOpInfo EmitBinOps(const BinaryOperator *E,
                       QualType PromotionTy = QualType());

  Value *EmitPromotedValue(Value *result, QualType PromotionType);
  Value *EmitUnPromotedValue(Value *result, QualType ExprType);
  Value *EmitPromoted(const Expr *E, QualType PromotionType);

  LValue EmitCompoundAssignLValue(const CompoundAssignOperator *E,
                    Value *(ScalarExprEmitter::*F)(const BinOpInfo &),
                                  Value *&Result);

  Value *EmitCompoundAssign(const CompoundAssignOperator *E,
                            Value *(ScalarExprEmitter::*F)(const BinOpInfo &));
873
874 QualType getPromotionType(QualType Ty) {
875 const auto &Ctx = CGF.getContext();
876 if (auto *CT = Ty->getAs<ComplexType>()) {
877 QualType ElementType = CT->getElementType();
878 if (ElementType.UseExcessPrecision(Ctx))
879 return Ctx.getComplexType(Ctx.FloatTy);
880 }
881
882 if (Ty.UseExcessPrecision(Ctx)) {
883 if (auto *VT = Ty->getAs<VectorType>()) {
884 unsigned NumElements = VT->getNumElements();
885 return Ctx.getVectorType(Ctx.FloatTy, NumElements, VT->getVectorKind());
886 }
887 return Ctx.FloatTy;
888 }
889
890 return QualType();
891 }
892
893 // Binary operators and binary compound assignment operators.
894#define HANDLEBINOP(OP) \
895 Value *VisitBin##OP(const BinaryOperator *E) { \
896 QualType promotionTy = getPromotionType(E->getType()); \
897 auto result = Emit##OP(EmitBinOps(E, promotionTy)); \
898 if (result && !promotionTy.isNull()) \
899 result = EmitUnPromotedValue(result, E->getType()); \
900 return result; \
901 } \
902 Value *VisitBin##OP##Assign(const CompoundAssignOperator *E) { \
903 ApplyAtomGroup Grp(CGF.getDebugInfo()); \
904 return EmitCompoundAssign(E, &ScalarExprEmitter::Emit##OP); \
905 }
906 HANDLEBINOP(Mul)
907 HANDLEBINOP(Div)
908 HANDLEBINOP(Rem)
909 HANDLEBINOP(Add)
910 HANDLEBINOP(Sub)
911 HANDLEBINOP(Shl)
912 HANDLEBINOP(Shr)
914 HANDLEBINOP(Xor)
916#undef HANDLEBINOP
917
  // Comparisons.
  // EmitCompare selects the unsigned-int, signed-int, or floating predicate
  // based on the operand types; IsSignaling chooses signaling FP compares
  // (for the ordering operators) vs. quiet ones (for ==/!=).
  Value *EmitCompare(const BinaryOperator *E, llvm::CmpInst::Predicate UICmpOpc,
                     llvm::CmpInst::Predicate SICmpOpc,
                     llvm::CmpInst::Predicate FCmpOpc, bool IsSignaling);
#define VISITCOMP(CODE, UI, SI, FP, SIG) \
    Value *VisitBin##CODE(const BinaryOperator *E) { \
      return EmitCompare(E, llvm::ICmpInst::UI, llvm::ICmpInst::SI, \
                         llvm::FCmpInst::FP, SIG); }
  VISITCOMP(LT, ICMP_ULT, ICMP_SLT, FCMP_OLT, true)
  VISITCOMP(GT, ICMP_UGT, ICMP_SGT, FCMP_OGT, true)
  VISITCOMP(LE, ICMP_ULE, ICMP_SLE, FCMP_OLE, true)
  VISITCOMP(GE, ICMP_UGE, ICMP_SGE, FCMP_OGE, true)
  VISITCOMP(EQ, ICMP_EQ , ICMP_EQ , FCMP_OEQ, false)
  VISITCOMP(NE, ICMP_NE , ICMP_NE , FCMP_UNE, false)
#undef VISITCOMP
933
  Value *VisitBinAssign (const BinaryOperator *E);

  // Logical operators need short-circuit control flow, and comma simply
  // discards its LHS; these are out of line.
  Value *VisitBinLAnd (const BinaryOperator *E);
  Value *VisitBinLOr (const BinaryOperator *E);
  Value *VisitBinComma (const BinaryOperator *E);

  // Pointer-to-member access yields a scalar load of the resulting lvalue.
  Value *VisitBinPtrMemD(const Expr *E) { return EmitLoadOfLValue(E); }
  Value *VisitBinPtrMemI(const Expr *E) { return EmitLoadOfLValue(E); }

  // Rewritten operators (e.g. from <=>): emit the semantic form directly.
  Value *VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *E) {
    return Visit(E->getSemanticForm());
  }
946
  // Other Operators.
  Value *VisitBlockExpr(const BlockExpr *BE);
  Value *VisitAbstractConditionalOperator(const AbstractConditionalOperator *);
  Value *VisitChooseExpr(ChooseExpr *CE);
  Value *VisitVAArgExpr(VAArgExpr *VE);
  // Objective-C literals are delegated to CodeGenFunction's ObjC emitters.
  Value *VisitObjCStringLiteral(const ObjCStringLiteral *E) {
    return CGF.EmitObjCStringLiteral(E);
  }
  Value *VisitObjCBoxedExpr(ObjCBoxedExpr *E) {
    return CGF.EmitObjCBoxedExpr(E);
  }
  Value *VisitObjCArrayLiteral(ObjCArrayLiteral *E) {
    return CGF.EmitObjCArrayLiteral(E);
  }
  Value *VisitObjCDictionaryLiteral(ObjCDictionaryLiteral *E) {
    return CGF.EmitObjCDictionaryLiteral(E);
  }
  Value *VisitAsTypeExpr(AsTypeExpr *CE);
  Value *VisitAtomicExpr(AtomicExpr *AE);
  // A pack indexing expression just forwards to its selected pack element.
  Value *VisitPackIndexingExpr(PackIndexingExpr *E) {
    return Visit(E->getSelectedExpr());
  }
969};
970} // end anonymous namespace.
971
972//===----------------------------------------------------------------------===//
973// Utilities
974//===----------------------------------------------------------------------===//
975
976/// EmitConversionToBool - Convert the specified expression value to a
977/// boolean (i1) truth value. This is equivalent to "Val != 0".
978Value *ScalarExprEmitter::EmitConversionToBool(Value *Src, QualType SrcType) {
979 assert(SrcType.isCanonical() && "EmitScalarConversion strips typedefs");
980
981 if (SrcType->isRealFloatingType())
982 return EmitFloatToBoolConversion(Src);
983
984 if (const MemberPointerType *MPT = dyn_cast<MemberPointerType>(SrcType))
985 return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, Src, MPT);
986
987 assert((SrcType->isIntegerType() || isa<llvm::PointerType>(Src->getType())) &&
988 "Unknown scalar type to convert");
989
991 return EmitIntToBoolConversion(Src);
992
993 assert(isa<llvm::PointerType>(Src->getType()));
994 return EmitPointerToBoolConversion(Src, SrcType);
995}
996
997void ScalarExprEmitter::EmitFloatConversionCheck(
998 Value *OrigSrc, QualType OrigSrcType, Value *Src, QualType SrcType,
999 QualType DstType, llvm::Type *DstTy, SourceLocation Loc) {
1000 assert(SrcType->isFloatingType() && "not a conversion from floating point");
1001 if (!isa<llvm::IntegerType>(DstTy))
1002 return;
1003
1004 auto CheckOrdinal = SanitizerKind::SO_FloatCastOverflow;
1005 auto CheckHandler = SanitizerHandler::FloatCastOverflow;
1006 SanitizerDebugLocation SanScope(&CGF, {CheckOrdinal}, CheckHandler);
1007 using llvm::APFloat;
1008 using llvm::APSInt;
1009
1010 llvm::Value *Check = nullptr;
1011 const llvm::fltSemantics &SrcSema =
1012 CGF.getContext().getFloatTypeSemantics(OrigSrcType);
1013
1014 // Floating-point to integer. This has undefined behavior if the source is
1015 // +-Inf, NaN, or doesn't fit into the destination type (after truncation
1016 // to an integer).
1017 unsigned Width = CGF.getContext().getIntWidth(DstType);
1019
1020 APSInt Min = APSInt::getMinValue(Width, Unsigned);
1021 APFloat MinSrc(SrcSema, APFloat::uninitialized);
1022 if (MinSrc.convertFromAPInt(Min, !Unsigned, APFloat::rmTowardZero) &
1023 APFloat::opOverflow)
1024 // Don't need an overflow check for lower bound. Just check for
1025 // -Inf/NaN.
1026 MinSrc = APFloat::getInf(SrcSema, true);
1027 else
1028 // Find the largest value which is too small to represent (before
1029 // truncation toward zero).
1030 MinSrc.subtract(APFloat(SrcSema, 1), APFloat::rmTowardNegative);
1031
1032 APSInt Max = APSInt::getMaxValue(Width, Unsigned);
1033 APFloat MaxSrc(SrcSema, APFloat::uninitialized);
1034 if (MaxSrc.convertFromAPInt(Max, !Unsigned, APFloat::rmTowardZero) &
1035 APFloat::opOverflow)
1036 // Don't need an overflow check for upper bound. Just check for
1037 // +Inf/NaN.
1038 MaxSrc = APFloat::getInf(SrcSema, false);
1039 else
1040 // Find the smallest value which is too large to represent (before
1041 // truncation toward zero).
1042 MaxSrc.add(APFloat(SrcSema, 1), APFloat::rmTowardPositive);
1043
1044 // If we're converting from __half, convert the range to float to match
1045 // the type of src.
1046 if (OrigSrcType->isHalfType()) {
1047 const llvm::fltSemantics &Sema =
1048 CGF.getContext().getFloatTypeSemantics(SrcType);
1049 bool IsInexact;
1050 MinSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
1051 MaxSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
1052 }
1053
1054 llvm::Value *GE =
1055 Builder.CreateFCmpOGT(Src, llvm::ConstantFP::get(VMContext, MinSrc));
1056 llvm::Value *LE =
1057 Builder.CreateFCmpOLT(Src, llvm::ConstantFP::get(VMContext, MaxSrc));
1058 Check = Builder.CreateAnd(GE, LE);
1059
1060 llvm::Constant *StaticArgs[] = {CGF.EmitCheckSourceLocation(Loc),
1061 CGF.EmitCheckTypeDescriptor(OrigSrcType),
1062 CGF.EmitCheckTypeDescriptor(DstType)};
1063 CGF.EmitCheck(std::make_pair(Check, CheckOrdinal), CheckHandler, StaticArgs,
1064 OrigSrc);
1065}
1066
1067// Should be called within CodeGenFunction::SanitizerScope RAII scope.
1068// Returns 'i1 false' when the truncation Src -> Dst was lossy.
1069static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1070 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
1072 QualType DstType, CGBuilderTy &Builder) {
1073 llvm::Type *SrcTy = Src->getType();
1074 llvm::Type *DstTy = Dst->getType();
1075 (void)DstTy; // Only used in assert()
1076
1077 // This should be truncation of integral types.
1078 assert(Src != Dst);
1079 assert(SrcTy->getScalarSizeInBits() > Dst->getType()->getScalarSizeInBits());
1080 assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&
1081 "non-integer llvm type");
1082
1083 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1084 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1085
1086 // If both (src and dst) types are unsigned, then it's an unsigned truncation.
1087 // Else, it is a signed truncation.
1088 ScalarExprEmitter::ImplicitConversionCheckKind Kind;
1090 if (!SrcSigned && !DstSigned) {
1091 Kind = ScalarExprEmitter::ICCK_UnsignedIntegerTruncation;
1092 Ordinal = SanitizerKind::SO_ImplicitUnsignedIntegerTruncation;
1093 } else {
1094 Kind = ScalarExprEmitter::ICCK_SignedIntegerTruncation;
1095 Ordinal = SanitizerKind::SO_ImplicitSignedIntegerTruncation;
1096 }
1097
1098 llvm::Value *Check = nullptr;
1099 // 1. Extend the truncated value back to the same width as the Src.
1100 Check = Builder.CreateIntCast(Dst, SrcTy, DstSigned, "anyext");
1101 // 2. Equality-compare with the original source value
1102 Check = Builder.CreateICmpEQ(Check, Src, "truncheck");
1103 // If the comparison result is 'i1 false', then the truncation was lossy.
1104 return std::make_pair(Kind, std::make_pair(Check, Ordinal));
1105}
1106
1108 QualType SrcType, QualType DstType) {
1109 return SrcType->isIntegerType() && DstType->isIntegerType();
1110}
1111
1112void ScalarExprEmitter::EmitIntegerTruncationCheck(Value *Src, QualType SrcType,
1113 Value *Dst, QualType DstType,
1114 SourceLocation Loc) {
1115 if (!CGF.SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation))
1116 return;
1117
1118 // We only care about int->int conversions here.
1119 // We ignore conversions to/from pointer and/or bool.
1121 DstType))
1122 return;
1123
1124 unsigned SrcBits = Src->getType()->getScalarSizeInBits();
1125 unsigned DstBits = Dst->getType()->getScalarSizeInBits();
1126 // This must be truncation. Else we do not care.
1127 if (SrcBits <= DstBits)
1128 return;
1129
1130 assert(!DstType->isBooleanType() && "we should not get here with booleans.");
1131
1132 // If the integer sign change sanitizer is enabled,
1133 // and we are truncating from larger unsigned type to smaller signed type,
1134 // let that next sanitizer deal with it.
1135 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1136 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1137 if (CGF.SanOpts.has(SanitizerKind::ImplicitIntegerSignChange) &&
1138 (!SrcSigned && DstSigned))
1139 return;
1140
1141 std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1142 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
1143 Check;
1144
1145 auto CheckHandler = SanitizerHandler::ImplicitConversion;
1146 {
1147 // We don't know the check kind until we call
1148 // EmitIntegerTruncationCheckHelper, but we want to annotate
1149 // EmitIntegerTruncationCheckHelper's instructions too.
1150 SanitizerDebugLocation SanScope(
1151 &CGF,
1152 {SanitizerKind::SO_ImplicitUnsignedIntegerTruncation,
1153 SanitizerKind::SO_ImplicitSignedIntegerTruncation},
1154 CheckHandler);
1155 Check =
1156 EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
1157 // If the comparison result is 'i1 false', then the truncation was lossy.
1158 }
1159
1160 // Do we care about this type of truncation?
1161 if (!CGF.SanOpts.has(Check.second.second))
1162 return;
1163
1164 SanitizerDebugLocation SanScope(&CGF, {Check.second.second}, CheckHandler);
1165
1166 // Does some SSCL ignore this type?
1168 SanitizerMask::bitPosToMask(Check.second.second), DstType))
1169 return;
1170
1171 llvm::Constant *StaticArgs[] = {
1172 CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(SrcType),
1173 CGF.EmitCheckTypeDescriptor(DstType),
1174 llvm::ConstantInt::get(Builder.getInt8Ty(), Check.first),
1175 llvm::ConstantInt::get(Builder.getInt32Ty(), 0)};
1176
1177 CGF.EmitCheck(Check.second, CheckHandler, StaticArgs, {Src, Dst});
1178}
1179
1180static llvm::Value *EmitIsNegativeTestHelper(Value *V, QualType VType,
1181 const char *Name,
1182 CGBuilderTy &Builder) {
1183 bool VSigned = VType->isSignedIntegerOrEnumerationType();
1184 llvm::Type *VTy = V->getType();
1185 if (!VSigned) {
1186 // If the value is unsigned, then it is never negative.
1187 return llvm::ConstantInt::getFalse(VTy->getContext());
1188 }
1189 llvm::Constant *Zero = llvm::ConstantInt::get(VTy, 0);
1190 return Builder.CreateICmp(llvm::ICmpInst::ICMP_SLT, V, Zero,
1191 llvm::Twine(Name) + "." + V->getName() +
1192 ".negativitycheck");
1193}
1194
1195// Should be called within CodeGenFunction::SanitizerScope RAII scope.
1196// Returns 'i1 false' when the conversion Src -> Dst changed the sign.
1197static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1198 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
1200 QualType DstType, CGBuilderTy &Builder) {
1201 llvm::Type *SrcTy = Src->getType();
1202 llvm::Type *DstTy = Dst->getType();
1203
1204 assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&
1205 "non-integer llvm type");
1206
1207 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1208 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1209 (void)SrcSigned; // Only used in assert()
1210 (void)DstSigned; // Only used in assert()
1211 unsigned SrcBits = SrcTy->getScalarSizeInBits();
1212 unsigned DstBits = DstTy->getScalarSizeInBits();
1213 (void)SrcBits; // Only used in assert()
1214 (void)DstBits; // Only used in assert()
1215
1216 assert(((SrcBits != DstBits) || (SrcSigned != DstSigned)) &&
1217 "either the widths should be different, or the signednesses.");
1218
1219 // 1. Was the old Value negative?
1220 llvm::Value *SrcIsNegative =
1221 EmitIsNegativeTestHelper(Src, SrcType, "src", Builder);
1222 // 2. Is the new Value negative?
1223 llvm::Value *DstIsNegative =
1224 EmitIsNegativeTestHelper(Dst, DstType, "dst", Builder);
1225 // 3. Now, was the 'negativity status' preserved during the conversion?
1226 // NOTE: conversion from negative to zero is considered to change the sign.
1227 // (We want to get 'false' when the conversion changed the sign)
1228 // So we should just equality-compare the negativity statuses.
1229 llvm::Value *Check = nullptr;
1230 Check = Builder.CreateICmpEQ(SrcIsNegative, DstIsNegative, "signchangecheck");
1231 // If the comparison result is 'false', then the conversion changed the sign.
1232 return std::make_pair(
1233 ScalarExprEmitter::ICCK_IntegerSignChange,
1234 std::make_pair(Check, SanitizerKind::SO_ImplicitIntegerSignChange));
1235}
1236
1237void ScalarExprEmitter::EmitIntegerSignChangeCheck(Value *Src, QualType SrcType,
1238 Value *Dst, QualType DstType,
1239 SourceLocation Loc) {
1240 if (!CGF.SanOpts.has(SanitizerKind::SO_ImplicitIntegerSignChange))
1241 return;
1242
1243 llvm::Type *SrcTy = Src->getType();
1244 llvm::Type *DstTy = Dst->getType();
1245
1246 // We only care about int->int conversions here.
1247 // We ignore conversions to/from pointer and/or bool.
1249 DstType))
1250 return;
1251
1252 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1253 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1254 unsigned SrcBits = SrcTy->getScalarSizeInBits();
1255 unsigned DstBits = DstTy->getScalarSizeInBits();
1256
1257 // Now, we do not need to emit the check in *all* of the cases.
1258 // We can avoid emitting it in some obvious cases where it would have been
1259 // dropped by the opt passes (instcombine) always anyways.
1260 // If it's a cast between effectively the same type, no check.
1261 // NOTE: this is *not* equivalent to checking the canonical types.
1262 if (SrcSigned == DstSigned && SrcBits == DstBits)
1263 return;
1264 // At least one of the values needs to have signed type.
1265 // If both are unsigned, then obviously, neither of them can be negative.
1266 if (!SrcSigned && !DstSigned)
1267 return;
1268 // If the conversion is to *larger* *signed* type, then no check is needed.
1269 // Because either sign-extension happens (so the sign will remain),
1270 // or zero-extension will happen (the sign bit will be zero.)
1271 if ((DstBits > SrcBits) && DstSigned)
1272 return;
1273 if (CGF.SanOpts.has(SanitizerKind::ImplicitSignedIntegerTruncation) &&
1274 (SrcBits > DstBits) && SrcSigned) {
1275 // If the signed integer truncation sanitizer is enabled,
1276 // and this is a truncation from signed type, then no check is needed.
1277 // Because here sign change check is interchangeable with truncation check.
1278 return;
1279 }
1280 // Does an SSCL have an entry for the DstType under its respective sanitizer
1281 // section?
1282 if (DstSigned && CGF.getContext().isTypeIgnoredBySanitizer(
1283 SanitizerKind::ImplicitSignedIntegerTruncation, DstType))
1284 return;
1285 if (!DstSigned &&
1287 SanitizerKind::ImplicitUnsignedIntegerTruncation, DstType))
1288 return;
1289 // That's it. We can't rule out any more cases with the data we have.
1290
1291 auto CheckHandler = SanitizerHandler::ImplicitConversion;
1292 SanitizerDebugLocation SanScope(
1293 &CGF,
1294 {SanitizerKind::SO_ImplicitIntegerSignChange,
1295 SanitizerKind::SO_ImplicitUnsignedIntegerTruncation,
1296 SanitizerKind::SO_ImplicitSignedIntegerTruncation},
1297 CheckHandler);
1298
1299 std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1300 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
1301 Check;
1302
1303 // Each of these checks needs to return 'false' when an issue was detected.
1304 ImplicitConversionCheckKind CheckKind;
1305 llvm::SmallVector<std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>,
1306 2>
1307 Checks;
1308 // So we can 'and' all the checks together, and still get 'false',
1309 // if at least one of the checks detected an issue.
1310
1311 Check = EmitIntegerSignChangeCheckHelper(Src, SrcType, Dst, DstType, Builder);
1312 CheckKind = Check.first;
1313 Checks.emplace_back(Check.second);
1314
1315 if (CGF.SanOpts.has(SanitizerKind::ImplicitSignedIntegerTruncation) &&
1316 (SrcBits > DstBits) && !SrcSigned && DstSigned) {
1317 // If the signed integer truncation sanitizer was enabled,
1318 // and we are truncating from larger unsigned type to smaller signed type,
1319 // let's handle the case we skipped in that check.
1320 Check =
1321 EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
1322 CheckKind = ICCK_SignedIntegerTruncationOrSignChange;
1323 Checks.emplace_back(Check.second);
1324 // If the comparison result is 'i1 false', then the truncation was lossy.
1325 }
1326
1327 llvm::Constant *StaticArgs[] = {
1328 CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(SrcType),
1329 CGF.EmitCheckTypeDescriptor(DstType),
1330 llvm::ConstantInt::get(Builder.getInt8Ty(), CheckKind),
1331 llvm::ConstantInt::get(Builder.getInt32Ty(), 0)};
1332 // EmitCheck() will 'and' all the checks together.
1333 CGF.EmitCheck(Checks, CheckHandler, StaticArgs, {Src, Dst});
1334}
1335
1336// Should be called within CodeGenFunction::SanitizerScope RAII scope.
1337// Returns 'i1 false' when the truncation Src -> Dst was lossy.
1338static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1339 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
1341 QualType DstType, CGBuilderTy &Builder) {
1342 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1343 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1344
1345 ScalarExprEmitter::ImplicitConversionCheckKind Kind;
1346 if (!SrcSigned && !DstSigned)
1347 Kind = ScalarExprEmitter::ICCK_UnsignedIntegerTruncation;
1348 else
1349 Kind = ScalarExprEmitter::ICCK_SignedIntegerTruncation;
1350
1351 llvm::Value *Check = nullptr;
1352 // 1. Extend the truncated value back to the same width as the Src.
1353 Check = Builder.CreateIntCast(Dst, Src->getType(), DstSigned, "bf.anyext");
1354 // 2. Equality-compare with the original source value
1355 Check = Builder.CreateICmpEQ(Check, Src, "bf.truncheck");
1356 // If the comparison result is 'i1 false', then the truncation was lossy.
1357
1358 return std::make_pair(
1359 Kind,
1360 std::make_pair(Check, SanitizerKind::SO_ImplicitBitfieldConversion));
1361}
1362
1363// Should be called within CodeGenFunction::SanitizerScope RAII scope.
1364// Returns 'i1 false' when the conversion Src -> Dst changed the sign.
1365static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1366 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
1368 QualType DstType, CGBuilderTy &Builder) {
1369 // 1. Was the old Value negative?
1370 llvm::Value *SrcIsNegative =
1371 EmitIsNegativeTestHelper(Src, SrcType, "bf.src", Builder);
1372 // 2. Is the new Value negative?
1373 llvm::Value *DstIsNegative =
1374 EmitIsNegativeTestHelper(Dst, DstType, "bf.dst", Builder);
1375 // 3. Now, was the 'negativity status' preserved during the conversion?
1376 // NOTE: conversion from negative to zero is considered to change the sign.
1377 // (We want to get 'false' when the conversion changed the sign)
1378 // So we should just equality-compare the negativity statuses.
1379 llvm::Value *Check = nullptr;
1380 Check =
1381 Builder.CreateICmpEQ(SrcIsNegative, DstIsNegative, "bf.signchangecheck");
1382 // If the comparison result is 'false', then the conversion changed the sign.
1383 return std::make_pair(
1384 ScalarExprEmitter::ICCK_IntegerSignChange,
1385 std::make_pair(Check, SanitizerKind::SO_ImplicitBitfieldConversion));
1386}
1387
1389 Value *Dst, QualType DstType,
1390 const CGBitFieldInfo &Info,
1391 SourceLocation Loc) {
1392
1393 if (!SanOpts.has(SanitizerKind::ImplicitBitfieldConversion))
1394 return;
1395
1396 // We only care about int->int conversions here.
1397 // We ignore conversions to/from pointer and/or bool.
1399 DstType))
1400 return;
1401
1402 if (DstType->isBooleanType() || SrcType->isBooleanType())
1403 return;
1404
1405 // This should be truncation of integral types.
1406 assert(isa<llvm::IntegerType>(Src->getType()) &&
1407 isa<llvm::IntegerType>(Dst->getType()) && "non-integer llvm type");
1408
1409 // TODO: Calculate src width to avoid emitting code
1410 // for unecessary cases.
1411 unsigned SrcBits = ConvertType(SrcType)->getScalarSizeInBits();
1412 unsigned DstBits = Info.Size;
1413
1414 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1415 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1416
1417 auto CheckHandler = SanitizerHandler::ImplicitConversion;
1418 SanitizerDebugLocation SanScope(
1419 this, {SanitizerKind::SO_ImplicitBitfieldConversion}, CheckHandler);
1420
1421 std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1422 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
1423 Check;
1424
1425 // Truncation
1426 bool EmitTruncation = DstBits < SrcBits;
1427 // If Dst is signed and Src unsigned, we want to be more specific
1428 // about the CheckKind we emit, in this case we want to emit
1429 // ICCK_SignedIntegerTruncationOrSignChange.
1430 bool EmitTruncationFromUnsignedToSigned =
1431 EmitTruncation && DstSigned && !SrcSigned;
1432 // Sign change
1433 bool SameTypeSameSize = SrcSigned == DstSigned && SrcBits == DstBits;
1434 bool BothUnsigned = !SrcSigned && !DstSigned;
1435 bool LargerSigned = (DstBits > SrcBits) && DstSigned;
1436 // We can avoid emitting sign change checks in some obvious cases
1437 // 1. If Src and Dst have the same signedness and size
1438 // 2. If both are unsigned sign check is unecessary!
1439 // 3. If Dst is signed and bigger than Src, either
1440 // sign-extension or zero-extension will make sure
1441 // the sign remains.
1442 bool EmitSignChange = !SameTypeSameSize && !BothUnsigned && !LargerSigned;
1443
1444 if (EmitTruncation)
1445 Check =
1446 EmitBitfieldTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
1447 else if (EmitSignChange) {
1448 assert(((SrcBits != DstBits) || (SrcSigned != DstSigned)) &&
1449 "either the widths should be different, or the signednesses.");
1450 Check =
1451 EmitBitfieldSignChangeCheckHelper(Src, SrcType, Dst, DstType, Builder);
1452 } else
1453 return;
1454
1455 ScalarExprEmitter::ImplicitConversionCheckKind CheckKind = Check.first;
1456 if (EmitTruncationFromUnsignedToSigned)
1457 CheckKind = ScalarExprEmitter::ICCK_SignedIntegerTruncationOrSignChange;
1458
1459 llvm::Constant *StaticArgs[] = {
1461 EmitCheckTypeDescriptor(DstType),
1462 llvm::ConstantInt::get(Builder.getInt8Ty(), CheckKind),
1463 llvm::ConstantInt::get(Builder.getInt32Ty(), Info.Size)};
1464
1465 EmitCheck(Check.second, CheckHandler, StaticArgs, {Src, Dst});
1466}
1467
// Emit the raw cast instruction for a scalar (or matrix, element-wise)
// conversion from SrcType/SrcTy to DstType/DstTy. Chooses among int<->int,
// int<->fp, and fp<->fp casts based on the element types.
Value *ScalarExprEmitter::EmitScalarCast(Value *Src, QualType SrcType,
                                         QualType DstType, llvm::Type *SrcTy,
                                         llvm::Type *DstTy,
                                         ScalarConversionOpts Opts) {
  // The Element types determine the type of cast to perform.
  llvm::Type *SrcElementTy;
  llvm::Type *DstElementTy;
  QualType SrcElementType;
  QualType DstElementType;
  if (SrcType->isMatrixType() && DstType->isMatrixType()) {
    // Matrices are lowered as vectors; cast element-wise on those vectors.
    SrcElementTy = cast<llvm::VectorType>(SrcTy)->getElementType();
    DstElementTy = cast<llvm::VectorType>(DstTy)->getElementType();
    SrcElementType = SrcType->castAs<MatrixType>()->getElementType();
    DstElementType = DstType->castAs<MatrixType>()->getElementType();
  } else {
    assert(!SrcType->isMatrixType() && !DstType->isMatrixType() &&
           "cannot cast between matrix and non-matrix types");
    SrcElementTy = SrcTy;
    DstElementTy = DstTy;
    SrcElementType = SrcType;
    DstElementType = DstType;
  }

  if (isa<llvm::IntegerType>(SrcElementTy)) {
    bool InputSigned = SrcElementType->isSignedIntegerOrEnumerationType();
    // OpenCL-style bool vectors: treat true as -1, i.e. sign-extend.
    if (SrcElementType->isBooleanType() && Opts.TreatBooleanAsSigned) {
      InputSigned = true;
    }

    if (isa<llvm::IntegerType>(DstElementTy))
      return Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
    if (InputSigned)
      return Builder.CreateSIToFP(Src, DstTy, "conv");
    return Builder.CreateUIToFP(Src, DstTy, "conv");
  }

  if (isa<llvm::IntegerType>(DstElementTy)) {
    assert(SrcElementTy->isFloatingPointTy() && "Unknown real conversion");
    bool IsSigned = DstElementType->isSignedIntegerOrEnumerationType();

    // If we can't recognize overflow as undefined behavior, assume that
    // overflow saturates. This protects against normal optimizations if we are
    // compiling with non-standard FP semantics.
    if (!CGF.CGM.getCodeGenOpts().StrictFloatCastOverflow) {
      llvm::Intrinsic::ID IID =
          IsSigned ? llvm::Intrinsic::fptosi_sat : llvm::Intrinsic::fptoui_sat;
      return Builder.CreateCall(CGF.CGM.getIntrinsic(IID, {DstTy, SrcTy}), Src);
    }

    if (IsSigned)
      return Builder.CreateFPToSI(Src, DstTy, "conv");
    return Builder.CreateFPToUI(Src, DstTy, "conv");
  }

  // Both sides are floating point from here on.
  // Between two 16-bit FP types (e.g. half <-> bfloat) there is no direct
  // cast; round-trip through float.
  if ((DstElementTy->is16bitFPTy() && SrcElementTy->is16bitFPTy())) {
    Value *FloatVal = Builder.CreateFPExt(Src, Builder.getFloatTy(), "fpext");
    return Builder.CreateFPTrunc(FloatVal, DstTy, "fptrunc");
  }
  // LLVM orders FP type IDs by increasing width, so a smaller destination
  // type ID means this is a truncation; otherwise an extension.
  if (DstElementTy->getTypeID() < SrcElementTy->getTypeID())
    return Builder.CreateFPTrunc(Src, DstTy, "conv");
  return Builder.CreateFPExt(Src, DstTy, "conv");
}
1530
1531/// Emit a conversion from the specified type to the specified destination type,
1532/// both of which are LLVM scalar types.
1533Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
1534 QualType DstType,
1535 SourceLocation Loc,
1536 ScalarConversionOpts Opts) {
1537 // All conversions involving fixed point types should be handled by the
1538 // EmitFixedPoint family functions. This is done to prevent bloating up this
1539 // function more, and although fixed point numbers are represented by
1540 // integers, we do not want to follow any logic that assumes they should be
1541 // treated as integers.
1542 // TODO(leonardchan): When necessary, add another if statement checking for
1543 // conversions to fixed point types from other types.
1544 if (SrcType->isFixedPointType()) {
1545 if (DstType->isBooleanType())
1546 // It is important that we check this before checking if the dest type is
1547 // an integer because booleans are technically integer types.
1548 // We do not need to check the padding bit on unsigned types if unsigned
1549 // padding is enabled because overflow into this bit is undefined
1550 // behavior.
1551 return Builder.CreateIsNotNull(Src, "tobool");
1552 if (DstType->isFixedPointType() || DstType->isIntegerType() ||
1553 DstType->isRealFloatingType())
1554 return EmitFixedPointConversion(Src, SrcType, DstType, Loc);
1555
1556 llvm_unreachable(
1557 "Unhandled scalar conversion from a fixed point type to another type.");
1558 } else if (DstType->isFixedPointType()) {
1559 if (SrcType->isIntegerType() || SrcType->isRealFloatingType())
1560 // This also includes converting booleans and enums to fixed point types.
1561 return EmitFixedPointConversion(Src, SrcType, DstType, Loc);
1562
1563 llvm_unreachable(
1564 "Unhandled scalar conversion to a fixed point type from another type.");
1565 }
1566
1567 QualType NoncanonicalSrcType = SrcType;
1568 QualType NoncanonicalDstType = DstType;
1569
1570 SrcType = CGF.getContext().getCanonicalType(SrcType);
1571 DstType = CGF.getContext().getCanonicalType(DstType);
1572 if (SrcType == DstType) return Src;
1573
1574 if (DstType->isVoidType()) return nullptr;
1575
1576 llvm::Value *OrigSrc = Src;
1577 QualType OrigSrcType = SrcType;
1578 llvm::Type *SrcTy = Src->getType();
1579
1580 // Handle conversions to bool first, they are special: comparisons against 0.
1581 if (DstType->isBooleanType())
1582 return EmitConversionToBool(Src, SrcType);
1583
1584 llvm::Type *DstTy = ConvertType(DstType);
1585
1586 // Cast from half through float if half isn't a native type.
1587 if (SrcType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
1588 // Cast to FP using the intrinsic if the half type itself isn't supported.
1589 if (DstTy->isFloatingPointTy()) {
1591 return Builder.CreateCall(
1592 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16, DstTy),
1593 Src);
1594 } else {
1595 // Cast to other types through float, using either the intrinsic or FPExt,
1596 // depending on whether the half type itself is supported
1597 // (as opposed to operations on half, available with NativeHalfType).
1599 Src = Builder.CreateCall(
1600 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16,
1601 CGF.CGM.FloatTy),
1602 Src);
1603 } else {
1604 Src = Builder.CreateFPExt(Src, CGF.CGM.FloatTy, "conv");
1605 }
1606 SrcType = CGF.getContext().FloatTy;
1607 SrcTy = CGF.FloatTy;
1608 }
1609 }
1610
1611 // Ignore conversions like int -> uint.
1612 if (SrcTy == DstTy) {
1613 if (Opts.EmitImplicitIntegerSignChangeChecks)
1614 EmitIntegerSignChangeCheck(Src, NoncanonicalSrcType, Src,
1615 NoncanonicalDstType, Loc);
1616
1617 return Src;
1618 }
1619
1620 // Handle pointer conversions next: pointers can only be converted to/from
1621 // other pointers and integers. Check for pointer types in terms of LLVM, as
1622 // some native types (like Obj-C id) may map to a pointer type.
1623 if (auto DstPT = dyn_cast<llvm::PointerType>(DstTy)) {
1624 // The source value may be an integer, or a pointer.
1625 if (isa<llvm::PointerType>(SrcTy))
1626 return Src;
1627
1628 assert(SrcType->isIntegerType() && "Not ptr->ptr or int->ptr conversion?");
1629 // First, convert to the correct width so that we control the kind of
1630 // extension.
1631 llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DstPT);
1632 bool InputSigned = SrcType->isSignedIntegerOrEnumerationType();
1633 llvm::Value* IntResult =
1634 Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
1635 // Then, cast to pointer.
1636 return Builder.CreateIntToPtr(IntResult, DstTy, "conv");
1637 }
1638
1639 if (isa<llvm::PointerType>(SrcTy)) {
1640 // Must be an ptr to int cast.
1641 assert(isa<llvm::IntegerType>(DstTy) && "not ptr->int?");
1642 return Builder.CreatePtrToInt(Src, DstTy, "conv");
1643 }
1644
1645 // A scalar can be splatted to an extended vector of the same element type
1646 if (DstType->isExtVectorType() && !SrcType->isVectorType()) {
1647 // Sema should add casts to make sure that the source expression's type is
1648 // the same as the vector's element type (sans qualifiers)
1649 assert(DstType->castAs<ExtVectorType>()->getElementType().getTypePtr() ==
1650 SrcType.getTypePtr() &&
1651 "Splatted expr doesn't match with vector element type?");
1652
1653 // Splat the element across to all elements
1654 unsigned NumElements = cast<llvm::FixedVectorType>(DstTy)->getNumElements();
1655 return Builder.CreateVectorSplat(NumElements, Src, "splat");
1656 }
1657
1658 if (SrcType->isMatrixType() && DstType->isMatrixType())
1659 return EmitScalarCast(Src, SrcType, DstType, SrcTy, DstTy, Opts);
1660
1661 if (isa<llvm::VectorType>(SrcTy) || isa<llvm::VectorType>(DstTy)) {
1662 // Allow bitcast from vector to integer/fp of the same size.
1663 llvm::TypeSize SrcSize = SrcTy->getPrimitiveSizeInBits();
1664 llvm::TypeSize DstSize = DstTy->getPrimitiveSizeInBits();
1665 if (SrcSize == DstSize)
1666 return Builder.CreateBitCast(Src, DstTy, "conv");
1667
1668 // Conversions between vectors of different sizes are not allowed except
1669 // when vectors of half are involved. Operations on storage-only half
1670 // vectors require promoting half vector operands to float vectors and
1671 // truncating the result, which is either an int or float vector, to a
1672 // short or half vector.
1673
1674 // Source and destination are both expected to be vectors.
1675 llvm::Type *SrcElementTy = cast<llvm::VectorType>(SrcTy)->getElementType();
1676 llvm::Type *DstElementTy = cast<llvm::VectorType>(DstTy)->getElementType();
1677 (void)DstElementTy;
1678
1679 assert(((SrcElementTy->isIntegerTy() &&
1680 DstElementTy->isIntegerTy()) ||
1681 (SrcElementTy->isFloatingPointTy() &&
1682 DstElementTy->isFloatingPointTy())) &&
1683 "unexpected conversion between a floating-point vector and an "
1684 "integer vector");
1685
1686 // Truncate an i32 vector to an i16 vector.
1687 if (SrcElementTy->isIntegerTy())
1688 return Builder.CreateIntCast(Src, DstTy, false, "conv");
1689
1690 // Truncate a float vector to a half vector.
1691 if (SrcSize > DstSize)
1692 return Builder.CreateFPTrunc(Src, DstTy, "conv");
1693
1694 // Promote a half vector to a float vector.
1695 return Builder.CreateFPExt(Src, DstTy, "conv");
1696 }
1697
1698 // Finally, we have the arithmetic types: real int/float.
1699 Value *Res = nullptr;
1700 llvm::Type *ResTy = DstTy;
1701
1702 // An overflowing conversion has undefined behavior if either the source type
1703 // or the destination type is a floating-point type. However, we consider the
1704 // range of representable values for all floating-point types to be
1705 // [-inf,+inf], so no overflow can ever happen when the destination type is a
1706 // floating-point type.
1707 if (CGF.SanOpts.has(SanitizerKind::FloatCastOverflow) &&
1708 OrigSrcType->isFloatingType())
1709 EmitFloatConversionCheck(OrigSrc, OrigSrcType, Src, SrcType, DstType, DstTy,
1710 Loc);
1711
1712 // Cast to half through float if half isn't a native type.
1713 if (DstType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
1714 // Make sure we cast in a single step if from another FP type.
1715 if (SrcTy->isFloatingPointTy()) {
1716 // Use the intrinsic if the half type itself isn't supported
1717 // (as opposed to operations on half, available with NativeHalfType).
1719 return Builder.CreateCall(
1720 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, SrcTy), Src);
1721 // If the half type is supported, just use an fptrunc.
1722 return Builder.CreateFPTrunc(Src, DstTy);
1723 }
1724 DstTy = CGF.FloatTy;
1725 }
1726
1727 Res = EmitScalarCast(Src, SrcType, DstType, SrcTy, DstTy, Opts);
1728
1729 if (DstTy != ResTy) {
1731 assert(ResTy->isIntegerTy(16) && "Only half FP requires extra conversion");
1732 Res = Builder.CreateCall(
1733 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, CGF.CGM.FloatTy),
1734 Res);
1735 } else {
1736 Res = Builder.CreateFPTrunc(Res, ResTy, "conv");
1737 }
1738 }
1739
1740 if (Opts.EmitImplicitIntegerTruncationChecks)
1741 EmitIntegerTruncationCheck(Src, NoncanonicalSrcType, Res,
1742 NoncanonicalDstType, Loc);
1743
1744 if (Opts.EmitImplicitIntegerSignChangeChecks)
1745 EmitIntegerSignChangeCheck(Src, NoncanonicalSrcType, Res,
1746 NoncanonicalDstType, Loc);
1747
1748 return Res;
1749}
1750
1751Value *ScalarExprEmitter::EmitFixedPointConversion(Value *Src, QualType SrcTy,
1752 QualType DstTy,
1753 SourceLocation Loc) {
1754 llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
1755 llvm::Value *Result;
1756 if (SrcTy->isRealFloatingType())
1757 Result = FPBuilder.CreateFloatingToFixed(Src,
1758 CGF.getContext().getFixedPointSemantics(DstTy));
1759 else if (DstTy->isRealFloatingType())
1760 Result = FPBuilder.CreateFixedToFloating(Src,
1762 ConvertType(DstTy));
1763 else {
1764 auto SrcFPSema = CGF.getContext().getFixedPointSemantics(SrcTy);
1765 auto DstFPSema = CGF.getContext().getFixedPointSemantics(DstTy);
1766
1767 if (DstTy->isIntegerType())
1768 Result = FPBuilder.CreateFixedToInteger(Src, SrcFPSema,
1769 DstFPSema.getWidth(),
1770 DstFPSema.isSigned());
1771 else if (SrcTy->isIntegerType())
1772 Result = FPBuilder.CreateIntegerToFixed(Src, SrcFPSema.isSigned(),
1773 DstFPSema);
1774 else
1775 Result = FPBuilder.CreateFixedToFixed(Src, SrcFPSema, DstFPSema);
1776 }
1777 return Result;
1778}
1779
1780/// Emit a conversion from the specified complex type to the specified
1781/// destination type, where the destination type is an LLVM scalar type.
1782Value *ScalarExprEmitter::EmitComplexToScalarConversion(
1783 CodeGenFunction::ComplexPairTy Src, QualType SrcTy, QualType DstTy,
1784 SourceLocation Loc) {
1785 // Get the source element type.
1786 SrcTy = SrcTy->castAs<ComplexType>()->getElementType();
1787
1788 // Handle conversions to bool first, they are special: comparisons against 0.
1789 if (DstTy->isBooleanType()) {
1790 // Complex != 0 -> (Real != 0) | (Imag != 0)
1791 Src.first = EmitScalarConversion(Src.first, SrcTy, DstTy, Loc);
1792 Src.second = EmitScalarConversion(Src.second, SrcTy, DstTy, Loc);
1793 return Builder.CreateOr(Src.first, Src.second, "tobool");
1794 }
1795
1796 // C99 6.3.1.7p2: "When a value of complex type is converted to a real type,
1797 // the imaginary part of the complex value is discarded and the value of the
1798 // real part is converted according to the conversion rules for the
1799 // corresponding real type.
1800 return EmitScalarConversion(Src.first, SrcTy, DstTy, Loc);
1801}
1802
1803Value *ScalarExprEmitter::EmitNullValue(QualType Ty) {
1804 return CGF.EmitFromMemory(CGF.CGM.EmitNullConstant(Ty), Ty);
1805}
1806
/// Emit a sanitization check for the given "binary" operation (which
/// might actually be a unary increment which has been lowered to a binary
/// operation). The check passes if all values in \p Checks (which are \c i1),
/// are \c true.
void ScalarExprEmitter::EmitBinOpCheck(
    ArrayRef<std::pair<Value *, SanitizerKind::SanitizerOrdinal>> Checks,
    const BinOpInfo &Info) {
  assert(CGF.IsSanitizerScope);
  // The UBSan runtime handler to invoke on failure, plus the compile-time
  // (StaticData) and run-time (DynamicData) arguments it receives.
  SanitizerHandler Check;
  SmallVector<llvm::Constant *, 4> StaticData;
  SmallVector<llvm::Value *, 2> DynamicData;
  TrapReason TR;

  BinaryOperatorKind Opcode = Info.Opcode;
  // NOTE(review): a line or two following this (normalizing compound
  // assignment opcodes to their underlying operation) was elided in this
  // extract — confirm against the full source.

  // Every handler receives the source location first.
  StaticData.push_back(CGF.EmitCheckSourceLocation(Info.E->getExprLoc()));
  const UnaryOperator *UO = dyn_cast<UnaryOperator>(Info.E);
  if (UO && UO->getOpcode() == UO_Minus) {
    // Unary minus lowered to a binary subtraction: negation overflow.
    Check = SanitizerHandler::NegateOverflow;
    StaticData.push_back(CGF.EmitCheckTypeDescriptor(UO->getType()));
    DynamicData.push_back(Info.RHS);
  } else {
    if (BinaryOperator::isShiftOp(Opcode)) {
      // Shift LHS negative or too large, or RHS out of bounds.
      Check = SanitizerHandler::ShiftOutOfBounds;
      const BinaryOperator *BO = cast<BinaryOperator>(Info.E);
      StaticData.push_back(
          CGF.EmitCheckTypeDescriptor(BO->getLHS()->getType()));
      StaticData.push_back(
          CGF.EmitCheckTypeDescriptor(BO->getRHS()->getType()));
    } else if (Opcode == BO_Div || Opcode == BO_Rem) {
      // Divide or modulo by zero, or signed overflow (eg INT_MAX / -1).
      Check = SanitizerHandler::DivremOverflow;
      StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty));
    } else {
      // Arithmetic overflow (+, -, *).
      int ArithOverflowKind = 0;
      switch (Opcode) {
      case BO_Add: {
        Check = SanitizerHandler::AddOverflow;
        ArithOverflowKind = diag::UBSanArithKind::Add;
        break;
      }
      case BO_Sub: {
        Check = SanitizerHandler::SubOverflow;
        ArithOverflowKind = diag::UBSanArithKind::Sub;
        break;
      }
      case BO_Mul: {
        Check = SanitizerHandler::MulOverflow;
        ArithOverflowKind = diag::UBSanArithKind::Mul;
        break;
      }
      default:
        llvm_unreachable("unexpected opcode for bin op check");
      }
      StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty));
      // NOTE(review): the condition opening the following block was elided in
      // this extract; it appears to test whether the integer-overflow
      // sanitizers are in trapping mode — confirm against the full source.
              SanitizerKind::UnsignedIntegerOverflow) ||
            SanitizerKind::SignedIntegerOverflow)) {
        // Only pay the cost for constructing the trap diagnostic if they are
        // going to be used.
        CGF.CGM.BuildTrapReason(diag::trap_ubsan_arith_overflow, TR)
            << Info.Ty->isSignedIntegerOrEnumerationType() << ArithOverflowKind
            << Info.E;
      }
    }
    // All non-negation checks receive both operands at run time.
    DynamicData.push_back(Info.LHS);
    DynamicData.push_back(Info.RHS);
  }

  CGF.EmitCheck(Checks, Check, StaticData, DynamicData, &TR);
}
1883
1884//===----------------------------------------------------------------------===//
1885// Visitor Methods
1886//===----------------------------------------------------------------------===//
1887
1888Value *ScalarExprEmitter::VisitExpr(Expr *E) {
1889 CGF.ErrorUnsupported(E, "scalar expression");
1890 if (E->getType()->isVoidType())
1891 return nullptr;
1892 return llvm::PoisonValue::get(CGF.ConvertType(E->getType()));
1893}
1894
// Materialize the SYCL unique stable name as a global constant string and
// yield a pointer to it, address-space-cast to the expression's type.
Value *
ScalarExprEmitter::VisitSYCLUniqueStableNameExpr(SYCLUniqueStableNameExpr *E) {
  ASTContext &Context = CGF.getContext();
  // Address space in which the global string is emitted.
  // NOTE(review): the initializer line was elided in this extract; upstream
  // it queries the target's global/constant address space — confirm against
  // the full source.
  unsigned AddrSpace =
  llvm::Constant *GlobalConstStr = Builder.CreateGlobalString(
      E->ComputeName(Context), "__usn_str", AddrSpace);

  // Cast the i8* global to the expression's pointer type, changing address
  // space if necessary.
  llvm::Type *ExprTy = ConvertType(E->getType());
  return Builder.CreatePointerBitCastOrAddrSpaceCast(GlobalConstStr, ExprTy,
                                                     "usn_addr_cast");
}
1907
1908Value *ScalarExprEmitter::VisitEmbedExpr(EmbedExpr *E) {
1909 assert(E->getDataElementCount() == 1);
1910 auto It = E->begin();
1911 return Builder.getInt((*It)->getValue());
1912}
1913
1914Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
1915 // Vector Mask Case
1916 if (E->getNumSubExprs() == 2) {
1917 Value *LHS = CGF.EmitScalarExpr(E->getExpr(0));
1918 Value *RHS = CGF.EmitScalarExpr(E->getExpr(1));
1919 Value *Mask;
1920
1921 auto *LTy = cast<llvm::FixedVectorType>(LHS->getType());
1922 unsigned LHSElts = LTy->getNumElements();
1923
1924 Mask = RHS;
1925
1926 auto *MTy = cast<llvm::FixedVectorType>(Mask->getType());
1927
1928 // Mask off the high bits of each shuffle index.
1929 Value *MaskBits =
1930 llvm::ConstantInt::get(MTy, llvm::NextPowerOf2(LHSElts - 1) - 1);
1931 Mask = Builder.CreateAnd(Mask, MaskBits, "mask");
1932
1933 // newv = undef
1934 // mask = mask & maskbits
1935 // for each elt
1936 // n = extract mask i
1937 // x = extract val n
1938 // newv = insert newv, x, i
1939 auto *RTy = llvm::FixedVectorType::get(LTy->getElementType(),
1940 MTy->getNumElements());
1941 Value* NewV = llvm::PoisonValue::get(RTy);
1942 for (unsigned i = 0, e = MTy->getNumElements(); i != e; ++i) {
1943 Value *IIndx = llvm::ConstantInt::get(CGF.SizeTy, i);
1944 Value *Indx = Builder.CreateExtractElement(Mask, IIndx, "shuf_idx");
1945
1946 Value *VExt = Builder.CreateExtractElement(LHS, Indx, "shuf_elt");
1947 NewV = Builder.CreateInsertElement(NewV, VExt, IIndx, "shuf_ins");
1948 }
1949 return NewV;
1950 }
1951
1952 Value* V1 = CGF.EmitScalarExpr(E->getExpr(0));
1953 Value* V2 = CGF.EmitScalarExpr(E->getExpr(1));
1954
1955 SmallVector<int, 32> Indices;
1956 for (unsigned i = 2; i < E->getNumSubExprs(); ++i) {
1957 llvm::APSInt Idx = E->getShuffleMaskIdx(i - 2);
1958 // Check for -1 and output it as undef in the IR.
1959 if (Idx.isSigned() && Idx.isAllOnes())
1960 Indices.push_back(-1);
1961 else
1962 Indices.push_back(Idx.getZExtValue());
1963 }
1964
1965 return Builder.CreateShuffleVector(V1, V2, Indices, "shuffle");
1966}
1967
1968Value *ScalarExprEmitter::VisitConvertVectorExpr(ConvertVectorExpr *E) {
1969 QualType SrcType = E->getSrcExpr()->getType(),
1970 DstType = E->getType();
1971
1972 Value *Src = CGF.EmitScalarExpr(E->getSrcExpr());
1973
1974 SrcType = CGF.getContext().getCanonicalType(SrcType);
1975 DstType = CGF.getContext().getCanonicalType(DstType);
1976 if (SrcType == DstType) return Src;
1977
1978 assert(SrcType->isVectorType() &&
1979 "ConvertVector source type must be a vector");
1980 assert(DstType->isVectorType() &&
1981 "ConvertVector destination type must be a vector");
1982
1983 llvm::Type *SrcTy = Src->getType();
1984 llvm::Type *DstTy = ConvertType(DstType);
1985
1986 // Ignore conversions like int -> uint.
1987 if (SrcTy == DstTy)
1988 return Src;
1989
1990 QualType SrcEltType = SrcType->castAs<VectorType>()->getElementType(),
1991 DstEltType = DstType->castAs<VectorType>()->getElementType();
1992
1993 assert(SrcTy->isVectorTy() &&
1994 "ConvertVector source IR type must be a vector");
1995 assert(DstTy->isVectorTy() &&
1996 "ConvertVector destination IR type must be a vector");
1997
1998 llvm::Type *SrcEltTy = cast<llvm::VectorType>(SrcTy)->getElementType(),
1999 *DstEltTy = cast<llvm::VectorType>(DstTy)->getElementType();
2000
2001 if (DstEltType->isBooleanType()) {
2002 assert((SrcEltTy->isFloatingPointTy() ||
2003 isa<llvm::IntegerType>(SrcEltTy)) && "Unknown boolean conversion");
2004
2005 llvm::Value *Zero = llvm::Constant::getNullValue(SrcTy);
2006 if (SrcEltTy->isFloatingPointTy()) {
2007 CodeGenFunction::CGFPOptionsRAII FPOptions(CGF, E);
2008 return Builder.CreateFCmpUNE(Src, Zero, "tobool");
2009 } else {
2010 return Builder.CreateICmpNE(Src, Zero, "tobool");
2011 }
2012 }
2013
2014 // We have the arithmetic types: real int/float.
2015 Value *Res = nullptr;
2016
2017 if (isa<llvm::IntegerType>(SrcEltTy)) {
2018 bool InputSigned = SrcEltType->isSignedIntegerOrEnumerationType();
2019 if (isa<llvm::IntegerType>(DstEltTy))
2020 Res = Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
2021 else {
2022 CodeGenFunction::CGFPOptionsRAII FPOptions(CGF, E);
2023 if (InputSigned)
2024 Res = Builder.CreateSIToFP(Src, DstTy, "conv");
2025 else
2026 Res = Builder.CreateUIToFP(Src, DstTy, "conv");
2027 }
2028 } else if (isa<llvm::IntegerType>(DstEltTy)) {
2029 assert(SrcEltTy->isFloatingPointTy() && "Unknown real conversion");
2030 CodeGenFunction::CGFPOptionsRAII FPOptions(CGF, E);
2031 if (DstEltType->isSignedIntegerOrEnumerationType())
2032 Res = Builder.CreateFPToSI(Src, DstTy, "conv");
2033 else
2034 Res = Builder.CreateFPToUI(Src, DstTy, "conv");
2035 } else {
2036 assert(SrcEltTy->isFloatingPointTy() && DstEltTy->isFloatingPointTy() &&
2037 "Unknown real conversion");
2038 CodeGenFunction::CGFPOptionsRAII FPOptions(CGF, E);
2039 if (DstEltTy->getTypeID() < SrcEltTy->getTypeID())
2040 Res = Builder.CreateFPTrunc(Src, DstTy, "conv");
2041 else
2042 Res = Builder.CreateFPExt(Src, DstTy, "conv");
2043 }
2044
2045 return Res;
2046}
2047
// Emit a member access as a scalar rvalue, folding to a constant when the
// member's value is known at compile time.
Value *ScalarExprEmitter::VisitMemberExpr(MemberExpr *E) {
  if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E)) {
    // The member folds to a constant; the base is still emitted (and
    // discarded) to preserve its side effects.
    CGF.EmitIgnoredExpr(E->getBase());
    return CGF.emitScalarConstant(Constant, E);
  } else {
    Expr::EvalResult Result;
    // NOTE(review): the condition guarding the following block (an integer
    // constant evaluation of E) was elided in this extract — confirm against
    // the full source.
      llvm::APSInt Value = Result.Val.getInt();
      CGF.EmitIgnoredExpr(E->getBase());
      return Builder.getInt(Value);
    }
  }

  llvm::Value *Result = EmitLoadOfLValue(E);

  // If -fdebug-info-for-profiling is specified, emit a pseudo variable and its
  // debug info for the pointer, even if there is no variable associated with
  // the pointer's expression.
  if (CGF.CGM.getCodeGenOpts().DebugInfoForProfiling && CGF.getDebugInfo()) {
    if (llvm::LoadInst *Load = dyn_cast<llvm::LoadInst>(Result)) {
      if (llvm::GetElementPtrInst *GEP =
              dyn_cast<llvm::GetElementPtrInst>(Load->getPointerOperand())) {
        if (llvm::Instruction *Pointer =
                dyn_cast<llvm::Instruction>(GEP->getPointerOperand())) {
          QualType Ty = E->getBase()->getType();
          if (!E->isArrow())
            Ty = CGF.getContext().getPointerType(Ty);
          CGF.getDebugInfo()->EmitPseudoVariable(Builder, Pointer, Ty);
        }
      }
    }
  }
  return Result;
}
2082
Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
  TestAndClearIgnoreResultAssign();

  // Emit subscript expressions in rvalue context's. For most cases, this just
  // loads the lvalue formed by the subscript expr. However, we have to be
  // careful, because the base of a vector subscript is occasionally an rvalue,
  // so we can't get it as an lvalue.
  if (!E->getBase()->getType()->isVectorType() &&
      // NOTE(review): the second conjunct of this condition was elided in
      // this extract — confirm against the full source.
    return EmitLoadOfLValue(E);

  // Handle the vector case. The base must be a vector, the index must be an
  // integer value.
  Value *Base = Visit(E->getBase());
  Value *Idx = Visit(E->getIdx());
  QualType IdxTy = E->getIdx()->getType();

  // Under -fsanitize=array-bounds, check the element index before extraction.
  if (CGF.SanOpts.has(SanitizerKind::ArrayBounds))
    CGF.EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, /*Accessed*/true);

  return Builder.CreateExtractElement(Base, Idx, "vecext");
}
2105
2106Value *ScalarExprEmitter::VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E) {
2107 TestAndClearIgnoreResultAssign();
2108
2109 // Handle the vector case. The base must be a vector, the index must be an
2110 // integer value.
2111 Value *RowIdx = CGF.EmitMatrixIndexExpr(E->getRowIdx());
2112 Value *ColumnIdx = CGF.EmitMatrixIndexExpr(E->getColumnIdx());
2113
2114 const auto *MatrixTy = E->getBase()->getType()->castAs<ConstantMatrixType>();
2115 unsigned NumRows = MatrixTy->getNumRows();
2116 llvm::MatrixBuilder MB(Builder);
2117 Value *Idx = MB.CreateIndex(RowIdx, ColumnIdx, NumRows);
2118 if (CGF.CGM.getCodeGenOpts().OptimizationLevel > 0)
2119 MB.CreateIndexAssumption(Idx, MatrixTy->getNumElementsFlattened());
2120
2121 Value *Matrix = Visit(E->getBase());
2122
2123 // TODO: Should we emit bounds checks with SanitizerKind::ArrayBounds?
2124 return Builder.CreateExtractElement(Matrix, Idx, "matrixext");
2125}
2126
2127static int getMaskElt(llvm::ShuffleVectorInst *SVI, unsigned Idx,
2128 unsigned Off) {
2129 int MV = SVI->getMaskValue(Idx);
2130 if (MV == -1)
2131 return -1;
2132 return Off + MV;
2133}
2134
2135static int getAsInt32(llvm::ConstantInt *C, llvm::Type *I32Ty) {
2136 assert(llvm::ConstantInt::isValueValidForType(I32Ty, C->getZExtValue()) &&
2137 "Index operand too large for shufflevector mask!");
2138 return C->getZExtValue();
2139}
2140
// Emit an initializer list as a scalar or (fixed/scalable) vector value,
// folding element swizzles into shufflevector operations where possible.
Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
  bool Ignore = TestAndClearIgnoreResultAssign();
  (void)Ignore;
  unsigned NumInitElements = E->getNumInits();
  assert((Ignore == false ||
          (NumInitElements == 0 && E->getType()->isVoidType())) &&
         "init list ignored");

  // HLSL initialization lists in the AST are an expansion which can contain
  // side-effecting expressions wrapped in opaque value expressions. To properly
  // emit these we need to emit the opaque values before we emit the argument
  // expressions themselves. This is a little hacky, but it prevents us needing
  // to do a bigger AST-level change for a language feature that we need
  // deprecate in the near future. See related HLSL language proposals in the
  // proposals (https://github.com/microsoft/hlsl-specs/blob/main/proposals):
  // * 0005-strict-initializer-lists.md
  // * 0032-constructors.md
  if (CGF.getLangOpts().HLSL)
  // NOTE(review): the statement guarded by this 'if' was elided in this
  // extract; upstream it emits the list's opaque values via the HLSL
  // runtime — confirm against the full source.

  if (E->hadArrayRangeDesignator())
    CGF.ErrorUnsupported(E, "GNU array range designator extension");

  llvm::VectorType *VType =
      dyn_cast<llvm::VectorType>(ConvertType(E->getType()));

  if (!VType) {
    if (NumInitElements == 0) {
      // C++11 value-initialization for the scalar.
      return EmitNullValue(E->getType());
    }
    // We have a scalar in braces. Just use the first element.
    return Visit(E->getInit(0));
  }

  if (isa<llvm::ScalableVectorType>(VType)) {
    if (NumInitElements == 0) {
      // C++11 value-initialization for the vector.
      return EmitNullValue(E->getType());
    }

    if (NumInitElements == 1) {
      Expr *InitVector = E->getInit(0);

      // Initialize from another scalable vector of the same type.
      if (InitVector->getType().getCanonicalType() ==
      // NOTE(review): the right-hand side of this comparison was elided in
      // this extract; upstream it compares against E's canonical type —
      // confirm against the full source.
        return Visit(InitVector);
    }

    llvm_unreachable("Unexpected initialization of a scalable vector!");
  }

  unsigned ResElts = cast<llvm::FixedVectorType>(VType)->getNumElements();

  // Loop over initializers collecting the Value for each, and remembering
  // whether the source was swizzle (ExtVectorElementExpr). This will allow
  // us to fold the shuffle for the swizzle into the shuffle for the vector
  // initializer, since LLVM optimizers generally do not want to touch
  // shuffles.
  unsigned CurIdx = 0;
  bool VIsPoisonShuffle = false;
  llvm::Value *V = llvm::PoisonValue::get(VType);
  for (unsigned i = 0; i != NumInitElements; ++i) {
    Expr *IE = E->getInit(i);
    Value *Init = Visit(IE);
    SmallVector<int, 16> Args;

    llvm::VectorType *VVT = dyn_cast<llvm::VectorType>(Init->getType());

    // Handle scalar elements. If the scalar initializer is actually one
    // element of a different vector of the same width, use shuffle instead of
    // extract+insert.
    if (!VVT) {
      if (isa<ExtVectorElementExpr>(IE)) {
        llvm::ExtractElementInst *EI = cast<llvm::ExtractElementInst>(Init);

        if (cast<llvm::FixedVectorType>(EI->getVectorOperandType())
                ->getNumElements() == ResElts) {
          llvm::ConstantInt *C = cast<llvm::ConstantInt>(EI->getIndexOperand());
          Value *LHS = nullptr, *RHS = nullptr;
          if (CurIdx == 0) {
            // insert into poison -> shuffle (src, poison)
            // shufflemask must use an i32
            Args.push_back(getAsInt32(C, CGF.Int32Ty));
            Args.resize(ResElts, -1);

            LHS = EI->getVectorOperand();
            RHS = V;
            VIsPoisonShuffle = true;
          } else if (VIsPoisonShuffle) {
            // insert into poison shuffle && size match -> shuffle (v, src)
            llvm::ShuffleVectorInst *SVV = cast<llvm::ShuffleVectorInst>(V);
            for (unsigned j = 0; j != CurIdx; ++j)
              Args.push_back(getMaskElt(SVV, j, 0));
            Args.push_back(ResElts + C->getZExtValue());
            Args.resize(ResElts, -1);

            LHS = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);
            RHS = EI->getVectorOperand();
            VIsPoisonShuffle = false;
          }
          if (!Args.empty()) {
            V = Builder.CreateShuffleVector(LHS, RHS, Args);
            ++CurIdx;
            continue;
          }
        }
      }
      // Generic scalar element: plain insertelement at the current position.
      V = Builder.CreateInsertElement(V, Init, Builder.getInt32(CurIdx),
                                      "vecinit");
      VIsPoisonShuffle = false;
      ++CurIdx;
      continue;
    }

    unsigned InitElts = cast<llvm::FixedVectorType>(VVT)->getNumElements();

    // If the initializer is an ExtVecEltExpr (a swizzle), and the swizzle's
    // input is the same width as the vector being constructed, generate an
    // optimized shuffle of the swizzle input into the result.
    unsigned Offset = (CurIdx == 0) ? 0 : ResElts;
    if (isa<ExtVectorElementExpr>(IE)) {
      llvm::ShuffleVectorInst *SVI = cast<llvm::ShuffleVectorInst>(Init);
      Value *SVOp = SVI->getOperand(0);
      auto *OpTy = cast<llvm::FixedVectorType>(SVOp->getType());

      if (OpTy->getNumElements() == ResElts) {
        for (unsigned j = 0; j != CurIdx; ++j) {
          // If the current vector initializer is a shuffle with poison, merge
          // this shuffle directly into it.
          if (VIsPoisonShuffle) {
            Args.push_back(getMaskElt(cast<llvm::ShuffleVectorInst>(V), j, 0));
          } else {
            Args.push_back(j);
          }
        }
        for (unsigned j = 0, je = InitElts; j != je; ++j)
          Args.push_back(getMaskElt(SVI, j, Offset));
        Args.resize(ResElts, -1);

        if (VIsPoisonShuffle)
          V = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);

        Init = SVOp;
      }
    }

    // Extend init to result vector length, and then shuffle its contribution
    // to the vector initializer into V.
    if (Args.empty()) {
      for (unsigned j = 0; j != InitElts; ++j)
        Args.push_back(j);
      Args.resize(ResElts, -1);
      Init = Builder.CreateShuffleVector(Init, Args, "vext");

      Args.clear();
      for (unsigned j = 0; j != CurIdx; ++j)
        Args.push_back(j);
      for (unsigned j = 0; j != InitElts; ++j)
        Args.push_back(j + Offset);
      Args.resize(ResElts, -1);
    }

    // If V is poison, make sure it ends up on the RHS of the shuffle to aid
    // merging subsequent shuffles into this one.
    if (CurIdx == 0)
      std::swap(V, Init);
    V = Builder.CreateShuffleVector(V, Init, Args, "vecinit");
    VIsPoisonShuffle = isa<llvm::PoisonValue>(Init);
    CurIdx += InitElts;
  }

  // FIXME: evaluate codegen vs. shuffling against constant null vector.
  // Emit remaining default initializers.
  llvm::Type *EltTy = VType->getElementType();

  // Emit remaining default initializers
  for (/* Do not initialize i*/; CurIdx < ResElts; ++CurIdx) {
    Value *Idx = Builder.getInt32(CurIdx);
    llvm::Value *Init = llvm::Constant::getNullValue(EltTy);
    V = Builder.CreateInsertElement(V, Init, Idx, "vecinit");
  }
  return V;
}
2326
  // Weak declarations may resolve to a null address at link/load time, so
  // only non-weak decls have a known-non-null address.
  return !D->isWeak();
}
2330
2331static bool isLValueKnownNonNull(CodeGenFunction &CGF, const Expr *E) {
2332 E = E->IgnoreParens();
2333
2334 if (const auto *UO = dyn_cast<UnaryOperator>(E))
2335 if (UO->getOpcode() == UO_Deref)
2336 return CGF.isPointerKnownNonNull(UO->getSubExpr());
2337
2338 if (const auto *DRE = dyn_cast<DeclRefExpr>(E))
2339 return isDeclRefKnownNonNull(CGF, DRE->getDecl());
2340
2341 if (const auto *ME = dyn_cast<MemberExpr>(E)) {
2342 if (isa<FieldDecl>(ME->getMemberDecl()))
2343 return true;
2344 return isDeclRefKnownNonNull(CGF, ME->getMemberDecl());
2345 }
2346
2347 // Array subscripts? Anything else?
2348
2349 return false;
2350}
2351
  // Body of CodeGenFunction::isPointerKnownNonNull (signature line elided in
  // this extract). Only signable pointer types are expected here.
  assert(E->getType()->isSignableType(getContext()));

  E = E->IgnoreParens();

  // 'this' is assumed never null inside a member function.
  if (isa<CXXThisExpr>(E))
    return true;

  // &lvalue is non-null when the lvalue itself is known non-null.
  if (const auto *UO = dyn_cast<UnaryOperator>(E))
    if (UO->getOpcode() == UO_AddrOf)
      return isLValueKnownNonNull(*this, UO->getSubExpr());

  // Function/array-to-pointer decay yields the address of the decayed
  // entity; defer to the underlying lvalue.
  if (const auto *CE = dyn_cast<CastExpr>(E))
    if (CE->getCastKind() == CK_FunctionToPointerDecay ||
        CE->getCastKind() == CK_ArrayToPointerDecay)
      return isLValueKnownNonNull(*this, CE->getSubExpr());

  // Maybe honor __nonnull?

  return false;
}
2373
  // Body of a predicate deciding whether a class cast's operand needs a null
  // check (signature line elided in this extract). Returns false when the
  // operand is provably non-null.
  const Expr *E = CE->getSubExpr();

  // Unchecked derived-to-base casts are only emitted where the operand is
  // already known valid, so no null check is needed.
  if (CE->getCastKind() == CK_UncheckedDerivedToBase)
    return false;

  if (isa<CXXThisExpr>(E->IgnoreParens())) {
    // We always assume that 'this' is never null.
    return false;
  }

  if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(CE)) {
    // And that glvalue casts are never null.
    if (ICE->isGLValue())
      return false;
  }

  // Otherwise the operand might be null: check it.
  return true;
}
2393
// RHS is an aggregate type
// Elementwise-cast helper: flattens the aggregate on the RHS into leaf
// loads, then converts each leaf to the matching element of the LHS vector
// (or to the single LHS builtin value).
// NOTE(review): the first line of this function's signature and the
// declaration of LoadGEPList were elided in this extract — confirm against
// the full source.
                                     QualType RHSTy, QualType LHSTy,
                                     SourceLocation Loc) {
  SmallVector<QualType, 16> SrcTypes; // Flattened type
  CGF.FlattenAccessAndType(RHSVal, RHSTy, LoadGEPList, SrcTypes);
  // LHS is either a vector or a builtin.
  // If it's a vector, create a temp alloca to store into and return that.
  if (auto *VecTy = LHSTy->getAs<VectorType>()) {
    assert(SrcTypes.size() >= VecTy->getNumElements() &&
           "Flattened type on RHS must have more elements than vector on LHS.");
    llvm::Value *V =
        CGF.Builder.CreateLoad(CGF.CreateIRTemp(LHSTy, "flatcast.tmp"));
    // Load each flattened source leaf, convert it to the vector element
    // type, and insert it into V.
    for (unsigned I = 0, E = VecTy->getNumElements(); I < E; I++) {
      llvm::Value *Load = CGF.Builder.CreateLoad(LoadGEPList[I].first, "load");
      llvm::Value *Idx = LoadGEPList[I].second;
      // A non-null index means the leaf lives inside a vector: extract it.
      Load = Idx ? CGF.Builder.CreateExtractElement(Load, Idx, "vec.extract")
                 : Load;
      llvm::Value *Cast = CGF.EmitScalarConversion(
          Load, SrcTypes[I], VecTy->getElementType(), Loc);
      V = CGF.Builder.CreateInsertElement(V, Cast, I);
    }
    return V;
  }
  // If it's a builtin, just do an extract element or load.
  assert(LHSTy->isBuiltinType() &&
         "Destination type must be a vector or builtin type.");
  llvm::Value *Load = CGF.Builder.CreateLoad(LoadGEPList[0].first, "load");
  llvm::Value *Idx = LoadGEPList[0].second;
  Load =
      Idx ? CGF.Builder.CreateExtractElement(Load, Idx, "vec.extract") : Load;
  return CGF.EmitScalarConversion(Load, LHSTy, SrcTypes[0], Loc);
}
2429
2430// VisitCastExpr - Emit code for an explicit or implicit cast. Implicit casts
2431// have to handle a more broad range of conversions than explicit casts, as they
2432// handle things like function to ptr-to-function decay etc.
2433Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
2434 Expr *E = CE->getSubExpr();
2435 QualType DestTy = CE->getType();
2436 CastKind Kind = CE->getCastKind();
2437 CodeGenFunction::CGFPOptionsRAII FPOptions(CGF, CE);
2438
2439 // These cases are generally not written to ignore the result of
2440 // evaluating their sub-expressions, so we clear this now.
2441 bool Ignored = TestAndClearIgnoreResultAssign();
2442
2443 // Since almost all cast kinds apply to scalars, this switch doesn't have
2444 // a default case, so the compiler will warn on a missing case. The cases
2445 // are in the same order as in the CastKind enum.
2446 switch (Kind) {
2447 case CK_Dependent: llvm_unreachable("dependent cast kind in IR gen!");
2448 case CK_BuiltinFnToFnPtr:
2449 llvm_unreachable("builtin functions are handled elsewhere");
2450
2451 case CK_LValueBitCast:
2452 case CK_ObjCObjectLValueCast: {
2453 Address Addr = EmitLValue(E).getAddress();
2454 Addr = Addr.withElementType(CGF.ConvertTypeForMem(DestTy));
2455 LValue LV = CGF.MakeAddrLValue(Addr, DestTy);
2456 return EmitLoadOfLValue(LV, CE->getExprLoc());
2457 }
2458
2459 case CK_LValueToRValueBitCast: {
2460 LValue SourceLVal = CGF.EmitLValue(E);
2461 Address Addr =
2462 SourceLVal.getAddress().withElementType(CGF.ConvertTypeForMem(DestTy));
2463 LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy);
2464 DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
2465 return EmitLoadOfLValue(DestLV, CE->getExprLoc());
2466 }
2467
2468 case CK_CPointerToObjCPointerCast:
2469 case CK_BlockPointerToObjCPointerCast:
2470 case CK_AnyPointerToBlockPointerCast:
2471 case CK_BitCast: {
2472 Value *Src = Visit(E);
2473 llvm::Type *SrcTy = Src->getType();
2474 llvm::Type *DstTy = ConvertType(DestTy);
2475
2476 // FIXME: this is a gross but seemingly necessary workaround for an issue
2477 // manifesting when a target uses a non-default AS for indirect sret args,
2478 // but the source HLL is generic, wherein a valid C-cast or reinterpret_cast
2479 // on the address of a local struct that gets returned by value yields an
2480 // invalid bitcast from the a pointer to the IndirectAS to a pointer to the
2481 // DefaultAS. We can only do this subversive thing because sret args are
2482 // manufactured and them residing in the IndirectAS is a target specific
2483 // detail, and doing an AS cast here still retains the semantics the user
2484 // expects. It is desirable to remove this iff a better solution is found.
2485 if (auto A = dyn_cast<llvm::Argument>(Src); A && A->hasStructRetAttr())
2487 CGF, Src, E->getType().getAddressSpace(), DstTy);
2488
2489 assert(
2490 (!SrcTy->isPtrOrPtrVectorTy() || !DstTy->isPtrOrPtrVectorTy() ||
2491 SrcTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace()) &&
2492 "Address-space cast must be used to convert address spaces");
2493
2494 if (CGF.SanOpts.has(SanitizerKind::CFIUnrelatedCast)) {
2495 if (auto *PT = DestTy->getAs<PointerType>()) {
2497 PT->getPointeeType(),
2498 Address(Src,
2500 E->getType()->castAs<PointerType>()->getPointeeType()),
2501 CGF.getPointerAlign()),
2502 /*MayBeNull=*/true, CodeGenFunction::CFITCK_UnrelatedCast,
2503 CE->getBeginLoc());
2504 }
2505 }
2506
2507 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
2508 const QualType SrcType = E->getType();
2509
2510 if (SrcType.mayBeNotDynamicClass() && DestTy.mayBeDynamicClass()) {
2511 // Casting to pointer that could carry dynamic information (provided by
2512 // invariant.group) requires launder.
2513 Src = Builder.CreateLaunderInvariantGroup(Src);
2514 } else if (SrcType.mayBeDynamicClass() && DestTy.mayBeNotDynamicClass()) {
2515 // Casting to pointer that does not carry dynamic information (provided
2516 // by invariant.group) requires stripping it. Note that we don't do it
2517 // if the source could not be dynamic type and destination could be
2518 // dynamic because dynamic information is already laundered. It is
2519 // because launder(strip(src)) == launder(src), so there is no need to
2520 // add extra strip before launder.
2521 Src = Builder.CreateStripInvariantGroup(Src);
2522 }
2523 }
2524
2525 // Update heapallocsite metadata when there is an explicit pointer cast.
2526 if (auto *CI = dyn_cast<llvm::CallBase>(Src)) {
2527 if (CI->getMetadata("heapallocsite") && isa<ExplicitCastExpr>(CE) &&
2528 !isa<CastExpr>(E)) {
2529 QualType PointeeType = DestTy->getPointeeType();
2530 if (!PointeeType.isNull())
2531 CGF.getDebugInfo()->addHeapAllocSiteMetadata(CI, PointeeType,
2532 CE->getExprLoc());
2533 }
2534 }
2535
2536 // If Src is a fixed vector and Dst is a scalable vector, and both have the
2537 // same element type, use the llvm.vector.insert intrinsic to perform the
2538 // bitcast.
2539 if (auto *FixedSrcTy = dyn_cast<llvm::FixedVectorType>(SrcTy)) {
2540 if (auto *ScalableDstTy = dyn_cast<llvm::ScalableVectorType>(DstTy)) {
2541 // If we are casting a fixed i8 vector to a scalable i1 predicate
2542 // vector, use a vector insert and bitcast the result.
2543 if (ScalableDstTy->getElementType()->isIntegerTy(1) &&
2544 FixedSrcTy->getElementType()->isIntegerTy(8)) {
2545 ScalableDstTy = llvm::ScalableVectorType::get(
2546 FixedSrcTy->getElementType(),
2547 llvm::divideCeil(
2548 ScalableDstTy->getElementCount().getKnownMinValue(), 8));
2549 }
2550 if (FixedSrcTy->getElementType() == ScalableDstTy->getElementType()) {
2551 llvm::Value *PoisonVec = llvm::PoisonValue::get(ScalableDstTy);
2552 llvm::Value *Result = Builder.CreateInsertVector(
2553 ScalableDstTy, PoisonVec, Src, uint64_t(0), "cast.scalable");
2554 ScalableDstTy = cast<llvm::ScalableVectorType>(
2555 llvm::VectorType::getWithSizeAndScalar(ScalableDstTy, DstTy));
2556 if (Result->getType() != ScalableDstTy)
2557 Result = Builder.CreateBitCast(Result, ScalableDstTy);
2558 if (Result->getType() != DstTy)
2559 Result = Builder.CreateExtractVector(DstTy, Result, uint64_t(0));
2560 return Result;
2561 }
2562 }
2563 }
2564
2565 // If Src is a scalable vector and Dst is a fixed vector, and both have the
2566 // same element type, use the llvm.vector.extract intrinsic to perform the
2567 // bitcast.
2568 if (auto *ScalableSrcTy = dyn_cast<llvm::ScalableVectorType>(SrcTy)) {
2569 if (auto *FixedDstTy = dyn_cast<llvm::FixedVectorType>(DstTy)) {
2570 // If we are casting a scalable i1 predicate vector to a fixed i8
2571 // vector, bitcast the source and use a vector extract.
2572 if (ScalableSrcTy->getElementType()->isIntegerTy(1) &&
2573 FixedDstTy->getElementType()->isIntegerTy(8)) {
2574 if (!ScalableSrcTy->getElementCount().isKnownMultipleOf(8)) {
2575 ScalableSrcTy = llvm::ScalableVectorType::get(
2576 ScalableSrcTy->getElementType(),
2577 llvm::alignTo<8>(
2578 ScalableSrcTy->getElementCount().getKnownMinValue()));
2579 llvm::Value *ZeroVec = llvm::Constant::getNullValue(ScalableSrcTy);
2580 Src = Builder.CreateInsertVector(ScalableSrcTy, ZeroVec, Src,
2581 uint64_t(0));
2582 }
2583
2584 ScalableSrcTy = llvm::ScalableVectorType::get(
2585 FixedDstTy->getElementType(),
2586 ScalableSrcTy->getElementCount().getKnownMinValue() / 8);
2587 Src = Builder.CreateBitCast(Src, ScalableSrcTy);
2588 }
2589 if (ScalableSrcTy->getElementType() == FixedDstTy->getElementType())
2590 return Builder.CreateExtractVector(DstTy, Src, uint64_t(0),
2591 "cast.fixed");
2592 }
2593 }
2594
2595 // Perform VLAT <-> VLST bitcast through memory.
2596 // TODO: since the llvm.vector.{insert,extract} intrinsics
2597 // require the element types of the vectors to be the same, we
2598 // need to keep this around for bitcasts between VLAT <-> VLST where
2599 // the element types of the vectors are not the same, until we figure
2600 // out a better way of doing these casts.
2601 if ((isa<llvm::FixedVectorType>(SrcTy) &&
2605 Address Addr = CGF.CreateDefaultAlignTempAlloca(SrcTy, "saved-value");
2606 LValue LV = CGF.MakeAddrLValue(Addr, E->getType());
2607 CGF.EmitStoreOfScalar(Src, LV);
2608 Addr = Addr.withElementType(CGF.ConvertTypeForMem(DestTy));
2609 LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy);
2610 DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
2611 return EmitLoadOfLValue(DestLV, CE->getExprLoc());
2612 }
2613
2614 llvm::Value *Result = Builder.CreateBitCast(Src, DstTy);
2615 return CGF.authPointerToPointerCast(Result, E->getType(), DestTy);
2616 }
2617 case CK_AddressSpaceConversion: {
2618 Expr::EvalResult Result;
2619 if (E->EvaluateAsRValue(Result, CGF.getContext()) &&
2620 Result.Val.isNullPointer()) {
2621 // If E has side effect, it is emitted even if its final result is a
2622 // null pointer. In that case, a DCE pass should be able to
2623 // eliminate the useless instructions emitted during translating E.
2624 if (Result.HasSideEffects)
2625 Visit(E);
2627 ConvertType(DestTy)), DestTy);
2628 }
2629 // Since target may map different address spaces in AST to the same address
2630 // space, an address space conversion may end up as a bitcast.
2632 CGF, Visit(E), E->getType()->getPointeeType().getAddressSpace(),
2633 ConvertType(DestTy));
2634 }
2635 case CK_AtomicToNonAtomic:
2636 case CK_NonAtomicToAtomic:
2637 case CK_UserDefinedConversion:
2638 return Visit(E);
2639
2640 case CK_NoOp: {
2641 return CE->changesVolatileQualification() ? EmitLoadOfLValue(CE) : Visit(E);
2642 }
2643
2644 case CK_BaseToDerived: {
2645 const CXXRecordDecl *DerivedClassDecl = DestTy->getPointeeCXXRecordDecl();
2646 assert(DerivedClassDecl && "BaseToDerived arg isn't a C++ object pointer!");
2647
2648 Address Base = CGF.EmitPointerWithAlignment(E);
2649 Address Derived =
2650 CGF.GetAddressOfDerivedClass(Base, DerivedClassDecl,
2651 CE->path_begin(), CE->path_end(),
2653
2654 // C++11 [expr.static.cast]p11: Behavior is undefined if a downcast is
2655 // performed and the object is not of the derived type.
2656 if (CGF.sanitizePerformTypeCheck())
2658 Derived, DestTy->getPointeeType());
2659
2660 if (CGF.SanOpts.has(SanitizerKind::CFIDerivedCast))
2661 CGF.EmitVTablePtrCheckForCast(DestTy->getPointeeType(), Derived,
2662 /*MayBeNull=*/true,
2664 CE->getBeginLoc());
2665
2666 return CGF.getAsNaturalPointerTo(Derived, CE->getType()->getPointeeType());
2667 }
2668 case CK_UncheckedDerivedToBase:
2669 case CK_DerivedToBase: {
2670 // The EmitPointerWithAlignment path does this fine; just discard
2671 // the alignment.
2673 CE->getType()->getPointeeType());
2674 }
2675
2676 case CK_Dynamic: {
2677 Address V = CGF.EmitPointerWithAlignment(E);
2678 const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(CE);
2679 return CGF.EmitDynamicCast(V, DCE);
2680 }
2681
2682 case CK_ArrayToPointerDecay:
2684 CE->getType()->getPointeeType());
2685 case CK_FunctionToPointerDecay:
2686 return EmitLValue(E).getPointer(CGF);
2687
2688 case CK_NullToPointer:
2689 if (MustVisitNullValue(E))
2690 CGF.EmitIgnoredExpr(E);
2691
2692 return CGF.CGM.getNullPointer(cast<llvm::PointerType>(ConvertType(DestTy)),
2693 DestTy);
2694
2695 case CK_NullToMemberPointer: {
2696 if (MustVisitNullValue(E))
2697 CGF.EmitIgnoredExpr(E);
2698
2699 const MemberPointerType *MPT = CE->getType()->getAs<MemberPointerType>();
2700 return CGF.CGM.getCXXABI().EmitNullMemberPointer(MPT);
2701 }
2702
2703 case CK_ReinterpretMemberPointer:
2704 case CK_BaseToDerivedMemberPointer:
2705 case CK_DerivedToBaseMemberPointer: {
2706 Value *Src = Visit(E);
2707
2708 // Note that the AST doesn't distinguish between checked and
2709 // unchecked member pointer conversions, so we always have to
2710 // implement checked conversions here. This is inefficient when
2711 // actual control flow may be required in order to perform the
2712 // check, which it is for data member pointers (but not member
2713 // function pointers on Itanium and ARM).
2714 return CGF.CGM.getCXXABI().EmitMemberPointerConversion(CGF, CE, Src);
2715 }
2716
2717 case CK_ARCProduceObject:
2718 return CGF.EmitARCRetainScalarExpr(E);
2719 case CK_ARCConsumeObject:
2720 return CGF.EmitObjCConsumeObject(E->getType(), Visit(E));
2721 case CK_ARCReclaimReturnedObject:
2722 return CGF.EmitARCReclaimReturnedObject(E, /*allowUnsafe*/ Ignored);
2723 case CK_ARCExtendBlockObject:
2724 return CGF.EmitARCExtendBlockObject(E);
2725
2726 case CK_CopyAndAutoreleaseBlockObject:
2727 return CGF.EmitBlockCopyAndAutorelease(Visit(E), E->getType());
2728
2729 case CK_FloatingRealToComplex:
2730 case CK_FloatingComplexCast:
2731 case CK_IntegralRealToComplex:
2732 case CK_IntegralComplexCast:
2733 case CK_IntegralComplexToFloatingComplex:
2734 case CK_FloatingComplexToIntegralComplex:
2735 case CK_ConstructorConversion:
2736 case CK_ToUnion:
2737 case CK_HLSLArrayRValue:
2738 llvm_unreachable("scalar cast to non-scalar value");
2739
2740 case CK_LValueToRValue:
2741 assert(CGF.getContext().hasSameUnqualifiedType(E->getType(), DestTy));
2742 assert(E->isGLValue() && "lvalue-to-rvalue applied to r-value!");
2743 return Visit(E);
2744
2745 case CK_IntegralToPointer: {
2746 Value *Src = Visit(E);
2747
2748 // First, convert to the correct width so that we control the kind of
2749 // extension.
2750 auto DestLLVMTy = ConvertType(DestTy);
2751 llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DestLLVMTy);
2752 bool InputSigned = E->getType()->isSignedIntegerOrEnumerationType();
2753 llvm::Value* IntResult =
2754 Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
2755
2756 auto *IntToPtr = Builder.CreateIntToPtr(IntResult, DestLLVMTy);
2757
2758 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
2759 // Going from integer to pointer that could be dynamic requires reloading
2760 // dynamic information from invariant.group.
2761 if (DestTy.mayBeDynamicClass())
2762 IntToPtr = Builder.CreateLaunderInvariantGroup(IntToPtr);
2763 }
2764
2765 IntToPtr = CGF.authPointerToPointerCast(IntToPtr, E->getType(), DestTy);
2766 return IntToPtr;
2767 }
2768 case CK_PointerToIntegral: {
2769 assert(!DestTy->isBooleanType() && "bool should use PointerToBool");
2770 auto *PtrExpr = Visit(E);
2771
2772 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
2773 const QualType SrcType = E->getType();
2774
2775 // Casting to integer requires stripping dynamic information as it does
2776 // not carries it.
2777 if (SrcType.mayBeDynamicClass())
2778 PtrExpr = Builder.CreateStripInvariantGroup(PtrExpr);
2779 }
2780
2781 PtrExpr = CGF.authPointerToPointerCast(PtrExpr, E->getType(), DestTy);
2782 return Builder.CreatePtrToInt(PtrExpr, ConvertType(DestTy));
2783 }
2784 case CK_ToVoid: {
2785 CGF.EmitIgnoredExpr(E);
2786 return nullptr;
2787 }
2788 case CK_MatrixCast: {
2789 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2790 CE->getExprLoc());
2791 }
2792 // CK_HLSLAggregateSplatCast only handles splatting to vectors from a vec1
2793 // Casts were inserted in Sema to Cast the Src Expr to a Scalar and
2794 // To perform any necessary Scalar Cast, so this Cast can be handled
2795 // by the regular Vector Splat cast code.
2796 case CK_HLSLAggregateSplatCast:
2797 case CK_VectorSplat: {
2798 llvm::Type *DstTy = ConvertType(DestTy);
2799 Value *Elt = Visit(E);
2800 // Splat the element across to all elements
2801 llvm::ElementCount NumElements =
2802 cast<llvm::VectorType>(DstTy)->getElementCount();
2803 return Builder.CreateVectorSplat(NumElements, Elt, "splat");
2804 }
2805
2806 case CK_FixedPointCast:
2807 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2808 CE->getExprLoc());
2809
2810 case CK_FixedPointToBoolean:
2811 assert(E->getType()->isFixedPointType() &&
2812 "Expected src type to be fixed point type");
2813 assert(DestTy->isBooleanType() && "Expected dest type to be boolean type");
2814 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2815 CE->getExprLoc());
2816
2817 case CK_FixedPointToIntegral:
2818 assert(E->getType()->isFixedPointType() &&
2819 "Expected src type to be fixed point type");
2820 assert(DestTy->isIntegerType() && "Expected dest type to be an integer");
2821 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2822 CE->getExprLoc());
2823
2824 case CK_IntegralToFixedPoint:
2825 assert(E->getType()->isIntegerType() &&
2826 "Expected src type to be an integer");
2827 assert(DestTy->isFixedPointType() &&
2828 "Expected dest type to be fixed point type");
2829 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2830 CE->getExprLoc());
2831
2832 case CK_IntegralCast: {
2833 if (E->getType()->isExtVectorType() && DestTy->isExtVectorType()) {
2834 QualType SrcElTy = E->getType()->castAs<VectorType>()->getElementType();
2835 return Builder.CreateIntCast(Visit(E), ConvertType(DestTy),
2837 "conv");
2838 }
2839 ScalarConversionOpts Opts;
2840 if (auto *ICE = dyn_cast<ImplicitCastExpr>(CE)) {
2841 if (!ICE->isPartOfExplicitCast())
2842 Opts = ScalarConversionOpts(CGF.SanOpts);
2843 }
2844 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2845 CE->getExprLoc(), Opts);
2846 }
2847 case CK_IntegralToFloating: {
2848 if (E->getType()->isVectorType() && DestTy->isVectorType()) {
2849 // TODO: Support constrained FP intrinsics.
2850 QualType SrcElTy = E->getType()->castAs<VectorType>()->getElementType();
2851 if (SrcElTy->isSignedIntegerOrEnumerationType())
2852 return Builder.CreateSIToFP(Visit(E), ConvertType(DestTy), "conv");
2853 return Builder.CreateUIToFP(Visit(E), ConvertType(DestTy), "conv");
2854 }
2855 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
2856 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2857 CE->getExprLoc());
2858 }
2859 case CK_FloatingToIntegral: {
2860 if (E->getType()->isVectorType() && DestTy->isVectorType()) {
2861 // TODO: Support constrained FP intrinsics.
2862 QualType DstElTy = DestTy->castAs<VectorType>()->getElementType();
2863 if (DstElTy->isSignedIntegerOrEnumerationType())
2864 return Builder.CreateFPToSI(Visit(E), ConvertType(DestTy), "conv");
2865 return Builder.CreateFPToUI(Visit(E), ConvertType(DestTy), "conv");
2866 }
2867 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
2868 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2869 CE->getExprLoc());
2870 }
2871 case CK_FloatingCast: {
2872 if (E->getType()->isVectorType() && DestTy->isVectorType()) {
2873 // TODO: Support constrained FP intrinsics.
2874 QualType SrcElTy = E->getType()->castAs<VectorType>()->getElementType();
2875 QualType DstElTy = DestTy->castAs<VectorType>()->getElementType();
2876 if (DstElTy->castAs<BuiltinType>()->getKind() <
2877 SrcElTy->castAs<BuiltinType>()->getKind())
2878 return Builder.CreateFPTrunc(Visit(E), ConvertType(DestTy), "conv");
2879 return Builder.CreateFPExt(Visit(E), ConvertType(DestTy), "conv");
2880 }
2881 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
2882 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2883 CE->getExprLoc());
2884 }
2885 case CK_FixedPointToFloating:
2886 case CK_FloatingToFixedPoint: {
2887 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
2888 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2889 CE->getExprLoc());
2890 }
2891 case CK_BooleanToSignedIntegral: {
2892 ScalarConversionOpts Opts;
2893 Opts.TreatBooleanAsSigned = true;
2894 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2895 CE->getExprLoc(), Opts);
2896 }
2897 case CK_IntegralToBoolean:
2898 return EmitIntToBoolConversion(Visit(E));
2899 case CK_PointerToBoolean:
2900 return EmitPointerToBoolConversion(Visit(E), E->getType());
2901 case CK_FloatingToBoolean: {
2902 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
2903 return EmitFloatToBoolConversion(Visit(E));
2904 }
2905 case CK_MemberPointerToBoolean: {
2906 llvm::Value *MemPtr = Visit(E);
2907 const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>();
2908 return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, MemPtr, MPT);
2909 }
2910
2911 case CK_FloatingComplexToReal:
2912 case CK_IntegralComplexToReal:
2913 return CGF.EmitComplexExpr(E, false, true).first;
2914
2915 case CK_FloatingComplexToBoolean:
2916 case CK_IntegralComplexToBoolean: {
2918
2919 // TODO: kill this function off, inline appropriate case here
2920 return EmitComplexToScalarConversion(V, E->getType(), DestTy,
2921 CE->getExprLoc());
2922 }
2923
2924 case CK_ZeroToOCLOpaqueType: {
2925 assert((DestTy->isEventT() || DestTy->isQueueT() ||
2926 DestTy->isOCLIntelSubgroupAVCType()) &&
2927 "CK_ZeroToOCLEvent cast on non-event type");
2928 return llvm::Constant::getNullValue(ConvertType(DestTy));
2929 }
2930
2931 case CK_IntToOCLSampler:
2932 return CGF.CGM.createOpenCLIntToSamplerConversion(E, CGF);
2933
2934 case CK_HLSLVectorTruncation: {
2935 assert((DestTy->isVectorType() || DestTy->isBuiltinType()) &&
2936 "Destination type must be a vector or builtin type.");
2937 Value *Vec = Visit(E);
2938 if (auto *VecTy = DestTy->getAs<VectorType>()) {
2939 SmallVector<int> Mask;
2940 unsigned NumElts = VecTy->getNumElements();
2941 for (unsigned I = 0; I != NumElts; ++I)
2942 Mask.push_back(I);
2943
2944 return Builder.CreateShuffleVector(Vec, Mask, "trunc");
2945 }
2946 llvm::Value *Zero = llvm::Constant::getNullValue(CGF.SizeTy);
2947 return Builder.CreateExtractElement(Vec, Zero, "cast.vtrunc");
2948 }
2949 case CK_HLSLElementwiseCast: {
2950 RValue RV = CGF.EmitAnyExpr(E);
2951 SourceLocation Loc = CE->getExprLoc();
2952 QualType SrcTy = E->getType();
2953
2954 assert(RV.isAggregate() && "Not a valid HLSL Elementwise Cast.");
2955 // RHS is an aggregate
2956 Address SrcVal = RV.getAggregateAddress();
2957 return EmitHLSLElementwiseCast(CGF, SrcVal, SrcTy, DestTy, Loc);
2958 }
2959 } // end of switch
2960
2961 llvm_unreachable("unknown scalar cast");
2962}
2963
2964Value *ScalarExprEmitter::VisitStmtExpr(const StmtExpr *E) {
2965 CodeGenFunction::StmtExprEvaluation eval(CGF);
2966 Address RetAlloca = CGF.EmitCompoundStmt(*E->getSubStmt(),
2967 !E->getType()->isVoidType());
2968 if (!RetAlloca.isValid())
2969 return nullptr;
2970 return CGF.EmitLoadOfScalar(CGF.MakeAddrLValue(RetAlloca, E->getType()),
2971 E->getExprLoc());
2972}
2973
2974Value *ScalarExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
2975 CodeGenFunction::RunCleanupsScope Scope(CGF);
2976 Value *V = Visit(E->getSubExpr());
2977 // Defend against dominance problems caused by jumps out of expression
2978 // evaluation through the shared cleanup block.
2979 Scope.ForceCleanup({&V});
2980 return V;
2981}
2982
2983//===----------------------------------------------------------------------===//
2984// Unary Operators
2985//===----------------------------------------------------------------------===//
2986
2988 llvm::Value *InVal, bool IsInc,
2989 FPOptions FPFeatures) {
2990 BinOpInfo BinOp;
2991 BinOp.LHS = InVal;
2992 BinOp.RHS = llvm::ConstantInt::get(InVal->getType(), 1, false);
2993 BinOp.Ty = E->getType();
2994 BinOp.Opcode = IsInc ? BO_Add : BO_Sub;
2995 BinOp.FPFeatures = FPFeatures;
2996 BinOp.E = E;
2997 return BinOp;
2998}
2999
/// Emit ++/-- on a signed integer value, selecting a wrapping add, an nsw add,
/// or an overflow-checked operation depending on the signed-overflow mode
/// (-fwrapv / default UB / -ftrapv) and the signed-integer-overflow sanitizer.
llvm::Value *ScalarExprEmitter::EmitIncDecConsiderOverflowBehavior(
    const UnaryOperator *E, llvm::Value *InVal, bool IsInc) {
  // Both directions are emitted as an add of +1 or -1.
  llvm::Value *Amount =
      llvm::ConstantInt::get(InVal->getType(), IsInc ? 1 : -1, true);
  StringRef Name = IsInc ? "inc" : "dec";
  switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
  case LangOptions::SOB_Defined:
    // Overflow is defined (-fwrapv): plain wrapping add, unless the
    // sanitizer still wants to diagnose it — then fall through.
    if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
      return Builder.CreateAdd(InVal, Amount, Name);
    [[fallthrough]];
  case LangOptions::SOB_Undefined:
    // Overflow is UB: emit an nsw add; with the sanitizer enabled, fall
    // through to the checked path instead.
    if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
      return Builder.CreateNSWAdd(InVal, Amount, Name);
    [[fallthrough]];
  case LangOptions::SOB_Trapping:
    BinOpInfo Info = createBinOpInfoFromIncDec(
        E, InVal, IsInc, E->getFPFeaturesInEffect(CGF.getLangOpts()));
    // Skip the runtime check when the operation provably cannot overflow.
    if (!E->canOverflow() || CanElideOverflowCheck(CGF.getContext(), Info))
      return Builder.CreateNSWAdd(InVal, Amount, Name);
    return EmitOverflowCheckedBinOp(Info);
  }
  llvm_unreachable("Unknown SignedOverflowBehaviorTy");
}
3023
3024/// For the purposes of overflow pattern exclusion, does this match the
3025/// "while(i--)" pattern?
3026static bool matchesPostDecrInWhile(const UnaryOperator *UO, bool isInc,
3027 bool isPre, ASTContext &Ctx) {
3028 if (isInc || isPre)
3029 return false;
3030
3031 // -fsanitize-undefined-ignore-overflow-pattern=unsigned-post-decr-while
3034 return false;
3035
3036 // all Parents (usually just one) must be a WhileStmt
3037 for (const auto &Parent : Ctx.getParentMapContext().getParents(*UO))
3038 if (!Parent.get<WhileStmt>())
3039 return false;
3040
3041 return true;
3042}
3043
3044namespace {
3045/// Handles check and update for lastprivate conditional variables.
3046class OMPLastprivateConditionalUpdateRAII {
3047private:
3048 CodeGenFunction &CGF;
3049 const UnaryOperator *E;
3050
3051public:
3052 OMPLastprivateConditionalUpdateRAII(CodeGenFunction &CGF,
3053 const UnaryOperator *E)
3054 : CGF(CGF), E(E) {}
3055 ~OMPLastprivateConditionalUpdateRAII() {
3056 if (CGF.getLangOpts().OpenMP)
3058 CGF, E->getSubExpr());
3059 }
3060};
3061} // namespace
3062
3063llvm::Value *
3064ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
3065 bool isInc, bool isPre) {
3066 ApplyAtomGroup Grp(CGF.getDebugInfo());
3067 OMPLastprivateConditionalUpdateRAII OMPRegion(CGF, E);
3068 QualType type = E->getSubExpr()->getType();
3069 llvm::PHINode *atomicPHI = nullptr;
3070 llvm::Value *value;
3071 llvm::Value *input;
3072 llvm::Value *Previous = nullptr;
3073 QualType SrcType = E->getType();
3074
3075 int amount = (isInc ? 1 : -1);
3076 bool isSubtraction = !isInc;
3077
3078 if (const AtomicType *atomicTy = type->getAs<AtomicType>()) {
3079 type = atomicTy->getValueType();
3080 if (isInc && type->isBooleanType()) {
3081 llvm::Value *True = CGF.EmitToMemory(Builder.getTrue(), type);
3082 if (isPre) {
3083 Builder.CreateStore(True, LV.getAddress(), LV.isVolatileQualified())
3084 ->setAtomic(llvm::AtomicOrdering::SequentiallyConsistent);
3085 return Builder.getTrue();
3086 }
3087 // For atomic bool increment, we just store true and return it for
3088 // preincrement, do an atomic swap with true for postincrement
3089 return Builder.CreateAtomicRMW(
3090 llvm::AtomicRMWInst::Xchg, LV.getAddress(), True,
3091 llvm::AtomicOrdering::SequentiallyConsistent);
3092 }
3093 // Special case for atomic increment / decrement on integers, emit
3094 // atomicrmw instructions. We skip this if we want to be doing overflow
3095 // checking, and fall into the slow path with the atomic cmpxchg loop.
3096 if (!type->isBooleanType() && type->isIntegerType() &&
3097 !(type->isUnsignedIntegerType() &&
3098 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) &&
3099 CGF.getLangOpts().getSignedOverflowBehavior() !=
3100 LangOptions::SOB_Trapping) {
3101 llvm::AtomicRMWInst::BinOp aop = isInc ? llvm::AtomicRMWInst::Add :
3102 llvm::AtomicRMWInst::Sub;
3103 llvm::Instruction::BinaryOps op = isInc ? llvm::Instruction::Add :
3104 llvm::Instruction::Sub;
3105 llvm::Value *amt = CGF.EmitToMemory(
3106 llvm::ConstantInt::get(ConvertType(type), 1, true), type);
3107 llvm::Value *old =
3108 Builder.CreateAtomicRMW(aop, LV.getAddress(), amt,
3109 llvm::AtomicOrdering::SequentiallyConsistent);
3110 return isPre ? Builder.CreateBinOp(op, old, amt) : old;
3111 }
3112 // Special case for atomic increment/decrement on floats.
3113 // Bail out non-power-of-2-sized floating point types (e.g., x86_fp80).
3114 if (type->isFloatingType()) {
3115 llvm::Type *Ty = ConvertType(type);
3116 if (llvm::has_single_bit(Ty->getScalarSizeInBits())) {
3117 llvm::AtomicRMWInst::BinOp aop =
3118 isInc ? llvm::AtomicRMWInst::FAdd : llvm::AtomicRMWInst::FSub;
3119 llvm::Instruction::BinaryOps op =
3120 isInc ? llvm::Instruction::FAdd : llvm::Instruction::FSub;
3121 llvm::Value *amt = llvm::ConstantFP::get(Ty, 1.0);
3122 llvm::AtomicRMWInst *old =
3123 CGF.emitAtomicRMWInst(aop, LV.getAddress(), amt,
3124 llvm::AtomicOrdering::SequentiallyConsistent);
3125
3126 return isPre ? Builder.CreateBinOp(op, old, amt) : old;
3127 }
3128 }
3129 value = EmitLoadOfLValue(LV, E->getExprLoc());
3130 input = value;
3131 // For every other atomic operation, we need to emit a load-op-cmpxchg loop
3132 llvm::BasicBlock *startBB = Builder.GetInsertBlock();
3133 llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn);
3134 value = CGF.EmitToMemory(value, type);
3135 Builder.CreateBr(opBB);
3136 Builder.SetInsertPoint(opBB);
3137 atomicPHI = Builder.CreatePHI(value->getType(), 2);
3138 atomicPHI->addIncoming(value, startBB);
3139 value = atomicPHI;
3140 } else {
3141 value = EmitLoadOfLValue(LV, E->getExprLoc());
3142 input = value;
3143 }
3144
3145 // Special case of integer increment that we have to check first: bool++.
3146 // Due to promotion rules, we get:
3147 // bool++ -> bool = bool + 1
3148 // -> bool = (int)bool + 1
3149 // -> bool = ((int)bool + 1 != 0)
3150 // An interesting aspect of this is that increment is always true.
3151 // Decrement does not have this property.
3152 if (isInc && type->isBooleanType()) {
3153 value = Builder.getTrue();
3154
3155 // Most common case by far: integer increment.
3156 } else if (type->isIntegerType()) {
3157 QualType promotedType;
3158 bool canPerformLossyDemotionCheck = false;
3159
3160 bool excludeOverflowPattern =
3161 matchesPostDecrInWhile(E, isInc, isPre, CGF.getContext());
3162
3164 promotedType = CGF.getContext().getPromotedIntegerType(type);
3165 assert(promotedType != type && "Shouldn't promote to the same type.");
3166 canPerformLossyDemotionCheck = true;
3167 canPerformLossyDemotionCheck &=
3169 CGF.getContext().getCanonicalType(promotedType);
3170 canPerformLossyDemotionCheck &=
3172 type, promotedType);
3173 assert((!canPerformLossyDemotionCheck ||
3174 type->isSignedIntegerOrEnumerationType() ||
3175 promotedType->isSignedIntegerOrEnumerationType() ||
3176 ConvertType(type)->getScalarSizeInBits() ==
3177 ConvertType(promotedType)->getScalarSizeInBits()) &&
3178 "The following check expects that if we do promotion to different "
3179 "underlying canonical type, at least one of the types (either "
3180 "base or promoted) will be signed, or the bitwidths will match.");
3181 }
3182 if (CGF.SanOpts.hasOneOf(
3183 SanitizerKind::ImplicitIntegerArithmeticValueChange |
3184 SanitizerKind::ImplicitBitfieldConversion) &&
3185 canPerformLossyDemotionCheck) {
3186 // While `x += 1` (for `x` with width less than int) is modeled as
3187 // promotion+arithmetics+demotion, and we can catch lossy demotion with
3188 // ease; inc/dec with width less than int can't overflow because of
3189 // promotion rules, so we omit promotion+demotion, which means that we can
3190 // not catch lossy "demotion". Because we still want to catch these cases
3191 // when the sanitizer is enabled, we perform the promotion, then perform
3192 // the increment/decrement in the wider type, and finally
3193 // perform the demotion. This will catch lossy demotions.
3194
3195 // We have a special case for bitfields defined using all the bits of the
3196 // type. In this case we need to do the same trick as for the integer
3197 // sanitizer checks, i.e., promotion -> increment/decrement -> demotion.
3198
3199 value = EmitScalarConversion(value, type, promotedType, E->getExprLoc());
3200 Value *amt = llvm::ConstantInt::get(value->getType(), amount, true);
3201 value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
3202 // Do pass non-default ScalarConversionOpts so that sanitizer check is
3203 // emitted if LV is not a bitfield, otherwise the bitfield sanitizer
3204 // checks will take care of the conversion.
3205 ScalarConversionOpts Opts;
3206 if (!LV.isBitField())
3207 Opts = ScalarConversionOpts(CGF.SanOpts);
3208 else if (CGF.SanOpts.has(SanitizerKind::ImplicitBitfieldConversion)) {
3209 Previous = value;
3210 SrcType = promotedType;
3211 }
3212
3213 value = EmitScalarConversion(value, promotedType, type, E->getExprLoc(),
3214 Opts);
3215
3216 // Note that signed integer inc/dec with width less than int can't
3217 // overflow because of promotion rules; we're just eliding a few steps
3218 // here.
3219 } else if (E->canOverflow() && type->isSignedIntegerOrEnumerationType()) {
3220 value = EmitIncDecConsiderOverflowBehavior(E, value, isInc);
3221 } else if (E->canOverflow() && type->isUnsignedIntegerType() &&
3222 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
3223 !excludeOverflowPattern &&
3225 SanitizerKind::UnsignedIntegerOverflow, E->getType())) {
3226 value = EmitOverflowCheckedBinOp(createBinOpInfoFromIncDec(
3227 E, value, isInc, E->getFPFeaturesInEffect(CGF.getLangOpts())));
3228 } else {
3229 llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount, true);
3230 value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
3231 }
3232
3233 // Next most common: pointer increment.
3234 } else if (const PointerType *ptr = type->getAs<PointerType>()) {
3235 QualType type = ptr->getPointeeType();
3236
3237 // VLA types don't have constant size.
3238 if (const VariableArrayType *vla
3240 llvm::Value *numElts = CGF.getVLASize(vla).NumElts;
3241 if (!isInc) numElts = Builder.CreateNSWNeg(numElts, "vla.negsize");
3242 llvm::Type *elemTy = CGF.ConvertTypeForMem(vla->getElementType());
3243 if (CGF.getLangOpts().PointerOverflowDefined)
3244 value = Builder.CreateGEP(elemTy, value, numElts, "vla.inc");
3245 else
3246 value = CGF.EmitCheckedInBoundsGEP(
3247 elemTy, value, numElts, /*SignedIndices=*/false, isSubtraction,
3248 E->getExprLoc(), "vla.inc");
3249
3250 // Arithmetic on function pointers (!) is just +-1.
3251 } else if (type->isFunctionType()) {
3252 llvm::Value *amt = Builder.getInt32(amount);
3253
3254 if (CGF.getLangOpts().PointerOverflowDefined)
3255 value = Builder.CreateGEP(CGF.Int8Ty, value, amt, "incdec.funcptr");
3256 else
3257 value =
3258 CGF.EmitCheckedInBoundsGEP(CGF.Int8Ty, value, amt,
3259 /*SignedIndices=*/false, isSubtraction,
3260 E->getExprLoc(), "incdec.funcptr");
3261
3262 // For everything else, we can just do a simple increment.
3263 } else {
3264 llvm::Value *amt = Builder.getInt32(amount);
3265 llvm::Type *elemTy = CGF.ConvertTypeForMem(type);
3266 if (CGF.getLangOpts().PointerOverflowDefined)
3267 value = Builder.CreateGEP(elemTy, value, amt, "incdec.ptr");
3268 else
3269 value = CGF.EmitCheckedInBoundsGEP(
3270 elemTy, value, amt, /*SignedIndices=*/false, isSubtraction,
3271 E->getExprLoc(), "incdec.ptr");
3272 }
3273
3274 // Vector increment/decrement.
3275 } else if (type->isVectorType()) {
3276 if (type->hasIntegerRepresentation()) {
3277 llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount);
3278
3279 value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
3280 } else {
3281 value = Builder.CreateFAdd(
3282 value,
3283 llvm::ConstantFP::get(value->getType(), amount),
3284 isInc ? "inc" : "dec");
3285 }
3286
3287 // Floating point.
3288 } else if (type->isRealFloatingType()) {
3289 // Add the inc/dec to the real part.
3290 llvm::Value *amt;
3291 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
3292
3293 if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
3294 // Another special case: half FP increment should be done via float
3296 value = Builder.CreateCall(
3297 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16,
3298 CGF.CGM.FloatTy),
3299 input, "incdec.conv");
3300 } else {
3301 value = Builder.CreateFPExt(input, CGF.CGM.FloatTy, "incdec.conv");
3302 }
3303 }
3304
3305 if (value->getType()->isFloatTy())
3306 amt = llvm::ConstantFP::get(VMContext,
3307 llvm::APFloat(static_cast<float>(amount)));
3308 else if (value->getType()->isDoubleTy())
3309 amt = llvm::ConstantFP::get(VMContext,
3310 llvm::APFloat(static_cast<double>(amount)));
3311 else {
3312 // Remaining types are Half, Bfloat16, LongDouble, __ibm128 or __float128.
3313 // Convert from float.
3314 llvm::APFloat F(static_cast<float>(amount));
3315 bool ignored;
3316 const llvm::fltSemantics *FS;
3317 // Don't use getFloatTypeSemantics because Half isn't
3318 // necessarily represented using the "half" LLVM type.
3319 if (value->getType()->isFP128Ty())
3320 FS = &CGF.getTarget().getFloat128Format();
3321 else if (value->getType()->isHalfTy())
3322 FS = &CGF.getTarget().getHalfFormat();
3323 else if (value->getType()->isBFloatTy())
3324 FS = &CGF.getTarget().getBFloat16Format();
3325 else if (value->getType()->isPPC_FP128Ty())
3326 FS = &CGF.getTarget().getIbm128Format();
3327 else
3328 FS = &CGF.getTarget().getLongDoubleFormat();
3329 F.convert(*FS, llvm::APFloat::rmTowardZero, &ignored);
3330 amt = llvm::ConstantFP::get(VMContext, F);
3331 }
3332 value = Builder.CreateFAdd(value, amt, isInc ? "inc" : "dec");
3333
3334 if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
3336 value = Builder.CreateCall(
3337 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16,
3338 CGF.CGM.FloatTy),
3339 value, "incdec.conv");
3340 } else {
3341 value = Builder.CreateFPTrunc(value, input->getType(), "incdec.conv");
3342 }
3343 }
3344
3345 // Fixed-point types.
3346 } else if (type->isFixedPointType()) {
3347 // Fixed-point types are tricky. In some cases, it isn't possible to
3348 // represent a 1 or a -1 in the type at all. Piggyback off of
3349 // EmitFixedPointBinOp to avoid having to reimplement saturation.
3350 BinOpInfo Info;
3351 Info.E = E;
3352 Info.Ty = E->getType();
3353 Info.Opcode = isInc ? BO_Add : BO_Sub;
3354 Info.LHS = value;
3355 Info.RHS = llvm::ConstantInt::get(value->getType(), 1, false);
3356 // If the type is signed, it's better to represent this as +(-1) or -(-1),
3357 // since -1 is guaranteed to be representable.
3358 if (type->isSignedFixedPointType()) {
3359 Info.Opcode = isInc ? BO_Sub : BO_Add;
3360 Info.RHS = Builder.CreateNeg(Info.RHS);
3361 }
3362 // Now, convert from our invented integer literal to the type of the unary
3363 // op. This will upscale and saturate if necessary. This value can become
3364 // undef in some cases.
3365 llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
3366 auto DstSema = CGF.getContext().getFixedPointSemantics(Info.Ty);
3367 Info.RHS = FPBuilder.CreateIntegerToFixed(Info.RHS, true, DstSema);
3368 value = EmitFixedPointBinOp(Info);
3369
3370 // Objective-C pointer types.
3371 } else {
3372 const ObjCObjectPointerType *OPT = type->castAs<ObjCObjectPointerType>();
3373
3374 CharUnits size = CGF.getContext().getTypeSizeInChars(OPT->getObjectType());
3375 if (!isInc) size = -size;
3376 llvm::Value *sizeValue =
3377 llvm::ConstantInt::get(CGF.SizeTy, size.getQuantity());
3378
3379 if (CGF.getLangOpts().PointerOverflowDefined)
3380 value = Builder.CreateGEP(CGF.Int8Ty, value, sizeValue, "incdec.objptr");
3381 else
3382 value = CGF.EmitCheckedInBoundsGEP(
3383 CGF.Int8Ty, value, sizeValue, /*SignedIndices=*/false, isSubtraction,
3384 E->getExprLoc(), "incdec.objptr");
3385 value = Builder.CreateBitCast(value, input->getType());
3386 }
3387
3388 if (atomicPHI) {
3389 llvm::BasicBlock *curBlock = Builder.GetInsertBlock();
3390 llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
3391 auto Pair = CGF.EmitAtomicCompareExchange(
3392 LV, RValue::get(atomicPHI), RValue::get(value), E->getExprLoc());
3393 llvm::Value *old = CGF.EmitToMemory(Pair.first.getScalarVal(), type);
3394 llvm::Value *success = Pair.second;
3395 atomicPHI->addIncoming(old, curBlock);
3396 Builder.CreateCondBr(success, contBB, atomicPHI->getParent());
3397 Builder.SetInsertPoint(contBB);
3398 return isPre ? value : input;
3399 }
3400
3401 // Store the updated result through the lvalue.
3402 if (LV.isBitField()) {
3403 Value *Src = Previous ? Previous : value;
3404 CGF.EmitStoreThroughBitfieldLValue(RValue::get(value), LV, &value);
3405 CGF.EmitBitfieldConversionCheck(Src, SrcType, value, E->getType(),
3406 LV.getBitFieldInfo(), E->getExprLoc());
3407 } else
3408 CGF.EmitStoreThroughLValue(RValue::get(value), LV);
3409
3410 // If this is a postinc, return the value read from memory, otherwise use the
3411 // updated value.
3412 return isPre ? value : input;
3413}
3414
3415
3416Value *ScalarExprEmitter::VisitUnaryPlus(const UnaryOperator *E,
3417 QualType PromotionType) {
3418 QualType promotionTy = PromotionType.isNull()
3419 ? getPromotionType(E->getSubExpr()->getType())
3420 : PromotionType;
3421 Value *result = VisitPlus(E, promotionTy);
3422 if (result && !promotionTy.isNull())
3423 result = EmitUnPromotedValue(result, E->getType());
3424 return result;
3425}
3426
3427Value *ScalarExprEmitter::VisitPlus(const UnaryOperator *E,
3428 QualType PromotionType) {
3429 // This differs from gcc, though, most likely due to a bug in gcc.
3430 TestAndClearIgnoreResultAssign();
3431 if (!PromotionType.isNull())
3432 return CGF.EmitPromotedScalarExpr(E->getSubExpr(), PromotionType);
3433 return Visit(E->getSubExpr());
3434}
3435
3436Value *ScalarExprEmitter::VisitUnaryMinus(const UnaryOperator *E,
3437 QualType PromotionType) {
3438 QualType promotionTy = PromotionType.isNull()
3439 ? getPromotionType(E->getSubExpr()->getType())
3440 : PromotionType;
3441 Value *result = VisitMinus(E, promotionTy);
3442 if (result && !promotionTy.isNull())
3443 result = EmitUnPromotedValue(result, E->getType());
3444 return result;
3445}
3446
3447Value *ScalarExprEmitter::VisitMinus(const UnaryOperator *E,
3448 QualType PromotionType) {
3449 TestAndClearIgnoreResultAssign();
3450 Value *Op;
3451 if (!PromotionType.isNull())
3452 Op = CGF.EmitPromotedScalarExpr(E->getSubExpr(), PromotionType);
3453 else
3454 Op = Visit(E->getSubExpr());
3455
3456 // Generate a unary FNeg for FP ops.
3457 if (Op->getType()->isFPOrFPVectorTy())
3458 return Builder.CreateFNeg(Op, "fneg");
3459
3460 // Emit unary minus with EmitSub so we handle overflow cases etc.
3461 BinOpInfo BinOp;
3462 BinOp.RHS = Op;
3463 BinOp.LHS = llvm::Constant::getNullValue(BinOp.RHS->getType());
3464 BinOp.Ty = E->getType();
3465 BinOp.Opcode = BO_Sub;
3466 BinOp.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());
3467 BinOp.E = E;
3468 return EmitSub(BinOp);
3469}
3470
3471Value *ScalarExprEmitter::VisitUnaryNot(const UnaryOperator *E) {
3472 TestAndClearIgnoreResultAssign();
3473 Value *Op = Visit(E->getSubExpr());
3474 return Builder.CreateNot(Op, "not");
3475}
3476
3477Value *ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) {
3478 // Perform vector logical not on comparison with zero vector.
3479 if (E->getType()->isVectorType() &&
3480 E->getType()->castAs<VectorType>()->getVectorKind() ==
3481 VectorKind::Generic) {
3482 Value *Oper = Visit(E->getSubExpr());
3483 Value *Zero = llvm::Constant::getNullValue(Oper->getType());
3484 Value *Result;
3485 if (Oper->getType()->isFPOrFPVectorTy()) {
3486 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
3487 CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
3488 Result = Builder.CreateFCmp(llvm::CmpInst::FCMP_OEQ, Oper, Zero, "cmp");
3489 } else
3490 Result = Builder.CreateICmp(llvm::CmpInst::ICMP_EQ, Oper, Zero, "cmp");
3491 return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");
3492 }
3493
3494 // Compare operand to zero.
3495 Value *BoolVal = CGF.EvaluateExprAsBool(E->getSubExpr());
3496
3497 // Invert value.
3498 // TODO: Could dynamically modify easy computations here. For example, if
3499 // the operand is an icmp ne, turn into icmp eq.
3500 BoolVal = Builder.CreateNot(BoolVal, "lnot");
3501
3502 // ZExt result to the expr type.
3503 return Builder.CreateZExt(BoolVal, ConvertType(E->getType()), "lnot.ext");
3504}
3505
/// Emit __builtin_offsetof. Fast path: fold the whole expression to an
/// integer constant. Otherwise walk the component list (array indices,
/// fields, non-virtual bases), accumulating a byte offset in the result type.
3506Value *ScalarExprEmitter::VisitOffsetOfExpr(OffsetOfExpr *E) {
3507 // Try folding the offsetof to a constant.
3508 Expr::EvalResult EVResult;
3509 if (E->EvaluateAsInt(EVResult, CGF.getContext())) {
3510 llvm::APSInt Value = EVResult.Val.getInt();
3511 return Builder.getInt(Value);
3512 }
3513
3514 // Loop over the components of the offsetof to compute the value.
3515 unsigned n = E->getNumComponents();
3516 llvm::Type* ResultType = ConvertType(E->getType());
3517 llvm::Value* Result = llvm::Constant::getNullValue(ResultType);
3518 QualType CurrentType = E->getTypeSourceInfo()->getType();
3519 for (unsigned i = 0; i != n; ++i) {
3520 OffsetOfNode ON = E->getComponent(i);
3521 llvm::Value *Offset = nullptr;
3522 switch (ON.getKind()) {
3523 case OffsetOfNode::Array: {
3524 // Compute the index
3525 Expr *IdxExpr = E->getIndexExpr(ON.getArrayExprIndex());
3526 llvm::Value* Idx = CGF.EmitScalarExpr(IdxExpr);
3527 bool IdxSigned = IdxExpr->getType()->isSignedIntegerOrEnumerationType();
3528 Idx = Builder.CreateIntCast(Idx, ResultType, IdxSigned, "conv");
3529
3530 // Save the element type
3531 CurrentType =
3532 CGF.getContext().getAsArrayType(CurrentType)->getElementType();
3533
3534 // Compute the element size
3535 llvm::Value* ElemSize = llvm::ConstantInt::get(ResultType,
3536 CGF.getContext().getTypeSizeInChars(CurrentType).getQuantity());
3537
3538 // Multiply out to compute the result
3539 Offset = Builder.CreateMul(Idx, ElemSize);
3540 break;
3541 }
3542
3543 case OffsetOfNode::Field: {
3544 FieldDecl *MemberDecl = ON.getField();
3545 auto *RD = CurrentType->castAsRecordDecl();
3546 const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD);
3547
3548 // Compute the index of the field in its parent.
3549 unsigned i = 0;
3550 // FIXME: It would be nice if we didn't have to loop here!
3551 for (RecordDecl::field_iterator Field = RD->field_begin(),
3552 FieldEnd = RD->field_end();
3553 Field != FieldEnd; ++Field, ++i) {
3554 if (*Field == MemberDecl)
3555 break;
3556 }
3557 assert(i < RL.getFieldCount() && "offsetof field in wrong type");
3558
3559 // Compute the offset to the field
3560 int64_t OffsetInt = RL.getFieldOffset(i) /
3561 CGF.getContext().getCharWidth();
3562 Offset = llvm::ConstantInt::get(ResultType, OffsetInt);
3563
3564 // Save the element type.
3565 CurrentType = MemberDecl->getType();
3566 break;
3567 }
3568
 // NOTE(review): the `case OffsetOfNode::Identifier:` label (upstream line
 // 3569) appears to have been dropped from this copy; the unreachable below
 // belongs to it. Restore before compiling.
3570 llvm_unreachable("dependent __builtin_offsetof");
3571
3572 case OffsetOfNode::Base: {
3573 if (ON.getBase()->isVirtual()) {
 // Virtual bases have no statically-known offset; report and skip.
3574 CGF.ErrorUnsupported(E, "virtual base in offsetof");
3575 continue;
3576 }
3577
3578 const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(
3579 CurrentType->castAsCanonical<RecordType>()->getOriginalDecl());
3580
3581 // Save the element type.
3582 CurrentType = ON.getBase()->getType();
3583
3584 // Compute the offset to the base.
3585 auto *BaseRD = CurrentType->castAsCXXRecordDecl();
3586 CharUnits OffsetInt = RL.getBaseClassOffset(BaseRD);
3587 Offset = llvm::ConstantInt::get(ResultType, OffsetInt.getQuantity());
3588 break;
3589 }
3590 }
 // Accumulate this component's contribution into the running offset.
3591 Result = Builder.CreateAdd(Result, Offset);
3592 }
3593 return Result;
3594}
3595
3596/// VisitUnaryExprOrTypeTraitExpr - Return the size or alignment of the type of
3597/// argument of the sizeof expression as an integer.
/// Handles sizeof/__datasizeof/_Countof (with VLA support), the OpenMP
/// required-simd-align trait, and __builtin_vectorelements; anything else
/// must be a compile-time constant and is folded at the bottom.
3598Value *
3599ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr(
3600 const UnaryExprOrTypeTraitExpr *E) {
3601 QualType TypeToSize = E->getTypeOfArgument();
3602 if (auto Kind = E->getKind();
3603 Kind == UETT_SizeOf || Kind == UETT_DataSizeOf || Kind == UETT_CountOf) {
3604 if (const VariableArrayType *VAT =
3605 CGF.getContext().getAsVariableArrayType(TypeToSize)) {
3606 // For _Countof, we only want to evaluate if the extent is actually
3607 // variable as opposed to a multi-dimensional array whose extent is
3608 // constant but whose element type is variable.
3609 bool EvaluateExtent = true;
3610 if (Kind == UETT_CountOf && VAT->getElementType()->isArrayType()) {
3611 EvaluateExtent =
3612 !VAT->getSizeExpr()->isIntegerConstantExpr(CGF.getContext());
3613 }
3614 if (EvaluateExtent) {
3615 if (E->isArgumentType()) {
3616 // sizeof(type) - make sure to emit the VLA size.
3617 CGF.EmitVariablyModifiedType(TypeToSize);
3618 } else {
3619 // C99 6.5.3.4p2: If the argument is an expression of type
3620 // VLA, it is evaluated.
 // NOTE(review): the statement that evaluates the argument expression
 // (upstream line 3621) appears to be missing from this copy — verify
 // against upstream before compiling.
3622 }
3623
3624 // For _Countof, we just want to return the size of a single dimension.
3625 if (Kind == UETT_CountOf)
3626 return CGF.getVLAElements1D(VAT).NumElts;
3627
3628 // For sizeof and __datasizeof, we need to scale the number of elements
3629 // by the size of the array element type.
3630 auto VlaSize = CGF.getVLASize(VAT);
3631
3632 // Scale the number of non-VLA elements by the non-VLA element size.
3633 CharUnits eltSize = CGF.getContext().getTypeSizeInChars(VlaSize.Type);
3634 if (!eltSize.isOne())
3635 return CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize),
3636 VlaSize.NumElts);
3637 return VlaSize.NumElts;
3638 }
3639 }
3640 } else if (E->getKind() == UETT_OpenMPRequiredSimdAlign) {
3641 auto Alignment =
3642 CGF.getContext()
 // NOTE(review): the intermediate calls computing the simd alignment
 // (upstream lines 3643-3644) appear to be missing from this copy —
 // restore before compiling.
3645 .getQuantity();
3646 return llvm::ConstantInt::get(CGF.SizeTy, Alignment);
3647 } else if (E->getKind() == UETT_VectorElements) {
 // Element count of a vector type; works for scalable vectors too.
3648 auto *VecTy = cast<llvm::VectorType>(ConvertType(E->getTypeOfArgument()));
3649 return Builder.CreateElementCount(CGF.SizeTy, VecTy->getElementCount());
3650 }
3651
3652 // If this isn't sizeof(vla), the result must be constant; use the constant
3653 // folding logic so we don't have to duplicate it here.
3654 return Builder.getInt(E->EvaluateKnownConstInt(CGF.getContext()));
3655}
3656
3657Value *ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *E,
3658 QualType PromotionType) {
3659 QualType promotionTy = PromotionType.isNull()
3660 ? getPromotionType(E->getSubExpr()->getType())
3661 : PromotionType;
3662 Value *result = VisitReal(E, promotionTy);
3663 if (result && !promotionTy.isNull())
3664 result = EmitUnPromotedValue(result, E->getType());
3665 return result;
3666}
3667
/// Emit the real part of a (possibly promoted) operand. For complex
/// operands this loads or projects the real component; for non-complex
/// operands __real is the identity.
3668Value *ScalarExprEmitter::VisitReal(const UnaryOperator *E,
3669 QualType PromotionType) {
3670 Expr *Op = E->getSubExpr();
3671 if (Op->getType()->isAnyComplexType()) {
3672 // If it's an l-value, load through the appropriate subobject l-value.
3673 // Note that we have to ask E because Op might be an l-value that
3674 // this won't work for, e.g. an Obj-C property.
3675 if (E->isGLValue()) {
3676 if (!PromotionType.isNull()) {
 // NOTE(review): the declaration of `result` (upstream line 3677, the
 // promoted complex-load call) appears to be missing from this copy —
 // restore before compiling.
3678 Op, /*IgnoreReal*/ IgnoreResultAssign, /*IgnoreImag*/ true);
 // Promote to the complex form of the requested type when the request
 // was a scalar type.
3679 PromotionType = PromotionType->isAnyComplexType()
3680 ? PromotionType
3681 : CGF.getContext().getComplexType(PromotionType);
3682 return result.first ? CGF.EmitPromotedValue(result, PromotionType).first
3683 : result.first;
3684 }
3685
3686 return CGF.EmitLoadOfLValue(CGF.EmitLValue(E), E->getExprLoc())
3687 .getScalarVal();
3688 }
3689 // Otherwise, calculate and project.
3690 return CGF.EmitComplexExpr(Op, false, true).first;
3691 }
3692
 // Non-complex operand: __real is the identity; just emit (and possibly
 // promote) the operand.
3693 if (!PromotionType.isNull())
3694 return CGF.EmitPromotedScalarExpr(Op, PromotionType);
3695 return Visit(Op);
3696}
3697
3698Value *ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *E,
3699 QualType PromotionType) {
3700 QualType promotionTy = PromotionType.isNull()
3701 ? getPromotionType(E->getSubExpr()->getType())
3702 : PromotionType;
3703 Value *result = VisitImag(E, promotionTy);
3704 if (result && !promotionTy.isNull())
3705 result = EmitUnPromotedValue(result, E->getType());
3706 return result;
3707}
3708
/// Emit the imaginary part of a (possibly promoted) operand. For complex
/// operands this loads or projects the imaginary component; for scalars
/// __imag yields zero (after evaluating the operand for side effects).
3709Value *ScalarExprEmitter::VisitImag(const UnaryOperator *E,
3710 QualType PromotionType) {
3711 Expr *Op = E->getSubExpr();
3712 if (Op->getType()->isAnyComplexType()) {
3713 // If it's an l-value, load through the appropriate subobject l-value.
3714 // Note that we have to ask E because Op might be an l-value that
3715 // this won't work for, e.g. an Obj-C property.
3716 if (Op->isGLValue()) {
3717 if (!PromotionType.isNull()) {
 // NOTE(review): the declaration of `result` (upstream line 3718, the
 // promoted complex-load call) appears to be missing from this copy —
 // restore before compiling.
3719 Op, /*IgnoreReal*/ true, /*IgnoreImag*/ IgnoreResultAssign);
 // Promote to the complex form of the requested type when the request
 // was a scalar type.
3720 PromotionType = PromotionType->isAnyComplexType()
3721 ? PromotionType
3722 : CGF.getContext().getComplexType(PromotionType);
3723 return result.second
3724 ? CGF.EmitPromotedValue(result, PromotionType).second
3725 : result.second;
3726 }
3727
3728 return CGF.EmitLoadOfLValue(CGF.EmitLValue(E), E->getExprLoc())
3729 .getScalarVal();
3730 }
3731 // Otherwise, calculate and project.
3732 return CGF.EmitComplexExpr(Op, true, false).second;
3733 }
3734
3735 // __imag on a scalar returns zero. Emit the subexpr to ensure side
3736 // effects are evaluated, but not the actual value.
3737 if (Op->isGLValue())
3738 CGF.EmitLValue(Op);
3739 else if (!PromotionType.isNull())
3740 CGF.EmitPromotedScalarExpr(Op, PromotionType);
3741 else
3742 CGF.EmitScalarExpr(Op, true);
 // Result is always zero, in the promoted type if one was requested.
3743 if (!PromotionType.isNull())
3744 return llvm::Constant::getNullValue(ConvertType(PromotionType));
3745 return llvm::Constant::getNullValue(ConvertType(E->getType()));
3746}
3747
3748//===----------------------------------------------------------------------===//
3749// Binary Operators
3750//===----------------------------------------------------------------------===//
3751
3752Value *ScalarExprEmitter::EmitPromotedValue(Value *result,
3753 QualType PromotionType) {
3754 return CGF.Builder.CreateFPExt(result, ConvertType(PromotionType), "ext");
3755}
3756
3757Value *ScalarExprEmitter::EmitUnPromotedValue(Value *result,
3758 QualType ExprType) {
3759 return CGF.Builder.CreateFPTrunc(result, ConvertType(ExprType), "unpromotion");
3760}
3761
3762Value *ScalarExprEmitter::EmitPromoted(const Expr *E, QualType PromotionType) {
3763 E = E->IgnoreParens();
3764 if (auto BO = dyn_cast<BinaryOperator>(E)) {
3765 switch (BO->getOpcode()) {
3766#define HANDLE_BINOP(OP) \
3767 case BO_##OP: \
3768 return Emit##OP(EmitBinOps(BO, PromotionType));
3769 HANDLE_BINOP(Add)
3770 HANDLE_BINOP(Sub)
3771 HANDLE_BINOP(Mul)
3772 HANDLE_BINOP(Div)
3773#undef HANDLE_BINOP
3774 default:
3775 break;
3776 }
3777 } else if (auto UO = dyn_cast<UnaryOperator>(E)) {
3778 switch (UO->getOpcode()) {
3779 case UO_Imag:
3780 return VisitImag(UO, PromotionType);
3781 case UO_Real:
3782 return VisitReal(UO, PromotionType);
3783 case UO_Minus:
3784 return VisitMinus(UO, PromotionType);
3785 case UO_Plus:
3786 return VisitPlus(UO, PromotionType);
3787 default:
3788 break;
3789 }
3790 }
3791 auto result = Visit(const_cast<Expr *>(E));
3792 if (result) {
3793 if (!PromotionType.isNull())
3794 return EmitPromotedValue(result, PromotionType);
3795 else
3796 return EmitUnPromotedValue(result, E->getType());
3797 }
3798 return result;
3799}
3800
3801BinOpInfo ScalarExprEmitter::EmitBinOps(const BinaryOperator *E,
3802 QualType PromotionType) {
3803 TestAndClearIgnoreResultAssign();
3804 BinOpInfo Result;
3805 Result.LHS = CGF.EmitPromotedScalarExpr(E->getLHS(), PromotionType);
3806 Result.RHS = CGF.EmitPromotedScalarExpr(E->getRHS(), PromotionType);
3807 if (!PromotionType.isNull())
3808 Result.Ty = PromotionType;
3809 else
3810 Result.Ty = E->getType();
3811 Result.Opcode = E->getOpcode();
3812 Result.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());
3813 Result.E = E;
3814 return Result;
3815}
3816
/// Emit a compound assignment (e.g. +=) and return the LHS lvalue; the
/// computed r-value is returned through \p Result. Handles promotion of
/// both operands to the computation type, a fast atomicrmw path for
/// integral _Atomic LHS, a compare-exchange loop for other atomics, and
/// bitfield stores with optional implicit-conversion checks.
3817LValue ScalarExprEmitter::EmitCompoundAssignLValue(
3818 const CompoundAssignOperator *E,
3819 Value *(ScalarExprEmitter::*Func)(const BinOpInfo &),
3820 Value *&Result) {
3821 QualType LHSTy = E->getLHS()->getType();
3822 BinOpInfo OpInfo;
3823
 // NOTE(review): upstream lines 3824-3825 (the dispatch to the complex
 // compound-assign path for complex computation-result types) appear to be
 // missing from this copy — verify against upstream.
3826
3827 // Emit the RHS first. __block variables need to have the rhs evaluated
3828 // first, plus this should improve codegen a little.
3829
3830 QualType PromotionTypeCR;
3831 PromotionTypeCR = getPromotionType(E->getComputationResultType());
3832 if (PromotionTypeCR.isNull())
3833 PromotionTypeCR = E->getComputationResultType();
3834 QualType PromotionTypeLHS = getPromotionType(E->getComputationLHSType());
3835 QualType PromotionTypeRHS = getPromotionType(E->getRHS()->getType());
3836 if (!PromotionTypeRHS.isNull())
3837 OpInfo.RHS = CGF.EmitPromotedScalarExpr(E->getRHS(), PromotionTypeRHS);
3838 else
3839 OpInfo.RHS = Visit(E->getRHS());
3840 OpInfo.Ty = PromotionTypeCR;
3841 OpInfo.Opcode = E->getOpcode();
3842 OpInfo.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());
3843 OpInfo.E = E;
3844 // Load/convert the LHS.
3845 LValue LHSLV = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
3846
3847 llvm::PHINode *atomicPHI = nullptr;
3848 if (const AtomicType *atomicTy = LHSTy->getAs<AtomicType>()) {
3849 QualType type = atomicTy->getValueType();
 // Fast path: integral atomics (excluding bool, trapping-overflow modes,
 // and unsigned types under the unsigned-overflow sanitizer) can use a
 // single atomicrmw instead of a CAS loop.
3850 if (!type->isBooleanType() && type->isIntegerType() &&
3851 !(type->isUnsignedIntegerType() &&
3852 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) &&
3853 CGF.getLangOpts().getSignedOverflowBehavior() !=
3854 LangOptions::SOB_Trapping) {
3855 llvm::AtomicRMWInst::BinOp AtomicOp = llvm::AtomicRMWInst::BAD_BINOP;
3856 llvm::Instruction::BinaryOps Op;
3857 switch (OpInfo.Opcode) {
3858 // We don't have atomicrmw operands for *, %, /, <<, >>
3859 case BO_MulAssign: case BO_DivAssign:
3860 case BO_RemAssign:
3861 case BO_ShlAssign:
3862 case BO_ShrAssign:
3863 break;
3864 case BO_AddAssign:
3865 AtomicOp = llvm::AtomicRMWInst::Add;
3866 Op = llvm::Instruction::Add;
3867 break;
3868 case BO_SubAssign:
3869 AtomicOp = llvm::AtomicRMWInst::Sub;
3870 Op = llvm::Instruction::Sub;
3871 break;
3872 case BO_AndAssign:
3873 AtomicOp = llvm::AtomicRMWInst::And;
3874 Op = llvm::Instruction::And;
3875 break;
3876 case BO_XorAssign:
3877 AtomicOp = llvm::AtomicRMWInst::Xor;
3878 Op = llvm::Instruction::Xor;
3879 break;
3880 case BO_OrAssign:
3881 AtomicOp = llvm::AtomicRMWInst::Or;
3882 Op = llvm::Instruction::Or;
3883 break;
3884 default:
3885 llvm_unreachable("Invalid compound assignment type");
3886 }
3887 if (AtomicOp != llvm::AtomicRMWInst::BAD_BINOP) {
3888 llvm::Value *Amt = CGF.EmitToMemory(
3889 EmitScalarConversion(OpInfo.RHS, E->getRHS()->getType(), LHSTy,
3890 E->getExprLoc()),
3891 LHSTy);
3892
3893 llvm::AtomicRMWInst *OldVal =
3894 CGF.emitAtomicRMWInst(AtomicOp, LHSLV.getAddress(), Amt);
3895
3896 // Since operation is atomic, the result type is guaranteed to be the
3897 // same as the input in LLVM terms.
3898 Result = Builder.CreateBinOp(Op, OldVal, Amt);
3899 return LHSLV;
3900 }
3901 }
 // Slow atomic path: load, compute, then retry via cmpxchg (closed below
 // where atomicPHI is consumed).
3902 // FIXME: For floating point types, we should be saving and restoring the
3903 // floating point environment in the loop.
3904 llvm::BasicBlock *startBB = Builder.GetInsertBlock();
3905 llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn);
3906 OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc());
3907 OpInfo.LHS = CGF.EmitToMemory(OpInfo.LHS, type);
3908 Builder.CreateBr(opBB);
3909 Builder.SetInsertPoint(opBB);
3910 atomicPHI = Builder.CreatePHI(OpInfo.LHS->getType(), 2);
3911 atomicPHI->addIncoming(OpInfo.LHS, startBB);
3912 OpInfo.LHS = atomicPHI;
3913 }
3914 else
3915 OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc());
3916
3917 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, OpInfo.FPFeatures);
3918 SourceLocation Loc = E->getExprLoc();
 // Convert the loaded LHS into the computation type.
3919 if (!PromotionTypeLHS.isNull())
3920 OpInfo.LHS = EmitScalarConversion(OpInfo.LHS, LHSTy, PromotionTypeLHS,
3921 E->getExprLoc())
3922 else
3923 OpInfo.LHS = EmitScalarConversion(OpInfo.LHS, LHSTy,
3924 E->getComputationLHSType(), Loc);
3925
3926 // Expand the binary operator.
3927 Result = (this->*Func)(OpInfo);
3928
3929 // Convert the result back to the LHS type,
3930 // potentially with Implicit Conversion sanitizer check.
3931 // If LHSLV is a bitfield, use default ScalarConversionOpts
3932 // to avoid emit any implicit integer checks.
3933 Value *Previous = nullptr;
3934 if (LHSLV.isBitField()) {
3935 Previous = Result;
3936 Result = EmitScalarConversion(Result, PromotionTypeCR, LHSTy, Loc);
3937 } else
3938 Result = EmitScalarConversion(Result, PromotionTypeCR, LHSTy, Loc,
3939 ScalarConversionOpts(CGF.SanOpts));
3940
3941 if (atomicPHI) {
 // Close the CAS retry loop: attempt the exchange; on failure feed the
 // freshly-observed value back into the PHI and recompute.
3942 llvm::BasicBlock *curBlock = Builder.GetInsertBlock();
3943 llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
3944 auto Pair = CGF.EmitAtomicCompareExchange(
3945 LHSLV, RValue::get(atomicPHI), RValue::get(Result), E->getExprLoc());
3946 llvm::Value *old = CGF.EmitToMemory(Pair.first.getScalarVal(), LHSTy);
3947 llvm::Value *success = Pair.second;
3948 atomicPHI->addIncoming(old, curBlock);
3949 Builder.CreateCondBr(success, contBB, atomicPHI->getParent());
3950 Builder.SetInsertPoint(contBB);
3951 return LHSLV;
3952 }
3953
3954 // Store the result value into the LHS lvalue. Bit-fields are handled
3955 // specially because the result is altered by the store, i.e., [C99 6.5.16p1]
3956 // 'An assignment expression has the value of the left operand after the
3957 // assignment...'.
3958 if (LHSLV.isBitField()) {
3959 Value *Src = Previous ? Previous : Result;
3960 QualType SrcType = E->getRHS()->getType();
3961 QualType DstType = E->getLHS()->getType();
 // NOTE(review): the bitfield store call (upstream line 3962,
 // EmitStoreThroughBitfieldLValue) appears to be missing from this copy —
 // restore before compiling.
3963 CGF.EmitBitfieldConversionCheck(Src, SrcType, Result, DstType,
3964 LHSLV.getBitFieldInfo(), E->getExprLoc());
3965 } else
 // NOTE(review): the plain store call (upstream line 3966,
 // EmitStoreThroughLValue) appears to be missing from this copy.
3967
3968 if (CGF.getLangOpts().OpenMP)
 // NOTE(review): the call into the OpenMP runtime (upstream line 3969,
 // checkAndEmitLastprivateConditional) appears to be missing here.
3970 E->getLHS());
3971 return LHSLV;
3972}
3973
3974Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E,
3975 Value *(ScalarExprEmitter::*Func)(const BinOpInfo &)) {
3976 bool Ignore = TestAndClearIgnoreResultAssign();
3977 Value *RHS = nullptr;
3978 LValue LHS = EmitCompoundAssignLValue(E, Func, RHS);
3979
3980 // If the result is clearly ignored, return now.
3981 if (Ignore)
3982 return nullptr;
3983
3984 // The result of an assignment in C is the assigned r-value.
3985 if (!CGF.getLangOpts().CPlusPlus)
3986 return RHS;
3987
3988 // If the lvalue is non-volatile, return the computed value of the assignment.
3989 if (!LHS.isVolatileQualified())
3990 return RHS;
3991
3992 // Otherwise, reload the value.
3993 return EmitLoadOfLValue(LHS, E->getExprLoc());
3994}
3995
3996void ScalarExprEmitter::EmitUndefinedBehaviorIntegerDivAndRemCheck(
3997 const BinOpInfo &Ops, llvm::Value *Zero, bool isDiv) {
3998 SmallVector<std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>, 2>
3999 Checks;
4000
4001 if (CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero)) {
4002 Checks.push_back(std::make_pair(Builder.CreateICmpNE(Ops.RHS, Zero),
4003 SanitizerKind::SO_IntegerDivideByZero));
4004 }
4005
4006 const auto *BO = cast<BinaryOperator>(Ops.E);
4007 if (CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow) &&
4008 Ops.Ty->hasSignedIntegerRepresentation() &&
4009 !IsWidenedIntegerOp(CGF.getContext(), BO->getLHS()) &&
4010 Ops.mayHaveIntegerOverflow()) {
4011 llvm::IntegerType *Ty = cast<llvm::IntegerType>(Zero->getType());
4012
4013 llvm::Value *IntMin =
4014 Builder.getInt(llvm::APInt::getSignedMinValue(Ty->getBitWidth()));
4015 llvm::Value *NegOne = llvm::Constant::getAllOnesValue(Ty);
4016
4017 llvm::Value *LHSCmp = Builder.CreateICmpNE(Ops.LHS, IntMin);
4018 llvm::Value *RHSCmp = Builder.CreateICmpNE(Ops.RHS, NegOne);
4019 llvm::Value *NotOverflow = Builder.CreateOr(LHSCmp, RHSCmp, "or");
4020 Checks.push_back(
4021 std::make_pair(NotOverflow, SanitizerKind::SO_SignedIntegerOverflow));
4022 }
4023
4024 if (Checks.size() > 0)
4025 EmitBinOpCheck(Checks, Ops);
4026}
4027
/// Emit a division, with optional UBSan checks, matrix scalar-divide
/// support, FP accuracy metadata, fixed-point handling, and signed/unsigned
/// integer selection.
4028Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
 // Scope the sanitizer debug location to just the checks.
4029 {
4030 SanitizerDebugLocation SanScope(&CGF,
4031 {SanitizerKind::SO_IntegerDivideByZero,
4032 SanitizerKind::SO_SignedIntegerOverflow,
4033 SanitizerKind::SO_FloatDivideByZero},
4034 SanitizerHandler::DivremOverflow);
4035 if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) ||
4036 CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) &&
4037 Ops.Ty->isIntegerType() &&
4038 (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) {
4039 llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
4040 EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, true);
4041 } else if (CGF.SanOpts.has(SanitizerKind::FloatDivideByZero) &&
4042 Ops.Ty->isRealFloatingType() &&
4043 Ops.mayHaveFloatDivisionByZero()) {
 // FP divide-by-zero check: rhs must compare unordered-not-equal to 0.
4044 llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
4045 llvm::Value *NonZero = Builder.CreateFCmpUNE(Ops.RHS, Zero);
4046 EmitBinOpCheck(
4047 std::make_pair(NonZero, SanitizerKind::SO_FloatDivideByZero), Ops);
4048 }
4049 }
4050
4051 if (Ops.Ty->isConstantMatrixType()) {
4052 llvm::MatrixBuilder MB(Builder);
4053 // We need to check the types of the operands of the operator to get the
4054 // correct matrix dimensions.
4055 auto *BO = cast<BinaryOperator>(Ops.E);
4056 (void)BO;
4057 assert(
 // NOTE(review): the assert's condition (upstream line 4058, checking the
 // LHS is a constant matrix type) appears to be missing from this copy —
 // restore before compiling.
4059 "first operand must be a matrix");
4060 assert(BO->getRHS()->getType().getCanonicalType()->isArithmeticType() &&
4061 "second operand must be an arithmetic type");
4062 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
4063 return MB.CreateScalarDiv(Ops.LHS, Ops.RHS,
4064 Ops.Ty->hasUnsignedIntegerRepresentation());
4065 }
4066
4067 if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
4068 llvm::Value *Val;
4069 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
4070 Val = Builder.CreateFDiv(Ops.LHS, Ops.RHS, "div");
 // Attach fpmath accuracy metadata where the target requests it.
4071 CGF.SetDivFPAccuracy(Val);
4072 return Val;
4073 }
4074 else if (Ops.isFixedPointOp())
4075 return EmitFixedPointBinOp(Ops);
4076 else if (Ops.Ty->hasUnsignedIntegerRepresentation())
4077 return Builder.CreateUDiv(Ops.LHS, Ops.RHS, "div");
4078 else
4079 return Builder.CreateSDiv(Ops.LHS, Ops.RHS, "div");
4080}
4081
4082Value *ScalarExprEmitter::EmitRem(const BinOpInfo &Ops) {
4083 // Rem in C can't be a floating point type: C99 6.5.5p2.
4084 if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) ||
4085 CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) &&
4086 Ops.Ty->isIntegerType() &&
4087 (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) {
4088 SanitizerDebugLocation SanScope(&CGF,
4089 {SanitizerKind::SO_IntegerDivideByZero,
4090 SanitizerKind::SO_SignedIntegerOverflow},
4091 SanitizerHandler::DivremOverflow);
4092 llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
4093 EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, false);
4094 }
4095
4096 if (Ops.Ty->hasUnsignedIntegerRepresentation())
4097 return Builder.CreateURem(Ops.LHS, Ops.RHS, "rem");
4098
4099 if (CGF.getLangOpts().HLSL && Ops.Ty->hasFloatingRepresentation())
4100 return Builder.CreateFRem(Ops.LHS, Ops.RHS, "rem");
4101
4102 return Builder.CreateSRem(Ops.LHS, Ops.RHS, "rem");
4103}
4104
// Emit an add/sub/mul together with a runtime overflow check, used for the
// integer-overflow sanitizers, -ftrapv, and user-supplied overflow handlers.
Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
  unsigned IID;
  unsigned OpID = 0;
  SanitizerHandler OverflowKind;

  bool isSigned = Ops.Ty->isSignedIntegerOrEnumerationType();
  // Pick the *.with.overflow intrinsic and a small operation code for the
  // out-of-line handler.
  switch (Ops.Opcode) {
  case BO_Add:
  case BO_AddAssign:
    OpID = 1;
    IID = isSigned ? llvm::Intrinsic::sadd_with_overflow :
                     llvm::Intrinsic::uadd_with_overflow;
    OverflowKind = SanitizerHandler::AddOverflow;
    break;
  case BO_Sub:
  case BO_SubAssign:
    OpID = 2;
    IID = isSigned ? llvm::Intrinsic::ssub_with_overflow :
                     llvm::Intrinsic::usub_with_overflow;
    OverflowKind = SanitizerHandler::SubOverflow;
    break;
  case BO_Mul:
  case BO_MulAssign:
    OpID = 3;
    IID = isSigned ? llvm::Intrinsic::smul_with_overflow :
                     llvm::Intrinsic::umul_with_overflow;
    OverflowKind = SanitizerHandler::MulOverflow;
    break;
  default:
    llvm_unreachable("Unsupported operation for overflow detection");
  }
  // Encode signedness in the low bit of the operation code.
  OpID <<= 1;
  if (isSigned)
    OpID |= 1;

  SanitizerDebugLocation SanScope(&CGF,
                                  {SanitizerKind::SO_SignedIntegerOverflow,
                                   SanitizerKind::SO_UnsignedIntegerOverflow},
                                  OverflowKind);
  llvm::Type *opTy = CGF.CGM.getTypes().ConvertType(Ops.Ty);

  llvm::Function *intrinsic = CGF.CGM.getIntrinsic(IID, opTy);

  // The intrinsic returns a {result, did-overflow} pair.
  Value *resultAndOverflow = Builder.CreateCall(intrinsic, {Ops.LHS, Ops.RHS});
  Value *result = Builder.CreateExtractValue(resultAndOverflow, 0);
  Value *overflow = Builder.CreateExtractValue(resultAndOverflow, 1);

  // Handle overflow with llvm.trap if no custom handler has been specified.
  const std::string *handlerName =
  // NOTE(review): the initializer of handlerName (a line that was a
  // hyperlink in this extraction) is missing here; confirm against the
  // original file.
  if (handlerName->empty()) {
    // If the signed-integer-overflow sanitizer is enabled, emit a call to its
    // runtime. Otherwise, this is a -ftrapv check, so just emit a trap.
    if (!isSigned || CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) {
      llvm::Value *NotOverflow = Builder.CreateNot(overflow);
      // NOTE(review): the declaration line of `Ordinal` is elided in this
      // extraction; confirm against the original file.
                         isSigned ? SanitizerKind::SO_SignedIntegerOverflow
                                  : SanitizerKind::SO_UnsignedIntegerOverflow;
      EmitBinOpCheck(std::make_pair(NotOverflow, Ordinal), Ops);
    } else
      CGF.EmitTrapCheck(Builder.CreateNot(overflow), OverflowKind);
    return result;
  }

  // Branch in case of overflow.
  llvm::BasicBlock *initialBB = Builder.GetInsertBlock();
  llvm::BasicBlock *continueBB =
      CGF.createBasicBlock("nooverflow", CGF.CurFn, initialBB->getNextNode());
  llvm::BasicBlock *overflowBB = CGF.createBasicBlock("overflow", CGF.CurFn);

  Builder.CreateCondBr(overflow, overflowBB, continueBB);

  // If an overflow handler is set, then we want to call it and then use its
  // result, if it returns.
  Builder.SetInsertPoint(overflowBB);

  // Get the overflow handler.
  llvm::Type *Int8Ty = CGF.Int8Ty;
  llvm::Type *argTypes[] = { CGF.Int64Ty, CGF.Int64Ty, Int8Ty, Int8Ty };
  llvm::FunctionType *handlerTy =
      llvm::FunctionType::get(CGF.Int64Ty, argTypes, true);
  llvm::FunctionCallee handler =
      CGF.CGM.CreateRuntimeFunction(handlerTy, *handlerName);

  // Sign extend the args to 64-bit, so that we can use the same handler for
  // all types of overflow.
  llvm::Value *lhs = Builder.CreateSExt(Ops.LHS, CGF.Int64Ty);
  llvm::Value *rhs = Builder.CreateSExt(Ops.RHS, CGF.Int64Ty);

  // Call the handler with the two arguments, the operation, and the size of
  // the result.
  llvm::Value *handlerArgs[] = {
    lhs,
    rhs,
    Builder.getInt8(OpID),
    Builder.getInt8(cast<llvm::IntegerType>(opTy)->getBitWidth())
  };
  llvm::Value *handlerResult =
      CGF.EmitNounwindRuntimeCall(handler, handlerArgs);

  // Truncate the result back to the desired size.
  handlerResult = Builder.CreateTrunc(handlerResult, opTy);
  Builder.CreateBr(continueBB);

  // Merge the normal result with the (truncated) handler result.
  Builder.SetInsertPoint(continueBB);
  llvm::PHINode *phi = Builder.CreatePHI(opTy, 2);
  phi->addIncoming(result, initialBB);
  phi->addIncoming(handlerResult, overflowBB);

  return phi;
}
4216
/// BO_Add/BO_Sub are handled by EmitPointerWithAlignment to preserve alignment
/// information.
/// This function is used for BO_AddAssign/BO_SubAssign.
static Value *emitPointerArithmetic(CodeGenFunction &CGF, const BinOpInfo &op,
                                    bool isSubtraction) {
  // Must have binary (not unary) expr here. Unary pointer
  // increment/decrement doesn't use this path.
  // NOTE(review): the statement binding `expr` (a line that was a hyperlink
  // in this extraction) is missing here; confirm against the original file.

  Value *pointer = op.LHS;
  Expr *pointerOperand = expr->getLHS();
  Value *index = op.RHS;
  Expr *indexOperand = expr->getRHS();

  // In a subtraction, the LHS is always the pointer.
  if (!isSubtraction && !pointer->getType()->isPointerTy()) {
    std::swap(pointer, index);
    std::swap(pointerOperand, indexOperand);
  }

  // Defer the real work to the CodeGenFunction helper.
  return CGF.EmitPointerArithmetic(expr, pointerOperand, pointer, indexOperand,
                                   index, isSubtraction);
}
4240
/// Emit pointer + index arithmetic.
// NOTE(review): the function-definition header line (return type and
// qualified name) is elided in this extraction; confirm against the
// original file.
    const BinaryOperator *BO, Expr *pointerOperand, llvm::Value *pointer,
    Expr *indexOperand, llvm::Value *index, bool isSubtraction) {
  // Whether the index expression is signed decides between sign- and
  // zero-extension when widening the index below.
  bool isSigned = indexOperand->getType()->isSignedIntegerOrEnumerationType();

  unsigned width = cast<llvm::IntegerType>(index->getType())->getBitWidth();
  auto &DL = CGM.getDataLayout();
  auto *PtrTy = cast<llvm::PointerType>(pointer->getType());

  // Some versions of glibc and gcc use idioms (particularly in their malloc
  // routines) that add a pointer-sized integer (known to be a pointer value)
  // to a null pointer in order to cast the value back to an integer or as
  // part of a pointer alignment algorithm. This is undefined behavior, but
  // we'd like to be able to compile programs that use it.
  //
  // Normally, we'd generate a GEP with a null-pointer base here in response
  // to that code, but it's also UB to dereference a pointer created that
  // way. Instead (as an acknowledged hack to tolerate the idiom) we will
  // generate a direct cast of the integer value to a pointer.
  //
  // The idiom (p = nullptr + N) is not met if any of the following are true:
  //
  // The operation is subtraction.
  // The index is not pointer-sized.
  // The pointer type is not byte-sized.
  //
  // Note that we do not suppress the pointer overflow check in this case.
  // NOTE(review): the opening of this `if` (the call detecting the
  // null-pointer arithmetic extension) is elided in this extraction;
  // confirm against the original file.
          getContext(), BO->getOpcode(), pointerOperand, indexOperand)) {
    llvm::Value *Ptr = Builder.CreateIntToPtr(index, pointer->getType());
    if (getLangOpts().PointerOverflowDefined ||
        !SanOpts.has(SanitizerKind::PointerOverflow) ||
        NullPointerIsDefined(Builder.GetInsertBlock()->getParent(),
                             PtrTy->getPointerAddressSpace()))
      return Ptr;
    // The inbounds GEP of null is valid iff the index is zero.
    auto CheckOrdinal = SanitizerKind::SO_PointerOverflow;
    auto CheckHandler = SanitizerHandler::PointerOverflow;
    SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
    llvm::Value *IsZeroIndex = Builder.CreateIsNull(index);
    llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(BO->getExprLoc())};
    llvm::Type *IntPtrTy = DL.getIntPtrType(PtrTy);
    llvm::Value *IntPtr = llvm::Constant::getNullValue(IntPtrTy);
    llvm::Value *ComputedGEP = Builder.CreateZExtOrTrunc(index, IntPtrTy);
    llvm::Value *DynamicArgs[] = {IntPtr, ComputedGEP};
    EmitCheck({{IsZeroIndex, CheckOrdinal}}, CheckHandler, StaticArgs,
              DynamicArgs);
    return Ptr;
  }

  if (width != DL.getIndexTypeSizeInBits(PtrTy)) {
    // Zero-extend or sign-extend the pointer value according to
    // whether the index is signed or not.
    index = Builder.CreateIntCast(index, DL.getIndexType(PtrTy), isSigned,
                                  "idx.ext");
  }

  // If this is subtraction, negate the index.
  if (isSubtraction)
    index = Builder.CreateNeg(index, "idx.neg");

  if (SanOpts.has(SanitizerKind::ArrayBounds))
    EmitBoundsCheck(BO, pointerOperand, index, indexOperand->getType(),
                    /*Accessed*/ false);

  const PointerType *pointerType =
      pointerOperand->getType()->getAs<PointerType>();
  if (!pointerType) {
    // Not an ordinary pointer type: scale the index by the object size by
    // hand and GEP over bytes.
    QualType objectType = pointerOperand->getType()
                              // NOTE(review): a member-call line (presumably
                              // the cast to the ObjC object-pointer type) is
                              // elided in this extraction; confirm upstream.
                              ->getPointeeType();
    llvm::Value *objectSize =
        CGM.getSize(getContext().getTypeSizeInChars(objectType));

    index = Builder.CreateMul(index, objectSize);

    llvm::Value *result = Builder.CreateGEP(Int8Ty, pointer, index, "add.ptr");
    return Builder.CreateBitCast(result, pointer->getType());
  }

  QualType elementType = pointerType->getPointeeType();
  if (const VariableArrayType *vla =
          getContext().getAsVariableArrayType(elementType)) {
    // The element count here is the total number of non-VLA elements.
    llvm::Value *numElements = getVLASize(vla).NumElts;

    // Effectively, the multiply by the VLA size is part of the GEP.
    // GEP indexes are signed, and scaling an index isn't permitted to
    // signed-overflow, so we use the same semantics for our explicit
    // multiply. We suppress this if overflow is not undefined behavior.
    llvm::Type *elemTy = ConvertTypeForMem(vla->getElementType());
    if (getLangOpts().PointerOverflowDefined) {
      index = Builder.CreateMul(index, numElements, "vla.index");
      pointer = Builder.CreateGEP(elemTy, pointer, index, "add.ptr");
    } else {
      index = Builder.CreateNSWMul(index, numElements, "vla.index");
      pointer =
          EmitCheckedInBoundsGEP(elemTy, pointer, index, isSigned,
                                 isSubtraction, BO->getExprLoc(), "add.ptr");
    }
    return pointer;
  }

  // Explicitly handle GNU void* and function pointer arithmetic extensions. The
  // GNU void* casts amount to no-ops since our void* type is i8*, but this is
  // future proof.
  llvm::Type *elemTy;
  if (elementType->isVoidType() || elementType->isFunctionType())
    elemTy = Int8Ty;
  else
    elemTy = ConvertTypeForMem(elementType);

  if (getLangOpts().PointerOverflowDefined)
    return Builder.CreateGEP(elemTy, pointer, index, "add.ptr");

  return EmitCheckedInBoundsGEP(elemTy, pointer, index, isSigned, isSubtraction,
                                BO->getExprLoc(), "add.ptr");
}
4360
4361// Construct an fmuladd intrinsic to represent a fused mul-add of MulOp and
4362// Addend. Use negMul and negAdd to negate the first operand of the Mul or
4363// the add operand respectively. This allows fmuladd to represent a*b-c, or
4364// c-a*b. Patterns in LLVM should catch the negated forms and translate them to
4365// efficient operations.
4366static Value* buildFMulAdd(llvm::Instruction *MulOp, Value *Addend,
4367 const CodeGenFunction &CGF, CGBuilderTy &Builder,
4368 bool negMul, bool negAdd) {
4369 Value *MulOp0 = MulOp->getOperand(0);
4370 Value *MulOp1 = MulOp->getOperand(1);
4371 if (negMul)
4372 MulOp0 = Builder.CreateFNeg(MulOp0, "neg");
4373 if (negAdd)
4374 Addend = Builder.CreateFNeg(Addend, "neg");
4375
4376 Value *FMulAdd = nullptr;
4377 if (Builder.getIsFPConstrained()) {
4378 assert(isa<llvm::ConstrainedFPIntrinsic>(MulOp) &&
4379 "Only constrained operation should be created when Builder is in FP "
4380 "constrained mode");
4381 FMulAdd = Builder.CreateConstrainedFPCall(
4382 CGF.CGM.getIntrinsic(llvm::Intrinsic::experimental_constrained_fmuladd,
4383 Addend->getType()),
4384 {MulOp0, MulOp1, Addend});
4385 } else {
4386 FMulAdd = Builder.CreateCall(
4387 CGF.CGM.getIntrinsic(llvm::Intrinsic::fmuladd, Addend->getType()),
4388 {MulOp0, MulOp1, Addend});
4389 }
4390 MulOp->eraseFromParent();
4391
4392 return FMulAdd;
4393}
4394
4395// Check whether it would be legal to emit an fmuladd intrinsic call to
4396// represent op and if so, build the fmuladd.
4397//
4398// Checks that (a) the operation is fusable, and (b) -ffp-contract=on.
4399// Does NOT check the type of the operation - it's assumed that this function
4400// will be called from contexts where it's known that the type is contractable.
4401static Value* tryEmitFMulAdd(const BinOpInfo &op,
4402 const CodeGenFunction &CGF, CGBuilderTy &Builder,
4403 bool isSub=false) {
4404
4405 assert((op.Opcode == BO_Add || op.Opcode == BO_AddAssign ||
4406 op.Opcode == BO_Sub || op.Opcode == BO_SubAssign) &&
4407 "Only fadd/fsub can be the root of an fmuladd.");
4408
4409 // Check whether this op is marked as fusable.
4410 if (!op.FPFeatures.allowFPContractWithinStatement())
4411 return nullptr;
4412
4413 Value *LHS = op.LHS;
4414 Value *RHS = op.RHS;
4415
4416 // Peek through fneg to look for fmul. Make sure fneg has no users, and that
4417 // it is the only use of its operand.
4418 bool NegLHS = false;
4419 if (auto *LHSUnOp = dyn_cast<llvm::UnaryOperator>(LHS)) {
4420 if (LHSUnOp->getOpcode() == llvm::Instruction::FNeg &&
4421 LHSUnOp->use_empty() && LHSUnOp->getOperand(0)->hasOneUse()) {
4422 LHS = LHSUnOp->getOperand(0);
4423 NegLHS = true;
4424 }
4425 }
4426
4427 bool NegRHS = false;
4428 if (auto *RHSUnOp = dyn_cast<llvm::UnaryOperator>(RHS)) {
4429 if (RHSUnOp->getOpcode() == llvm::Instruction::FNeg &&
4430 RHSUnOp->use_empty() && RHSUnOp->getOperand(0)->hasOneUse()) {
4431 RHS = RHSUnOp->getOperand(0);
4432 NegRHS = true;
4433 }
4434 }
4435
4436 // We have a potentially fusable op. Look for a mul on one of the operands.
4437 // Also, make sure that the mul result isn't used directly. In that case,
4438 // there's no point creating a muladd operation.
4439 if (auto *LHSBinOp = dyn_cast<llvm::BinaryOperator>(LHS)) {
4440 if (LHSBinOp->getOpcode() == llvm::Instruction::FMul &&
4441 (LHSBinOp->use_empty() || NegLHS)) {
4442 // If we looked through fneg, erase it.
4443 if (NegLHS)
4444 cast<llvm::Instruction>(op.LHS)->eraseFromParent();
4445 return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, NegLHS, isSub);
4446 }
4447 }
4448 if (auto *RHSBinOp = dyn_cast<llvm::BinaryOperator>(RHS)) {
4449 if (RHSBinOp->getOpcode() == llvm::Instruction::FMul &&
4450 (RHSBinOp->use_empty() || NegRHS)) {
4451 // If we looked through fneg, erase it.
4452 if (NegRHS)
4453 cast<llvm::Instruction>(op.RHS)->eraseFromParent();
4454 return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub ^ NegRHS, false);
4455 }
4456 }
4457
4458 if (auto *LHSBinOp = dyn_cast<llvm::CallBase>(LHS)) {
4459 if (LHSBinOp->getIntrinsicID() ==
4460 llvm::Intrinsic::experimental_constrained_fmul &&
4461 (LHSBinOp->use_empty() || NegLHS)) {
4462 // If we looked through fneg, erase it.
4463 if (NegLHS)
4464 cast<llvm::Instruction>(op.LHS)->eraseFromParent();
4465 return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, NegLHS, isSub);
4466 }
4467 }
4468 if (auto *RHSBinOp = dyn_cast<llvm::CallBase>(RHS)) {
4469 if (RHSBinOp->getIntrinsicID() ==
4470 llvm::Intrinsic::experimental_constrained_fmul &&
4471 (RHSBinOp->use_empty() || NegRHS)) {
4472 // If we looked through fneg, erase it.
4473 if (NegRHS)
4474 cast<llvm::Instruction>(op.RHS)->eraseFromParent();
4475 return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub ^ NegRHS, false);
4476 }
4477 }
4478
4479 return nullptr;
4480}
4481
// Emit an addition; dispatches between pointer arithmetic, signed/unsigned
// integer (possibly checked), floating-point (with fmuladd fusion), matrix,
// and fixed-point forms.
Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &op) {
  if (op.LHS->getType()->isPointerTy() ||
      op.RHS->getType()->isPointerTy())
  // NOTE(review): the return statement for the pointer-arithmetic case is
  // elided in this extraction; confirm against the original file.

  // Signed integers: wrapping / NSW / checked depending on the signed
  // overflow mode and the sanitizer settings. Each case intentionally
  // falls through to the stricter one when a sanitizer is active.
  if (op.Ty->isSignedIntegerOrEnumerationType()) {
    switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
    case LangOptions::SOB_Defined:
      if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
        return Builder.CreateAdd(op.LHS, op.RHS, "add");
      [[fallthrough]];
    case LangOptions::SOB_Undefined:
      if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
        return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
      [[fallthrough]];
    case LangOptions::SOB_Trapping:
      if (CanElideOverflowCheck(CGF.getContext(), op))
        return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
      return EmitOverflowCheckedBinOp(op);
    }
  }

  // For vector and matrix adds, try to fold into a fmuladd.
  if (op.LHS->getType()->isFPOrFPVectorTy()) {
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
    // Try to form an fmuladd.
    if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder))
      return FMulAdd;
  }

  if (op.Ty->isConstantMatrixType()) {
    llvm::MatrixBuilder MB(Builder);
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
    return MB.CreateAdd(op.LHS, op.RHS);
  }

  // Unsigned-overflow sanitizer check, when it cannot be proven unnecessary.
  if (op.Ty->isUnsignedIntegerType() &&
      CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
      !CanElideOverflowCheck(CGF.getContext(), op))
    return EmitOverflowCheckedBinOp(op);

  if (op.LHS->getType()->isFPOrFPVectorTy()) {
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
    return Builder.CreateFAdd(op.LHS, op.RHS, "add");
  }

  if (op.isFixedPointOp())
    return EmitFixedPointBinOp(op);

  return Builder.CreateAdd(op.LHS, op.RHS, "add");
}
4533
/// The resulting value must be calculated with exact precision, so the operands
/// may not be the same type.
Value *ScalarExprEmitter::EmitFixedPointBinOp(const BinOpInfo &op) {
  using llvm::APSInt;
  using llvm::ConstantInt;

  // This is either a binary operation where at least one of the operands is
  // a fixed-point type, or a unary operation where the operand is a fixed-point
  // type. The result type of a binary operation is determined by
  // Sema::handleFixedPointConversions().
  QualType ResultTy = op.Ty;
  QualType LHSTy, RHSTy;
  if (const auto *BinOp = dyn_cast<BinaryOperator>(op.E)) {
    RHSTy = BinOp->getRHS()->getType();
    if (const auto *CAO = dyn_cast<CompoundAssignOperator>(BinOp)) {
      // For compound assignment, the effective type of the LHS at this point
      // is the computation LHS type, not the actual LHS type, and the final
      // result type is not the type of the expression but rather the
      // computation result type.
      LHSTy = CAO->getComputationLHSType();
      ResultTy = CAO->getComputationResultType();
    } else
      LHSTy = BinOp->getLHS()->getType();
  } else if (const auto *UnOp = dyn_cast<UnaryOperator>(op.E)) {
    // Unary operations use the single operand's type on both sides.
    LHSTy = UnOp->getSubExpr()->getType();
    RHSTy = UnOp->getSubExpr()->getType();
  }
  ASTContext &Ctx = CGF.getContext();
  Value *LHS = op.LHS;
  Value *RHS = op.RHS;

  // Fixed-point semantics (width/scale/signedness/saturation) for each
  // operand type and for the result, plus the common semantics the
  // operation is performed in.
  auto LHSFixedSema = Ctx.getFixedPointSemantics(LHSTy);
  auto RHSFixedSema = Ctx.getFixedPointSemantics(RHSTy);
  auto ResultFixedSema = Ctx.getFixedPointSemantics(ResultTy);
  auto CommonFixedSema = LHSFixedSema.getCommonSemantics(RHSFixedSema);

  // Perform the actual operation.
  Value *Result;
  llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
  switch (op.Opcode) {
  case BO_AddAssign:
  case BO_Add:
    Result = FPBuilder.CreateAdd(LHS, LHSFixedSema, RHS, RHSFixedSema);
    break;
  case BO_SubAssign:
  case BO_Sub:
    Result = FPBuilder.CreateSub(LHS, LHSFixedSema, RHS, RHSFixedSema);
    break;
  case BO_MulAssign:
  case BO_Mul:
    Result = FPBuilder.CreateMul(LHS, LHSFixedSema, RHS, RHSFixedSema);
    break;
  case BO_DivAssign:
  case BO_Div:
    Result = FPBuilder.CreateDiv(LHS, LHSFixedSema, RHS, RHSFixedSema);
    break;
  case BO_ShlAssign:
  case BO_Shl:
    Result = FPBuilder.CreateShl(LHS, LHSFixedSema, RHS);
    break;
  case BO_ShrAssign:
  case BO_Shr:
    Result = FPBuilder.CreateShr(LHS, LHSFixedSema, RHS);
    break;
  // Comparisons produce an i1 directly; no conversion to the result
  // semantics is needed, so they return immediately.
  case BO_LT:
    return FPBuilder.CreateLT(LHS, LHSFixedSema, RHS, RHSFixedSema);
  case BO_GT:
    return FPBuilder.CreateGT(LHS, LHSFixedSema, RHS, RHSFixedSema);
  case BO_LE:
    return FPBuilder.CreateLE(LHS, LHSFixedSema, RHS, RHSFixedSema);
  case BO_GE:
    return FPBuilder.CreateGE(LHS, LHSFixedSema, RHS, RHSFixedSema);
  case BO_EQ:
    // For equality operations, we assume any padding bits on unsigned types are
    // zero'd out. They could be overwritten through non-saturating operations
    // that cause overflow, but this leads to undefined behavior.
    return FPBuilder.CreateEQ(LHS, LHSFixedSema, RHS, RHSFixedSema);
  case BO_NE:
    return FPBuilder.CreateNE(LHS, LHSFixedSema, RHS, RHSFixedSema);
  case BO_Cmp:
  case BO_LAnd:
  case BO_LOr:
    llvm_unreachable("Found unimplemented fixed point binary operation");
  case BO_PtrMemD:
  case BO_PtrMemI:
  case BO_Rem:
  case BO_Xor:
  case BO_And:
  case BO_Or:
  case BO_Assign:
  case BO_RemAssign:
  case BO_AndAssign:
  case BO_XorAssign:
  case BO_OrAssign:
  case BO_Comma:
    llvm_unreachable("Found unsupported binary operation for fixed point types.");
  }

  bool IsShift = BinaryOperator::isShiftOp(op.Opcode) ||
  // NOTE(review): the second operand of this disjunction (a line that was a
  // hyperlink in this extraction) is missing here; confirm against the
  // original file.
  // Convert to the result type.
  return FPBuilder.CreateFixedToFixed(Result, IsShift ? LHSFixedSema
                                                      : CommonFixedSema,
                                      ResultFixedSema);
}
4639
// Emit a subtraction; handles non-pointer subtraction (signed/unsigned
// integer, FP with fmuladd fusion, matrix, fixed-point), pointer-minus-integer,
// and pointer-minus-pointer difference.
Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) {
  // The LHS is always a pointer if either side is.
  if (!op.LHS->getType()->isPointerTy()) {
    if (op.Ty->isSignedIntegerOrEnumerationType()) {
      // Signed subtraction: wrapping / NSW / checked depending on the signed
      // overflow mode and sanitizer settings; cases intentionally fall
      // through to the stricter form when a sanitizer is active.
      switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
      case LangOptions::SOB_Defined:
        if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
          return Builder.CreateSub(op.LHS, op.RHS, "sub");
        [[fallthrough]];
      case LangOptions::SOB_Undefined:
        if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
          return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
        [[fallthrough]];
      case LangOptions::SOB_Trapping:
        if (CanElideOverflowCheck(CGF.getContext(), op))
          return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
        return EmitOverflowCheckedBinOp(op);
      }
    }

    // For vector and matrix subs, try to fold into a fmuladd.
    if (op.LHS->getType()->isFPOrFPVectorTy()) {
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
      // Try to form an fmuladd.
      if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder, true))
        return FMulAdd;
    }

    if (op.Ty->isConstantMatrixType()) {
      llvm::MatrixBuilder MB(Builder);
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
      return MB.CreateSub(op.LHS, op.RHS);
    }

    // Unsigned-overflow sanitizer check when not provably unnecessary.
    if (op.Ty->isUnsignedIntegerType() &&
        CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
        !CanElideOverflowCheck(CGF.getContext(), op))
      return EmitOverflowCheckedBinOp(op);

    if (op.LHS->getType()->isFPOrFPVectorTy()) {
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
      return Builder.CreateFSub(op.LHS, op.RHS, "sub");
    }

    if (op.isFixedPointOp())
      return EmitFixedPointBinOp(op);

    return Builder.CreateSub(op.LHS, op.RHS, "sub");
  }

  // If the RHS is not a pointer, then we have normal pointer
  // arithmetic.
  if (!op.RHS->getType()->isPointerTy())
  // NOTE(review): the pointer-minus-integer return statement is elided in
  // this extraction; confirm against the original file.

  // Otherwise, this is a pointer subtraction.

  // Do the raw subtraction part.
  llvm::Value *LHS
    = Builder.CreatePtrToInt(op.LHS, CGF.PtrDiffTy, "sub.ptr.lhs.cast");
  llvm::Value *RHS
    = Builder.CreatePtrToInt(op.RHS, CGF.PtrDiffTy, "sub.ptr.rhs.cast");
  Value *diffInChars = Builder.CreateSub(LHS, RHS, "sub.ptr.sub");

  // Okay, figure out the element size.
  const BinaryOperator *expr = cast<BinaryOperator>(op.E);
  QualType elementType = expr->getLHS()->getType()->getPointeeType();

  llvm::Value *divisor = nullptr;

  // For a variable-length array, this is going to be non-constant.
  if (const VariableArrayType *vla
        = CGF.getContext().getAsVariableArrayType(elementType)) {
    auto VlaSize = CGF.getVLASize(vla);
    elementType = VlaSize.Type;
    divisor = VlaSize.NumElts;

    // Scale the number of non-VLA elements by the non-VLA element size.
    CharUnits eltSize = CGF.getContext().getTypeSizeInChars(elementType);
    if (!eltSize.isOne())
      divisor = CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize), divisor);

    // For everything else, we can just compute it, safe in the
    // assumption that Sema won't let anything through that we can't
    // safely compute the size of.
  } else {
    CharUnits elementSize;
    // Handle GCC extension for pointer arithmetic on void* and
    // function pointer types.
    if (elementType->isVoidType() || elementType->isFunctionType())
      elementSize = CharUnits::One();
    else
      elementSize = CGF.getContext().getTypeSizeInChars(elementType);

    // Don't even emit the divide for element size of 1.
    if (elementSize.isOne())
      return diffInChars;

    divisor = CGF.CGM.getSize(elementSize);
  }

  // Otherwise, do a full sdiv. This uses the "exact" form of sdiv, since
  // pointer difference in C is only defined in the case where both operands
  // are pointing to elements of an array.
  return Builder.CreateExactSDiv(diffInChars, divisor, "sub.ptr.div");
}
4746
4747Value *ScalarExprEmitter::GetMaximumShiftAmount(Value *LHS, Value *RHS,
4748 bool RHSIsSigned) {
4749 llvm::IntegerType *Ty;
4750 if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(LHS->getType()))
4751 Ty = cast<llvm::IntegerType>(VT->getElementType());
4752 else
4753 Ty = cast<llvm::IntegerType>(LHS->getType());
4754 // For a given type of LHS the maximum shift amount is width(LHS)-1, however
4755 // it can occur that width(LHS)-1 > range(RHS). Since there is no check for
4756 // this in ConstantInt::get, this results in the value getting truncated.
4757 // Constrain the return value to be max(RHS) in this case.
4758 llvm::Type *RHSTy = RHS->getType();
4759 llvm::APInt RHSMax =
4760 RHSIsSigned ? llvm::APInt::getSignedMaxValue(RHSTy->getScalarSizeInBits())
4761 : llvm::APInt::getMaxValue(RHSTy->getScalarSizeInBits());
4762 if (RHSMax.ult(Ty->getBitWidth()))
4763 return llvm::ConstantInt::get(RHSTy, RHSMax);
4764 return llvm::ConstantInt::get(RHSTy, Ty->getBitWidth() - 1);
4765}
4766
4767Value *ScalarExprEmitter::ConstrainShiftValue(Value *LHS, Value *RHS,
4768 const Twine &Name) {
4769 llvm::IntegerType *Ty;
4770 if (auto *VT = dyn_cast<llvm::VectorType>(LHS->getType()))
4771 Ty = cast<llvm::IntegerType>(VT->getElementType());
4772 else
4773 Ty = cast<llvm::IntegerType>(LHS->getType());
4774
4775 if (llvm::isPowerOf2_64(Ty->getBitWidth()))
4776 return Builder.CreateAnd(RHS, GetMaximumShiftAmount(LHS, RHS, false), Name);
4777
4778 return Builder.CreateURem(
4779 RHS, llvm::ConstantInt::get(RHS->getType(), Ty->getBitWidth()), Name);
4780}
4781
// Emit a left shift, with OpenCL/HLSL amount masking or UBSan shift-base /
// shift-exponent checks as configured.
Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) {
  // TODO: This misses out on the sanitizer check below.
  if (Ops.isFixedPointOp())
    return EmitFixedPointBinOp(Ops);

  // LLVM requires the LHS and RHS to be the same type: promote or truncate the
  // RHS to the same size as the LHS.
  Value *RHS = Ops.RHS;
  if (Ops.LHS->getType() != RHS->getType())
    RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");

  // Decide which shift-base / shift-exponent checks are requested for this
  // operand type.
  bool SanitizeSignedBase = CGF.SanOpts.has(SanitizerKind::ShiftBase) &&
                            Ops.Ty->hasSignedIntegerRepresentation() &&
                            // NOTE(review): one conjunct of this condition
                            // (a line that was a hyperlink in this
                            // extraction) is missing here; confirm against
                            // the original file.
                            !CGF.getLangOpts().CPlusPlus20;
  bool SanitizeUnsignedBase =
      CGF.SanOpts.has(SanitizerKind::UnsignedShiftBase) &&
      Ops.Ty->hasUnsignedIntegerRepresentation();
  bool SanitizeBase = SanitizeSignedBase || SanitizeUnsignedBase;
  bool SanitizeExponent = CGF.SanOpts.has(SanitizerKind::ShiftExponent);
  // OpenCL 6.3j: shift values are effectively % word size of LHS.
  if (CGF.getLangOpts().OpenCL || CGF.getLangOpts().HLSL)
    RHS = ConstrainShiftValue(Ops.LHS, RHS, "shl.mask");
  else if ((SanitizeBase || SanitizeExponent) &&
           isa<llvm::IntegerType>(Ops.LHS->getType())) {
    // Collect the ordinals for all checks emitted below.
    SmallVector<SanitizerKind::SanitizerOrdinal, 3> Ordinals;
    if (SanitizeSignedBase)
      Ordinals.push_back(SanitizerKind::SO_ShiftBase);
    if (SanitizeUnsignedBase)
      Ordinals.push_back(SanitizerKind::SO_UnsignedShiftBase);
    if (SanitizeExponent)
      Ordinals.push_back(SanitizerKind::SO_ShiftExponent);

    SanitizerDebugLocation SanScope(&CGF, Ordinals,
                                    SanitizerHandler::ShiftOutOfBounds);
    SmallVector<std::pair<Value *, SanitizerKind::SanitizerOrdinal>, 2> Checks;
    bool RHSIsSigned = Ops.rhsHasSignedIntegerRepresentation();
    llvm::Value *WidthMinusOne =
        GetMaximumShiftAmount(Ops.LHS, Ops.RHS, RHSIsSigned);
    llvm::Value *ValidExponent = Builder.CreateICmpULE(Ops.RHS, WidthMinusOne);

    if (SanitizeExponent) {
      Checks.push_back(
          std::make_pair(ValidExponent, SanitizerKind::SO_ShiftExponent));
    }

    if (SanitizeBase) {
      // Check whether we are shifting any non-zero bits off the top of the
      // integer. We only emit this check if exponent is valid - otherwise
      // instructions below will have undefined behavior themselves.
      llvm::BasicBlock *Orig = Builder.GetInsertBlock();
      llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");
      llvm::BasicBlock *CheckShiftBase = CGF.createBasicBlock("check");
      Builder.CreateCondBr(ValidExponent, CheckShiftBase, Cont);
      llvm::Value *PromotedWidthMinusOne =
          (RHS == Ops.RHS) ? WidthMinusOne
                           : GetMaximumShiftAmount(Ops.LHS, RHS, RHSIsSigned);
      CGF.EmitBlock(CheckShiftBase);
      llvm::Value *BitsShiftedOff = Builder.CreateLShr(
          Ops.LHS, Builder.CreateSub(PromotedWidthMinusOne, RHS, "shl.zeros",
                                     /*NUW*/ true, /*NSW*/ true),
          "shl.check");
      if (SanitizeUnsignedBase || CGF.getLangOpts().CPlusPlus) {
        // In C99, we are not permitted to shift a 1 bit into the sign bit.
        // Under C++11's rules, shifting a 1 bit into the sign bit is
        // OK, but shifting a 1 bit out of it is not. (C89 and C++03 don't
        // define signed left shifts, so we use the C99 and C++11 rules there).
        // Unsigned shifts can always shift into the top bit.
        llvm::Value *One = llvm::ConstantInt::get(BitsShiftedOff->getType(), 1);
        BitsShiftedOff = Builder.CreateLShr(BitsShiftedOff, One);
      }
      llvm::Value *Zero = llvm::ConstantInt::get(BitsShiftedOff->getType(), 0);
      llvm::Value *ValidBase = Builder.CreateICmpEQ(BitsShiftedOff, Zero);
      CGF.EmitBlock(Cont);
      // Merge: the base check trivially passes when the exponent was invalid
      // (the branch above skipped straight to Cont).
      llvm::PHINode *BaseCheck = Builder.CreatePHI(ValidBase->getType(), 2);
      BaseCheck->addIncoming(Builder.getTrue(), Orig);
      BaseCheck->addIncoming(ValidBase, CheckShiftBase);
      Checks.push_back(std::make_pair(
          BaseCheck, SanitizeSignedBase ? SanitizerKind::SO_ShiftBase
                                        : SanitizerKind::SO_UnsignedShiftBase));
    }

    assert(!Checks.empty());
    EmitBinOpCheck(Checks, Ops);
  }

  return Builder.CreateShl(Ops.LHS, RHS, "shl");
}
4870
4871Value *ScalarExprEmitter::EmitShr(const BinOpInfo &Ops) {
4872 // TODO: This misses out on the sanitizer check below.
4873 if (Ops.isFixedPointOp())
4874 return EmitFixedPointBinOp(Ops);
4875
4876 // LLVM requires the LHS and RHS to be the same type: promote or truncate the
4877 // RHS to the same size as the LHS.
4878 Value *RHS = Ops.RHS;
4879 if (Ops.LHS->getType() != RHS->getType())
4880 RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");
4881
4882 // OpenCL 6.3j: shift values are effectively % word size of LHS.
4883 if (CGF.getLangOpts().OpenCL || CGF.getLangOpts().HLSL)
4884 RHS = ConstrainShiftValue(Ops.LHS, RHS, "shr.mask");
4885 else if (CGF.SanOpts.has(SanitizerKind::ShiftExponent) &&
4886 isa<llvm::IntegerType>(Ops.LHS->getType())) {
4887 SanitizerDebugLocation SanScope(&CGF, {SanitizerKind::SO_ShiftExponent},
4888 SanitizerHandler::ShiftOutOfBounds);
4889 bool RHSIsSigned = Ops.rhsHasSignedIntegerRepresentation();
4890 llvm::Value *Valid = Builder.CreateICmpULE(
4891 Ops.RHS, GetMaximumShiftAmount(Ops.LHS, Ops.RHS, RHSIsSigned));
4892 EmitBinOpCheck(std::make_pair(Valid, SanitizerKind::SO_ShiftExponent), Ops);
4893 }
4894
4895 if (Ops.Ty->hasUnsignedIntegerRepresentation())
4896 return Builder.CreateLShr(Ops.LHS, RHS, "shr");
4897 return Builder.CreateAShr(Ops.LHS, RHS, "shr");
4898}
4899
4901// return corresponding comparison intrinsic for given vector type
4902static llvm::Intrinsic::ID GetIntrinsic(IntrinsicType IT,
4903 BuiltinType::Kind ElemKind) {
4904 switch (ElemKind) {
4905 default: llvm_unreachable("unexpected element type");
4906 case BuiltinType::Char_U:
4907 case BuiltinType::UChar:
4908 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
4909 llvm::Intrinsic::ppc_altivec_vcmpgtub_p;
4910 case BuiltinType::Char_S:
4911 case BuiltinType::SChar:
4912 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
4913 llvm::Intrinsic::ppc_altivec_vcmpgtsb_p;
4914 case BuiltinType::UShort:
4915 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
4916 llvm::Intrinsic::ppc_altivec_vcmpgtuh_p;
4917 case BuiltinType::Short:
4918 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
4919 llvm::Intrinsic::ppc_altivec_vcmpgtsh_p;
4920 case BuiltinType::UInt:
4921 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
4922 llvm::Intrinsic::ppc_altivec_vcmpgtuw_p;
4923 case BuiltinType::Int:
4924 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
4925 llvm::Intrinsic::ppc_altivec_vcmpgtsw_p;
4926 case BuiltinType::ULong:
4927 case BuiltinType::ULongLong:
4928 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p :
4929 llvm::Intrinsic::ppc_altivec_vcmpgtud_p;
4930 case BuiltinType::Long:
4931 case BuiltinType::LongLong:
4932 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p :
4933 llvm::Intrinsic::ppc_altivec_vcmpgtsd_p;
4934 case BuiltinType::Float:
4935 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpeqfp_p :
4936 llvm::Intrinsic::ppc_altivec_vcmpgtfp_p;
4937 case BuiltinType::Double:
4938 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_vsx_xvcmpeqdp_p :
4939 llvm::Intrinsic::ppc_vsx_xvcmpgtdp_p;
4940 case BuiltinType::UInt128:
4941 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequq_p
4942 : llvm::Intrinsic::ppc_altivec_vcmpgtuq_p;
4943 case BuiltinType::Int128:
4944 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequq_p
4945 : llvm::Intrinsic::ppc_altivec_vcmpgtsq_p;
4946 }
4947}
4948
/// Emit a scalar comparison.
///
/// \param UICmpOpc predicate used for unsigned-integer and pointer operands.
/// \param SICmpOpc predicate used for signed-integer operands.
/// \param FCmpOpc  predicate used for floating-point operands.
/// \param IsSignaling when true, floating-point operands are compared with a
///        signaling compare (CreateFCmpS) instead of a quiet one.
Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,
                                      llvm::CmpInst::Predicate UICmpOpc,
                                      llvm::CmpInst::Predicate SICmpOpc,
                                      llvm::CmpInst::Predicate FCmpOpc,
                                      bool IsSignaling) {
  TestAndClearIgnoreResultAssign();
  Value *Result;
  QualType LHSTy = E->getLHS()->getType();
  QualType RHSTy = E->getRHS()->getType();
  // Member pointers: only equality and inequality comparisons exist (see the
  // assert); their lowering is handled separately.
  if (const MemberPointerType *MPT = LHSTy->getAs<MemberPointerType>()) {
    assert(E->getOpcode() == BO_EQ ||
           E->getOpcode() == BO_NE);
    Value *LHS = CGF.EmitScalarExpr(E->getLHS());
    Value *RHS = CGF.EmitScalarExpr(E->getRHS());
        CGF, LHS, RHS, MPT, E->getOpcode() == BO_NE);
  } else if (!LHSTy->isAnyComplexType() && !RHSTy->isAnyComplexType()) {
    BinOpInfo BOInfo = EmitBinOps(E);
    Value *LHS = BOInfo.LHS;
    Value *RHS = BOInfo.RHS;

    // If AltiVec, the comparison results in a numeric type, so we use
    // intrinsics comparing vectors and giving 0 or 1 as a result
    if (LHSTy->isVectorType() && !E->getType()->isVectorType()) {
      // constants for mapping CR6 register bits to predicate result
      enum { CR6_EQ=0, CR6_EQ_REV, CR6_LT, CR6_LT_REV } CR6;

      llvm::Intrinsic::ID ID = llvm::Intrinsic::not_intrinsic;

      // in several cases vector arguments order will be reversed
      Value *FirstVecArg = LHS,
            *SecondVecArg = RHS;

      QualType ElTy = LHSTy->castAs<VectorType>()->getElementType();
      BuiltinType::Kind ElementKind = ElTy->castAs<BuiltinType>()->getKind();

      switch(E->getOpcode()) {
      default: llvm_unreachable("is not a comparison operation");
      case BO_EQ:
        CR6 = CR6_LT;
        ID = GetIntrinsic(VCMPEQ, ElementKind);
        break;
      case BO_NE:
        CR6 = CR6_EQ;
        ID = GetIntrinsic(VCMPEQ, ElementKind);
        break;
      case BO_LT:
        // a < b is emitted as b > a with the operands swapped.
        CR6 = CR6_LT;
        ID = GetIntrinsic(VCMPGT, ElementKind);
        std::swap(FirstVecArg, SecondVecArg);
        break;
      case BO_GT:
        CR6 = CR6_LT;
        ID = GetIntrinsic(VCMPGT, ElementKind);
        break;
      case BO_LE:
        if (ElementKind == BuiltinType::Float) {
          CR6 = CR6_LT;
          ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
          std::swap(FirstVecArg, SecondVecArg);
        }
        else {
          // Integer a <= b is !(a > b): use the EQ bit of CR6.
          CR6 = CR6_EQ;
          ID = GetIntrinsic(VCMPGT, ElementKind);
        }
        break;
      case BO_GE:
        if (ElementKind == BuiltinType::Float) {
          CR6 = CR6_LT;
          ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
        }
        else {
          CR6 = CR6_EQ;
          ID = GetIntrinsic(VCMPGT, ElementKind);
          std::swap(FirstVecArg, SecondVecArg);
        }
        break;
      }

      Value *CR6Param = Builder.getInt32(CR6);
      llvm::Function *F = CGF.CGM.getIntrinsic(ID);
      Result = Builder.CreateCall(F, {CR6Param, FirstVecArg, SecondVecArg});

      // The result type of intrinsic may not be same as E->getType().
      // If E->getType() is not BoolTy, EmitScalarConversion will do the
      // conversion work. If E->getType() is BoolTy, EmitScalarConversion will
      // do nothing, if ResultTy is not i1 at the same time, it will cause
      // crash later.
      llvm::IntegerType *ResultTy = cast<llvm::IntegerType>(Result->getType());
      if (ResultTy->getBitWidth() > 1 &&
          E->getType() == CGF.getContext().BoolTy)
        Result = Builder.CreateTrunc(Result, Builder.getInt1Ty());
      return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType(),
                                  E->getExprLoc());
    }

    // Fixed-point operands have a dedicated comparison emission path.
    if (BOInfo.isFixedPointOp()) {
      Result = EmitFixedPointBinOp(BOInfo);
    } else if (LHS->getType()->isFPOrFPVectorTy()) {
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, BOInfo.FPFeatures);
      if (!IsSignaling)
        Result = Builder.CreateFCmp(FCmpOpc, LHS, RHS, "cmp");
      else
        Result = Builder.CreateFCmpS(FCmpOpc, LHS, RHS, "cmp");
    } else if (LHSTy->hasSignedIntegerRepresentation()) {
      Result = Builder.CreateICmp(SICmpOpc, LHS, RHS, "cmp");
    } else {
      // Unsigned integers and pointers.

      if (CGF.CGM.getCodeGenOpts().StrictVTablePointers &&

        // Dynamic information is required to be stripped for comparisons,
        // because it could leak the dynamic information. Based on comparisons
        // of pointers to dynamic objects, the optimizer can replace one pointer
        // with another, which might be incorrect in presence of invariant
        // groups. Comparison with null is safe because null does not carry any
        // dynamic information.
        if (LHSTy.mayBeDynamicClass())
          LHS = Builder.CreateStripInvariantGroup(LHS);
        if (RHSTy.mayBeDynamicClass())
          RHS = Builder.CreateStripInvariantGroup(RHS);
      }

      Result = Builder.CreateICmp(UICmpOpc, LHS, RHS, "cmp");
    }

    // If this is a vector comparison, sign extend the result to the appropriate
    // vector integer type and return it (don't convert to bool).
    if (LHSTy->isVectorType())
      return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");

  } else {
    // Complex Comparison: can only be an equality comparison.
    QualType CETy;
    if (auto *CTy = LHSTy->getAs<ComplexType>()) {
      LHS = CGF.EmitComplexExpr(E->getLHS());
      CETy = CTy->getElementType();
    } else {
      // Scalar operand of a mixed complex/scalar comparison: treat the
      // scalar as (value, 0).
      LHS.first = Visit(E->getLHS());
      LHS.second = llvm::Constant::getNullValue(LHS.first->getType());
      CETy = LHSTy;
    }
    if (auto *CTy = RHSTy->getAs<ComplexType>()) {
      RHS = CGF.EmitComplexExpr(E->getRHS());
      assert(CGF.getContext().hasSameUnqualifiedType(CETy,
                                                     CTy->getElementType()) &&
             "The element types must always match.");
      (void)CTy;
    } else {
      RHS.first = Visit(E->getRHS());
      RHS.second = llvm::Constant::getNullValue(RHS.first->getType());
      assert(CGF.getContext().hasSameUnqualifiedType(CETy, RHSTy) &&
             "The element types must always match.");
    }

    Value *ResultR, *ResultI;
    if (CETy->isRealFloatingType()) {
      // As complex comparisons can only be equality comparisons, they
      // are never signaling comparisons.
      ResultR = Builder.CreateFCmp(FCmpOpc, LHS.first, RHS.first, "cmp.r");
      ResultI = Builder.CreateFCmp(FCmpOpc, LHS.second, RHS.second, "cmp.i");
    } else {
      // Complex comparisons can only be equality comparisons. As such, signed
      // and unsigned opcodes are the same.
      ResultR = Builder.CreateICmp(UICmpOpc, LHS.first, RHS.first, "cmp.r");
      ResultI = Builder.CreateICmp(UICmpOpc, LHS.second, RHS.second, "cmp.i");
    }

    // Combine the per-component results: equal iff both components are
    // equal; unequal iff either component differs.
    if (E->getOpcode() == BO_EQ) {
      Result = Builder.CreateAnd(ResultR, ResultI, "and.ri");
    } else {
      assert(E->getOpcode() == BO_NE &&
             "Complex comparison other than == or != ?");
      Result = Builder.CreateOr(ResultR, ResultI, "or.ri");
    }
  }

  return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType(),
                              E->getExprLoc());
}
5132
    const BinaryOperator *E, Value **Previous, QualType *SrcType) {
  // In case we have the integer or bitfield sanitizer checks enabled
  // we want to get the expression before scalar conversion.
  if (auto *ICE = dyn_cast<ImplicitCastExpr>(E->getRHS())) {
    CastKind Kind = ICE->getCastKind();
    if (Kind == CK_IntegralCast || Kind == CK_LValueToRValue) {
      // Hand the pre-conversion value and its type back to the caller (used
      // later for the bitfield conversion check).
      *SrcType = ICE->getSubExpr()->getType();
      *Previous = EmitScalarExpr(ICE->getSubExpr());
      // Pass default ScalarConversionOpts to avoid emitting
      // integer sanitizer checks as E refers to bitfield.
      return EmitScalarConversion(*Previous, *SrcType, ICE->getType(),
                                  ICE->getExprLoc());
    }
  }
  // No interesting implicit cast on the RHS: emit it normally.
  return EmitScalarExpr(E->getRHS());
}
5150
/// Emit a scalar assignment `LHS = RHS`.
///
/// Returns the assigned r-value (the value of the expression in C and for
/// non-volatile l-values in C++), nullptr when the result is ignored, or a
/// reload of the l-value when it is volatile-qualified in C++.
Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
  ApplyAtomGroup Grp(CGF.getDebugInfo());
  bool Ignore = TestAndClearIgnoreResultAssign();

  Value *RHS;
  LValue LHS;

  // Pointer-auth qualified destinations go through the dedicated
  // qualify/unqualify helpers rather than the generic store path.
  if (PointerAuthQualifier PtrAuth = E->getLHS()->getType().getPointerAuth()) {
    llvm::Value *RV =
        CGF.EmitPointerAuthQualify(PtrAuth, E->getRHS(), LV.getAddress());
    CGF.EmitNullabilityCheck(LV, RV, E->getExprLoc());

    if (Ignore)
      return nullptr;
    RV = CGF.EmitPointerAuthUnqualify(PtrAuth, RV, LV.getType(),
                                      LV.getAddress(), /*nonnull*/ false);
    return RV;
  }

  // Dispatch on the ObjC ownership qualifier of the destination; ARC-managed
  // stores have dedicated entry points.
  switch (E->getLHS()->getType().getObjCLifetime()) {
    std::tie(LHS, RHS) = CGF.EmitARCStoreStrong(E, Ignore);
    break;

    std::tie(LHS, RHS) = CGF.EmitARCStoreAutoreleasing(E);
    break;

    std::tie(LHS, RHS) = CGF.EmitARCStoreUnsafeUnretained(E, Ignore);
    break;

    RHS = Visit(E->getRHS());
    LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
    RHS = CGF.EmitARCStoreWeak(LHS.getAddress(), RHS, Ignore);
    break;

    // __block variables need to have the rhs evaluated first, plus
    // this should improve codegen just a little.
    Value *Previous = nullptr;
    QualType SrcType = E->getRHS()->getType();
    // Check if LHS is a bitfield, if RHS contains an implicit cast expression
    // we want to extract that value and potentially (if the bitfield sanitizer
    // is enabled) use it to check for an implicit conversion.
    if (E->getLHS()->refersToBitField())
      RHS = CGF.EmitWithOriginalRHSBitfieldAssignment(E, &Previous, &SrcType);
    else
      RHS = Visit(E->getRHS());

    LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);

    // Store the value into the LHS. Bit-fields are handled specially
    // because the result is altered by the store, i.e., [C99 6.5.16p1]
    // 'An assignment expression has the value of the left operand after
    // the assignment...'.
    if (LHS.isBitField()) {
      CGF.EmitStoreThroughBitfieldLValue(RValue::get(RHS), LHS, &RHS);
      // If the expression contained an implicit conversion, make sure
      // to use the value before the scalar conversion.
      Value *Src = Previous ? Previous : RHS;
      QualType DstType = E->getLHS()->getType();
      CGF.EmitBitfieldConversionCheck(Src, SrcType, RHS, DstType,
                                      LHS.getBitFieldInfo(), E->getExprLoc());
    } else {
      CGF.EmitNullabilityCheck(LHS, RHS, E->getExprLoc());
      CGF.EmitStoreThroughLValue(RValue::get(RHS), LHS);
    }
  }
  // OpenMP: Handle lastprivate(condition:) in scalar assignment
  if (CGF.getLangOpts().OpenMP) {
        E->getLHS());
  }

  // If the result is clearly ignored, return now.
  if (Ignore)
    return nullptr;

  // The result of an assignment in C is the assigned r-value.
  if (!CGF.getLangOpts().CPlusPlus)
    return RHS;

  // If the lvalue is non-volatile, return the computed value of the assignment.
  if (!LHS.isVolatileQualified())
    return RHS;

  // Otherwise, reload the value.
  return EmitLoadOfLValue(LHS, E->getExprLoc());
}
5245
/// Emit a logical AND (&&).
///
/// Vector operands are compared element-wise against zero and combined with a
/// bitwise `and`. Scalar operands use short-circuit control flow with a PHI
/// join (or are simplified entirely when the LHS constant-folds), with
/// optional profile/coverage instrumentation of the RHS branch.
Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
  // Perform vector logical and on comparisons with zero vectors.
  if (E->getType()->isVectorType()) {

    Value *LHS = Visit(E->getLHS());
    Value *RHS = Visit(E->getRHS());
    Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
    if (LHS->getType()->isFPOrFPVectorTy()) {
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
          CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
      LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp");
      RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp");
    } else {
      LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
      RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
    }
    Value *And = Builder.CreateAnd(LHS, RHS);
    return Builder.CreateSExt(And, ConvertType(E->getType()), "sext");
  }

  bool InstrumentRegions = CGF.CGM.getCodeGenOpts().hasProfileClangInstr();
  llvm::Type *ResTy = ConvertType(E->getType());

  // If we have 0 && RHS, see if we can elide RHS, if so, just return 0.
  // If we have 1 && X, just emit X without inserting the control flow.
  bool LHSCondVal;
  if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
    if (LHSCondVal) { // If we have 1 && X, just emit X.

      // If the top of the logical operator nest, reset the MCDC temp to 0.
      if (CGF.MCDCLogOpStack.empty())

      CGF.MCDCLogOpStack.push_back(E);

      Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());

      // If we're generating for profiling or coverage, generate a branch to a
      // block that increments the RHS counter needed to track branch condition
      // coverage. In this case, use "FBlock" as both the final "TrueBlock" and
      // "FalseBlock" after the increment is done.
      if (InstrumentRegions &&
        CGF.maybeUpdateMCDCCondBitmap(E->getRHS(), RHSCond);
        llvm::BasicBlock *FBlock = CGF.createBasicBlock("land.end");
        llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("land.rhscnt");
        Builder.CreateCondBr(RHSCond, RHSBlockCnt, FBlock);
        CGF.EmitBlock(RHSBlockCnt);
        CGF.EmitBranch(FBlock);
        CGF.EmitBlock(FBlock);
      } else
        CGF.markStmtMaybeUsed(E->getRHS());

      CGF.MCDCLogOpStack.pop_back();
      // If the top of the logical operator nest, update the MCDC bitmap.
      if (CGF.MCDCLogOpStack.empty())

      // ZExt result to int or bool.
      return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "land.ext");
    }

    // 0 && RHS: If it is safe, just elide the RHS, and return 0/false.
    if (!CGF.ContainsLabel(E->getRHS())) {
      CGF.markStmtMaybeUsed(E->getRHS());
      return llvm::Constant::getNullValue(ResTy);
    }
  }

  // General case: emit short-circuit control flow.
  // If the top of the logical operator nest, reset the MCDC temp to 0.
  if (CGF.MCDCLogOpStack.empty())

  CGF.MCDCLogOpStack.push_back(E);

  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("land.end");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("land.rhs");

  CodeGenFunction::ConditionalEvaluation eval(CGF);

  // Branch on the LHS first. If it is false, go to the failure (cont) block.
  CGF.EmitBranchOnBoolExpr(E->getLHS(), RHSBlock, ContBlock,
                           CGF.getProfileCount(E->getRHS()));

  // Any edges into the ContBlock are now from an (indeterminate number of)
  // edges from this first condition. All of these values will be false. Start
  // setting up the PHI node in the Cont Block for this.
  llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
                                            "", ContBlock);
  for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
       PI != PE; ++PI)
    PN->addIncoming(llvm::ConstantInt::getFalse(VMContext), *PI);

  eval.begin(CGF);
  CGF.EmitBlock(RHSBlock);
  Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
  eval.end(CGF);

  // Reaquire the RHS block, as there may be subblocks inserted.
  RHSBlock = Builder.GetInsertBlock();

  // If we're generating for profiling or coverage, generate a branch on the
  // RHS to a block that increments the RHS true counter needed to track branch
  // condition coverage.
  if (InstrumentRegions &&
    CGF.maybeUpdateMCDCCondBitmap(E->getRHS(), RHSCond);
    llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("land.rhscnt");
    Builder.CreateCondBr(RHSCond, RHSBlockCnt, ContBlock);
    CGF.EmitBlock(RHSBlockCnt);
    CGF.EmitBranch(ContBlock);
    PN->addIncoming(RHSCond, RHSBlockCnt);
  }

  // Emit an unconditional branch from this block to ContBlock.
  {
    // There is no need to emit line number for unconditional branch.
    auto NL = ApplyDebugLocation::CreateEmpty(CGF);
    CGF.EmitBlock(ContBlock);
  }
  // Insert an entry into the phi node for the edge with the value of RHSCond.
  PN->addIncoming(RHSCond, RHSBlock);

  CGF.MCDCLogOpStack.pop_back();
  // If the top of the logical operator nest, update the MCDC bitmap.
  if (CGF.MCDCLogOpStack.empty())

  // Artificial location to preserve the scope information
  {
    PN->setDebugLoc(Builder.getCurrentDebugLocation());
  }

  // ZExt result to int.
  return Builder.CreateZExtOrBitCast(PN, ResTy, "land.ext");
}
5388
/// Emit a logical OR (||).
///
/// Vector operands are compared element-wise against zero and combined with a
/// bitwise `or`. Scalar operands use short-circuit control flow with a PHI
/// join (or are simplified entirely when the LHS constant-folds), with
/// optional profile/coverage instrumentation of the RHS branch.
Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
  // Perform vector logical or on comparisons with zero vectors.
  if (E->getType()->isVectorType()) {

    Value *LHS = Visit(E->getLHS());
    Value *RHS = Visit(E->getRHS());
    Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
    if (LHS->getType()->isFPOrFPVectorTy()) {
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
          CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
      LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp");
      RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp");
    } else {
      LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
      RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
    }
    Value *Or = Builder.CreateOr(LHS, RHS);
    return Builder.CreateSExt(Or, ConvertType(E->getType()), "sext");
  }

  bool InstrumentRegions = CGF.CGM.getCodeGenOpts().hasProfileClangInstr();
  llvm::Type *ResTy = ConvertType(E->getType());

  // If we have 1 || RHS, see if we can elide RHS, if so, just return 1.
  // If we have 0 || X, just emit X without inserting the control flow.
  bool LHSCondVal;
  if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
    if (!LHSCondVal) { // If we have 0 || X, just emit X.

      // If the top of the logical operator nest, reset the MCDC temp to 0.
      if (CGF.MCDCLogOpStack.empty())

      CGF.MCDCLogOpStack.push_back(E);

      Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());

      // If we're generating for profiling or coverage, generate a branch to a
      // block that increments the RHS counter need to track branch condition
      // coverage. In this case, use "FBlock" as both the final "TrueBlock" and
      // "FalseBlock" after the increment is done.
      if (InstrumentRegions &&
        CGF.maybeUpdateMCDCCondBitmap(E->getRHS(), RHSCond);
        llvm::BasicBlock *FBlock = CGF.createBasicBlock("lor.end");
        llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("lor.rhscnt");
        Builder.CreateCondBr(RHSCond, FBlock, RHSBlockCnt);
        CGF.EmitBlock(RHSBlockCnt);
        CGF.EmitBranch(FBlock);
        CGF.EmitBlock(FBlock);
      } else
        CGF.markStmtMaybeUsed(E->getRHS());

      CGF.MCDCLogOpStack.pop_back();
      // If the top of the logical operator nest, update the MCDC bitmap.
      if (CGF.MCDCLogOpStack.empty())

      // ZExt result to int or bool.
      return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "lor.ext");
    }

    // 1 || RHS: If it is safe, just elide the RHS, and return 1/true.
    if (!CGF.ContainsLabel(E->getRHS())) {
      CGF.markStmtMaybeUsed(E->getRHS());
      return llvm::ConstantInt::get(ResTy, 1);
    }
  }

  // General case: emit short-circuit control flow.
  // If the top of the logical operator nest, reset the MCDC temp to 0.
  if (CGF.MCDCLogOpStack.empty())

  CGF.MCDCLogOpStack.push_back(E);

  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("lor.end");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("lor.rhs");

  CodeGenFunction::ConditionalEvaluation eval(CGF);

  // Branch on the LHS first. If it is true, go to the success (cont) block.
  CGF.EmitBranchOnBoolExpr(E->getLHS(), ContBlock, RHSBlock,
                           CGF.getProfileCount(E->getRHS()));

  // Any edges into the ContBlock are now from an (indeterminate number of)
  // edges from this first condition. All of these values will be true. Start
  // setting up the PHI node in the Cont Block for this.
  llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
                                            "", ContBlock);
  for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
       PI != PE; ++PI)
    PN->addIncoming(llvm::ConstantInt::getTrue(VMContext), *PI);

  eval.begin(CGF);

  // Emit the RHS condition as a bool value.
  CGF.EmitBlock(RHSBlock);
  Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());

  eval.end(CGF);

  // Reaquire the RHS block, as there may be subblocks inserted.
  RHSBlock = Builder.GetInsertBlock();

  // If we're generating for profiling or coverage, generate a branch on the
  // RHS to a block that increments the RHS true counter needed to track branch
  // condition coverage.
  if (InstrumentRegions &&
    CGF.maybeUpdateMCDCCondBitmap(E->getRHS(), RHSCond);
    llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("lor.rhscnt");
    Builder.CreateCondBr(RHSCond, ContBlock, RHSBlockCnt);
    CGF.EmitBlock(RHSBlockCnt);
    CGF.EmitBranch(ContBlock);
    PN->addIncoming(RHSCond, RHSBlockCnt);
  }

  // Emit an unconditional branch from this block to ContBlock. Insert an entry
  // into the phi node for the edge with the value of RHSCond.
  CGF.EmitBlock(ContBlock);
  PN->addIncoming(RHSCond, RHSBlock);

  CGF.MCDCLogOpStack.pop_back();
  // If the top of the logical operator nest, update the MCDC bitmap.
  if (CGF.MCDCLogOpStack.empty())

  // ZExt result to int.
  return Builder.CreateZExtOrBitCast(PN, ResTy, "lor.ext");
}
5525
5526Value *ScalarExprEmitter::VisitBinComma(const BinaryOperator *E) {
5527 CGF.EmitIgnoredExpr(E->getLHS());
5528 CGF.EnsureInsertPoint();
5529 return Visit(E->getRHS());
5530}
5531
5532//===----------------------------------------------------------------------===//
5533// Other Operators
5534//===----------------------------------------------------------------------===//
5535
/// isCheapEnoughToEvaluateUnconditionally - Return true if the specified
/// expression is cheap enough and side-effect-free enough to evaluate
/// unconditionally instead of conditionally. This is used to convert control
/// flow into selects in some cases.
                                   CodeGenFunction &CGF) {
  // Anything that is an integer or floating point constant is fine.
  return E->IgnoreParens()->isEvaluatable(CGF.getContext());

  // NOTE: the rationale below is intentionally kept after the return; it
  // documents why the check is limited to constants rather than being
  // extended to variable references.
  // Even non-volatile automatic variables can't be evaluated unconditionally.
  // Referencing a thread_local may cause non-trivial initialization work to
  // occur. If we're inside a lambda and one of the variables is from the scope
  // outside the lambda, that function may have returned already. Reading its
  // locals is a bad idea. Also, these reads may introduce races there didn't
  // exist in the source-level program.
}
5552
5553
/// Emit a conditional operator (?:), choosing among several strategies:
/// folding away a constant condition, lowering vector conditions to select
/// instructions, emitting a select when both arms are cheap and
/// side-effect-free, or full branch-based control flow joined with a PHI.
Value *ScalarExprEmitter::
VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
  TestAndClearIgnoreResultAssign();

  // Bind the common expression if necessary.
  CodeGenFunction::OpaqueValueMapping binding(CGF, E);

  Expr *condExpr = E->getCond();
  Expr *lhsExpr = E->getTrueExpr();
  Expr *rhsExpr = E->getFalseExpr();

  // If the condition constant folds and can be elided, try to avoid emitting
  // the condition and the dead arm.
  bool CondExprBool;
  if (CGF.ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
    Expr *live = lhsExpr, *dead = rhsExpr;
    if (!CondExprBool) std::swap(live, dead);

    // If the dead side doesn't have labels we need, just emit the Live part.
    if (!CGF.ContainsLabel(dead)) {
      if (CondExprBool) {
        CGF.incrementProfileCounter(lhsExpr);
        CGF.incrementProfileCounter(rhsExpr);
      }
      }
      Value *Result = Visit(live);
      CGF.markStmtMaybeUsed(dead);

      // If the live part is a throw expression, it acts like it has a void
      // type, so evaluating it returns a null Value*. However, a conditional
      // with non-void type must return a non-null Value*.
      if (!Result && !E->getType()->isVoidType())
        Result = llvm::UndefValue::get(CGF.ConvertType(E->getType()));

      return Result;
    }
  }

  // OpenCL: If the condition is a vector, we can treat this condition like
  // the select function.
  if (CGF.getLangOpts().OpenCL && (condExpr->getType()->isVectorType() ||
                                   condExpr->getType()->isExtVectorType())) {

    llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
    llvm::Value *LHS = Visit(lhsExpr);
    llvm::Value *RHS = Visit(rhsExpr);

    llvm::Type *condType = ConvertType(condExpr->getType());
    auto *vecTy = cast<llvm::FixedVectorType>(condType);

    unsigned numElem = vecTy->getNumElements();
    llvm::Type *elemType = vecTy->getElementType();

    // Per-element selection mask: the MSB of each condition element decides
    // which arm's element is taken (sext produces all-ones/all-zeros masks).
    llvm::Value *zeroVec = llvm::Constant::getNullValue(vecTy);
    llvm::Value *TestMSB = Builder.CreateICmpSLT(CondV, zeroVec);
    llvm::Value *tmp = Builder.CreateSExt(
        TestMSB, llvm::FixedVectorType::get(elemType, numElem), "sext");
    llvm::Value *tmp2 = Builder.CreateNot(tmp);

    // Cast float to int to perform ANDs if necessary.
    llvm::Value *RHSTmp = RHS;
    llvm::Value *LHSTmp = LHS;
    bool wasCast = false;
    llvm::VectorType *rhsVTy = cast<llvm::VectorType>(RHS->getType());
    if (rhsVTy->getElementType()->isFloatingPointTy()) {
      RHSTmp = Builder.CreateBitCast(RHS, tmp2->getType());
      LHSTmp = Builder.CreateBitCast(LHS, tmp->getType());
      wasCast = true;
    }

    // result = (RHS & ~mask) | (LHS & mask), then cast back if needed.
    llvm::Value *tmp3 = Builder.CreateAnd(RHSTmp, tmp2);
    llvm::Value *tmp4 = Builder.CreateAnd(LHSTmp, tmp);
    llvm::Value *tmp5 = Builder.CreateOr(tmp3, tmp4, "cond");
    if (wasCast)
      tmp5 = Builder.CreateBitCast(tmp5, RHS->getType());

    return tmp5;
  }

  if (condExpr->getType()->isVectorType() ||
      condExpr->getType()->isSveVLSBuiltinType()) {

    llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
    llvm::Value *LHS = Visit(lhsExpr);
    llvm::Value *RHS = Visit(rhsExpr);

    llvm::Type *CondType = ConvertType(condExpr->getType());
    auto *VecTy = cast<llvm::VectorType>(CondType);

    // An i1 condition vector can feed a select directly.
    if (VecTy->getElementType()->isIntegerTy(1))
      return Builder.CreateSelect(CondV, LHS, RHS, "vector_select");

    // OpenCL uses the MSB of the mask vector.
    llvm::Value *ZeroVec = llvm::Constant::getNullValue(VecTy);
    if (condExpr->getType()->isExtVectorType())
      CondV = Builder.CreateICmpSLT(CondV, ZeroVec, "vector_cond");
    else
      CondV = Builder.CreateICmpNE(CondV, ZeroVec, "vector_cond");
    return Builder.CreateSelect(CondV, LHS, RHS, "vector_select");
  }

  // If this is a really simple expression (like x ? 4 : 5), emit this as a
  // select instead of as control flow. We can only do this if it is cheap and
  // safe to evaluate the LHS and RHS unconditionally.
  if (isCheapEnoughToEvaluateUnconditionally(lhsExpr, CGF) &&
    llvm::Value *CondV = CGF.EvaluateExprAsBool(condExpr);
    llvm::Value *StepV = Builder.CreateZExtOrBitCast(CondV, CGF.Int64Ty);

    CGF.incrementProfileCounter(lhsExpr);
    CGF.incrementProfileCounter(rhsExpr);
  } else
    CGF.incrementProfileCounter(E, StepV);

    llvm::Value *LHS = Visit(lhsExpr);
    llvm::Value *RHS = Visit(rhsExpr);
    if (!LHS) {
      // If the conditional has void type, make sure we return a null Value*.
      assert(!RHS && "LHS and RHS types must match");
      return nullptr;
    }
    return Builder.CreateSelect(CondV, LHS, RHS, "cond");
  }

  // General case: emit branch-based control flow with a PHI join.
  // If the top of the logical operator nest, reset the MCDC temp to 0.
  if (CGF.MCDCLogOpStack.empty())
    CGF.maybeResetMCDCCondBitmap(condExpr);

  llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");

  CodeGenFunction::ConditionalEvaluation eval(CGF);
  CGF.EmitBranchOnBoolExpr(condExpr, LHSBlock, RHSBlock,
                           CGF.getProfileCount(lhsExpr));

  CGF.EmitBlock(LHSBlock);

  // If the top of the logical operator nest, update the MCDC bitmap for the
  // ConditionalOperator prior to visiting its LHS and RHS blocks, since they
  // may also contain a boolean expression.
  if (CGF.MCDCLogOpStack.empty())
    CGF.maybeUpdateMCDCTestVectorBitmap(condExpr);

    CGF.incrementProfileCounter(lhsExpr);
  else

  eval.begin(CGF);
  Value *LHS = Visit(lhsExpr);
  eval.end(CGF);

  LHSBlock = Builder.GetInsertBlock();
  Builder.CreateBr(ContBlock);

  CGF.EmitBlock(RHSBlock);

  // If the top of the logical operator nest, update the MCDC bitmap for the
  // ConditionalOperator prior to visiting its LHS and RHS blocks, since they
  // may also contain a boolean expression.
  if (CGF.MCDCLogOpStack.empty())
    CGF.maybeUpdateMCDCTestVectorBitmap(condExpr);

    CGF.incrementProfileCounter(rhsExpr);

  eval.begin(CGF);
  Value *RHS = Visit(rhsExpr);
  eval.end(CGF);

  RHSBlock = Builder.GetInsertBlock();
  CGF.EmitBlock(ContBlock);

  // If the LHS or RHS is a throw expression, it will be legitimately null.
  if (!LHS)
    return RHS;
  if (!RHS)
    return LHS;

  // Create a PHI node for the real part.
  llvm::PHINode *PN = Builder.CreatePHI(LHS->getType(), 2, "cond");
  PN->addIncoming(LHS, LHSBlock);
  PN->addIncoming(RHS, RHSBlock);

  // When single byte coverage mode is enabled, add a counter to continuation
  // block.

  return PN;
}
5752
5753Value *ScalarExprEmitter::VisitChooseExpr(ChooseExpr *E) {
5754 return Visit(E->getChosenSubExpr());
5755}
5756
5757Value *ScalarExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
5758 Address ArgValue = Address::invalid();
5759 RValue ArgPtr = CGF.EmitVAArg(VE, ArgValue);
5760
5761 return ArgPtr.getScalarVal();
5762}
5763
5764Value *ScalarExprEmitter::VisitBlockExpr(const BlockExpr *block) {
5765 return CGF.EmitBlockLiteral(block);
5766}
5767
// Convert a vec3 to vec4, or vice versa.
    Value *Src, unsigned NumElementsDst) {
  // Shuffle mask: taking the first 3 entries ({0,1,2}) narrows vec4 -> vec3;
  // taking all 4 ({0,1,2,-1}) widens vec3 -> vec4 with an undef fourth lane.
  static constexpr int Mask[] = {0, 1, 2, -1};
  return Builder.CreateShuffleVector(Src, llvm::ArrayRef(Mask, NumElementsDst));
}
5774
// Create cast instructions for converting LLVM value \p Src to LLVM type \p
// DstTy. \p Src has the same size as \p DstTy. Both are single value types
// but could be scalar or vectors of different lengths, and either can be
// pointer.
// There are 4 cases:
// 1. non-pointer -> non-pointer  : needs 1 bitcast
// 2. pointer -> pointer          : needs 1 bitcast or addrspacecast
// 3. pointer -> non-pointer
//   a) pointer -> intptr_t       : needs 1 ptrtoint
//   b) pointer -> non-intptr_t   : needs 1 ptrtoint then 1 bitcast
// 4. non-pointer -> pointer
//   a) intptr_t -> pointer       : needs 1 inttoptr
//   b) non-intptr_t -> pointer   : needs 1 bitcast then 1 inttoptr
// Note: for cases 3b and 4b two casts are required since LLVM casts do not
// allow casting directly between pointer types and non-integer non-pointer
// types.
                                       const llvm::DataLayout &DL,
                                       Value *Src, llvm::Type *DstTy,
                                       StringRef Name = "") {
  auto SrcTy = Src->getType();

  // Case 1.
  if (!SrcTy->isPointerTy() && !DstTy->isPointerTy())
    return Builder.CreateBitCast(Src, DstTy, Name);

  // Case 2.
  if (SrcTy->isPointerTy() && DstTy->isPointerTy())
    return Builder.CreatePointerBitCastOrAddrSpaceCast(Src, DstTy, Name);

  // Case 3.
  if (SrcTy->isPointerTy() && !DstTy->isPointerTy()) {
    // Case 3b: widen to the pointer-sized integer first.
    if (!DstTy->isIntegerTy())
      Src = Builder.CreatePtrToInt(Src, DL.getIntPtrType(SrcTy));
    // Cases 3a and 3b.
    return Builder.CreateBitOrPointerCast(Src, DstTy, Name);
  }

  // Case 4b: go through the pointer-sized integer when the source is not
  // already an integer.
  if (!SrcTy->isIntegerTy())
    Src = Builder.CreateBitCast(Src, DL.getIntPtrType(DstTy));
  // Cases 4a and 4b.
  return Builder.CreateIntToPtr(Src, DstTy, Name);
}
5820
5821Value *ScalarExprEmitter::VisitAsTypeExpr(AsTypeExpr *E) {
5822 Value *Src = CGF.EmitScalarExpr(E->getSrcExpr());
5823 llvm::Type *DstTy = ConvertType(E->getType());
5824
5825 llvm::Type *SrcTy = Src->getType();
5826 unsigned NumElementsSrc =
5828 ? cast<llvm::FixedVectorType>(SrcTy)->getNumElements()
5829 : 0;
5830 unsigned NumElementsDst =
5832 ? cast<llvm::FixedVectorType>(DstTy)->getNumElements()
5833 : 0;
5834
5835 // Use bit vector expansion for ext_vector_type boolean vectors.
5836 if (E->getType()->isExtVectorBoolType())
5837 return CGF.emitBoolVecConversion(Src, NumElementsDst, "astype");
5838
5839 // Going from vec3 to non-vec3 is a special case and requires a shuffle
5840 // vector to get a vec4, then a bitcast if the target type is different.
5841 if (NumElementsSrc == 3 && NumElementsDst != 3) {
5842 Src = ConvertVec3AndVec4(Builder, CGF, Src, 4);
5843 Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
5844 DstTy);
5845
5846 Src->setName("astype");
5847 return Src;
5848 }
5849
5850 // Going from non-vec3 to vec3 is a special case and requires a bitcast
5851 // to vec4 if the original type is not vec4, then a shuffle vector to
5852 // get a vec3.
5853 if (NumElementsSrc != 3 && NumElementsDst == 3) {
5854 auto *Vec4Ty = llvm::FixedVectorType::get(
5855 cast<llvm::VectorType>(DstTy)->getElementType(), 4);
5856 Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
5857 Vec4Ty);
5858
5859 Src = ConvertVec3AndVec4(Builder, CGF, Src, 3);
5860 Src->setName("astype");
5861 return Src;
5862 }
5863
5864 return createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(),
5865 Src, DstTy, "astype");
5866}
5867
5868Value *ScalarExprEmitter::VisitAtomicExpr(AtomicExpr *E) {
5869 return CGF.EmitAtomicExpr(E).getScalarVal();
5870}
5871
5872//===----------------------------------------------------------------------===//
5873// Entry Point into this File
5874//===----------------------------------------------------------------------===//
5875
5876/// Emit the computation of the specified expression of scalar type, ignoring
5877/// the result.
5878Value *CodeGenFunction::EmitScalarExpr(const Expr *E, bool IgnoreResultAssign) {
5879 assert(E && hasScalarEvaluationKind(E->getType()) &&
5880 "Invalid scalar expression to emit");
5881
5882 return ScalarExprEmitter(*this, IgnoreResultAssign)
5883 .Visit(const_cast<Expr *>(E));
5884}
5885
5886/// Emit a conversion from the specified type to the specified destination type,
5887/// both of which are LLVM scalar types.
5889 QualType DstTy,
5890 SourceLocation Loc) {
5891 assert(hasScalarEvaluationKind(SrcTy) && hasScalarEvaluationKind(DstTy) &&
5892 "Invalid scalar expression to emit");
5893 return ScalarExprEmitter(*this).EmitScalarConversion(Src, SrcTy, DstTy, Loc);
5894}
5895
5896/// Emit a conversion from the specified complex type to the specified
5897/// destination type, where the destination type is an LLVM scalar type.
5899 QualType SrcTy,
5900 QualType DstTy,
5901 SourceLocation Loc) {
5902 assert(SrcTy->isAnyComplexType() && hasScalarEvaluationKind(DstTy) &&
5903 "Invalid complex -> scalar conversion");
5904 return ScalarExprEmitter(*this)
5905 .EmitComplexToScalarConversion(Src, SrcTy, DstTy, Loc);
5906}
5907
5908
5909Value *
5911 QualType PromotionType) {
5912 if (!PromotionType.isNull())
5913 return ScalarExprEmitter(*this).EmitPromoted(E, PromotionType);
5914 else
5915 return ScalarExprEmitter(*this).Visit(const_cast<Expr *>(E));
5916}
5917
5918
5921 bool isInc, bool isPre) {
5922 return ScalarExprEmitter(*this).EmitScalarPrePostIncDec(E, LV, isInc, isPre);
5923}
5924
5926 // object->isa or (*object).isa
5927 // Generate code as for: *(Class*)object
5928
5929 Expr *BaseExpr = E->getBase();
5931 if (BaseExpr->isPRValue()) {
5932 llvm::Type *BaseTy =
5934 Addr = Address(EmitScalarExpr(BaseExpr), BaseTy, getPointerAlign());
5935 } else {
5936 Addr = EmitLValue(BaseExpr).getAddress();
5937 }
5938
5939 // Cast the address to Class*.
5940 Addr = Addr.withElementType(ConvertType(E->getType()));
5941 return MakeAddrLValue(Addr, E->getType());
5942}
5943
5944
5946 const CompoundAssignOperator *E) {
5948 ScalarExprEmitter Scalar(*this);
5949 Value *Result = nullptr;
5950 switch (E->getOpcode()) {
5951#define COMPOUND_OP(Op) \
5952 case BO_##Op##Assign: \
5953 return Scalar.EmitCompoundAssignLValue(E, &ScalarExprEmitter::Emit##Op, \
5954 Result)
5955 COMPOUND_OP(Mul);
5956 COMPOUND_OP(Div);
5957 COMPOUND_OP(Rem);
5958 COMPOUND_OP(Add);
5959 COMPOUND_OP(Sub);
5960 COMPOUND_OP(Shl);
5961 COMPOUND_OP(Shr);
5963 COMPOUND_OP(Xor);
5964 COMPOUND_OP(Or);
5965#undef COMPOUND_OP
5966
5967 case BO_PtrMemD:
5968 case BO_PtrMemI:
5969 case BO_Mul:
5970 case BO_Div:
5971 case BO_Rem:
5972 case BO_Add:
5973 case BO_Sub:
5974 case BO_Shl:
5975 case BO_Shr:
5976 case BO_LT:
5977 case BO_GT:
5978 case BO_LE:
5979 case BO_GE:
5980 case BO_EQ:
5981 case BO_NE:
5982 case BO_Cmp:
5983 case BO_And:
5984 case BO_Xor:
5985 case BO_Or:
5986 case BO_LAnd:
5987 case BO_LOr:
5988 case BO_Assign:
5989 case BO_Comma:
5990 llvm_unreachable("Not valid compound assignment operators");
5991 }
5992
5993 llvm_unreachable("Unhandled compound assignment operator");
5994}
5995
5997 // The total (signed) byte offset for the GEP.
5998 llvm::Value *TotalOffset;
5999 // The offset overflow flag - true if the total offset overflows.
6000 llvm::Value *OffsetOverflows;
6001};
6002
6003/// Evaluate given GEPVal, which is either an inbounds GEP, or a constant,
6004/// and compute the total offset it applies from it's base pointer BasePtr.
6005/// Returns offset in bytes and a boolean flag whether an overflow happened
6006/// during evaluation.
6008 llvm::LLVMContext &VMContext,
6009 CodeGenModule &CGM,
6010 CGBuilderTy &Builder) {
6011 const auto &DL = CGM.getDataLayout();
6012
6013 // The total (signed) byte offset for the GEP.
6014 llvm::Value *TotalOffset = nullptr;
6015
6016 // Was the GEP already reduced to a constant?
6017 if (isa<llvm::Constant>(GEPVal)) {
6018 // Compute the offset by casting both pointers to integers and subtracting:
6019 // GEPVal = BasePtr + ptr(Offset) <--> Offset = int(GEPVal) - int(BasePtr)
6020 Value *BasePtr_int =
6021 Builder.CreatePtrToInt(BasePtr, DL.getIntPtrType(BasePtr->getType()));
6022 Value *GEPVal_int =
6023 Builder.CreatePtrToInt(GEPVal, DL.getIntPtrType(GEPVal->getType()));
6024 TotalOffset = Builder.CreateSub(GEPVal_int, BasePtr_int);
6025 return {TotalOffset, /*OffsetOverflows=*/Builder.getFalse()};
6026 }
6027
6028 auto *GEP = cast<llvm::GEPOperator>(GEPVal);
6029 assert(GEP->getPointerOperand() == BasePtr &&
6030 "BasePtr must be the base of the GEP.");
6031 assert(GEP->isInBounds() && "Expected inbounds GEP");
6032
6033 auto *IntPtrTy = DL.getIntPtrType(GEP->getPointerOperandType());
6034
6035 // Grab references to the signed add/mul overflow intrinsics for intptr_t.
6036 auto *Zero = llvm::ConstantInt::getNullValue(IntPtrTy);
6037 auto *SAddIntrinsic =
6038 CGM.getIntrinsic(llvm::Intrinsic::sadd_with_overflow, IntPtrTy);
6039 auto *SMulIntrinsic =
6040 CGM.getIntrinsic(llvm::Intrinsic::smul_with_overflow, IntPtrTy);
6041
6042 // The offset overflow flag - true if the total offset overflows.
6043 llvm::Value *OffsetOverflows = Builder.getFalse();
6044
6045 /// Return the result of the given binary operation.
6046 auto eval = [&](BinaryOperator::Opcode Opcode, llvm::Value *LHS,
6047 llvm::Value *RHS) -> llvm::Value * {
6048 assert((Opcode == BO_Add || Opcode == BO_Mul) && "Can't eval binop");
6049
6050 // If the operands are constants, return a constant result.
6051 if (auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS)) {
6052 if (auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS)) {
6053 llvm::APInt N;
6054 bool HasOverflow = mayHaveIntegerOverflow(LHSCI, RHSCI, Opcode,
6055 /*Signed=*/true, N);
6056 if (HasOverflow)
6057 OffsetOverflows = Builder.getTrue();
6058 return llvm::ConstantInt::get(VMContext, N);
6059 }
6060 }
6061
6062 // Otherwise, compute the result with checked arithmetic.
6063 auto *ResultAndOverflow = Builder.CreateCall(
6064 (Opcode == BO_Add) ? SAddIntrinsic : SMulIntrinsic, {LHS, RHS});
6065 OffsetOverflows = Builder.CreateOr(
6066 Builder.CreateExtractValue(ResultAndOverflow, 1), OffsetOverflows);
6067 return Builder.CreateExtractValue(ResultAndOverflow, 0);
6068 };
6069
6070 // Determine the total byte offset by looking at each GEP operand.
6071 for (auto GTI = llvm::gep_type_begin(GEP), GTE = llvm::gep_type_end(GEP);
6072 GTI != GTE; ++GTI) {
6073 llvm::Value *LocalOffset;
6074 auto *Index = GTI.getOperand();
6075 // Compute the local offset contributed by this indexing step:
6076 if (auto *STy = GTI.getStructTypeOrNull()) {
6077 // For struct indexing, the local offset is the byte position of the
6078 // specified field.
6079 unsigned FieldNo = cast<llvm::ConstantInt>(Index)->getZExtValue();
6080 LocalOffset = llvm::ConstantInt::get(
6081 IntPtrTy, DL.getStructLayout(STy)->getElementOffset(FieldNo));
6082 } else {
6083 // Otherwise this is array-like indexing. The local offset is the index
6084 // multiplied by the element size.
6085 auto *ElementSize =
6086 llvm::ConstantInt::get(IntPtrTy, GTI.getSequentialElementStride(DL));
6087 auto *IndexS = Builder.CreateIntCast(Index, IntPtrTy, /*isSigned=*/true);
6088 LocalOffset = eval(BO_Mul, ElementSize, IndexS);
6089 }
6090
6091 // If this is the first offset, set it as the total offset. Otherwise, add
6092 // the local offset into the running total.
6093 if (!TotalOffset || TotalOffset == Zero)
6094 TotalOffset = LocalOffset;
6095 else
6096 TotalOffset = eval(BO_Add, TotalOffset, LocalOffset);
6097 }
6098
6099 return {TotalOffset, OffsetOverflows};
6100}
6101
6102Value *
6103CodeGenFunction::EmitCheckedInBoundsGEP(llvm::Type *ElemTy, Value *Ptr,
6104 ArrayRef<Value *> IdxList,
6105 bool SignedIndices, bool IsSubtraction,
6106 SourceLocation Loc, const Twine &Name) {
6107 llvm::Type *PtrTy = Ptr->getType();
6108
6109 llvm::GEPNoWrapFlags NWFlags = llvm::GEPNoWrapFlags::inBounds();
6110 if (!SignedIndices && !IsSubtraction)
6111 NWFlags |= llvm::GEPNoWrapFlags::noUnsignedWrap();
6112
6113 Value *GEPVal = Builder.CreateGEP(ElemTy, Ptr, IdxList, Name, NWFlags);
6114
6115 // If the pointer overflow sanitizer isn't enabled, do nothing.
6116 if (!SanOpts.has(SanitizerKind::PointerOverflow))
6117 return GEPVal;
6118
6119 // Perform nullptr-and-offset check unless the nullptr is defined.
6120 bool PerformNullCheck = !NullPointerIsDefined(
6121 Builder.GetInsertBlock()->getParent(), PtrTy->getPointerAddressSpace());
6122 // Check for overflows unless the GEP got constant-folded,
6123 // and only in the default address space
6124 bool PerformOverflowCheck =
6125 !isa<llvm::Constant>(GEPVal) && PtrTy->getPointerAddressSpace() == 0;
6126
6127 if (!(PerformNullCheck || PerformOverflowCheck))
6128 return GEPVal;
6129
6130 const auto &DL = CGM.getDataLayout();
6131
6132 auto CheckOrdinal = SanitizerKind::SO_PointerOverflow;
6133 auto CheckHandler = SanitizerHandler::PointerOverflow;
6134 SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
6135 llvm::Type *IntPtrTy = DL.getIntPtrType(PtrTy);
6136
6137 GEPOffsetAndOverflow EvaluatedGEP =
6138 EmitGEPOffsetInBytes(Ptr, GEPVal, getLLVMContext(), CGM, Builder);
6139
6140 assert((!isa<llvm::Constant>(EvaluatedGEP.TotalOffset) ||
6141 EvaluatedGEP.OffsetOverflows == Builder.getFalse()) &&
6142 "If the offset got constant-folded, we don't expect that there was an "
6143 "overflow.");
6144
6145 auto *Zero = llvm::ConstantInt::getNullValue(IntPtrTy);
6146
6147 // Common case: if the total offset is zero, don't emit a check.
6148 if (EvaluatedGEP.TotalOffset == Zero)
6149 return GEPVal;
6150
6151 // Now that we've computed the total offset, add it to the base pointer (with
6152 // wrapping semantics).
6153 auto *IntPtr = Builder.CreatePtrToInt(Ptr, IntPtrTy);
6154 auto *ComputedGEP = Builder.CreateAdd(IntPtr, EvaluatedGEP.TotalOffset);
6155
6156 llvm::SmallVector<std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>,
6157 2>
6158 Checks;
6159
6160 if (PerformNullCheck) {
6161 // If the base pointer evaluates to a null pointer value,
6162 // the only valid pointer this inbounds GEP can produce is also
6163 // a null pointer, so the offset must also evaluate to zero.
6164 // Likewise, if we have non-zero base pointer, we can not get null pointer
6165 // as a result, so the offset can not be -intptr_t(BasePtr).
6166 // In other words, both pointers are either null, or both are non-null,
6167 // or the behaviour is undefined.
6168 auto *BaseIsNotNullptr = Builder.CreateIsNotNull(Ptr);
6169 auto *ResultIsNotNullptr = Builder.CreateIsNotNull(ComputedGEP);
6170 auto *Valid = Builder.CreateICmpEQ(BaseIsNotNullptr, ResultIsNotNullptr);
6171 Checks.emplace_back(Valid, CheckOrdinal);
6172 }
6173
6174 if (PerformOverflowCheck) {
6175 // The GEP is valid if:
6176 // 1) The total offset doesn't overflow, and
6177 // 2) The sign of the difference between the computed address and the base
6178 // pointer matches the sign of the total offset.
6179 llvm::Value *ValidGEP;
6180 auto *NoOffsetOverflow = Builder.CreateNot(EvaluatedGEP.OffsetOverflows);
6181 if (SignedIndices) {
6182 // GEP is computed as `unsigned base + signed offset`, therefore:
6183 // * If offset was positive, then the computed pointer can not be
6184 // [unsigned] less than the base pointer, unless it overflowed.
6185 // * If offset was negative, then the computed pointer can not be
6186 // [unsigned] greater than the bas pointere, unless it overflowed.
6187 auto *PosOrZeroValid = Builder.CreateICmpUGE(ComputedGEP, IntPtr);
6188 auto *PosOrZeroOffset =
6189 Builder.CreateICmpSGE(EvaluatedGEP.TotalOffset, Zero);
6190 llvm::Value *NegValid = Builder.CreateICmpULT(ComputedGEP, IntPtr);
6191 ValidGEP =
6192 Builder.CreateSelect(PosOrZeroOffset, PosOrZeroValid, NegValid);
6193 } else if (!IsSubtraction) {
6194 // GEP is computed as `unsigned base + unsigned offset`, therefore the
6195 // computed pointer can not be [unsigned] less than base pointer,
6196 // unless there was an overflow.
6197 // Equivalent to `@llvm.uadd.with.overflow(%base, %offset)`.
6198 ValidGEP = Builder.CreateICmpUGE(ComputedGEP, IntPtr);
6199 } else {
6200 // GEP is computed as `unsigned base - unsigned offset`, therefore the
6201 // computed pointer can not be [unsigned] greater than base pointer,
6202 // unless there was an overflow.
6203 // Equivalent to `@llvm.usub.with.overflow(%base, sub(0, %offset))`.
6204 ValidGEP = Builder.CreateICmpULE(ComputedGEP, IntPtr);
6205 }
6206 ValidGEP = Builder.CreateAnd(ValidGEP, NoOffsetOverflow);
6207 Checks.emplace_back(ValidGEP, CheckOrdinal);
6208 }
6209
6210 assert(!Checks.empty() && "Should have produced some checks.");
6211
6212 llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc)};
6213 // Pass the computed GEP to the runtime to avoid emitting poisoned arguments.
6214 llvm::Value *DynamicArgs[] = {IntPtr, ComputedGEP};
6215 EmitCheck(Checks, CheckHandler, StaticArgs, DynamicArgs);
6216
6217 return GEPVal;
6218}
6219
6221 Address Addr, ArrayRef<Value *> IdxList, llvm::Type *elementType,
6222 bool SignedIndices, bool IsSubtraction, SourceLocation Loc, CharUnits Align,
6223 const Twine &Name) {
6224 if (!SanOpts.has(SanitizerKind::PointerOverflow)) {
6225 llvm::GEPNoWrapFlags NWFlags = llvm::GEPNoWrapFlags::inBounds();
6226 if (!SignedIndices && !IsSubtraction)
6227 NWFlags |= llvm::GEPNoWrapFlags::noUnsignedWrap();
6228
6229 return Builder.CreateGEP(Addr, IdxList, elementType, Align, Name, NWFlags);
6230 }
6231
6232 return RawAddress(
6233 EmitCheckedInBoundsGEP(Addr.getElementType(), Addr.emitRawPointer(*this),
6234 IdxList, SignedIndices, IsSubtraction, Loc, Name),
6235 elementType, Align);
6236}
Defines the clang::ASTContext interface.
#define V(N, I)
static void EmitHLSLElementwiseCast(CodeGenFunction &CGF, Address DestVal, QualType DestTy, Address SrcVal, QualType SrcTy, SourceLocation Loc)
static llvm::Value * EmitCompare(CGBuilderTy &Builder, CodeGenFunction &CGF, const BinaryOperator *E, llvm::Value *LHS, llvm::Value *RHS, CompareKind Kind, const char *NameSuffix="")
static int getAsInt32(llvm::ConstantInt *C, llvm::Type *I32Ty)
static llvm::Value * EmitIsNegativeTestHelper(Value *V, QualType VType, const char *Name, CGBuilderTy &Builder)
static Value * createCastsForTypeOfSameSize(CGBuilderTy &Builder, const llvm::DataLayout &DL, Value *Src, llvm::Type *DstTy, StringRef Name="")
static bool isLValueKnownNonNull(CodeGenFunction &CGF, const Expr *E)
static bool matchesPostDecrInWhile(const UnaryOperator *UO, bool isInc, bool isPre, ASTContext &Ctx)
For the purposes of overflow pattern exclusion, does this match the "while(i--)" pattern?
IntrinsicType
@ VCMPGT
@ VCMPEQ
static llvm::Intrinsic::ID GetIntrinsic(IntrinsicType IT, BuiltinType::Kind ElemKind)
static GEPOffsetAndOverflow EmitGEPOffsetInBytes(Value *BasePtr, Value *GEPVal, llvm::LLVMContext &VMContext, CodeGenModule &CGM, CGBuilderTy &Builder)
Evaluate given GEPVal, which is either an inbounds GEP, or a constant, and compute the total offset i...
static bool isDeclRefKnownNonNull(CodeGenFunction &CGF, const ValueDecl *D)
static bool PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(QualType SrcType, QualType DstType)
static std::pair< ScalarExprEmitter::ImplicitConversionCheckKind, std::pair< llvm::Value *, SanitizerKind::SanitizerOrdinal > > EmitBitfieldTruncationCheckHelper(Value *Src, QualType SrcType, Value *Dst, QualType DstType, CGBuilderTy &Builder)
static Value * buildFMulAdd(llvm::Instruction *MulOp, Value *Addend, const CodeGenFunction &CGF, CGBuilderTy &Builder, bool negMul, bool negAdd)
static std::pair< ScalarExprEmitter::ImplicitConversionCheckKind, std::pair< llvm::Value *, SanitizerKind::SanitizerOrdinal > > EmitBitfieldSignChangeCheckHelper(Value *Src, QualType SrcType, Value *Dst, QualType DstType, CGBuilderTy &Builder)
static std::pair< ScalarExprEmitter::ImplicitConversionCheckKind, std::pair< llvm::Value *, SanitizerKind::SanitizerOrdinal > > EmitIntegerSignChangeCheckHelper(Value *Src, QualType SrcType, Value *Dst, QualType DstType, CGBuilderTy &Builder)
static int getMaskElt(llvm::ShuffleVectorInst *SVI, unsigned Idx, unsigned Off)
static std::pair< ScalarExprEmitter::ImplicitConversionCheckKind, std::pair< llvm::Value *, SanitizerKind::SanitizerOrdinal > > EmitIntegerTruncationCheckHelper(Value *Src, QualType SrcType, Value *Dst, QualType DstType, CGBuilderTy &Builder)
static Value * ConvertVec3AndVec4(CGBuilderTy &Builder, CodeGenFunction &CGF, Value *Src, unsigned NumElementsDst)
static Value * tryEmitFMulAdd(const BinOpInfo &op, const CodeGenFunction &CGF, CGBuilderTy &Builder, bool isSub=false)
static BinOpInfo createBinOpInfoFromIncDec(const UnaryOperator *E, llvm::Value *InVal, bool IsInc, FPOptions FPFeatures)
#define HANDLE_BINOP(OP)
#define COMPOUND_OP(Op)
#define HANDLEBINOP(OP)
static mlir::Value emitPointerArithmetic(CIRGenFunction &cgf, const BinOpInfo &op, bool isSubtraction)
Emit pointer + index arithmetic.
static bool isCheapEnoughToEvaluateUnconditionally(const Expr *e, CIRGenFunction &cgf)
Return true if the specified expression is cheap enough and side-effect-free enough to evaluate uncon...
static std::optional< QualType > getUnwidenedIntegerType(const ASTContext &astContext, const Expr *e)
If e is a widened promoted integer, get its base (unpromoted) type.
#define VISITCOMP(CODE)
llvm::APSInt APSInt
Definition Compiler.cpp:23
static Decl::Kind getKind(const Decl *D)
FormatToken * Previous
The previous token in the unwrapped line.
SanitizerHandler
static QualType getPointeeType(const MemRegion *R)
This file contains the declaration of TrapReasonBuilder and related classes.
llvm::APInt getValue() const
APSInt & getInt()
Definition APValue.h:489
bool isInt() const
Definition APValue.h:467
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition ASTContext.h:220
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
ParentMapContext & getParentMapContext()
Returns the dynamic AST node parent map context.
unsigned getIntWidth(QualType T) const
const llvm::fltSemantics & getFloatTypeSemantics(QualType T) const
Return the APFloat 'semantics' for the specified scalar floating point type.
CanQualType FloatTy
const ASTRecordLayout & getASTRecordLayout(const RecordDecl *D) const
Get or compute information about the layout of the specified record (struct/union/class) D,...
CanQualType getCanonicalType(QualType T) const
Return the canonical (structural) type corresponding to the specified potentially non-canonical type ...
QualType getVectorType(QualType VectorType, unsigned NumElts, VectorKind VecKind) const
Return the unique reference to a vector type of the specified element type and size.
QualType getPointerType(QualType T) const
Return the uniqued reference to the type for a pointer to the specified type.
const LangOptions & getLangOpts() const
Definition ASTContext.h:926
bool isTypeIgnoredBySanitizer(const SanitizerMask &Mask, const QualType &Ty) const
Check if a type can have its sanitizer instrumentation elided based on its presence within an ignorel...
CanQualType BoolTy
unsigned getOpenMPDefaultSimdAlign(QualType T) const
Get default simd alignment of the specified complete type in bits.
llvm::FixedPointSemantics getFixedPointSemantics(QualType Ty) const
bool hasSameUnqualifiedType(QualType T1, QualType T2) const
Determine whether the given types are equivalent after cvr-qualifiers have been removed.
const ArrayType * getAsArrayType(QualType T) const
Type Query functions.
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
QualType getPromotedIntegerType(QualType PromotableType) const
Return the type that PromotableType will promote to: C99 6.3.1.1p2, assuming that PromotableType is a...
const VariableArrayType * getAsVariableArrayType(QualType T) const
QualType getComplexType(QualType T) const
Return the uniqued reference to the type for a complex number with the specified element type.
const TargetInfo & getTargetInfo() const
Definition ASTContext.h:891
CharUnits toCharUnitsFromBits(int64_t BitSize) const
Convert a size in bits to a size in characters.
unsigned getTargetAddressSpace(LangAS AS) const
bool isPromotableIntegerType(QualType T) const
More type predicates useful for type checking/promotion.
uint64_t getCharWidth() const
Return the size of the character type, in bits.
unsigned getFieldCount() const
getFieldCount - Get the number of fields in the layout.
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
CharUnits getBaseClassOffset(const CXXRecordDecl *Base) const
getBaseClassOffset - Get the offset, in chars, for the given base class.
Expr * getCond() const
getCond - Return the expression representing the condition for the ?
Definition Expr.h:4465
Expr * getTrueExpr() const
getTrueExpr - Return the subexpression representing the value of the expression if the condition eval...
Definition Expr.h:4471
Expr * getFalseExpr() const
getFalseExpr - Return the subexpression representing the value of the expression if the condition eva...
Definition Expr.h:4477
LabelDecl * getLabel() const
Definition Expr.h:4507
uint64_t getValue() const
Definition ExprCXX.h:3038
QualType getElementType() const
Definition TypeBase.h:3732
Expr * getSrcExpr() const
getSrcExpr - Return the Expr to be converted.
Definition Expr.h:6638
A builtin binary operation expression such as "x + y" or "x <= y".
Definition Expr.h:3972
Expr * getLHS() const
Definition Expr.h:4022
static Opcode getOpForCompoundAssignment(Opcode Opc)
Definition Expr.h:4119
bool isCompoundAssignmentOp() const
Definition Expr.h:4116
SourceLocation getExprLoc() const
Definition Expr.h:4013
bool isShiftOp() const
Definition Expr.h:4061
Expr * getRHS() const
Definition Expr.h:4024
bool isShiftAssignOp() const
Definition Expr.h:4130
FPOptions getFPFeaturesInEffect(const LangOptions &LO) const
Get the FP features status of this operator.
Definition Expr.h:4185
static bool isNullPointerArithmeticExtension(ASTContext &Ctx, Opcode Opc, const Expr *LHS, const Expr *RHS)
Return true if a binary operator using the specified opcode and operands would match the 'p = (i8*)nu...
Definition Expr.cpp:2200
Opcode getOpcode() const
Definition Expr.h:4017
BinaryOperatorKind Opcode
Definition Expr.h:3977
bool isVirtual() const
Determines whether the base class is a virtual base class (or not).
Definition DeclCXX.h:203
QualType getType() const
Retrieves the type of the base class.
Definition DeclCXX.h:249
bool getValue() const
Definition ExprCXX.h:740
Expr * getExpr()
Get the initialization expression that will be used.
Definition ExprCXX.cpp:1105
bool getValue() const
Definition ExprCXX.h:4326
Expr * getSemanticForm()
Get an equivalent semantic form for this expression.
Definition ExprCXX.h:304
QualType getCallReturnType(const ASTContext &Ctx) const
getCallReturnType - Get the return type of the call expr.
Definition Expr.cpp:1599
CastExpr - Base class for type casts, including both implicit casts (ImplicitCastExpr) and explicit c...
Definition Expr.h:3610
path_iterator path_begin()
Definition Expr.h:3680
CastKind getCastKind() const
Definition Expr.h:3654
bool changesVolatileQualification() const
Return.
Definition Expr.h:3744
path_iterator path_end()
Definition Expr.h:3681
Expr * getSubExpr()
Definition Expr.h:3660
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition CharUnits.h:185
static CharUnits One()
One - Construct a CharUnits quantity of one.
Definition CharUnits.h:58
bool isOne() const
isOne - Test whether the quantity equals one.
Definition CharUnits.h:125
unsigned getValue() const
Definition Expr.h:1629
Expr * getChosenSubExpr() const
getChosenSubExpr - Return the subexpression chosen according to the condition.
Definition Expr.h:4818
bool hasProfileClangInstr() const
Check if Clang profile instrumentation is on.
SanitizerSet SanitizeTrap
Set of sanitizer checks that trap rather than diagnose.
Like RawAddress, an abstract representation of an aligned address, but the pointer contained in this ...
Definition Address.h:128
static Address invalid()
Definition Address.h:176
bool isValid() const
Definition Address.h:177
A scoped helper to set the current source atom group for CGDebugInfo::addInstToCurrentSourceAtom.
static ApplyDebugLocation CreateArtificial(CodeGenFunction &CGF)
Apply TemporaryLocation if it is valid.
static ApplyDebugLocation CreateEmpty(CodeGenFunction &CGF)
Set the IRBuilder to not attach debug locations.
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
Definition CGBuilder.h:112
virtual llvm::Constant * EmitNullMemberPointer(const MemberPointerType *MPT)
Create a null member pointer of the given type.
Definition CGCXXABI.cpp:103
virtual llvm::Value * EmitMemberPointerIsNotNull(CodeGenFunction &CGF, llvm::Value *MemPtr, const MemberPointerType *MPT)
Determine if a member pointer is non-null. Returns an i1.
Definition CGCXXABI.cpp:95
virtual llvm::Value * EmitMemberPointerComparison(CodeGenFunction &CGF, llvm::Value *L, llvm::Value *R, const MemberPointerType *MPT, bool Inequality)
Emit a comparison between two member pointers. Returns an i1.
Definition CGCXXABI.cpp:85
virtual llvm::Value * EmitMemberPointerConversion(CodeGenFunction &CGF, const CastExpr *E, llvm::Value *Src)
Perform a derived-to-base, base-to-derived, or bitcast member pointer conversion.
Definition CGCXXABI.cpp:72
void EmitPseudoVariable(CGBuilderTy &Builder, llvm::Instruction *Value, QualType Ty)
Emit a pseudo variable and debug info for an intermediate value if it does not correspond to a variab...
void addHeapAllocSiteMetadata(llvm::CallBase *CallSite, QualType AllocatedTy, SourceLocation Loc)
Add heapallocsite metadata for MSAllocator calls.
void emitInitListOpaqueValues(CodeGenFunction &CGF, InitListExpr *E)
virtual void checkAndEmitLastprivateConditional(CodeGenFunction &CGF, const Expr *LHS)
Checks if the provided LVal is lastprivate conditional and emits the code to update the value of the ...
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
llvm::Value * EmitObjCConsumeObject(QualType T, llvm::Value *Ptr)
Produce the code for a CK_ARCConsumeObject.
Definition CGObjC.cpp:2152
void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock, llvm::BasicBlock *FalseBlock, uint64_t TrueCount, Stmt::Likelihood LH=Stmt::LH_None, const Expr *ConditionalOp=nullptr, const VarDecl *ConditionalDecl=nullptr)
EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g.
RValue EmitObjCMessageExpr(const ObjCMessageExpr *E, ReturnValueSlot Return=ReturnValueSlot())
Definition CGObjC.cpp:573
llvm::Value * emitBoolVecConversion(llvm::Value *SrcVec, unsigned NumElementsDst, const llvm::Twine &Name="")
CurrentSourceLocExprScope CurSourceLocExprScope
Source location information about the default argument or member initializer expression we're evaluat...
llvm::Value * EmitARCReclaimReturnedObject(const Expr *e, bool allowUnsafeClaim)
Definition CGObjC.cpp:3089
std::pair< LValue, llvm::Value * > EmitARCStoreAutoreleasing(const BinaryOperator *e)
Definition CGObjC.cpp:3679
void SetDivFPAccuracy(llvm::Value *Val)
Set the minimum required accuracy of the given sqrt operation based on CodeGenOpts.
Definition CGExpr.cpp:6678
llvm::Value * EmitObjCSelectorExpr(const ObjCSelectorExpr *E)
Emit a selector.
Definition CGObjC.cpp:257
SanitizerSet SanOpts
Sanitizers enabled for this function.
RawAddress CreateIRTemp(QualType T, const Twine &Name="tmp")
CreateIRTemp - Create a temporary IR object of the given type, with appropriate alignment.
Definition CGExpr.cpp:181
static bool ContainsLabel(const Stmt *S, bool IgnoreCaseStmts=false)
ContainsLabel - Return true if the statement contains a label in it.
llvm::Value * EmitObjCDictionaryLiteral(const ObjCDictionaryLiteral *E)
Definition CGObjC.cpp:251
llvm::BlockAddress * GetAddrOfLabel(const LabelDecl *L)
static bool hasScalarEvaluationKind(QualType T)
llvm::Type * ConvertType(QualType T)
llvm::Value * EmitObjCProtocolExpr(const ObjCProtocolExpr *E)
Definition CGObjC.cpp:265
llvm::Value * EmitPointerAuthQualify(PointerAuthQualifier Qualifier, llvm::Value *Pointer, QualType ValueType, Address StorageAddress, bool IsKnownNonNull)
void EmitCXXThrowExpr(const CXXThrowExpr *E, bool KeepInsertionPoint=true)
LValue EmitObjCIsaExpr(const ObjCIsaExpr *E)
void EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst, llvm::Value **Result=nullptr)
EmitStoreThroughBitfieldLValue - Store Src into Dst with same constraints as EmitStoreThroughLValue.
Definition CGExpr.cpp:2680
llvm::Constant * EmitCheckSourceLocation(SourceLocation Loc)
Emit a description of a source location in a format suitable for passing to a runtime sanitizer handl...
Definition CGExpr.cpp:3648
llvm::Value * EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV, bool isInc, bool isPre)
RValue EmitVAArg(VAArgExpr *VE, Address &VAListAddr, AggValueSlot Slot=AggValueSlot::ignored())
Generate code to get an argument from the passed in pointer and update it accordingly.
Definition CGCall.cpp:6288
llvm::Value * getAsNaturalPointerTo(Address Addr, QualType PointeeType)
void EmitBoundsCheck(const Expr *E, const Expr *Base, llvm::Value *Index, QualType IndexType, bool Accessed)
Emit a check that Base points into an array object, which we can access at index Index.
Definition CGExpr.cpp:1236
RValue EmitPseudoObjectRValue(const PseudoObjectExpr *e, AggValueSlot slot=AggValueSlot::ignored())
Definition CGExpr.cpp:6778
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
void maybeUpdateMCDCTestVectorBitmap(const Expr *E)
Increment the profiler's counter for the given expression by StepV.
void EmitCXXDeleteExpr(const CXXDeleteExpr *E)
llvm::Value * EmitObjCArrayLiteral(const ObjCArrayLiteral *E)
Definition CGObjC.cpp:247
llvm::Value * EmitPromotedScalarExpr(const Expr *E, QualType PromotionType)
const LangOptions & getLangOpts() const
llvm::Value * EmitARCStoreStrong(LValue lvalue, llvm::Value *value, bool resultIgnored)
Store into a strong object.
Definition CGObjC.cpp:2545
bool isPointerKnownNonNull(const Expr *E)
Address GetAddressOfDerivedClass(Address Value, const CXXRecordDecl *Derived, CastExpr::path_const_iterator PathBegin, CastExpr::path_const_iterator PathEnd, bool NullCheckValue)
Definition CGClass.cpp:394
void EmitNullabilityCheck(LValue LHS, llvm::Value *RHS, SourceLocation Loc)
Given an assignment *LHS = RHS, emit a test that checks if RHS is nonnull, if LHS is marked _Nonnull.
Definition CGDecl.cpp:765
llvm::Value * EmitPointerAuthUnqualify(PointerAuthQualifier Qualifier, llvm::Value *Pointer, QualType PointerType, Address StorageAddress, bool IsKnownNonNull)
std::pair< RValue, llvm::Value * > EmitAtomicCompareExchange(LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc, llvm::AtomicOrdering Success=llvm::AtomicOrdering::SequentiallyConsistent, llvm::AtomicOrdering Failure=llvm::AtomicOrdering::SequentiallyConsistent, bool IsWeak=false, AggValueSlot Slot=AggValueSlot::ignored())
Emit a compare-and-exchange op for atomic type.
void EmitVTablePtrCheckForCast(QualType T, Address Derived, bool MayBeNull, CFITypeCheckKind TCK, SourceLocation Loc)
Derived is the presumed address of an object of type T after a cast.
Definition CGClass.cpp:2890
TypeCheckKind
Situations in which we might emit a check for the suitability of a pointer or glvalue.
@ TCK_DowncastPointer
Checking the operand of a static_cast to a derived pointer type.
@ TCK_Store
Checking the destination of a store. Must be suitably sized and aligned.
@ TCK_Load
Checking the operand of a load. Must be suitably sized and aligned.
llvm::Value * EmitCXXNewExpr(const CXXNewExpr *E)
void EmitBitfieldConversionCheck(llvm::Value *Src, QualType SrcType, llvm::Value *Dst, QualType DstType, const CGBitFieldInfo &Info, SourceLocation Loc)
Emit a check that an [implicit] conversion of a bitfield.
std::pair< LValue, llvm::Value * > EmitARCStoreUnsafeUnretained(const BinaryOperator *e, bool ignored)
Definition CGObjC.cpp:3629
llvm::Constant * EmitCheckTypeDescriptor(QualType T)
Emit a description of a type in a format suitable for passing to a runtime sanitizer handler.
Definition CGExpr.cpp:3538
LValue EmitScalarCompoundAssignWithComplex(const CompoundAssignOperator *E, llvm::Value *&Result)
RawAddress CreateDefaultAlignTempAlloca(llvm::Type *Ty, const Twine &Name="tmp")
CreateDefaultAlignedTempAlloca - This creates an alloca with the default ABI alignment of the given L...
Definition CGExpr.cpp:174
const TargetInfo & getTarget() const
LValue EmitCompoundAssignmentLValue(const CompoundAssignOperator *E)
llvm::Value * EmitBlockCopyAndAutorelease(llvm::Value *Block, QualType Ty)
Definition CGObjC.cpp:3953
void EmitIgnoredExpr(const Expr *E)
EmitIgnoredExpr - Emit an expression in a context which ignores the result.
Definition CGExpr.cpp:242
RValue EmitCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue=ReturnValueSlot(), llvm::CallBase **CallOrInvoke=nullptr)
Definition CGExpr.cpp:5931
RValue EmitLoadOfLValue(LValue V, SourceLocation Loc)
EmitLoadOfLValue - Given an expression that represents a value lvalue, this method emits the address ...
Definition CGExpr.cpp:2336
llvm::Value * EmitComplexToScalarConversion(ComplexPairTy Src, QualType SrcTy, QualType DstTy, SourceLocation Loc)
Emit a conversion from the specified complex type to the specified destination type,...
static bool isInstrumentedCondition(const Expr *C)
isInstrumentedCondition - Determine whether the given condition is an instrumentable condition (i....
VlaSizePair getVLAElements1D(const VariableArrayType *vla)
Return the number of elements for a single dimension for the given array type.
llvm::Value * EmitObjCBoxedExpr(const ObjCBoxedExpr *E)
EmitObjCBoxedExpr - This routine generates code to call the appropriate expression boxing method.
Definition CGObjC.cpp:64
llvm::Value * EvaluateExprAsBool(const Expr *E)
EvaluateExprAsBool - Perform the usual unary conversions on the specified expression and compare the ...
Definition CGExpr.cpp:223
void maybeResetMCDCCondBitmap(const Expr *E)
Zero-init the MCDC temp value.
RValue EmitCoyieldExpr(const CoyieldExpr &E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
void EmitCheck(ArrayRef< std::pair< llvm::Value *, SanitizerKind::SanitizerOrdinal > > Checked, SanitizerHandler Check, ArrayRef< llvm::Constant * > StaticArgs, ArrayRef< llvm::Value * > DynamicArgs, const TrapReason *TR=nullptr)
Create a basic block that will either trap or call a handler function in the UBSan runtime with the p...
Definition CGExpr.cpp:3788
SmallVector< const BinaryOperator *, 16 > MCDCLogOpStack
Stack to track the Logical Operator recursion nest for MC/DC.
RValue getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its RValue mapping if it exists, otherwise create one.
Definition CGExpr.cpp:5884
llvm::Value * emitScalarConstant(const ConstantEmission &Constant, Expr *E)
Definition CGExpr.cpp:1967
llvm::Value * EmitARCRetainScalarExpr(const Expr *expr)
EmitARCRetainScalarExpr - Semantically equivalent to EmitARCRetainObject(e->getType(),...
Definition CGObjC.cpp:3493
llvm::Value * EmitBlockLiteral(const BlockExpr *)
Emit block literal.
Definition CGBlocks.cpp:764
llvm::Value * EmitToMemory(llvm::Value *Value, QualType Ty)
EmitToMemory - Change a scalar value from its value representation to its in-memory representation.
Definition CGExpr.cpp:2153
void maybeUpdateMCDCCondBitmap(const Expr *E, llvm::Value *Val)
Update the MCDC temp value with the condition's evaluated result.
LValue getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its LValue mapping if it exists, otherwise create one.
Definition CGExpr.cpp:5870
ComplexPairTy EmitComplexExpr(const Expr *E, bool IgnoreReal=false, bool IgnoreImag=false)
EmitComplexExpr - Emit the computation of the specified expression of complex type,...
void incrementProfileCounter(const Stmt *S, llvm::Value *StepV=nullptr)
Increment the profiler's counter for the given statement by StepV.
VlaSizePair getVLASize(const VariableArrayType *vla)
Returns an LLVM value that corresponds to the size, in non-variably-sized elements,...
llvm::CallInst * EmitNounwindRuntimeCall(llvm::FunctionCallee callee, const Twine &name="")
llvm::Value * EmitWithOriginalRHSBitfieldAssignment(const BinaryOperator *E, llvm::Value **Previous, QualType *SrcType)
Retrieve the implicit cast expression of the rhs in a binary operator expression by passing pointers ...
llvm::Value * EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty, SourceLocation Loc, AlignmentSource Source=AlignmentSource::Type, bool isNontemporal=false)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false)
EmitStoreThroughLValue - Store the specified rvalue into the specified lvalue, where both are guarant...
Definition CGExpr.cpp:2533
Address EmitArrayToPointerDecay(const Expr *Array, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
Definition CGExpr.cpp:4228
Address EmitCompoundStmt(const CompoundStmt &S, bool GetLast=false, AggValueSlot AVS=AggValueSlot::ignored())
EmitCompoundStmt - Emit a compound statement {..} node.
Definition CGStmt.cpp:566
llvm::AtomicRMWInst * emitAtomicRMWInst(llvm::AtomicRMWInst::BinOp Op, Address Addr, llvm::Value *Val, llvm::AtomicOrdering Order=llvm::AtomicOrdering::SequentiallyConsistent, llvm::SyncScope::ID SSID=llvm::SyncScope::System, const AtomicExpr *AE=nullptr)
Emit an atomicrmw instruction, and applying relevant metadata when applicable.
llvm::Value * EmitPointerArithmetic(const BinaryOperator *BO, Expr *pointerOperand, llvm::Value *pointer, Expr *indexOperand, llvm::Value *index, bool isSubtraction)
Emit pointer + index arithmetic.
RValue EmitAnyExpr(const Expr *E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
EmitAnyExpr - Emit code to compute the specified expression which can have any type.
Definition CGExpr.cpp:264
uint64_t getCurrentProfileCount()
Get the profiler's current count.
llvm::Type * ConvertTypeForMem(QualType T)
RValue EmitAtomicExpr(AtomicExpr *E)
Definition CGAtomic.cpp:855
void markStmtMaybeUsed(const Stmt *S)
bool IsSanitizerScope
True if CodeGen currently emits code implementing sanitizer checks.
void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, LValue LV, QualType Type, SanitizerSet SkippedChecks=SanitizerSet(), llvm::Value *ArraySize=nullptr)
RValue EmitCoawaitExpr(const CoawaitExpr &E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
llvm::Value * authPointerToPointerCast(llvm::Value *ResultPtr, QualType SourceType, QualType DestType)
Address EmitPointerWithAlignment(const Expr *Addr, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitPointerWithAlignment - Given an expression with a pointer type, emit the value and compute our be...
Definition CGExpr.cpp:1515
void EmitBranch(llvm::BasicBlock *Block)
EmitBranch - Emit a branch to the specified basic block from the current insert block,...
Definition CGStmt.cpp:672
LValue EmitCheckedLValue(const Expr *E, TypeCheckKind TCK)
Same as EmitLValue but additionally we generate checking code to guard against undefined behavior.
Definition CGExpr.cpp:1596
llvm::Type * convertTypeForLoadStore(QualType ASTTy, llvm::Type *LLVMTy=nullptr)
bool sanitizePerformTypeCheck() const
Whether any type-checking sanitizers are enabled.
Definition CGExpr.cpp:734
llvm::Value * EmitCheckedInBoundsGEP(llvm::Type *ElemTy, llvm::Value *Ptr, ArrayRef< llvm::Value * > IdxList, bool SignedIndices, bool IsSubtraction, SourceLocation Loc, const Twine &Name="")
Same as IRBuilder::CreateInBoundsGEP, but additionally emits a check to detect undefined behavior whe...
llvm::Value * EmitBuiltinAvailable(const VersionTuple &Version)
Definition CGObjC.cpp:4033
void FlattenAccessAndType(Address Addr, QualType AddrTy, SmallVectorImpl< std::pair< Address, llvm::Value * > > &AccessList, SmallVectorImpl< QualType > &FlatTypes)
Definition CGExpr.cpp:6787
llvm::Value * EmitScalarExpr(const Expr *E, bool IgnoreResultAssign=false)
EmitScalarExpr - Emit the computation of the specified expression of LLVM scalar type,...
llvm::Value * EmitMatrixIndexExpr(const Expr *E)
Definition CGExpr.cpp:4768
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
void EmitTrapCheck(llvm::Value *Checked, SanitizerHandler CheckHandlerID, bool NoMerge=false, const TrapReason *TR=nullptr)
Create a basic block that will call the trap intrinsic, and emit a conditional branch to it,...
Definition CGExpr.cpp:4140
llvm::Value * LoadCXXThis()
LoadCXXThis - Load the value of 'this'.
llvm::Value * EmitFromMemory(llvm::Value *Value, QualType Ty)
EmitFromMemory - Change a scalar value from its memory representation to its value representation.
Definition CGExpr.cpp:2183
uint64_t getProfileCount(const Stmt *S)
Get the profiler's count for the given statement.
llvm::Value * getArrayInitIndex()
Get the index of the current ArrayInitLoopExpr, if any.
bool ConstantFoldsToSimpleInteger(const Expr *Cond, bool &Result, bool AllowLabels=false)
ConstantFoldsToSimpleInteger - If the specified expression does not fold to a constant,...
llvm::Value * EmitObjCStringLiteral(const ObjCStringLiteral *E)
Emits an instance of NSConstantString representing the object.
Definition CGObjC.cpp:51
void ErrorUnsupported(const Stmt *S, const char *Type)
ErrorUnsupported - Print out an error that codegen doesn't support the specified stmt yet.
std::pair< llvm::Value *, llvm::Value * > ComplexPairTy
ConstantEmission tryEmitAsConstant(const DeclRefExpr *RefExpr)
Try to emit a reference to the given value without producing it as an l-value.
Definition CGExpr.cpp:1864
LValue EmitLValue(const Expr *E, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitLValue - Emit code to compute a designator that specifies the location of the expression.
Definition CGExpr.cpp:1631
llvm::Value * EmitARCExtendBlockObject(const Expr *expr)
Definition CGObjC.cpp:3524
llvm::Value * EmitARCStoreWeak(Address addr, llvm::Value *value, bool ignored)
i8* @objc_storeWeak(i8** addr, i8* value) Returns value.
Definition CGObjC.cpp:2651
void EnsureInsertPoint()
EnsureInsertPoint - Ensure that an insertion point is defined so that emitted IR has a place to go.
ComplexPairTy EmitPromotedValue(ComplexPairTy result, QualType PromotionType)
void emitAlignmentAssumption(llvm::Value *PtrValue, QualType Ty, SourceLocation Loc, SourceLocation AssumptionLoc, llvm::Value *Alignment, llvm::Value *OffsetValue=nullptr)
llvm::Value * EmitScalarConversion(llvm::Value *Src, QualType SrcTy, QualType DstTy, SourceLocation Loc)
Emit a conversion from the specified type to the specified destination type, both of which are LLVM s...
void EmitVariablyModifiedType(QualType Ty)
EmitVLASize - Capture all the sizes for the VLA expressions in the given variably-modified type and s...
static bool ShouldNullCheckClassCastValue(const CastExpr *Cast)
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource Source=AlignmentSource::Type, bool isInit=false, bool isNontemporal=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
llvm::Value * EmitDynamicCast(Address V, const CXXDynamicCastExpr *DCE)
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
Definition CGStmt.cpp:652
This class organizes the cross-function state that is used while generating LLVM code.
void EmitExplicitCastExprType(const ExplicitCastExpr *E, CodeGenFunction *CGF=nullptr)
Emit type info if type of an expression is a variably modified type.
Definition CGExpr.cpp:1311
CGHLSLRuntime & getHLSLRuntime()
Return a reference to the configured HLSL runtime.
llvm::FunctionCallee CreateRuntimeFunction(llvm::FunctionType *Ty, StringRef Name, llvm::AttributeList ExtraAttrs=llvm::AttributeList(), bool Local=false, bool AssumeConvergent=false)
Create or return a runtime function declaration with the specified type and name.
TrapReasonBuilder BuildTrapReason(unsigned DiagID, TrapReason &TR)
Helper function to construct a TrapReasonBuilder.
llvm::Constant * getNullPointer(llvm::PointerType *T, QualType QT)
Get target specific null pointer.
const TargetInfo & getTarget() const
llvm::Constant * getMemberPointerConstant(const UnaryOperator *e)
const llvm::DataLayout & getDataLayout() const
CGOpenMPRuntime & getOpenMPRuntime()
Return a reference to the configured OpenMP runtime.
const TargetCodeGenInfo & getTargetCodeGenInfo()
const CodeGenOptions & getCodeGenOpts() const
llvm::Function * getIntrinsic(unsigned IID, ArrayRef< llvm::Type * > Tys={})
llvm::Value * createOpenCLIntToSamplerConversion(const Expr *E, CodeGenFunction &CGF)
llvm::Constant * EmitNullConstant(QualType T)
Return the result of value-initializing the given type, i.e.
LangAS GetGlobalConstantAddressSpace() const
Return the AST address space of constant literal, which is used to emit the constant literal as globa...
llvm::ConstantInt * getSize(CharUnits numChars)
Emit the given number of characters as a value of type size_t.
llvm::Type * ConvertType(QualType T)
ConvertType - Convert type T into a llvm::Type.
LValue - This represents an lvalue references.
Definition CGValue.h:182
bool isBitField() const
Definition CGValue.h:280
bool isVolatileQualified() const
Definition CGValue.h:285
const Qualifiers & getQuals() const
Definition CGValue.h:338
Address getAddress() const
Definition CGValue.h:361
QualType getType() const
Definition CGValue.h:291
const CGBitFieldInfo & getBitFieldInfo() const
Definition CGValue.h:424
static RValue get(llvm::Value *V)
Definition CGValue.h:98
bool isAggregate() const
Definition CGValue.h:66
Address getAggregateAddress() const
getAggregateAddr() - Return the Value* of the address of the aggregate.
Definition CGValue.h:83
llvm::Value * getScalarVal() const
getScalarVal() - Return the Value* of this scalar value.
Definition CGValue.h:71
Address performAddrSpaceCast(CodeGen::CodeGenFunction &CGF, Address Addr, LangAS SrcAddr, llvm::Type *DestTy, bool IsNonNull=false) const
CompoundAssignOperator - For compound assignments (e.g.
Definition Expr.h:4234
QualType getComputationLHSType() const
Definition Expr.h:4268
QualType getComputationResultType() const
Definition Expr.h:4271
bool isSatisfied() const
Whether or not the concept with the given arguments was satisfied when the expression was created.
Expr * getSrcExpr() const
getSrcExpr - Return the Expr to be converted.
Definition Expr.h:4743
T * getAttr() const
Definition DeclBase.h:573
ChildElementIter< false > begin()
Definition Expr.h:5166
size_t getDataElementCount() const
Definition Expr.h:5082
This represents one expression.
Definition Expr.h:112
bool EvaluateAsInt(EvalResult &Result, const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects, bool InConstantContext=false) const
EvaluateAsInt - Return true if this is a constant which we can fold and convert to an integer,...
bool isIntegerConstantExpr(const ASTContext &Ctx) const
bool isGLValue() const
Definition Expr.h:287
@ SE_AllowSideEffects
Allow any unmodeled side effect.
Definition Expr.h:674
llvm::APSInt EvaluateKnownConstInt(const ASTContext &Ctx) const
EvaluateKnownConstInt - Call EvaluateAsRValue and return the folded integer.
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3081
bool isEvaluatable(const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects) const
isEvaluatable - Call EvaluateAsRValue to see if this expression can be constant folded without side-e...
bool isPRValue() const
Definition Expr.h:285
bool EvaluateAsRValue(EvalResult &Result, const ASTContext &Ctx, bool InConstantContext=false) const
EvaluateAsRValue - Return true if this is a constant which we can fold to an rvalue using any crazy t...
Expr * IgnoreImpCasts() LLVM_READONLY
Skip past any implicit casts which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3065
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition Expr.cpp:273
bool refersToBitField() const
Returns true if this expression is a gl-value that potentially refers to a bit-field.
Definition Expr.h:476
QualType getType() const
Definition Expr.h:144
llvm::APInt getValue() const
Returns an internal integer representation of the literal.
Definition Expr.h:1575
llvm::APFloat getValue() const
Definition Expr.h:1666
const Expr * getSubExpr() const
Definition Expr.h:1062
ImplicitCastExpr - Allows us to explicitly represent implicit type conversions, which have no direct ...
Definition Expr.h:3787
unsigned getNumInits() const
Definition Expr.h:5263
bool hadArrayRangeDesignator() const
Definition Expr.h:5417
const Expr * getInit(unsigned Init) const
Definition Expr.h:5287
@ PostDecrInWhile
while (count–)
bool isSignedOverflowDefined() const
bool isOverflowPatternExcluded(OverflowPatternExclusionKind Kind) const
std::string OverflowHandler
The name of the handler function to be called when -ftrapv is specified.
Represents a matrix type, as defined in the Matrix Types clang extensions.
Definition TypeBase.h:4335
Expr * getBase() const
Definition Expr.h:3375
bool isArrow() const
Definition Expr.h:3482
VersionTuple getVersion() const
Definition ExprObjC.h:1726
ObjCIsaExpr - Represent X->isa and X.isa when X is an ObjC 'id' type.
Definition ExprObjC.h:1498
Expr * getBase() const
Definition ExprObjC.h:1523
SourceLocation getExprLoc() const LLVM_READONLY
Definition ExprObjC.h:1546
const ObjCMethodDecl * getMethodDecl() const
Definition ExprObjC.h:1364
QualType getReturnType() const
Definition DeclObjC.h:329
Represents a pointer to an Objective C object.
Definition TypeBase.h:7903
const ObjCObjectType * getObjectType() const
Gets the type pointed to by this ObjC pointer.
Definition TypeBase.h:7940
Expr * getIndexExpr(unsigned Idx)
Definition Expr.h:2586
const OffsetOfNode & getComponent(unsigned Idx) const
Definition Expr.h:2574
TypeSourceInfo * getTypeSourceInfo() const
Definition Expr.h:2567
unsigned getNumComponents() const
Definition Expr.h:2582
unsigned getArrayExprIndex() const
For an array element node, returns the index into the array of expressions.
Definition Expr.h:2479
FieldDecl * getField() const
For a field offsetof node, returns the field.
Definition Expr.h:2485
@ Array
An index into an array.
Definition Expr.h:2426
@ Identifier
A field in a dependent type, known only by its name.
Definition Expr.h:2430
@ Field
A field.
Definition Expr.h:2428
@ Base
An implicit indirection through a C++ base class, when the field found is in a base class.
Definition Expr.h:2433
Kind getKind() const
Determine what kind of offsetof node this is.
Definition Expr.h:2475
CXXBaseSpecifier * getBase() const
For a base class node, returns the base specifier.
Definition Expr.h:2495
SourceLocation getExprLoc() const LLVM_READONLY
Definition Expr.h:1208
Expr * getSelectedExpr() const
Definition ExprCXX.h:4633
const Expr * getSubExpr() const
Definition Expr.h:2199
DynTypedNodeList getParents(const NodeT &Node)
Returns the parents of the given node (within the traversal scope).
Pointer-authentication qualifiers.
Definition TypeBase.h:152
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition TypeBase.h:3328
A (possibly-)qualified type.
Definition TypeBase.h:937
PointerAuthQualifier getPointerAuth() const
Definition TypeBase.h:1453
bool mayBeDynamicClass() const
Returns true if it is a class and it might be dynamic.
Definition Type.cpp:130
bool isNull() const
Return true if this QualType doesn't point to a type yet.
Definition TypeBase.h:1004
const Type * getTypePtr() const
Retrieves a pointer to the underlying (unqualified) type.
Definition TypeBase.h:8285
LangAS getAddressSpace() const
Return the address space of this type.
Definition TypeBase.h:8411
Qualifiers::ObjCLifetime getObjCLifetime() const
Returns lifetime attribute of this type.
Definition TypeBase.h:1438
QualType getNonReferenceType() const
If Type is a reference type (e.g., const int&), returns the type that the reference refers to ("const...
Definition TypeBase.h:8470
QualType getCanonicalType() const
Definition TypeBase.h:8337
bool UseExcessPrecision(const ASTContext &Ctx)
Definition Type.cpp:1612
bool mayBeNotDynamicClass() const
Returns true if it is not a class or if the class might not be dynamic.
Definition Type.cpp:135
bool isCanonical() const
Definition TypeBase.h:8342
@ OCL_Strong
Assigning into this object requires the old value to be released and the new value to be retained.
Definition TypeBase.h:361
@ OCL_ExplicitNone
This object can be modified without requiring retains or releases.
Definition TypeBase.h:354
@ OCL_None
There is no lifetime qualification on this type.
Definition TypeBase.h:350
@ OCL_Weak
Reading or writing from this object requires a barrier call.
Definition TypeBase.h:364
@ OCL_Autoreleasing
Assigning into this object requires a lifetime extension.
Definition TypeBase.h:367
void removePointerAuth()
Definition TypeBase.h:610
specific_decl_iterator< FieldDecl > field_iterator
Definition Decl.h:4509
bool isSatisfied() const
Whether or not the requires clause is satisfied.
std::string ComputeName(ASTContext &Context) const
Definition Expr.cpp:583
static constexpr SanitizerMask bitPosToMask(const unsigned Pos)
Create a mask with a bit enabled at position Pos.
Definition Sanitizers.h:59
llvm::APSInt getShuffleMaskIdx(unsigned N) const
Definition Expr.h:4629
unsigned getNumSubExprs() const
getNumSubExprs - Return the size of the SubExprs array.
Definition Expr.h:4610
Expr * getExpr(unsigned Index)
getExpr - Return the Expr at the specified index.
Definition Expr.h:4616
unsigned getPackLength() const
Retrieve the length of the parameter pack.
Definition ExprCXX.h:4509
APValue EvaluateInContext(const ASTContext &Ctx, const Expr *DefaultExpr) const
Return the result of evaluating this SourceLocExpr in the specified (and possibly null) default argum...
Definition Expr.cpp:2277
SourceLocation getLocation() const
Definition Expr.h:4995
Encodes a location in the source.
CompoundStmt * getSubStmt()
Definition Expr.h:4546
StmtVisitor - This class implements a simple visitor for Stmt subclasses.
void dump() const
Dumps the specified AST fragment and all subtrees to llvm::errs().
SourceLocation getBeginLoc() const LLVM_READONLY
Definition Stmt.cpp:346
virtual bool useFP16ConversionIntrinsics() const
Check whether llvm intrinsics such as llvm.convert.to.fp16 should be used to convert to and from __fp...
VersionTuple getPlatformMinVersion() const
Retrieve the minimum desired version of the platform, to which the program should be compiled.
const llvm::fltSemantics & getHalfFormat() const
Definition TargetInfo.h:783
const llvm::fltSemantics & getBFloat16Format() const
Definition TargetInfo.h:793
const llvm::fltSemantics & getLongDoubleFormat() const
Definition TargetInfo.h:804
const llvm::fltSemantics & getFloat128Format() const
Definition TargetInfo.h:812
const llvm::fltSemantics & getIbm128Format() const
Definition TargetInfo.h:820
QualType getType() const
Return the type wrapped by this type source info.
Definition TypeBase.h:8267
bool getBoolValue() const
Definition ExprCXX.h:2941
const APValue & getAPValue() const
Definition ExprCXX.h:2946
bool isStoredAsBoolean() const
Definition ExprCXX.h:2937
bool isVoidType() const
Definition TypeBase.h:8878
bool isBooleanType() const
Definition TypeBase.h:9008
bool isSignableType(const ASTContext &Ctx) const
Definition TypeBase.h:8534
bool isSignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is signed or an enumeration types whose underlying ty...
Definition Type.cpp:2225
bool isUnsignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is unsigned or an enumeration types whose underlying ...
Definition Type.cpp:2273
CXXRecordDecl * castAsCXXRecordDecl() const
Definition Type.h:36
bool isArithmeticType() const
Definition Type.cpp:2337
bool isIntegerType() const
isIntegerType() does not include complex integers (a GCC extension).
Definition TypeBase.h:8922
const T * castAs() const
Member-template castAs<specific type>.
Definition TypeBase.h:9165
bool isReferenceType() const
Definition TypeBase.h:8546
const CXXRecordDecl * getPointeeCXXRecordDecl() const
If this is a pointer or reference to a RecordType, return the CXXRecordDecl that the type refers to.
Definition Type.cpp:1909
bool isSveVLSBuiltinType() const
Determines if this is a sizeless type supported by the 'arm_sve_vector_bits' type attribute,...
Definition Type.cpp:2608
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition Type.cpp:752
bool isExtVectorType() const
Definition TypeBase.h:8665
bool isExtVectorBoolType() const
Definition TypeBase.h:8669
bool isOCLIntelSubgroupAVCType() const
Definition TypeBase.h:8797
bool isBuiltinType() const
Helper methods to distinguish type categories.
Definition TypeBase.h:8645
RecordDecl * castAsRecordDecl() const
Definition Type.h:48
bool isAnyComplexType() const
Definition TypeBase.h:8657
bool isFixedPointType() const
Return true if this is a fixed point type according to ISO/IEC JTC1 SC22 WG14 N1169.
Definition TypeBase.h:8934
bool isHalfType() const
Definition TypeBase.h:8882
bool hasSignedIntegerRepresentation() const
Determine whether this type has an signed integer representation of some sort, e.g....
Definition Type.cpp:2243
bool isQueueT() const
Definition TypeBase.h:8768
bool isMatrixType() const
Definition TypeBase.h:8679
bool isEventT() const
Definition TypeBase.h:8760
bool isFunctionType() const
Definition TypeBase.h:8518
bool isVectorType() const
Definition TypeBase.h:8661
bool isRealFloatingType() const
Floating point categories.
Definition Type.cpp:2320
bool isFloatingType() const
Definition Type.cpp:2304
bool isUnsignedIntegerType() const
Return true if this is an integer type that is unsigned, according to C99 6.2.5p6 [which returns true...
Definition Type.cpp:2253
const T * castAsCanonical() const
Return this type's canonical type cast to the specified type.
Definition TypeBase.h:2928
const T * getAs() const
Member-template getAs<specific type>'.
Definition TypeBase.h:9098
bool isNullPtrType() const
Definition TypeBase.h:8915
QualType getTypeOfArgument() const
Gets the argument type, or the type of the argument expression, whichever is appropriate.
Definition Expr.h:2694
UnaryExprOrTypeTrait getKind() const
Definition Expr.h:2657
UnaryOperator - This represents the unary-expression's (except sizeof and alignof),...
Definition Expr.h:2244
SourceLocation getExprLoc() const
Definition Expr.h:2368
Expr * getSubExpr() const
Definition Expr.h:2285
Opcode getOpcode() const
Definition Expr.h:2280
FPOptions getFPFeaturesInEffect(const LangOptions &LO) const
Get the FP features status of this operator.
Definition Expr.h:2400
bool canOverflow() const
Returns true if the unary operator can cause an overflow.
Definition Expr.h:2298
Represent the declaration of a variable (in which case it is an lvalue) a function (in which case it ...
Definition Decl.h:711
QualType getType() const
Definition Decl.h:722
bool isWeak() const
Determine whether this symbol is weakly-imported, or declared with the weak or weak-ref attr.
Definition Decl.cpp:5453
QualType getType() const
Definition Value.cpp:237
Represents a C array with a specified size that is not an integer-constant-expression.
Definition TypeBase.h:3964
Represents a GCC generic vector type.
Definition TypeBase.h:4173
WhileStmt - This represents a 'while' stmt.
Definition Stmt.h:2697
Defines the clang::TargetInfo interface.
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
const internal::ArgumentAdaptingMatcherFunc< internal::HasMatcher > has
Matches AST nodes that have child AST nodes that match the provided matcher.
const AstTypeMatcher< PointerType > pointerType
const internal::VariadicDynCastAllOfMatcher< Stmt, Expr > expr
Matches expressions.
llvm::APFloat APFloat
Definition Floating.h:27
llvm::APInt APInt
Definition FixedPoint.h:19
bool LE(InterpState &S, CodePtr OpPC)
Definition Interp.h:1274
bool Load(InterpState &S, CodePtr OpPC)
Definition Interp.h:1939
bool GE(InterpState &S, CodePtr OpPC)
Definition Interp.h:1289
The JSON file list parser is used to communicate input to InstallAPI.
bool isa(CodeGen::Address addr)
Definition Address.h:330
if(T->getSizeExpr()) TRY_TO(TraverseStmt(const_cast< Expr * >(T -> getSizeExpr())))
@ Result
The result type of a method or function.
Definition TypeBase.h:905
const FunctionProtoType * T
CastKind
CastKind - The kind of operation required for a conversion.
U cast(CodeGen::Address addr)
Definition Address.h:327
unsigned long uint64_t
long int64_t
Diagnostic wrappers for TextAPI types for error reporting.
Definition Dominators.h:30
cl::opt< bool > EnableSingleByteCoverage
#define false
Definition stdbool.h:26
#define true
Definition stdbool.h:25
llvm::Value * TotalOffset
llvm::Value * OffsetOverflows
Structure with information about how a bitfield should be accessed.
unsigned Size
The total size of the bit-field, in bits.
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
static TBAAAccessInfo getMayAliasInfo()
Definition CodeGenTBAA.h:63
APValue Val
Val - This is the value the expression can be folded to.
Definition Expr.h:647
bool has(SanitizerMask K) const
Check if a certain (single) sanitizer is enabled.
Definition Sanitizers.h:174
bool hasOneOf(SanitizerMask K) const
Check if one or more sanitizers are enabled.
Definition Sanitizers.h:184