Thanks for visiting codestin.com
Credit goes to clang.llvm.org

clang 22.0.0git
InterpBuiltin.cpp
Go to the documentation of this file.
1//===--- InterpBuiltin.cpp - Interpreter for the constexpr VM ---*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
9#include "Boolean.h"
10#include "EvalEmitter.h"
11#include "Interp.h"
13#include "PrimType.h"
14#include "clang/AST/OSLog.h"
19#include "llvm/ADT/StringExtras.h"
20#include "llvm/Support/ErrorHandling.h"
21#include "llvm/Support/SipHash.h"
22
23namespace clang {
24namespace interp {
25
26LLVM_ATTRIBUTE_UNUSED static bool isNoopBuiltin(unsigned ID) {
27 switch (ID) {
28 case Builtin::BIas_const:
29 case Builtin::BIforward:
30 case Builtin::BIforward_like:
31 case Builtin::BImove:
32 case Builtin::BImove_if_noexcept:
33 case Builtin::BIaddressof:
34 case Builtin::BI__addressof:
35 case Builtin::BI__builtin_addressof:
36 case Builtin::BI__builtin_launder:
37 return true;
38 default:
39 return false;
40 }
41 return false;
42}
43
/// Discard the topmost stack value, which has primitive type \p T.
static void discard(InterpStack &Stk, PrimType T) {
  TYPE_SWITCH(T, { Stk.discard<T>(); });
}
47
  // Pop the topmost integral value of primitive type T and return it
  // widened to an APSInt.
  INT_TYPE_SWITCH(T, return Stk.pop<T>().toAPSInt());
}
51
52static APSInt popToAPSInt(InterpState &S, const Expr *E) {
53 return popToAPSInt(S.Stk, *S.getContext().classify(E->getType()));
54}
  // Classify the given type and pop the corresponding primitive value.
  return popToAPSInt(S.Stk, *S.getContext().classify(T));
}
58
/// Pushes \p Val on the stack as the type given by \p QT.
static void pushInteger(InterpState &S, const APSInt &Val, QualType QT) {
  assert(T);

  unsigned BitWidth = S.getASTContext().getTypeSize(QT);

  // Arbitrary-precision signed: allocate interpreter-owned storage and copy.
  if (T == PT_IntAPS) {
    auto Result = S.allocAP<IntegralAP<true>>(BitWidth);
    Result.copy(Val);
    return;
  }

  // Arbitrary-precision unsigned.
  if (T == PT_IntAP) {
    auto Result = S.allocAP<IntegralAP<false>>(BitWidth);
    Result.copy(Val);
    return;
  }

  // Fixed-width integers: push the sign- or zero-extended value, depending
  // on the branch taken (the guarding condition reads QT's signedness).
    int64_t V = Val.getSExtValue();
    INT_TYPE_SWITCH(*T, { S.Stk.push<T>(T::from(V, BitWidth)); });
  } else {
    uint64_t V = Val.getZExtValue();
    INT_TYPE_SWITCH(*T, { S.Stk.push<T>(T::from(V, BitWidth)); });
  }
}
91
/// Type-dispatching wrapper: forwards APInt/APSInt values to the APSInt
/// overload, and converts native integer values to an APSInt of matching
/// width and signedness first.
template <typename T>
static void pushInteger(InterpState &S, T Val, QualType QT) {
  if constexpr (std::is_same_v<T, APInt>)
    pushInteger(S, APSInt(Val, !std::is_signed_v<T>), QT);
  else if constexpr (std::is_same_v<T, APSInt>)
    pushInteger(S, Val, QT);
  else
      APSInt(APInt(sizeof(T) * 8, static_cast<uint64_t>(Val),
                   std::is_signed_v<T>),
             !std::is_signed_v<T>),
      QT);
}
105
/// Store \p Value into the object \p Dest points to, as primitive type
/// \p ValueT. AP types get fresh interpreter-owned storage first.
static void assignInteger(InterpState &S, const Pointer &Dest, PrimType ValueT,
                          const APSInt &Value) {

  if (ValueT == PT_IntAPS) {
    Dest.deref<IntegralAP<true>>() =
        S.allocAP<IntegralAP<true>>(Value.getBitWidth());
    Dest.deref<IntegralAP<true>>().copy(Value);
  } else if (ValueT == PT_IntAP) {
    Dest.deref<IntegralAP<false>>() =
        S.allocAP<IntegralAP<false>>(Value.getBitWidth());
    Dest.deref<IntegralAP<false>>().copy(Value);
  } else {
    // Fixed-width case: convert and assign via the type switch.
      ValueT, { Dest.deref<T>() = T::from(static_cast<T>(Value)); });
  }
}
122
123static QualType getElemType(const Pointer &P) {
124 const Descriptor *Desc = P.getFieldDesc();
125 QualType T = Desc->getType();
126 if (Desc->isPrimitive())
127 return T;
128 if (T->isPointerType())
129 return T->getAs<PointerType>()->getPointeeType();
130 if (Desc->isArray())
131 return Desc->getElemQualType();
132 if (const auto *AT = T->getAsArrayTypeUnsafe())
133 return AT->getElementType();
134 return T;
135}
136
// Emit a note explaining that the builtin with the given ID is not usable
// in a constant expression; wording depends on the language mode.
                                        unsigned ID) {
  // Only do the work when diagnostics are actually being collected.
  if (!S.diagnosing())
    return;

  auto Loc = S.Current->getSource(OpPC);
  if (S.getLangOpts().CPlusPlus11)
    S.CCEDiag(Loc, diag::note_constexpr_invalid_function)
        << /*isConstexpr=*/0 << /*isConstructor=*/0
  else
    S.CCEDiag(Loc, diag::note_invalid_subexpr_in_const_expr);
}
150
/// Pack a boolean vector into an APSInt: element I becomes bit I.
static llvm::APSInt convertBoolVectorToInt(const Pointer &Val) {
  assert(Val.getFieldDesc()->isPrimitiveArray() &&
         "Not a boolean vector");
  unsigned NumElems = Val.getNumElems();

  // Each element is one bit, so create an integer with NumElts bits.
  llvm::APSInt Result(NumElems, 0);
  for (unsigned I = 0; I != NumElems; ++I) {
    if (Val.elem<bool>(I))
      Result.setBit(I);
  }

  return Result;
}
166
// __builtin_is_constant_evaluated: warn when the result is tautologically
// true (e.g. used directly in a manifestly constant-evaluated context).
                                                  const InterpFrame *Frame,
                                                  const CallExpr *Call) {
  unsigned Depth = S.Current->getDepth();
  // Matches a call to std::is_constant_evaluated (the library wrapper).
  auto isStdCall = [](const FunctionDecl *F) -> bool {
    return F && F->isInStdNamespace() && F->getIdentifier() &&
           F->getIdentifier()->isStr("is_constant_evaluated");
  };
  const InterpFrame *Caller = Frame->Caller;
  // The current frame is the one for __builtin_is_constant_evaluated.
  // The one above that, potentially the one for std::is_constant_evaluated().
      S.getEvalStatus().Diag &&
      (Depth == 0 || (Depth == 1 && isStdCall(Frame->getCallee())))) {
    if (Caller && isStdCall(Frame->getCallee())) {
      // Point the warning at the std:: wrapper's call site.
      const Expr *E = Caller->getExpr(Caller->getRetPC());
      S.report(E->getExprLoc(),
               diag::warn_is_constant_evaluated_always_true_constexpr)
          << "std::is_constant_evaluated" << E->getSourceRange();
    } else {
      S.report(Call->getExprLoc(),
               diag::warn_is_constant_evaluated_always_true_constexpr)
          << "__builtin_is_constant_evaluated" << Call->getSourceRange();
    }
  }

  return true;
}
196
// __builtin_assume(int)
                                    const InterpFrame *Frame,
                                    const CallExpr *Call) {
  assert(Call->getNumArgs() == 1);
  // The assumption has no effect at compile time; just drop the argument.
  discard(S.Stk, *S.getContext().classify(Call->getArg(0)));
  return true;
}
205
// Implements the strcmp/strncmp/wcscmp/wcsncmp family. Returns -1/0/1 via
// the interpreter stack, or false on a non-constant-evaluable access.
                                    const InterpFrame *Frame,
                                    const CallExpr *Call, unsigned ID) {
  // The 'n' variants pop an explicit comparison limit; the others compare
  // until a terminating null.
  uint64_t Limit = ~static_cast<uint64_t>(0);
  if (ID == Builtin::BIstrncmp || ID == Builtin::BI__builtin_strncmp ||
      ID == Builtin::BIwcsncmp || ID == Builtin::BI__builtin_wcsncmp)
    Limit = popToAPSInt(S.Stk, *S.getContext().classify(Call->getArg(2)))
                .getZExtValue();

  const Pointer &B = S.Stk.pop<Pointer>();
  const Pointer &A = S.Stk.pop<Pointer>();
  // The library (non-__builtin_) spellings are not constexpr; diagnose.
  if (ID == Builtin::BIstrcmp || ID == Builtin::BIstrncmp ||
      ID == Builtin::BIwcscmp || ID == Builtin::BIwcsncmp)
    diagnoseNonConstexprBuiltin(S, OpPC, ID);

  if (Limit == 0) {
    pushInteger(S, 0, Call->getType());
    return true;
  }

  if (!CheckLive(S, OpPC, A, AK_Read) || !CheckLive(S, OpPC, B, AK_Read))
    return false;

  if (A.isDummy() || B.isDummy())
    return false;
  if (!A.isBlockPointer() || !B.isBlockPointer())
    return false;

  bool IsWide = ID == Builtin::BIwcscmp || ID == Builtin::BIwcsncmp ||
                ID == Builtin::BI__builtin_wcscmp ||
                ID == Builtin::BI__builtin_wcsncmp;
  assert(A.getFieldDesc()->isPrimitiveArray());
  assert(B.getFieldDesc()->isPrimitiveArray());

  // Different element types shouldn't happen, but with casts they can.
    return false;

  PrimType ElemT = *S.getContext().classify(getElemType(A));

  auto returnResult = [&](int V) -> bool {
    pushInteger(S, V, Call->getType());
    return true;
  };

  unsigned IndexA = A.getIndex();
  unsigned IndexB = B.getIndex();
  uint64_t Steps = 0;
  for (;; ++IndexA, ++IndexB, ++Steps) {

    if (Steps >= Limit)
      break;
    const Pointer &PA = A.atIndex(IndexA);
    const Pointer &PB = B.atIndex(IndexB);
    if (!CheckRange(S, OpPC, PA, AK_Read) ||
        !CheckRange(S, OpPC, PB, AK_Read)) {
      return false;
    }

    // Wide strings: compare element-wise at the classified element type.
    if (IsWide) {
      INT_TYPE_SWITCH(ElemT, {
        T CA = PA.deref<T>();
        T CB = PB.deref<T>();
        if (CA > CB)
          return returnResult(1);
        if (CA < CB)
          return returnResult(-1);
        if (CA.isZero() || CB.isZero())
          return returnResult(0);
      });
      continue;
    }

    // Narrow strings: compare as unsigned bytes, per the C standard.
    uint8_t CA = PA.deref<uint8_t>();
    uint8_t CB = PB.deref<uint8_t>();

    if (CA > CB)
      return returnResult(1);
    if (CA < CB)
      return returnResult(-1);
    if (CA == 0 || CB == 0)
      return returnResult(0);
  }

  return returnResult(0);
}
292
// Implements strlen/wcslen and their __builtin_ spellings by walking the
// array until a terminating null, validating every element access.
                                    const InterpFrame *Frame,
                                    const CallExpr *Call, unsigned ID) {
  const Pointer &StrPtr = S.Stk.pop<Pointer>();

  // The library spellings are not constexpr; diagnose.
  if (ID == Builtin::BIstrlen || ID == Builtin::BIwcslen)
    diagnoseNonConstexprBuiltin(S, OpPC, ID);

  if (!CheckArray(S, OpPC, StrPtr))
    return false;

  if (!CheckLive(S, OpPC, StrPtr, AK_Read))
    return false;

  if (!CheckDummy(S, OpPC, StrPtr.block(), AK_Read))
    return false;

  if (!StrPtr.getFieldDesc()->isPrimitiveArray())
    return false;

  assert(StrPtr.getFieldDesc()->isPrimitiveArray());
  unsigned ElemSize = StrPtr.getFieldDesc()->getElemSize();

  // Sanity-check that wide-string elements match wchar_t's size.
  if (ID == Builtin::BI__builtin_wcslen || ID == Builtin::BIwcslen) {
    [[maybe_unused]] const ASTContext &AC = S.getASTContext();
    assert(ElemSize == AC.getTypeSizeInChars(AC.getWCharType()).getQuantity());
  }

  size_t Len = 0;
  for (size_t I = StrPtr.getIndex();; ++I, ++Len) {
    const Pointer &ElemPtr = StrPtr.atIndex(I);

    if (!CheckRange(S, OpPC, ElemPtr, AK_Read))
      return false;

    // Read the element at its actual width so the null check is exact.
    uint32_t Val;
    switch (ElemSize) {
    case 1:
      Val = ElemPtr.deref<uint8_t>();
      break;
    case 2:
      Val = ElemPtr.deref<uint16_t>();
      break;
    case 4:
      Val = ElemPtr.deref<uint32_t>();
      break;
    default:
      llvm_unreachable("Unsupported char size");
    }
    if (Val == 0)
      break;
  }

  pushInteger(S, Len, Call->getType());

  return true;
}
350
// __builtin_nan/nans family: build a quiet or signaling NaN whose payload
// comes from parsing the string argument as an integer.
                                 const InterpFrame *Frame, const CallExpr *Call,
                                 bool Signaling) {
  const Pointer &Arg = S.Stk.pop<Pointer>();

  if (!CheckLoad(S, OpPC, Arg))
    return false;

  assert(Arg.getFieldDesc()->isPrimitiveArray());

  // Convert the given string to an integer using StringRef's API.
  llvm::APInt Fill;
  std::string Str;
  assert(Arg.getNumElems() >= 1);
  for (unsigned I = 0;; ++I) {
    const Pointer &Elem = Arg.atIndex(I);

    if (!CheckLoad(S, OpPC, Elem))
      return false;

    if (Elem.deref<int8_t>() == 0)
      break;

    Str += Elem.deref<char>();
  }

  // Treat empty strings as if they were zero.
  if (Str.empty())
    Fill = llvm::APInt(32, 0);
  else if (StringRef(Str).getAsInteger(0, Fill))
    return false;

  const llvm::fltSemantics &TargetSemantics =
      Call->getDirectCallee()->getReturnType());

  Floating Result = S.allocFloat(TargetSemantics);
  // First branch: standard (IEEE 754-2008) NaN encoding.
  if (Signaling)
    Result.copy(
        llvm::APFloat::getSNaN(TargetSemantics, /*Negative=*/false, &Fill));
  else
    Result.copy(
        llvm::APFloat::getQNaN(TargetSemantics, /*Negative=*/false, &Fill));
  } else {
    // Prior to IEEE 754-2008, architectures were allowed to choose whether
    // the first bit of their significand was set for qNaN or sNaN. MIPS chose
    // a different encoding to what became a standard in 2008, and for pre-
    // 2008 revisions, MIPS interpreted sNaN-2008 as qNan and qNaN-2008 as
    // sNaN. This is now known as "legacy NaN" encoding.
    if (Signaling)
      Result.copy(
          llvm::APFloat::getQNaN(TargetSemantics, /*Negative=*/false, &Fill));
    else
      Result.copy(
          llvm::APFloat::getSNaN(TargetSemantics, /*Negative=*/false, &Fill));
  }

  return true;
}
412
// __builtin_inf family: push positive infinity in the semantics of the
// callee's return type.
                                 const InterpFrame *Frame,
                                 const CallExpr *Call) {
  const llvm::fltSemantics &TargetSemantics =
      Call->getDirectCallee()->getReturnType());

  Floating Result = S.allocFloat(TargetSemantics);
  Result.copy(APFloat::getInf(TargetSemantics));
  return true;
}
425
// __builtin_copysign: result has Arg1's magnitude and Arg2's sign.
                                      const InterpFrame *Frame) {
  const Floating &Arg2 = S.Stk.pop<Floating>();
  const Floating &Arg1 = S.Stk.pop<Floating>();
  Floating Result = S.allocFloat(Arg1.getSemantics());

  APFloat Copy = Arg1.getAPFloat();
  Copy.copySign(Arg2.getAPFloat());
  Result.copy(Copy);

  return true;
}
439
// __builtin_fmin / __builtin_fminimum_num: IEEE minnum, or minimumnum for
// the *_num builtin.
                                  const InterpFrame *Frame, bool IsNumBuiltin) {
  const Floating &RHS = S.Stk.pop<Floating>();
  const Floating &LHS = S.Stk.pop<Floating>();
  Floating Result = S.allocFloat(LHS.getSemantics());

  if (IsNumBuiltin)
    Result.copy(llvm::minimumnum(LHS.getAPFloat(), RHS.getAPFloat()));
  else
    Result.copy(minnum(LHS.getAPFloat(), RHS.getAPFloat()));
  return true;
}
453
// __builtin_fmax / __builtin_fmaximum_num: IEEE maxnum, or maximumnum for
// the *_num builtin.
                                  const InterpFrame *Frame, bool IsNumBuiltin) {
  const Floating &RHS = S.Stk.pop<Floating>();
  const Floating &LHS = S.Stk.pop<Floating>();
  Floating Result = S.allocFloat(LHS.getSemantics());

  if (IsNumBuiltin)
    Result.copy(llvm::maximumnum(LHS.getAPFloat(), RHS.getAPFloat()));
  else
    Result.copy(maxnum(LHS.getAPFloat(), RHS.getAPFloat()));
  return true;
}
467
/// Defined as __builtin_isnan(...), to accommodate the fact that it can
/// take a float, double, long double, etc.
/// But for us, that's all a Floating anyway.
                                   const InterpFrame *Frame,
                                   const CallExpr *Call) {
  const Floating &Arg = S.Stk.pop<Floating>();

  pushInteger(S, Arg.isNan(), Call->getType());
  return true;
}
479
// __builtin_issignaling: true iff the argument is a signaling NaN.
                                         const InterpFrame *Frame,
                                         const CallExpr *Call) {
  const Floating &Arg = S.Stk.pop<Floating>();

  pushInteger(S, Arg.isSignaling(), Call->getType());
  return true;
}
488
// __builtin_isinf / __builtin_isinf_sign: with CheckSign set, returns
// -1/+1 for negative/positive infinity and 0 otherwise.
                                   const InterpFrame *Frame, bool CheckSign,
                                   const CallExpr *Call) {
  const Floating &Arg = S.Stk.pop<Floating>();
  APFloat F = Arg.getAPFloat();
  bool IsInf = F.isInfinity();

  if (CheckSign)
    pushInteger(S, IsInf ? (F.isNegative() ? -1 : 1) : 0, Call->getType());
  else
    pushInteger(S, IsInf, Call->getType());
  return true;
}
502
// __builtin_isfinite: true iff the argument is neither NaN nor infinite.
                                      const InterpFrame *Frame,
                                      const CallExpr *Call) {
  const Floating &Arg = S.Stk.pop<Floating>();

  pushInteger(S, Arg.isFinite(), Call->getType());
  return true;
}
511
// __builtin_isnormal: true iff the argument is a normal (non-denormal,
// non-zero, finite) value.
                                      const InterpFrame *Frame,
                                      const CallExpr *Call) {
  const Floating &Arg = S.Stk.pop<Floating>();

  pushInteger(S, Arg.isNormal(), Call->getType());
  return true;
}
520
// __builtin_issubnormal: true iff the argument is a denormal value.
                                         const InterpFrame *Frame,
                                         const CallExpr *Call) {
  const Floating &Arg = S.Stk.pop<Floating>();

  pushInteger(S, Arg.isDenormal(), Call->getType());
  return true;
}
529
// __builtin_iszero: true iff the argument is positive or negative zero.
                                    const InterpFrame *Frame,
                                    const CallExpr *Call) {
  const Floating &Arg = S.Stk.pop<Floating>();

  pushInteger(S, Arg.isZero(), Call->getType());
  return true;
}
538
// Pushes the sign of the argument (true when the sign bit is set).
                                     const InterpFrame *Frame,
                                     const CallExpr *Call) {
  const Floating &Arg = S.Stk.pop<Floating>();

  pushInteger(S, Arg.isNegative(), Call->getType());
  return true;
}
547
// Implements the quiet floating-point comparison builtins
// (isgreater, isgreaterequal, isless, islessequal, islessgreater,
// isunordered) by dispatching on the builtin ID.
                                const CallExpr *Call, unsigned ID) {
  const Floating &RHS = S.Stk.pop<Floating>();
  const Floating &LHS = S.Stk.pop<Floating>();

      S,
      // Immediately-invoked lambda selects the comparison result.
      [&] {
        switch (ID) {
        case Builtin::BI__builtin_isgreater:
          return LHS > RHS;
        case Builtin::BI__builtin_isgreaterequal:
          return LHS >= RHS;
        case Builtin::BI__builtin_isless:
          return LHS < RHS;
        case Builtin::BI__builtin_islessequal:
          return LHS <= RHS;
        case Builtin::BI__builtin_islessgreater: {
          ComparisonCategoryResult Cmp = LHS.compare(RHS);
          return Cmp == ComparisonCategoryResult::Less ||
        }
        case Builtin::BI__builtin_isunordered:
        default:
          llvm_unreachable("Unexpected builtin ID: Should be a floating point "
                           "comparison function");
        }
      }(),
      Call->getType());
  return true;
}
580
/// First parameter to __builtin_isfpclass is the floating value, the
/// second one is an integral value.
                                       const InterpFrame *Frame,
                                       const CallExpr *Call) {
  APSInt FPClassArg = popToAPSInt(S, Call->getArg(1));
  const Floating &F = S.Stk.pop<Floating>();

  // Intersect the value's FP classification with the requested class mask.
  int32_t Result = static_cast<int32_t>(
      (F.classify() & std::move(FPClassArg)).getZExtValue());
  pushInteger(S, Result, Call->getType());

  return true;
}
595
/// Five int values followed by one floating value.
/// __builtin_fpclassify(int, int, int, int, int, float)
                                        const InterpFrame *Frame,
                                        const CallExpr *Call) {
  const Floating &Val = S.Stk.pop<Floating>();

  PrimType IntT = *S.getContext().classify(Call->getArg(0));
  APSInt Values[5];
  // Arguments are popped in reverse; store them back in source order.
  for (unsigned I = 0; I != 5; ++I)
    Values[4 - I] = popToAPSInt(S.Stk, IntT);

  // Select which classification argument to return:
  // 0=NaN, 1=inf, 2=normal, 3=subnormal, 4=zero.
  unsigned Index;
  switch (Val.getCategory()) {
  case APFloat::fcNaN:
    Index = 0;
    break;
  case APFloat::fcInfinity:
    Index = 1;
    break;
  case APFloat::fcNormal:
    Index = Val.isDenormal() ? 3 : 2;
    break;
  case APFloat::fcZero:
    Index = 4;
    break;
  }

  // The last argument is first on the stack.
  assert(Index <= 4);

  pushInteger(S, Values[Index], Call->getType());
  return true;
}
630
631static inline Floating abs(InterpState &S, const Floating &In) {
632 if (!In.isNegative())
633 return In;
634
635 Floating Output = S.allocFloat(In.getSemantics());
636 APFloat New = In.getAPFloat();
637 New.changeSign();
638 Output.copy(New);
639 return Output;
640}
641
// The C standard says "fabs raises no floating-point exceptions,
// even if x is a signaling NaN. The returned value is independent of
// the current rounding direction mode." Therefore constant folding can
// proceed without regard to the floating point settings.
// Reference, WG14 N2478 F.10.4.3
                                   const InterpFrame *Frame) {
  const Floating &Val = S.Stk.pop<Floating>();
  S.Stk.push<Floating>(abs(S, Val));
  return true;
}
653
// Integer __builtin_abs family: fails (non-constant) on the most negative
// value, whose negation would overflow.
                                  const InterpFrame *Frame,
                                  const CallExpr *Call) {
  APSInt Val = popToAPSInt(S, Call->getArg(0));
  if (Val ==
      APSInt(APInt::getSignedMinValue(Val.getBitWidth()), /*IsUnsigned=*/false))
    return false;
  if (Val.isNegative())
    Val.negate();
  pushInteger(S, Val, Call->getType());
  return true;
}
666
// __builtin_popcount(g): boolean vectors are packed into an APInt first.
                                       const InterpFrame *Frame,
                                       const CallExpr *Call) {
  APSInt Val;
  if (Call->getArg(0)->getType()->isExtVectorBoolType()) {
    const Pointer &Arg = S.Stk.pop<Pointer>();
    Val = convertBoolVectorToInt(Arg);
  } else {
    Val = popToAPSInt(S, Call->getArg(0));
  }
  pushInteger(S, Val.popcount(), Call->getType());
  return true;
}
680
// __builtin_parity: population count modulo 2.
                                     const InterpFrame *Frame,
                                     const CallExpr *Call) {
  APSInt Val = popToAPSInt(S, Call->getArg(0));
  pushInteger(S, Val.popcount() % 2, Call->getType());
  return true;
}
688
// __builtin_clrsb: number of leading redundant sign bits.
                                    const InterpFrame *Frame,
                                    const CallExpr *Call) {
  APSInt Val = popToAPSInt(S, Call->getArg(0));
  pushInteger(S, Val.getBitWidth() - Val.getSignificantBits(), Call->getType());
  return true;
}
696
// __builtin_bitreverse: reverse the order of all bits in the argument.
                                         const InterpFrame *Frame,
                                         const CallExpr *Call) {
  APSInt Val = popToAPSInt(S, Call->getArg(0));
  pushInteger(S, Val.reverseBits(), Call->getType());
  return true;
}
704
// __builtin_classify_type: classify the (unevaluated) argument's type.
                                            const InterpFrame *Frame,
                                            const CallExpr *Call) {
  // This is an unevaluated call, so there are no arguments on the stack.
  assert(Call->getNumArgs() == 1);
  const Expr *Arg = Call->getArg(0);

  GCCTypeClass ResultClass =
  int32_t ReturnVal = static_cast<int32_t>(ResultClass);
  pushInteger(S, ReturnVal, Call->getType());
  return true;
}
718
// __builtin_expect(long, long)
// __builtin_expect_with_probability(long, long, double)
                                     const InterpFrame *Frame,
                                     const CallExpr *Call) {
  // The return value is simply the value of the first parameter.
  // We ignore the probability.
  unsigned NumArgs = Call->getNumArgs();
  assert(NumArgs == 2 || NumArgs == 3);

  PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
  // Drop the probability (if present) and the expected value.
  if (NumArgs == 3)
    S.Stk.discard<Floating>();
  discard(S.Stk, ArgT);

  APSInt Val = popToAPSInt(S.Stk, ArgT);
  pushInteger(S, Val, Call->getType());
  return true;
}
738
/// rotateleft(value, amount)
                                    const InterpFrame *Frame,
                                    const CallExpr *Call, bool Right) {
  APSInt Amount = popToAPSInt(S, Call->getArg(1));
  APSInt Value = popToAPSInt(S, Call->getArg(0));

  // Rotation amount is reduced modulo the bit width; result is unsigned.
  if (Right)
    Result = APSInt(Value.rotr(Amount.urem(Value.getBitWidth())),
                    /*IsUnsigned=*/true);
  else // Left.
    Result = APSInt(Value.rotl(Amount.urem(Value.getBitWidth())),
                    /*IsUnsigned=*/true);

  pushInteger(S, Result, Call->getType());
  return true;
}
757
// __builtin_ffs: one plus the index of the least significant set bit,
// or zero when the value is zero.
                                  const InterpFrame *Frame,
                                  const CallExpr *Call) {
  APSInt Value = popToAPSInt(S, Call->getArg(0));

  uint64_t N = Value.countr_zero();
  pushInteger(S, N == Value.getBitWidth() ? 0 : N + 1, Call->getType());
  return true;
}
767
// __builtin_addressof: the pointer argument is already on the stack and is
// also the result, so only sanity-check it in debug builds.
                                        const InterpFrame *Frame,
                                        const CallExpr *Call) {
#ifndef NDEBUG
  assert(Call->getArg(0)->isLValue());
  PrimType PtrT = S.getContext().classify(Call->getArg(0)).value_or(PT_Ptr);
  assert(PtrT == PT_Ptr &&
         "Unsupported pointer type passed to __builtin_addressof()");
#endif
  return true;
}
779
// Succeeds only when the directly-called function is declared constexpr
// (pre-C++20, the std forwarding helpers are not).
                                   const InterpFrame *Frame,
                                   const CallExpr *Call) {
  return Call->getDirectCallee()->isConstexpr();
}
785
// Pops an integer argument and pushes a value derived from it.
// NOTE(review): the line computing 'Result' from Arg.getZExtValue() is
// elided in this rendering — presumably a target/context query; confirm
// against the upstream source.
                                               const InterpFrame *Frame,
                                               const CallExpr *Call) {
  APSInt Arg = popToAPSInt(S, Call->getArg(0));

      Arg.getZExtValue());
  pushInteger(S, Result, Call->getType());
  return true;
}
796
// Two integral values followed by a pointer (lhs, rhs, resultOut)
                                  const CallExpr *Call,
                                  unsigned BuiltinOp) {
  const Pointer &ResultPtr = S.Stk.pop<Pointer>();
  if (ResultPtr.isDummy())
    return false;

  PrimType RHST = *S.getContext().classify(Call->getArg(1)->getType());
  PrimType LHST = *S.getContext().classify(Call->getArg(0)->getType());
  APSInt RHS = popToAPSInt(S.Stk, RHST);
  APSInt LHS = popToAPSInt(S.Stk, LHST);
  QualType ResultType = Call->getArg(2)->getType()->getPointeeType();
  PrimType ResultT = *S.getContext().classify(ResultType);
  bool Overflow;

  // The generic (typeless) variants may mix operand widths and signedness;
  // compute in a common width that can represent all three types.
  if (BuiltinOp == Builtin::BI__builtin_add_overflow ||
      BuiltinOp == Builtin::BI__builtin_sub_overflow ||
      BuiltinOp == Builtin::BI__builtin_mul_overflow) {
    bool IsSigned = LHS.isSigned() || RHS.isSigned() ||
    bool AllSigned = LHS.isSigned() && RHS.isSigned() &&
    uint64_t LHSSize = LHS.getBitWidth();
    uint64_t RHSSize = RHS.getBitWidth();
    uint64_t ResultSize = S.getASTContext().getTypeSize(ResultType);
    uint64_t MaxBits = std::max(std::max(LHSSize, RHSSize), ResultSize);

    // Add an additional bit if the signedness isn't uniformly agreed to. We
    // could do this ONLY if there is a signed and an unsigned that both have
    // MaxBits, but the code to check that is pretty nasty. The issue will be
    // caught in the shrink-to-result later anyway.
    if (IsSigned && !AllSigned)
      ++MaxBits;

    LHS = APSInt(LHS.extOrTrunc(MaxBits), !IsSigned);
    RHS = APSInt(RHS.extOrTrunc(MaxBits), !IsSigned);
    Result = APSInt(MaxBits, !IsSigned);
  }

  // Find largest int.
  switch (BuiltinOp) {
  default:
    llvm_unreachable("Invalid value for BuiltinOp");
  case Builtin::BI__builtin_add_overflow:
  case Builtin::BI__builtin_sadd_overflow:
  case Builtin::BI__builtin_saddl_overflow:
  case Builtin::BI__builtin_saddll_overflow:
  case Builtin::BI__builtin_uadd_overflow:
  case Builtin::BI__builtin_uaddl_overflow:
  case Builtin::BI__builtin_uaddll_overflow:
    Result = LHS.isSigned() ? LHS.sadd_ov(RHS, Overflow)
                            : LHS.uadd_ov(RHS, Overflow);
    break;
  case Builtin::BI__builtin_sub_overflow:
  case Builtin::BI__builtin_ssub_overflow:
  case Builtin::BI__builtin_ssubl_overflow:
  case Builtin::BI__builtin_ssubll_overflow:
  case Builtin::BI__builtin_usub_overflow:
  case Builtin::BI__builtin_usubl_overflow:
  case Builtin::BI__builtin_usubll_overflow:
    Result = LHS.isSigned() ? LHS.ssub_ov(RHS, Overflow)
                            : LHS.usub_ov(RHS, Overflow);
    break;
  case Builtin::BI__builtin_mul_overflow:
  case Builtin::BI__builtin_smul_overflow:
  case Builtin::BI__builtin_smull_overflow:
  case Builtin::BI__builtin_smulll_overflow:
  case Builtin::BI__builtin_umul_overflow:
  case Builtin::BI__builtin_umull_overflow:
  case Builtin::BI__builtin_umulll_overflow:
    Result = LHS.isSigned() ? LHS.smul_ov(RHS, Overflow)
                            : LHS.umul_ov(RHS, Overflow);
    break;
  }

  // In the case where multiple sizes are allowed, truncate and see if
  // the values are the same.
  if (BuiltinOp == Builtin::BI__builtin_add_overflow ||
      BuiltinOp == Builtin::BI__builtin_sub_overflow ||
      BuiltinOp == Builtin::BI__builtin_mul_overflow) {
    // APSInt doesn't have a TruncOrSelf, so we use extOrTrunc instead,
    // since it will give us the behavior of a TruncOrSelf in the case where
    // its parameter <= its size. We previously set Result to be at least the
    // type-size of the result, so getTypeSize(ResultType) <= Resu
    APSInt Temp = Result.extOrTrunc(S.getASTContext().getTypeSize(ResultType));
    Temp.setIsSigned(ResultType->isSignedIntegerOrEnumerationType());

    if (!APSInt::isSameValue(Temp, Result))
      Overflow = true;
    Result = std::move(Temp);
  }

  // Write Result to ResultPtr and put Overflow on the stack.
  assignInteger(S, ResultPtr, ResultT, Result);
  if (ResultPtr.canBeInitialized())
    ResultPtr.initialize();

  assert(Call->getDirectCallee()->getReturnType()->isBooleanType());
  S.Stk.push<Boolean>(Overflow);
  return true;
}
900
/// Three integral values followed by a pointer (lhs, rhs, carry, carryOut).
                                    const InterpFrame *Frame,
                                    const CallExpr *Call, unsigned BuiltinOp) {
  const Pointer &CarryOutPtr = S.Stk.pop<Pointer>();
  PrimType LHST = *S.getContext().classify(Call->getArg(0)->getType());
  PrimType RHST = *S.getContext().classify(Call->getArg(1)->getType());
  APSInt CarryIn = popToAPSInt(S.Stk, LHST);
  APSInt RHS = popToAPSInt(S.Stk, RHST);
  APSInt LHS = popToAPSInt(S.Stk, LHST);

  if (CarryOutPtr.isDummy())
    return false;

  APSInt CarryOut;

  // Copy the number of bits and sign.
  Result = LHS;
  CarryOut = LHS;

  bool FirstOverflowed = false;
  bool SecondOverflowed = false;
  switch (BuiltinOp) {
  default:
    llvm_unreachable("Invalid value for BuiltinOp");
  case Builtin::BI__builtin_addcb:
  case Builtin::BI__builtin_addcs:
  case Builtin::BI__builtin_addc:
  case Builtin::BI__builtin_addcl:
  case Builtin::BI__builtin_addcll:
    // Add RHS, then the carry; either step may overflow.
    Result =
        LHS.uadd_ov(RHS, FirstOverflowed).uadd_ov(CarryIn, SecondOverflowed);
    break;
  case Builtin::BI__builtin_subcb:
  case Builtin::BI__builtin_subcs:
  case Builtin::BI__builtin_subc:
  case Builtin::BI__builtin_subcl:
  case Builtin::BI__builtin_subcll:
    Result =
        LHS.usub_ov(RHS, FirstOverflowed).usub_ov(CarryIn, SecondOverflowed);
    break;
  }
  // It is possible for both overflows to happen but CGBuiltin uses an OR so
  // this is consistent.
  CarryOut = (uint64_t)(FirstOverflowed | SecondOverflowed);

  QualType CarryOutType = Call->getArg(3)->getType()->getPointeeType();
  PrimType CarryOutT = *S.getContext().classify(CarryOutType);
  assignInteger(S, CarryOutPtr, CarryOutT, CarryOut);
  CarryOutPtr.initialize();

  assert(Call->getType() == Call->getArg(0)->getType());
  pushInteger(S, Result, Call->getType());
  return true;
}
957
// Count-leading-zeros builtins (__builtin_clz*, __builtin_clzg, __lzcnt*).
                               const InterpFrame *Frame, const CallExpr *Call,
                               unsigned BuiltinOp) {

  // clzg may carry an explicit fallback value for a zero input.
  std::optional<APSInt> Fallback;
  if (BuiltinOp == Builtin::BI__builtin_clzg && Call->getNumArgs() == 2)
    Fallback = popToAPSInt(S, Call->getArg(1));

  APSInt Val;
  if (Call->getArg(0)->getType()->isExtVectorBoolType()) {
    const Pointer &Arg = S.Stk.pop<Pointer>();
    Val = convertBoolVectorToInt(Arg);
  } else {
    Val = popToAPSInt(S, Call->getArg(0));
  }

  // When the argument is 0, the result of GCC builtins is undefined, whereas
  // for Microsoft intrinsics, the result is the bit-width of the argument.
  bool ZeroIsUndefined = BuiltinOp != Builtin::BI__lzcnt16 &&
                         BuiltinOp != Builtin::BI__lzcnt &&
                         BuiltinOp != Builtin::BI__lzcnt64;

  if (Val == 0) {
    if (Fallback) {
      pushInteger(S, *Fallback, Call->getType());
      return true;
    }

    if (ZeroIsUndefined)
      return false;
  }

  pushInteger(S, Val.countl_zero(), Call->getType());
  return true;
}
993
// Count-trailing-zeros builtins; zero input is non-constant unless ctzg
// supplied a fallback value.
                               const InterpFrame *Frame, const CallExpr *Call,
                               unsigned BuiltinID) {
  std::optional<APSInt> Fallback;
  if (BuiltinID == Builtin::BI__builtin_ctzg && Call->getNumArgs() == 2)
    Fallback = popToAPSInt(S, Call->getArg(1));

  APSInt Val;
  if (Call->getArg(0)->getType()->isExtVectorBoolType()) {
    const Pointer &Arg = S.Stk.pop<Pointer>();
    Val = convertBoolVectorToInt(Arg);
  } else {
    Val = popToAPSInt(S, Call->getArg(0));
  }

  if (Val == 0) {
    if (Fallback) {
      pushInteger(S, *Fallback, Call->getType());
      return true;
    }
    return false;
  }

  pushInteger(S, Val.countr_zero(), Call->getType());
  return true;
}
1020
// __builtin_bswap: reverse the byte order of the argument.
                                    const InterpFrame *Frame,
                                    const CallExpr *Call) {
  const APSInt &Val = popToAPSInt(S, Call->getArg(0));
  assert(Val.getActiveBits() <= 64);

  pushInteger(S, Val.byteSwap(), Call->getType());
  return true;
}
1030
/// bool __atomic_always_lock_free(size_t, void const volatile*)
/// bool __atomic_is_lock_free(size_t, void const volatile*)
                                                const InterpFrame *Frame,
                                                const CallExpr *Call,
                                                unsigned BuiltinOp) {
  auto returnBool = [&S](bool Value) -> bool {
    S.Stk.push<Boolean>(Value);
    return true;
  };

  const Pointer &Ptr = S.Stk.pop<Pointer>();
  const APSInt &SizeVal = popToAPSInt(S, Call->getArg(0));

  // For __atomic_is_lock_free(sizeof(_Atomic(T))), if the size is a power
  // of two less than or equal to the maximum inline atomic width, we know it
  // is lock-free.  If the size isn't a power of two, or greater than the
  // maximum alignment where we promote atomics, we know it is not lock-free
  // (at least not in the sense of atomic_is_lock_free).  Otherwise,
  // the answer can only be determined at runtime; for example, 16-byte
  // atomics have lock-free implementations on some, but not all,
  // x86-64 processors.

  // Check power-of-two.
  CharUnits Size = CharUnits::fromQuantity(SizeVal.getZExtValue());
  if (Size.isPowerOfTwo()) {
    // Check against inlining width.
    unsigned InlineWidthBits =
    if (Size <= S.getASTContext().toCharUnitsFromBits(InlineWidthBits)) {

      // OK, we will inline appropriately-aligned operations of this size,
      // and _Atomic(T) is appropriately-aligned.
      if (Size == CharUnits::One())
        return returnBool(true);

      // Same for null pointers.
      assert(BuiltinOp != Builtin::BI__c11_atomic_is_lock_free);
      if (Ptr.isZero())
        return returnBool(true);

      // Integral pointer values can be checked directly for alignment.
      if (Ptr.isIntegralPointer()) {
        uint64_t IntVal = Ptr.getIntegerRepresentation();
        if (APSInt(APInt(64, IntVal, false), true).isAligned(Size.getAsAlign()))
          return returnBool(true);
      }

      const Expr *PtrArg = Call->getArg(1);
      // Otherwise, check if the type's alignment against Size.
      if (const auto *ICE = dyn_cast<ImplicitCastExpr>(PtrArg)) {
        // Drop the potential implicit-cast to 'const volatile void*', getting
        // the underlying type.
        if (ICE->getCastKind() == CK_BitCast)
          PtrArg = ICE->getSubExpr();
      }

      if (const auto *PtrTy = PtrArg->getType()->getAs<PointerType>()) {
        QualType PointeeType = PtrTy->getPointeeType();
        if (!PointeeType->isIncompleteType() &&
            S.getASTContext().getTypeAlignInChars(PointeeType) >= Size) {
          // OK, we will inline operations on this object.
          return returnBool(true);
        }
      }
    }
  }

  // always_lock_free has a definite answer; is_lock_free stays non-constant.
  if (BuiltinOp == Builtin::BI__atomic_always_lock_free)
    return returnBool(false);

  return false;
}
1103
/// bool __c11_atomic_is_lock_free(size_t)
                                                   CodePtr OpPC,
                                                   const InterpFrame *Frame,
                                                   const CallExpr *Call) {
  const APSInt &SizeVal = popToAPSInt(S, Call->getArg(0));

  // Lock-free iff the size is a power of two within the inline atomic width.
  CharUnits Size = CharUnits::fromQuantity(SizeVal.getZExtValue());
  if (Size.isPowerOfTwo()) {
    // Check against inlining width.
    unsigned InlineWidthBits =
    if (Size <= S.getASTContext().toCharUnitsFromBits(InlineWidthBits)) {
      S.Stk.push<Boolean>(true);
      return true;
    }
  }

  return false; // returnBool(false);
}
1124
1125/// __builtin_complex(Float A, float B);
1127 const InterpFrame *Frame,
1128 const CallExpr *Call) {
1129 const Floating &Arg2 = S.Stk.pop<Floating>();
1130 const Floating &Arg1 = S.Stk.pop<Floating>();
1131 Pointer &Result = S.Stk.peek<Pointer>();
1132
1133 Result.elem<Floating>(0) = Arg1;
1134 Result.elem<Floating>(1) = Arg2;
1135 Result.initializeAllElements();
1136
1137 return true;
1138}
1139
/// __builtin_is_aligned()
/// __builtin_align_up()
/// __builtin_align_down()
/// The first parameter is either an integer or a pointer.
/// The second parameter is the requested alignment as an integer.
                                               const InterpFrame *Frame,
                                               const CallExpr *Call,
                                               unsigned BuiltinOp) {
  const APSInt &Alignment = popToAPSInt(S, Call->getArg(1));

  // The requested alignment must be a positive power of two.
  if (Alignment < 0 || !Alignment.isPowerOf2()) {
    S.FFDiag(Call, diag::note_constexpr_invalid_alignment) << Alignment;
    return false;
  }
  // Reject alignments that are not representable in the (signed) type of the
  // first argument.
  unsigned SrcWidth = S.getASTContext().getIntWidth(Call->getArg(0)->getType());
  APSInt MaxValue(APInt::getOneBitSet(SrcWidth, SrcWidth - 1));
  if (APSInt::compareValues(Alignment, MaxValue) > 0) {
    S.FFDiag(Call, diag::note_constexpr_alignment_too_big)
        << MaxValue << Call->getArg(0)->getType() << Alignment;
    return false;
  }

  // The first parameter is either an integer or a pointer.
  PrimType FirstArgT = *S.Ctx.classify(Call->getArg(0));

  if (isIntegralType(FirstArgT)) {
    // Integer case: the result is pure arithmetic on the value.
    const APSInt &Src = popToAPSInt(S.Stk, FirstArgT);
    APInt AlignMinusOne = Alignment.extOrTrunc(Src.getBitWidth()) - 1;
    if (BuiltinOp == Builtin::BI__builtin_align_up) {
      // Round up: add (align - 1) and clear the low bits.
      APSInt AlignedVal =
          APSInt((Src + AlignMinusOne) & ~AlignMinusOne, Src.isUnsigned());
      pushInteger(S, AlignedVal, Call->getType());
    } else if (BuiltinOp == Builtin::BI__builtin_align_down) {
      // Round down: just clear the low bits.
      APSInt AlignedVal = APSInt(Src & ~AlignMinusOne, Src.isUnsigned());
      pushInteger(S, AlignedVal, Call->getType());
    } else {
      // __builtin_is_aligned: true iff the low bits are all zero.
      assert(*S.Ctx.classify(Call->getType()) == PT_Bool);
      S.Stk.push<Boolean>((Src & AlignMinusOne) == 0);
    }
    return true;
  }
  assert(FirstArgT == PT_Ptr);
  const Pointer &Ptr = S.Stk.pop<Pointer>();
  if (!Ptr.isBlockPointer())
    return false;

  // Derive the pointer's minimum known alignment from the base object's
  // alignment and the pointer's offset within it.
  unsigned PtrOffset = Ptr.getIndex();
  CharUnits BaseAlignment =
  CharUnits PtrAlign =
      BaseAlignment.alignmentAtOffset(CharUnits::fromQuantity(PtrOffset));

  if (BuiltinOp == Builtin::BI__builtin_is_aligned) {
    // Known-sufficient alignment: definitely aligned.
    if (PtrAlign.getQuantity() >= Alignment) {
      S.Stk.push<Boolean>(true);
      return true;
    }
    // If the alignment is not known to be sufficient, some cases could still
    // be aligned at run time. However, if the requested alignment is less or
    // equal to the base alignment and the offset is not aligned, we know that
    // the run-time value can never be aligned.
    if (BaseAlignment.getQuantity() >= Alignment &&
        PtrAlign.getQuantity() < Alignment) {
      S.Stk.push<Boolean>(false);
      return true;
    }

    // Otherwise the answer depends on the run-time address; not a constant.
    S.FFDiag(Call->getArg(0), diag::note_constexpr_alignment_compute)
        << Alignment;
    return false;
  }

  assert(BuiltinOp == Builtin::BI__builtin_align_down ||
         BuiltinOp == Builtin::BI__builtin_align_up);

  // For align_up/align_down, we can return the same value if the alignment
  // is known to be greater or equal to the requested value.
  if (PtrAlign.getQuantity() >= Alignment) {
    S.Stk.push<Pointer>(Ptr);
    return true;
  }

  // The alignment could be greater than the minimum at run-time, so we cannot
  // infer much about the resulting pointer value. One case is possible:
  // For `_Alignas(32) char buf[N]; __builtin_align_down(&buf[idx], 32)` we
  // can infer the correct index if the requested alignment is smaller than
  // the base alignment so we can perform the computation on the offset.
  if (BaseAlignment.getQuantity() >= Alignment) {
    assert(Alignment.getBitWidth() <= 64 &&
           "Cannot handle > 64-bit address-space");
    uint64_t Alignment64 = Alignment.getZExtValue();
    CharUnits NewOffset =
        CharUnits::fromQuantity(BuiltinOp == Builtin::BI__builtin_align_down
                                    ? llvm::alignDown(PtrOffset, Alignment64)
                                    : llvm::alignTo(PtrOffset, Alignment64));

    S.Stk.push<Pointer>(Ptr.atIndex(NewOffset.getQuantity()));
    return true;
  }

  // Otherwise, we cannot constant-evaluate the result.
  S.FFDiag(Call->getArg(0), diag::note_constexpr_alignment_adjust) << Alignment;
  return false;
}
1245
/// __builtin_assume_aligned(Ptr, Alignment[, ExtraOffset])
                                            const InterpFrame *Frame,
                                            const CallExpr *Call) {
  assert(Call->getNumArgs() == 2 || Call->getNumArgs() == 3);

  // Pop in reverse push order: the optional extra offset first, then the
  // alignment, then the pointer itself.
  std::optional<APSInt> ExtraOffset;
  if (Call->getNumArgs() == 3)
    ExtraOffset = popToAPSInt(S.Stk, *S.Ctx.classify(Call->getArg(2)));

  APSInt Alignment = popToAPSInt(S.Stk, *S.Ctx.classify(Call->getArg(1)));
  const Pointer &Ptr = S.Stk.pop<Pointer>();

  CharUnits Align = CharUnits::fromQuantity(Alignment.getZExtValue());

  // If there is a base object, then it must have the correct alignment.
  if (Ptr.isBlockPointer()) {
    CharUnits BaseAlignment;
    if (const auto *VD = Ptr.getDeclDesc()->asValueDecl())
      BaseAlignment = S.getASTContext().getDeclAlign(VD);
    else if (const auto *E = Ptr.getDeclDesc()->asExpr())
      BaseAlignment = GetAlignOfExpr(S.getASTContext(), E, UETT_AlignOf);

    if (BaseAlignment < Align) {
      S.CCEDiag(Call->getArg(0),
                diag::note_constexpr_baa_insufficient_alignment)
          << 0 << BaseAlignment.getQuantity() << Align.getQuantity();
      return false;
    }
  }

  // Check the pointer's actual offset (minus any asserted extra offset)
  // against the claimed alignment.
  APValue AV = Ptr.toAPValue(S.getASTContext());
  CharUnits AVOffset = AV.getLValueOffset();
  if (ExtraOffset)
    AVOffset -= CharUnits::fromQuantity(ExtraOffset->getZExtValue());
  if (AVOffset.alignTo(Align) != AVOffset) {
    if (Ptr.isBlockPointer())
      S.CCEDiag(Call->getArg(0),
                diag::note_constexpr_baa_insufficient_alignment)
          << 1 << AVOffset.getQuantity() << Align.getQuantity();
    else
      S.CCEDiag(Call->getArg(0),
                diag::note_constexpr_baa_value_insufficient_alignment)
          << AVOffset.getQuantity() << Align.getQuantity();
    return false;
  }

  // The assumption holds; the builtin's value is the pointer unchanged.
  S.Stk.push<Pointer>(Ptr);
  return true;
}
1296
1298 const InterpFrame *Frame,
1299 const CallExpr *Call) {
1300 if (Call->getNumArgs() != 2 || !Call->getArg(0)->getType()->isIntegerType() ||
1301 !Call->getArg(1)->getType()->isIntegerType())
1302 return false;
1303
1304 APSInt Index = popToAPSInt(S, Call->getArg(1));
1305 APSInt Val = popToAPSInt(S, Call->getArg(0));
1306
1307 unsigned BitWidth = Val.getBitWidth();
1308 uint64_t Shift = Index.extractBitsAsZExtValue(8, 0);
1309 uint64_t Length = Index.extractBitsAsZExtValue(8, 8);
1310 Length = Length > BitWidth ? BitWidth : Length;
1311
1312 // Handle out of bounds cases.
1313 if (Length == 0 || Shift >= BitWidth) {
1314 pushInteger(S, 0, Call->getType());
1315 return true;
1316 }
1317
1318 uint64_t Result = Val.getZExtValue() >> Shift;
1319 Result &= llvm::maskTrailingOnes<uint64_t>(Length);
1320 pushInteger(S, Result, Call->getType());
1321 return true;
1322}
1323
1325 const InterpFrame *Frame,
1326 const CallExpr *Call) {
1327 QualType CallType = Call->getType();
1328 if (Call->getNumArgs() != 2 || !Call->getArg(0)->getType()->isIntegerType() ||
1329 !Call->getArg(1)->getType()->isIntegerType() ||
1330 !CallType->isIntegerType())
1331 return false;
1332
1333 APSInt Idx = popToAPSInt(S, Call->getArg(1));
1334 APSInt Val = popToAPSInt(S, Call->getArg(0));
1335
1336 unsigned BitWidth = Val.getBitWidth();
1337 uint64_t Index = Idx.extractBitsAsZExtValue(8, 0);
1338
1339 if (Index < BitWidth)
1340 Val.clearHighBits(BitWidth - Index);
1341
1342 pushInteger(S, Val, CallType);
1343 return true;
1344}
1345
1347 const InterpFrame *Frame,
1348 const CallExpr *Call) {
1349 if (Call->getNumArgs() != 2 || !Call->getArg(0)->getType()->isIntegerType() ||
1350 !Call->getArg(1)->getType()->isIntegerType())
1351 return false;
1352
1353 APSInt Mask = popToAPSInt(S, Call->getArg(1));
1354 APSInt Val = popToAPSInt(S, Call->getArg(0));
1355
1356 unsigned BitWidth = Val.getBitWidth();
1357 APInt Result = APInt::getZero(BitWidth);
1358 for (unsigned I = 0, P = 0; I != BitWidth; ++I) {
1359 if (Mask[I])
1360 Result.setBitVal(I, Val[P++]);
1361 }
1362 pushInteger(S, std::move(Result), Call->getType());
1363 return true;
1364}
1365
1367 const InterpFrame *Frame,
1368 const CallExpr *Call) {
1369 if (Call->getNumArgs() != 2 || !Call->getArg(0)->getType()->isIntegerType() ||
1370 !Call->getArg(1)->getType()->isIntegerType())
1371 return false;
1372
1373 APSInt Mask = popToAPSInt(S, Call->getArg(1));
1374 APSInt Val = popToAPSInt(S, Call->getArg(0));
1375
1376 unsigned BitWidth = Val.getBitWidth();
1377 APInt Result = APInt::getZero(BitWidth);
1378 for (unsigned I = 0, P = 0; I != BitWidth; ++I) {
1379 if (Mask[I])
1380 Result.setBitVal(P++, Val[I]);
1381 }
1382 pushInteger(S, std::move(Result), Call->getType());
1383 return true;
1384}
1385
/// (CarryIn, LHS, RHS, Result)
                                                 CodePtr OpPC,
                                                 const InterpFrame *Frame,
                                                 const CallExpr *Call,
                                                 unsigned BuiltinOp) {
  // The three value operands must be integers; the fourth is an out-pointer.
  if (Call->getNumArgs() != 4 || !Call->getArg(0)->getType()->isIntegerType() ||
      !Call->getArg(1)->getType()->isIntegerType() ||
      !Call->getArg(2)->getType()->isIntegerType())
    return false;

  // Arguments were pushed left-to-right, so pop in reverse order.
  const Pointer &CarryOutPtr = S.Stk.pop<Pointer>();

  APSInt RHS = popToAPSInt(S, Call->getArg(2));
  APSInt LHS = popToAPSInt(S, Call->getArg(1));
  APSInt CarryIn = popToAPSInt(S, Call->getArg(0));

  bool IsAdd = BuiltinOp == clang::X86::BI__builtin_ia32_addcarryx_u32 ||
               BuiltinOp == clang::X86::BI__builtin_ia32_addcarryx_u64;

  // Compute in BitWidth+1 bits so the carry/borrow lands in the extra top
  // bit. Any nonzero carry-in counts as 1.
  unsigned BitWidth = LHS.getBitWidth();
  unsigned CarryInBit = CarryIn.ugt(0) ? 1 : 0;
  APInt ExResult =
      IsAdd ? (LHS.zext(BitWidth + 1) + (RHS.zext(BitWidth + 1) + CarryInBit))
            : (LHS.zext(BitWidth + 1) - (RHS.zext(BitWidth + 1) + CarryInBit));

  // The low bits are the arithmetic result; the top bit is the carry-out.
  APInt Result = ExResult.extractBits(BitWidth, 0);
  APSInt CarryOut =
      APSInt(ExResult.extractBits(1, BitWidth), /*IsUnsigned=*/true);

  // Store the arithmetic result through the fourth argument and return the
  // carry/borrow bit as the call's value.
  QualType CarryOutType = Call->getArg(3)->getType()->getPointeeType();
  PrimType CarryOutT = *S.getContext().classify(CarryOutType);
  assignInteger(S, CarryOutPtr, CarryOutT, APSInt(std::move(Result), true));

  pushInteger(S, CarryOut, Call->getType());

  return true;
}
1424
1426 CodePtr OpPC,
1427 const InterpFrame *Frame,
1428 const CallExpr *Call) {
1431 pushInteger(S, Layout.size().getQuantity(), Call->getType());
1432 return true;
1433}
1434
1435static bool
1437 const InterpFrame *Frame,
1438 const CallExpr *Call) {
1439 const auto &Ptr = S.Stk.pop<Pointer>();
1440 assert(Ptr.getFieldDesc()->isPrimitiveArray());
1441
1442 // This should be created for a StringLiteral, so should alway shold at least
1443 // one array element.
1444 assert(Ptr.getFieldDesc()->getNumElems() >= 1);
1445 StringRef R(&Ptr.deref<char>(), Ptr.getFieldDesc()->getNumElems() - 1);
1446 uint64_t Result = getPointerAuthStableSipHash(R);
1447 pushInteger(S, Result, Call->getType());
1448 return true;
1449}
1450
1452 const InterpFrame *Frame,
1453 const CallExpr *Call) {
1454 // A call to __operator_new is only valid within std::allocate<>::allocate.
1455 // Walk up the call stack to find the appropriate caller and get the
1456 // element type from it.
1457 auto [NewCall, ElemType] = S.getStdAllocatorCaller("allocate");
1458
1459 if (ElemType.isNull()) {
1460 S.FFDiag(Call, S.getLangOpts().CPlusPlus20
1461 ? diag::note_constexpr_new_untyped
1462 : diag::note_constexpr_new);
1463 return false;
1464 }
1465 assert(NewCall);
1466
1467 if (ElemType->isIncompleteType() || ElemType->isFunctionType()) {
1468 S.FFDiag(Call, diag::note_constexpr_new_not_complete_object_type)
1469 << (ElemType->isIncompleteType() ? 0 : 1) << ElemType;
1470 return false;
1471 }
1472
1473 // We only care about the first parameter (the size), so discard all the
1474 // others.
1475 {
1476 unsigned NumArgs = Call->getNumArgs();
1477 assert(NumArgs >= 1);
1478
1479 // The std::nothrow_t arg never gets put on the stack.
1480 if (Call->getArg(NumArgs - 1)->getType()->isNothrowT())
1481 --NumArgs;
1482 auto Args = ArrayRef(Call->getArgs(), Call->getNumArgs());
1483 // First arg is needed.
1484 Args = Args.drop_front();
1485
1486 // Discard the rest.
1487 for (const Expr *Arg : Args)
1488 discard(S.Stk, *S.getContext().classify(Arg));
1489 }
1490
1491 APSInt Bytes = popToAPSInt(S, Call->getArg(0));
1492 CharUnits ElemSize = S.getASTContext().getTypeSizeInChars(ElemType);
1493 assert(!ElemSize.isZero());
1494 // Divide the number of bytes by sizeof(ElemType), so we get the number of
1495 // elements we should allocate.
1496 APInt NumElems, Remainder;
1497 APInt ElemSizeAP(Bytes.getBitWidth(), ElemSize.getQuantity());
1498 APInt::udivrem(Bytes, ElemSizeAP, NumElems, Remainder);
1499 if (Remainder != 0) {
1500 // This likely indicates a bug in the implementation of 'std::allocator'.
1501 S.FFDiag(Call, diag::note_constexpr_operator_new_bad_size)
1502 << Bytes << APSInt(ElemSizeAP, true) << ElemType;
1503 return false;
1504 }
1505
1506 // NB: The same check we're using in CheckArraySize()
1507 if (NumElems.getActiveBits() >
1509 NumElems.ugt(Descriptor::MaxArrayElemBytes / ElemSize.getQuantity())) {
1510 // FIXME: NoThrow check?
1511 const SourceInfo &Loc = S.Current->getSource(OpPC);
1512 S.FFDiag(Loc, diag::note_constexpr_new_too_large)
1513 << NumElems.getZExtValue();
1514 return false;
1515 }
1516
1517 if (!CheckArraySize(S, OpPC, NumElems.getZExtValue()))
1518 return false;
1519
1520 bool IsArray = NumElems.ugt(1);
1521 OptPrimType ElemT = S.getContext().classify(ElemType);
1522 DynamicAllocator &Allocator = S.getAllocator();
1523 if (ElemT) {
1524 Block *B =
1525 Allocator.allocate(NewCall, *ElemT, NumElems.getZExtValue(),
1527 assert(B);
1528 S.Stk.push<Pointer>(Pointer(B).atIndex(0));
1529 return true;
1530 }
1531
1532 assert(!ElemT);
1533
1534 // Composite arrays
1535 if (IsArray) {
1536 const Descriptor *Desc =
1537 S.P.createDescriptor(NewCall, ElemType.getTypePtr(), std::nullopt);
1538 Block *B =
1539 Allocator.allocate(Desc, NumElems.getZExtValue(), S.Ctx.getEvalID(),
1541 assert(B);
1542 S.Stk.push<Pointer>(Pointer(B).atIndex(0));
1543 return true;
1544 }
1545
1546 // Records. Still allocate them as single-element arrays.
1548 ElemType, NumElems, nullptr, ArraySizeModifier::Normal, 0);
1549
1550 const Descriptor *Desc = S.P.createDescriptor(NewCall, AllocType.getTypePtr(),
1552 Block *B = Allocator.allocate(Desc, S.getContext().getEvalID(),
1554 assert(B);
1555 S.Stk.push<Pointer>(Pointer(B).atIndex(0).narrow());
1556 return true;
1557}
1558
                                            const InterpFrame *Frame,
                                            const CallExpr *Call) {
  const Expr *Source = nullptr;
  const Block *BlockToDelete = nullptr;

    // Bail out early, but the pointer argument still has to be consumed.
    S.Stk.discard<Pointer>();
    return false;
  }

  // This is permitted only within a call to std::allocator<T>::deallocate.
  if (!S.getStdAllocatorCaller("deallocate")) {
    S.FFDiag(Call);
    S.Stk.discard<Pointer>();
    return true;
  }

  {
    const Pointer &Ptr = S.Stk.pop<Pointer>();

    // Deallocating a null pointer is a no-op (diagnosed, but not an error).
    if (Ptr.isZero()) {
      S.CCEDiag(Call, diag::note_constexpr_deallocate_null);
      return true;
    }

    Source = Ptr.getDeclDesc()->asExpr();
    BlockToDelete = Ptr.block();

    // Only dynamically allocated memory may be deallocated here.
    if (!BlockToDelete->isDynamic()) {
      S.FFDiag(Call, diag::note_constexpr_delete_not_heap_alloc)
      if (const auto *D = Ptr.getFieldDesc()->asDecl())
        S.Note(D->getLocation(), diag::note_declared_at);
    }
  }
  assert(BlockToDelete);

  // Remember how the memory was allocated before freeing it, so the
  // allocation/deallocation forms can be matched afterwards.
  DynamicAllocator &Allocator = S.getAllocator();
  const Descriptor *BlockDesc = BlockToDelete->getDescriptor();
  std::optional<DynamicAllocator::Form> AllocForm =
      Allocator.getAllocationForm(Source);

  if (!Allocator.deallocate(Source, BlockToDelete, S)) {
    // Nothing has been deallocated, this must be a double-delete.
    const SourceInfo &Loc = S.Current->getSource(OpPC);
    S.FFDiag(Loc, diag::note_constexpr_double_delete);
    return false;
  }
  assert(AllocForm);

  return CheckNewDeleteForms(
      S, OpPC, *AllocForm, DynamicAllocator::Form::Operator, BlockDesc, Source);
}
1613
                                 const InterpFrame *Frame,
                                 const CallExpr *Call) {
  // Pops the floating-point argument and pushes it back unchanged, i.e. this
  // builtin is an identity at constant-evaluation time. (The signature line
  // is not visible in this chunk, so which builtin this implements cannot be
  // confirmed here — NOTE(review): verify against the dispatch switch.)
  const Floating &Arg0 = S.Stk.pop<Floating>();
  S.Stk.push<Floating>(Arg0);
  return true;
}
1621
                                 const CallExpr *Call, unsigned ID) {
  // Horizontal reduction: fold all elements of the vector argument into a
  // single scalar using the operation selected by the builtin ID.
  const Pointer &Arg = S.Stk.pop<Pointer>();
  assert(Arg.getFieldDesc()->isPrimitiveArray());

  QualType ElemType = Arg.getFieldDesc()->getElemQualType();
  assert(Call->getType() == ElemType);
  PrimType ElemT = *S.getContext().classify(ElemType);
  unsigned NumElems = Arg.getNumElems();

    // Seed the accumulator with the first element, then fold in the rest.
    T Result = Arg.elem<T>(0);
    unsigned BitWidth = Result.bitWidth();
    for (unsigned I = 1; I != NumElems; ++I) {
      T Elem = Arg.elem<T>(I);
      T PrevResult = Result;

      if (ID == Builtin::BI__builtin_reduce_add) {
        if (T::add(Result, Elem, BitWidth, &Result)) {
          // Overflow: one extra bit is enough to represent the exact sum.
          unsigned OverflowBits = BitWidth + 1;
          (void)handleOverflow(S, OpPC,
                               (PrevResult.toAPSInt(OverflowBits) +
                                Elem.toAPSInt(OverflowBits)));
          return false;
        }
      } else if (ID == Builtin::BI__builtin_reduce_mul) {
        if (T::mul(Result, Elem, BitWidth, &Result)) {
          // Overflow: a product needs at most double the bits.
          unsigned OverflowBits = BitWidth * 2;
          (void)handleOverflow(S, OpPC,
                               (PrevResult.toAPSInt(OverflowBits) *
                                Elem.toAPSInt(OverflowBits)));
          return false;
        }

      } else if (ID == Builtin::BI__builtin_reduce_and) {
        (void)T::bitAnd(Result, Elem, BitWidth, &Result);
      } else if (ID == Builtin::BI__builtin_reduce_or) {
        (void)T::bitOr(Result, Elem, BitWidth, &Result);
      } else if (ID == Builtin::BI__builtin_reduce_xor) {
        (void)T::bitXor(Result, Elem, BitWidth, &Result);
      } else if (ID == Builtin::BI__builtin_reduce_min) {
        if (Elem < Result)
          Result = Elem;
      } else if (ID == Builtin::BI__builtin_reduce_max) {
        if (Elem > Result)
          Result = Elem;
      } else {
        llvm_unreachable("Unhandled vector reduce builtin");
      }
    }
    pushInteger(S, Result.toAPSInt(), Call->getType());
  });

  return true;
}
1677
                                 const InterpFrame *Frame,
                                 const CallExpr *Call,
                                 unsigned BuiltinID) {
  // Absolute value of a scalar integer, scalar float, or (element-wise) a
  // vector of either.
  assert(Call->getNumArgs() == 1);
  QualType Ty = Call->getArg(0)->getType();
  if (Ty->isIntegerType()) {
    APSInt Val = popToAPSInt(S, Call->getArg(0));
    pushInteger(S, Val.abs(), Call->getType());
    return true;
  }

  if (Ty->isFloatingType()) {
    Floating Val = S.Stk.pop<Floating>();
    Floating Result = abs(S, Val);
    S.Stk.push<Floating>(Result);
    return true;
  }

  // Otherwise, the argument must be a vector.
  assert(Call->getArg(0)->getType()->isVectorType());
  const Pointer &Arg = S.Stk.pop<Pointer>();
  assert(Arg.getFieldDesc()->isPrimitiveArray());
  const Pointer &Dst = S.Stk.peek<Pointer>();
  assert(Dst.getFieldDesc()->isPrimitiveArray());
  assert(Arg.getFieldDesc()->getNumElems() ==
         Dst.getFieldDesc()->getNumElems());

  QualType ElemType = Arg.getFieldDesc()->getElemQualType();
  PrimType ElemT = *S.getContext().classify(ElemType);
  unsigned NumElems = Arg.getNumElems();
  // we can either have a vector of integer or a vector of floating point
  for (unsigned I = 0; I != NumElems; ++I) {
    if (ElemType->isIntegerType()) {
        Dst.elem<T>(I) = T::from(static_cast<T>(
            APSInt(Arg.elem<T>(I).toAPSInt().abs(),
      });
    } else {
      Floating Val = Arg.elem<Floating>(I);
      Dst.elem<Floating>(I) = abs(S, Val);
    }
  }

  return true;
}
1726
/// Can be called with an integer or vector as the first and only parameter.
                                 const InterpFrame *Frame,
                                 const CallExpr *Call,
                                 unsigned BuiltinID) {
  // Handles both popcount and bitreverse, selected by BuiltinID.
  assert(Call->getNumArgs() == 1);
  if (Call->getArg(0)->getType()->isIntegerType()) {
    APSInt Val = popToAPSInt(S, Call->getArg(0));

    if (BuiltinID == Builtin::BI__builtin_elementwise_popcount) {
      pushInteger(S, Val.popcount(), Call->getType());
    } else {
      pushInteger(S, Val.reverseBits(), Call->getType());
    }
    return true;
  }
  // Otherwise, the argument must be a vector.
  assert(Call->getArg(0)->getType()->isVectorType());
  const Pointer &Arg = S.Stk.pop<Pointer>();
  assert(Arg.getFieldDesc()->isPrimitiveArray());
  const Pointer &Dst = S.Stk.peek<Pointer>();
  assert(Dst.getFieldDesc()->isPrimitiveArray());
  assert(Arg.getFieldDesc()->getNumElems() ==
         Dst.getFieldDesc()->getNumElems());

  QualType ElemType = Arg.getFieldDesc()->getElemQualType();
  PrimType ElemT = *S.getContext().classify(ElemType);
  unsigned NumElems = Arg.getNumElems();

  // FIXME: Reading from uninitialized vector elements?
  for (unsigned I = 0; I != NumElems; ++I) {
      if (BuiltinID == Builtin::BI__builtin_elementwise_popcount) {
        Dst.elem<T>(I) = T::from(Arg.elem<T>(I).toAPSInt().popcount());
      } else {
        Dst.elem<T>(I) =
            T::from(Arg.elem<T>(I).toAPSInt().reverseBits().getZExtValue());
      }
    });
  }

  return true;
}
1771
/// Can be called with an integer or vector as the first and only parameter.
                                                CodePtr OpPC,
                                                const InterpFrame *Frame,
                                                const CallExpr *Call,
                                                unsigned BuiltinID) {
  // clzg/ctzg accept an optional second argument: the value to return for a
  // zero input. Without it, a zero input is not a constant expression.
  const bool HasZeroArg = Call->getNumArgs() == 2;
  const bool IsCTTZ = BuiltinID == Builtin::BI__builtin_elementwise_ctzg;
  assert(Call->getNumArgs() == 1 || HasZeroArg);
  if (Call->getArg(0)->getType()->isIntegerType()) {
    PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
    APSInt Val = popToAPSInt(S.Stk, ArgT);
    std::optional<APSInt> ZeroVal;
    if (HasZeroArg) {
      // Pop order is reversed: the fallback value was pushed last.
      ZeroVal = Val;
      Val = popToAPSInt(S.Stk, ArgT);
    }

    if (Val.isZero()) {
      if (ZeroVal) {
        pushInteger(S, *ZeroVal, Call->getType());
        return true;
      }
      // If we haven't been provided the second argument, the result is
      // undefined
      S.FFDiag(S.Current->getSource(OpPC),
               diag::note_constexpr_countzeroes_zero)
          << /*IsTrailing=*/IsCTTZ;
      return false;
    }

    if (BuiltinID == Builtin::BI__builtin_elementwise_clzg) {
      pushInteger(S, Val.countLeadingZeros(), Call->getType());
    } else {
      pushInteger(S, Val.countTrailingZeros(), Call->getType());
    }
    return true;
  }
  // Otherwise, the argument must be a vector.
  const ASTContext &ASTCtx = S.getASTContext();
  Pointer ZeroArg;
  if (HasZeroArg) {
    assert(Call->getArg(1)->getType()->isVectorType() &&
           ASTCtx.hasSameUnqualifiedType(Call->getArg(0)->getType(),
                                         Call->getArg(1)->getType()));
    (void)ASTCtx;
    ZeroArg = S.Stk.pop<Pointer>();
    assert(ZeroArg.getFieldDesc()->isPrimitiveArray());
  }
  assert(Call->getArg(0)->getType()->isVectorType());
  const Pointer &Arg = S.Stk.pop<Pointer>();
  assert(Arg.getFieldDesc()->isPrimitiveArray());
  const Pointer &Dst = S.Stk.peek<Pointer>();
  assert(Dst.getFieldDesc()->isPrimitiveArray());
  assert(Arg.getFieldDesc()->getNumElems() ==
         Dst.getFieldDesc()->getNumElems());

  QualType ElemType = Arg.getFieldDesc()->getElemQualType();
  PrimType ElemT = *S.getContext().classify(ElemType);
  unsigned NumElems = Arg.getNumElems();

  // FIXME: Reading from uninitialized vector elements?
  for (unsigned I = 0; I != NumElems; ++I) {
      APInt EltVal = Arg.atIndex(I).deref<T>().toAPSInt();
      if (EltVal.isZero()) {
        if (HasZeroArg) {
          // Per-element fallback comes from the second vector.
          Dst.atIndex(I).deref<T>() = ZeroArg.atIndex(I).deref<T>();
        } else {
          // If we haven't been provided the second argument, the result is
          // undefined
          S.FFDiag(S.Current->getSource(OpPC),
                   diag::note_constexpr_countzeroes_zero)
              << /*IsTrailing=*/IsCTTZ;
          return false;
        }
      } else if (IsCTTZ) {
        Dst.atIndex(I).deref<T>() = T::from(EltVal.countTrailingZeros());
      } else {
        Dst.atIndex(I).deref<T>() = T::from(EltVal.countLeadingZeros());
      }
      Dst.atIndex(I).initialize();
    });
  }

  return true;
}
1859
                                   const InterpFrame *Frame,
                                   const CallExpr *Call, unsigned ID) {
  // Implements the memcpy/memmove family (including wide-char and
  // __builtin_ variants): copy Size bytes from Src to Dest, push Dest.
  assert(Call->getNumArgs() == 3);
  const ASTContext &ASTCtx = S.getASTContext();
  APSInt Size = popToAPSInt(S, Call->getArg(2));
  const Pointer SrcPtr = S.Stk.pop<Pointer>();
  const Pointer DestPtr = S.Stk.pop<Pointer>();

  assert(!Size.isSigned() && "memcpy and friends take an unsigned size");

  // The plain library functions (as opposed to the __builtin_ forms) are
  // not usable in constant expressions; diagnose accordingly.
  if (ID == Builtin::BImemcpy || ID == Builtin::BImemmove)
    diagnoseNonConstexprBuiltin(S, OpPC, ID);

  bool Move =
      (ID == Builtin::BI__builtin_memmove || ID == Builtin::BImemmove ||
       ID == Builtin::BI__builtin_wmemmove || ID == Builtin::BIwmemmove);
  bool WChar = ID == Builtin::BIwmemcpy || ID == Builtin::BIwmemmove ||
               ID == Builtin::BI__builtin_wmemcpy ||
               ID == Builtin::BI__builtin_wmemmove;

  // If the size is zero, we treat this as always being a valid no-op.
  if (Size.isZero()) {
    S.Stk.push<Pointer>(DestPtr);
    return true;
  }

  // Null source or destination is never valid for a nonzero size.
  if (SrcPtr.isZero() || DestPtr.isZero()) {
    Pointer DiagPtr = (SrcPtr.isZero() ? SrcPtr : DestPtr);
    S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_memcpy_null)
        << /*IsMove=*/Move << /*IsWchar=*/WChar << !SrcPtr.isZero()
        << DiagPtr.toDiagnosticString(ASTCtx);
    return false;
  }

  // Diagnose integral src/dest pointers specially.
  if (SrcPtr.isIntegralPointer() || DestPtr.isIntegralPointer()) {
    std::string DiagVal = "(void *)";
    DiagVal += SrcPtr.isIntegralPointer()
                   ? std::to_string(SrcPtr.getIntegerRepresentation())
                   : std::to_string(DestPtr.getIntegerRepresentation());
    S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_memcpy_null)
        << Move << WChar << DestPtr.isIntegralPointer() << DiagVal;
    return false;
  }

  // Can't read from dummy pointers.
  if (DestPtr.isDummy() || SrcPtr.isDummy())
    return false;

  // Both pointee types, and the destination element type, must be complete.
  if (DestPtr.getType()->isIncompleteType()) {
    S.FFDiag(S.Current->getSource(OpPC),
             diag::note_constexpr_memcpy_incomplete_type)
        << Move << DestPtr.getType();
    return false;
  }
  if (SrcPtr.getType()->isIncompleteType()) {
    S.FFDiag(S.Current->getSource(OpPC),
             diag::note_constexpr_memcpy_incomplete_type)
        << Move << SrcPtr.getType();
    return false;
  }

  QualType DestElemType = getElemType(DestPtr);
  if (DestElemType->isIncompleteType()) {
    S.FFDiag(S.Current->getSource(OpPC),
             diag::note_constexpr_memcpy_incomplete_type)
        << Move << DestElemType;
    return false;
  }

  // Count the whole elements remaining from the destination index onwards.
  size_t RemainingDestElems;
  if (DestPtr.getFieldDesc()->isArray()) {
    RemainingDestElems = DestPtr.isUnknownSizeArray()
                             ? 0
                             : (DestPtr.getNumElems() - DestPtr.getIndex());
  } else {
    RemainingDestElems = 1;
  }
  unsigned DestElemSize = ASTCtx.getTypeSizeInChars(DestElemType).getQuantity();

  // The wide variants measure Size in wide characters; convert to bytes.
  if (WChar) {
    uint64_t WCharSize =
        ASTCtx.getTypeSizeInChars(ASTCtx.getWCharType()).getQuantity();
    Size *= APSInt(APInt(Size.getBitWidth(), WCharSize, /*IsSigned=*/false),
                   /*IsUnsigned=*/true);
  }

  // Only whole-element copies are supported.
  if (Size.urem(DestElemSize) != 0) {
    S.FFDiag(S.Current->getSource(OpPC),
             diag::note_constexpr_memcpy_unsupported)
        << Move << WChar << 0 << DestElemType << Size << DestElemSize;
    return false;
  }

  QualType SrcElemType = getElemType(SrcPtr);
  size_t RemainingSrcElems;
  if (SrcPtr.getFieldDesc()->isArray()) {
    RemainingSrcElems = SrcPtr.isUnknownSizeArray()
                            ? 0
                            : (SrcPtr.getNumElems() - SrcPtr.getIndex());
  } else {
    RemainingSrcElems = 1;
  }
  unsigned SrcElemSize = ASTCtx.getTypeSizeInChars(SrcElemType).getQuantity();

  // Copying between different element types would be a type pun.
  if (!ASTCtx.hasSameUnqualifiedType(DestElemType, SrcElemType)) {
    S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_memcpy_type_pun)
        << Move << SrcElemType << DestElemType;
    return false;
  }

  if (!DestElemType.isTriviallyCopyableType(ASTCtx)) {
    S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_memcpy_nontrivial)
        << Move << DestElemType;
    return false;
  }

  // Check if we have enough elements to read from and write to.
  size_t RemainingDestBytes = RemainingDestElems * DestElemSize;
  size_t RemainingSrcBytes = RemainingSrcElems * SrcElemSize;
  if (Size.ugt(RemainingDestBytes) || Size.ugt(RemainingSrcBytes)) {
    APInt N = Size.udiv(DestElemSize);
    S.FFDiag(S.Current->getSource(OpPC),
             diag::note_constexpr_memcpy_unsupported)
        << Move << WChar << (Size.ugt(RemainingSrcBytes) ? 1 : 2)
        << DestElemType << toString(N, 10, /*Signed=*/false);
    return false;
  }

  // Check for overlapping memory regions.
  if (!Move && Pointer::pointToSameBlock(SrcPtr, DestPtr)) {
    // Remove base casts.
    Pointer SrcP = SrcPtr;
    while (SrcP.isBaseClass())
      SrcP = SrcP.getBase();

    Pointer DestP = DestPtr;
    while (DestP.isBaseClass())
      DestP = DestP.getBase();

    // Compare byte offsets of the (expanded) source and destination ranges.
    unsigned SrcIndex = SrcP.expand().getIndex() * SrcP.elemSize();
    unsigned DstIndex = DestP.expand().getIndex() * DestP.elemSize();
    unsigned N = Size.getZExtValue();

    if ((SrcIndex <= DstIndex && (SrcIndex + N) > DstIndex) ||
        (DstIndex <= SrcIndex && (DstIndex + N) > SrcIndex)) {
      S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_memcpy_overlap)
          << /*IsWChar=*/false;
      return false;
    }
  }

  assert(Size.getZExtValue() % DestElemSize == 0);
  if (!DoMemcpy(S, OpPC, SrcPtr, DestPtr, Bytes(Size.getZExtValue()).toBits()))
    return false;

  S.Stk.push<Pointer>(DestPtr);
  return true;
}
2020
2021/// Determine if T is a character type for which we guarantee that
2022/// sizeof(T) == 1.
2024 return T->isCharType() || T->isChar8Type();
2025}
2026
/// Constant-evaluates memcmp/bcmp/wmemcmp (and their __builtin_ forms):
/// pops Size, PtrB and PtrA off the interpreter stack, byte-compares (or
/// wchar_t-compares) the two buffers, and pushes -1, 0 or 1 of the call's
/// result type. Returns false to signal a non-constant evaluation.
/// NOTE(review): the first signature line (2027) was dropped by the doc
/// extraction; the lines below are the remaining parameters plus the body.
2028 const InterpFrame *Frame,
2029 const CallExpr *Call, unsigned ID) {
2030 assert(Call->getNumArgs() == 3);
 // Arguments were pushed left-to-right; pop them in reverse order.
2031 const APSInt &Size = popToAPSInt(S, Call->getArg(2));
2032 const Pointer &PtrB = S.Stk.pop<Pointer>();
2033 const Pointer &PtrA = S.Stk.pop<Pointer>();
2034
 // The plain library functions (unlike the __builtin_ variants) are not
 // constexpr; emit the "non-constexpr builtin" note for them.
2035 if (ID == Builtin::BImemcmp || ID == Builtin::BIbcmp ||
2036 ID == Builtin::BIwmemcmp)
2037 diagnoseNonConstexprBuiltin(S, OpPC, ID);
2038
 // Comparing zero bytes/elements always yields equality (0), even before
 // any pointer validity checks.
2039 if (Size.isZero()) {
2040 pushInteger(S, 0, Call->getType());
2041 return true;
2042 }
2043
2044 bool IsWide =
2045 (ID == Builtin::BIwmemcmp || ID == Builtin::BI__builtin_wmemcmp);
2046
2047 const ASTContext &ASTCtx = S.getASTContext();
2048 QualType ElemTypeA = getElemType(PtrA);
2049 QualType ElemTypeB = getElemType(PtrB);
2050 // FIXME: This is an arbitrary limitation the current constant interpreter
2051 // had. We could remove this.
2052 if (!IsWide && (!isOneByteCharacterType(ElemTypeA) ||
2053 !isOneByteCharacterType(ElemTypeB))) {
2054 S.FFDiag(S.Current->getSource(OpPC),
2055 diag::note_constexpr_memcmp_unsupported)
2056 << ASTCtx.BuiltinInfo.getQuotedName(ID) << PtrA.getType()
2057 << PtrB.getType();
2058 return false;
2059 }
2060
 // Dummy pointers have no readable storage behind them; bail out.
2061 if (PtrA.isDummy() || PtrB.isDummy())
2062 return false;
2063
2064 // Now, read both pointers to a buffer and compare those.
2065 BitcastBuffer BufferA(
2066 Bits(ASTCtx.getTypeSize(ElemTypeA) * PtrA.getNumElems()));
2067 readPointerToBuffer(S.getContext(), PtrA, BufferA, false);
2068 // FIXME: The swapping here is UNDOING something we do when reading the
2069 // data into the buffer.
2070 if (ASTCtx.getTargetInfo().isBigEndian())
2071 swapBytes(BufferA.Data.get(), BufferA.byteSize().getQuantity());
2072
2073 BitcastBuffer BufferB(
2074 Bits(ASTCtx.getTypeSize(ElemTypeB) * PtrB.getNumElems()));
2075 readPointerToBuffer(S.getContext(), PtrB, BufferB, false);
2076 // FIXME: The swapping here is UNDOING something we do when reading the
2077 // data into the buffer.
2078 if (ASTCtx.getTargetInfo().isBigEndian())
2079 swapBytes(BufferB.Data.get(), BufferB.byteSize().getQuantity());
2080
 // Never compare more than the smaller of the two buffers actually holds.
2081 size_t MinBufferSize = std::min(BufferA.byteSize().getQuantity(),
2082 BufferB.byteSize().getQuantity());
2083
2084 unsigned ElemSize = 1;
2085 if (IsWide)
2086 ElemSize = ASTCtx.getTypeSizeInChars(ASTCtx.getWCharType()).getQuantity();
2087 // The Size given for the wide variants is in wide-char units. Convert it
2088 // to bytes.
2089 size_t ByteSize = Size.getZExtValue() * ElemSize;
2090 size_t CmpSize = std::min(MinBufferSize, ByteSize);
2091
2092 for (size_t I = 0; I != CmpSize; I += ElemSize) {
2093 if (IsWide) {
 // NOTE(review): a TYPE_SWITCH-style macro invocation line (2094) was
 // elided by the extraction; the statements below run inside its lambda
 // and compare one wchar_t-sized unit per iteration.
2095 T A = *reinterpret_cast<T *>(BufferA.Data.get() + I);
2096 T B = *reinterpret_cast<T *>(BufferB.Data.get() + I);
2097 if (A < B) {
2098 pushInteger(S, -1, Call->getType());
2099 return true;
2100 }
2101 if (A > B) {
2102 pushInteger(S, 1, Call->getType());
2103 return true;
2104 }
2105 });
2106 } else {
 // Narrow variants compare raw bytes.
2107 std::byte A = BufferA.Data[I];
2108 std::byte B = BufferB.Data[I];
2109
2110 if (A < B) {
2111 pushInteger(S, -1, Call->getType());
2112 return true;
2113 }
2114 if (A > B) {
2115 pushInteger(S, 1, Call->getType());
2116 return true;
2117 }
2118 }
2119 }
2120
2121 // We compared CmpSize bytes above. If the limiting factor was the Size
2122 // passed, we're done and the result is equality (0).
2123 if (ByteSize <= CmpSize) {
2124 pushInteger(S, 0, Call->getType());
2125 return true;
2126 }
2127
2128 // However, if we read all the available bytes but were instructed to read
2129 // even more, diagnose this as a "read of dereferenced one-past-the-end
2130 // pointer". This is what would happen if we called CheckLoad() on every array
2131 // element.
2132 S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_access_past_end)
2133 << AK_Read << S.Current->getRange(OpPC)
2134 return false;
2135}
2136
2137// __builtin_memchr(ptr, int, int)
2138// __builtin_strchr(ptr, int)
/// Constant-evaluates the memchr/strchr/wmemchr/wcschr family: scans the
/// pointed-to array for the desired value and pushes a Pointer to the first
/// match, or a null Pointer if not found within MaxLength (when given).
/// NOTE(review): the signature's first line (2139) was dropped by the doc
/// extraction; the lines below are the remaining parameters plus the body.
2140 const CallExpr *Call, unsigned ID) {
 // Non-__builtin library entry points are not constexpr; diagnose.
2141 if (ID == Builtin::BImemchr || ID == Builtin::BIwcschr ||
2142 ID == Builtin::BIstrchr || ID == Builtin::BIwmemchr)
2143 diagnoseNonConstexprBuiltin(S, OpPC, ID);
2144
 // The 3-argument forms (memchr/wmemchr) carry an element-count limit.
2145 std::optional<APSInt> MaxLength;
2146 if (Call->getNumArgs() == 3)
2147 MaxLength = popToAPSInt(S, Call->getArg(2));
2148
2149 APSInt Desired = popToAPSInt(S, Call->getArg(1));
2150 const Pointer &Ptr = S.Stk.pop<Pointer>();
2151
 // A zero-length search trivially finds nothing: push a null Pointer.
2152 if (MaxLength && MaxLength->isZero()) {
2153 S.Stk.push<Pointer>();
2154 return true;
2155 }
2156
2157 if (Ptr.isDummy()) {
2158 if (Ptr.getType()->isIncompleteType())
2159 S.FFDiag(S.Current->getSource(OpPC),
2160 diag::note_constexpr_ltor_incomplete_type)
2161 << Ptr.getType();
2162 return false;
2163 }
2164
2165 // Null is only okay if the given size is 0.
2166 if (Ptr.isZero()) {
2167 S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_access_null)
2168 << AK_Read;
2169 return false;
2170 }
2171
2172 QualType ElemTy = Ptr.getFieldDesc()->isArray()
2173 ? Ptr.getFieldDesc()->getElemQualType()
2174 : Ptr.getFieldDesc()->getType();
2175 bool IsRawByte = ID == Builtin::BImemchr || ID == Builtin::BI__builtin_memchr;
2176
2177 // Give up on byte-oriented matching against multibyte elements.
2178 if (IsRawByte && !isOneByteCharacterType(ElemTy)) {
2179 S.FFDiag(S.Current->getSource(OpPC),
2180 diag::note_constexpr_memchr_unsupported)
2181 << S.getASTContext().BuiltinInfo.getQuotedName(ID) << ElemTy;
2182 return false;
2183 }
2184
2185 if (ID == Builtin::BIstrchr || ID == Builtin::BI__builtin_strchr) {
2186 // strchr compares directly to the passed integer, and therefore
2187 // always fails if given an int that is not a char.
2188 if (Desired !=
2189 Desired.trunc(S.getASTContext().getCharWidth()).getSExtValue()) {
2190 S.Stk.push<Pointer>();
2191 return true;
2192 }
2193 }
2194
2195 uint64_t DesiredVal;
2196 if (ID == Builtin::BIwmemchr || ID == Builtin::BI__builtin_wmemchr ||
2197 ID == Builtin::BIwcschr || ID == Builtin::BI__builtin_wcschr) {
2198 // wcschr and wmemchr are given a wchar_t to look for. Just use it.
2199 DesiredVal = Desired.getZExtValue();
2200 } else {
 // Narrow variants match on the low CHAR_BIT bits only.
2201 DesiredVal = Desired.trunc(S.getASTContext().getCharWidth()).getZExtValue();
2202 }
2203
 // The str/wcs variants terminate at a NUL element (without matching it,
 // unless NUL was the desired value handled above).
2204 bool StopAtZero =
2205 (ID == Builtin::BIstrchr || ID == Builtin::BI__builtin_strchr ||
2206 ID == Builtin::BIwcschr || ID == Builtin::BI__builtin_wcschr);
2207
2208 PrimType ElemT =
2209 IsRawByte ? PT_Sint8 : *S.getContext().classify(getElemType(Ptr));
2210
2211 size_t Index = Ptr.getIndex();
2212 size_t Step = 0;
2213 for (;;) {
2214 const Pointer &ElemPtr =
2215 (Index + Step) > 0 ? Ptr.atIndex(Index + Step) : Ptr;
2216
 // CheckLoad diagnoses out-of-bounds / uninitialized reads for us.
2217 if (!CheckLoad(S, OpPC, ElemPtr))
2218 return false;
2219
2220 uint64_t V;
 // NOTE(review): an INT_TYPE_SWITCH-style macro line (2221) was elided
 // by the extraction; the line below is its argument list and lambda.
2222 ElemT, { V = static_cast<uint64_t>(ElemPtr.deref<T>().toUnsigned()); });
2223
2224 if (V == DesiredVal) {
2225 S.Stk.push<Pointer>(ElemPtr);
2226 return true;
2227 }
2228
2229 if (StopAtZero && V == 0)
2230 break;
2231
2232 ++Step;
2233 if (MaxLength && Step == MaxLength->getZExtValue())
2234 break;
2235 }
2236
 // Not found: the result is a null pointer.
2237 S.Stk.push<Pointer>();
2238 return true;
2239}
2240
/// Compute the full size in bytes of the object described by \p Desc:
/// primitive size, element-size * count for arrays, or the record layout
/// size for records. Returns std::nullopt for unhandled descriptor kinds.
2241static std::optional<unsigned> computeFullDescSize(const ASTContext &ASTCtx,
2242 const Descriptor *Desc) {
2243 if (Desc->isPrimitive())
2244 return ASTCtx.getTypeSizeInChars(Desc->getType()).getQuantity();
2245 if (Desc->isArray())
2246 return ASTCtx.getTypeSizeInChars(Desc->getElemQualType()).getQuantity() *
2247 Desc->getNumElems();
2248 if (Desc->isRecord()) {
2249 // Can't use Descriptor::getType() as that may return a pointer type. Look
2250 // at the decl directly.
2251 return ASTCtx
 // NOTE(review): line 2252 (presumably the `.getTypeSizeInChars(` call
 // in this chain) was elided by the doc extraction.
2253 ASTCtx.getCanonicalTagType(Desc->ElemRecord->getDecl()))
2254 .getQuantity();
2255 }
2256
2257 return std::nullopt;
2258}
2259
2260/// Compute the byte offset of \p Ptr in the full declaration.
/// Walks from the pointee up to the declaration root, accumulating array
/// element offsets, base-class offsets (virtual and non-virtual), and
/// field offsets from the AST record layout.
2261static unsigned computePointerOffset(const ASTContext &ASTCtx,
2262 const Pointer &Ptr) {
2263 unsigned Result = 0;
2264
2265 Pointer P = Ptr;
2266 while (P.isField() || P.isArrayElement()) {
2267 P = P.expand();
2268 const Descriptor *D = P.getFieldDesc();
2269
2270 if (P.isArrayElement()) {
2271 unsigned ElemSize =
 // NOTE(review): line 2272 (presumably the element-size computation
 // from the array descriptor) was elided by the doc extraction.
 // One-past-the-end elements sit after all NumElems elements.
2273 if (P.isOnePastEnd())
2274 Result += ElemSize * P.getNumElems();
2275 else
2276 Result += ElemSize * P.getIndex();
2277 P = P.expand().getArray();
2278 } else if (P.isBaseClass()) {
2279 const auto *RD = cast<CXXRecordDecl>(D->asDecl());
2280 bool IsVirtual = Ptr.isVirtualBaseClass();
2281 P = P.getBase();
2282 const Record *BaseRecord = P.getRecord();
2283
 // Base-class offsets come from the enclosing record's layout.
2284 const ASTRecordLayout &Layout =
2285 ASTCtx.getASTRecordLayout(cast<CXXRecordDecl>(BaseRecord->getDecl()));
2286 if (IsVirtual)
2287 Result += Layout.getVBaseClassOffset(RD).getQuantity();
2288 else
2289 Result += Layout.getBaseClassOffset(RD).getQuantity();
2290 } else if (P.isField()) {
2291 const FieldDecl *FD = P.getField();
2292 const ASTRecordLayout &Layout =
2293 ASTCtx.getASTRecordLayout(FD->getParent());
2294 unsigned FieldIndex = FD->getFieldIndex();
 // Field offsets are stored in bits; convert to char units.
2295 uint64_t FieldOffset =
2296 ASTCtx.toCharUnitsFromBits(Layout.getFieldOffset(FieldIndex))
2297 .getQuantity();
2298 Result += FieldOffset;
2299 P = P.getBase();
2300 } else
2301 llvm_unreachable("Unhandled descriptor type");
2302 }
2303
2304 return Result;
2305}
2306
2307/// Does Ptr point to the last subobject?
2308static bool pointsToLastObject(const Pointer &Ptr) {
2309 Pointer P = Ptr;
2310 while (!P.isRoot()) {
2311
2312 if (P.isArrayElement()) {
2313 P = P.expand().getArray();
2314 continue;
2315 }
2316 if (P.isBaseClass()) {
2317 if (P.getRecord()->getNumFields() > 0)
2318 return false;
2319 P = P.getBase();
2320 continue;
2321 }
2322
2323 Pointer Base = P.getBase();
2324 if (const Record *R = Base.getRecord()) {
2325 assert(P.getField());
2326 if (P.getField()->getFieldIndex() != R->getNumFields() - 1)
2327 return false;
2328 }
2329 P = Base;
2330 }
2331
2332 return true;
2333}
2334
2335/// Does Ptr point to the last object AND to a flexible array member?
/// Used by the object-size logic: trailing arrays that qualify as flexible
/// array members (per -fstrict-flex-arrays) have an unknowable allocation
/// size, so upper bounds cannot be reported for them.
2336static bool isUserWritingOffTheEnd(const ASTContext &Ctx, const Pointer &Ptr) {
2337 auto isFlexibleArrayMember = [&](const Descriptor *FieldDesc) {
 // NOTE(review): line 2338 (presumably a `using FAMKind = ...` alias for
 // the strict-flex-arrays level enum) was elided by the doc extraction.
2339 FAMKind StrictFlexArraysLevel =
2340 Ctx.getLangOpts().getStrictFlexArraysLevel();
2341
 // Default level: any trailing array counts as a flexible array member.
2342 if (StrictFlexArraysLevel == FAMKind::Default)
2343 return true;
2344
2345 unsigned NumElems = FieldDesc->getNumElems();
2346 if (NumElems == 0 && StrictFlexArraysLevel != FAMKind::IncompleteOnly)
2347 return true;
2348
2349 if (NumElems == 1 && StrictFlexArraysLevel == FAMKind::OneZeroOrIncomplete)
2350 return true;
2351 return false;
2352 };
2353
2354 const Descriptor *FieldDesc = Ptr.getFieldDesc();
2355 if (!FieldDesc->isArray())
2356 return false;
2357
2358 return Ptr.isDummy() && pointsToLastObject(Ptr) &&
2359 isFlexibleArrayMember(FieldDesc);
2360}
2361
/// Implements GCC-style object-size semantics (__builtin_object_size-family
/// behavior: Kind 0-3 selects whole-object vs. closest-subobject and
/// max vs. min of remaining bytes) — TODO confirm which builtin IDs dispatch
/// here; the dispatch switch is outside this chunk.
/// NOTE(review): the signature's first line (2362) was dropped by the doc
/// extraction; the lines below are the remaining parameters plus the body.
2363 const InterpFrame *Frame,
2364 const CallExpr *Call) {
2365 const ASTContext &ASTCtx = S.getASTContext();
2366 // From the GCC docs:
2367 // Kind is an integer constant from 0 to 3. If the least significant bit is
2368 // clear, objects are whole variables. If it is set, a closest surrounding
2369 // subobject is considered the object a pointer points to. The second bit
2370 // determines if maximum or minimum of remaining bytes is computed.
2371 unsigned Kind = popToAPSInt(S, Call->getArg(1)).getZExtValue();
2372 assert(Kind <= 3 && "unexpected kind");
2373 bool UseFieldDesc = (Kind & 1u);
2374 bool ReportMinimum = (Kind & 2u);
2375 const Pointer &Ptr = S.Stk.pop<Pointer>();
2376
2377 if (Call->getArg(0)->HasSideEffects(ASTCtx)) {
2378 // "If there are any side effects in them, it returns (size_t) -1
2379 // for type 0 or 1 and (size_t) 0 for type 2 or 3."
2380 pushInteger(S, Kind <= 1 ? -1 : 0, Call->getType());
2381 return true;
2382 }
2383
 // Only real block pointers have a computable size here.
2384 if (Ptr.isZero() || !Ptr.isBlockPointer())
2385 return false;
2386
2387 // We can't load through pointers.
2388 if (Ptr.isDummy() && Ptr.getType()->isPointerType())
2389 return false;
2390
2391 bool DetermineForCompleteObject = Ptr.getFieldDesc() == Ptr.getDeclDesc();
2392 const Descriptor *DeclDesc = Ptr.getDeclDesc();
2393 assert(DeclDesc);
2394
2395 if (!UseFieldDesc || DetermineForCompleteObject) {
2396 // Lower bound, so we can't fall back to this.
2397 if (ReportMinimum && !DetermineForCompleteObject)
2398 return false;
2399
2400 // Can't read beyond the pointer decl desc.
2401 if (!UseFieldDesc && !ReportMinimum && DeclDesc->getType()->isPointerType())
2402 return false;
2403 } else {
2404 if (isUserWritingOffTheEnd(ASTCtx, Ptr.expand())) {
2405 // If we cannot determine the size of the initial allocation, then we
2406 // can't given an accurate upper-bound. However, we are still able to give
2407 // conservative lower-bounds for Type=3.
2408 if (Kind == 1)
2409 return false;
2410 }
2411 }
2412
 // Kind bit 0 selects subobject (field descriptor) vs. whole declaration.
2413 const Descriptor *Desc = UseFieldDesc ? Ptr.getFieldDesc() : DeclDesc;
2414 assert(Desc);
2415
2416 std::optional<unsigned> FullSize = computeFullDescSize(ASTCtx, Desc);
2417 if (!FullSize)
2418 return false;
2419
 // Remaining bytes = full size of the chosen object minus the byte offset
 // of Ptr within that object.
2420 unsigned ByteOffset;
2421 if (UseFieldDesc) {
2422 if (Ptr.isBaseClass())
2423 ByteOffset = computePointerOffset(ASTCtx, Ptr.getBase()) -
2424 computePointerOffset(ASTCtx, Ptr);
2425 else
2426 ByteOffset =
2427 computePointerOffset(ASTCtx, Ptr) -
2428 computePointerOffset(ASTCtx, Ptr.expand().atIndex(0).narrow());
2429 } else
2430 ByteOffset = computePointerOffset(ASTCtx, Ptr);
2431
2432 assert(ByteOffset <= *FullSize);
2433 unsigned Result = *FullSize - ByteOffset;
2434
2435 pushInteger(S, Result, Call->getType());
2436 return true;
2437}
2438
/// Implements __builtin_is_within_lifetime / std::is_within_lifetime:
/// pushes true iff the pointee's lifetime has started and not ended, after
/// diagnosing the ill-formed cases (null, one-past-the-end, or a pointer
/// into the object currently being initialized).
/// NOTE(review): the signature's first line (2439) was dropped by the doc
/// extraction; the lines below are the remaining parameter plus the body.
2440 const CallExpr *Call) {
2441
 // Only meaningful during constant evaluation.
2442 if (!S.inConstantContext())
2443 return false;
2444
2445 const Pointer &Ptr = S.Stk.pop<Pointer>();
2446
 // Shared diagnostic helper; reports under the std:: name when the caller
 // is std::is_within_lifetime, otherwise under the builtin's name.
2447 auto Error = [&](int Diag) {
2448 bool CalledFromStd = false;
2449 const auto *Callee = S.Current->getCallee();
2450 if (Callee && Callee->isInStdNamespace()) {
2451 const IdentifierInfo *Identifier = Callee->getIdentifier();
2452 CalledFromStd = Identifier && Identifier->isStr("is_within_lifetime");
2453 }
2454 S.CCEDiag(CalledFromStd
 // NOTE(review): line 2455 (presumably the `?` arm selecting the
 // std caller's source location) was elided by the doc extraction.
2456 : S.Current->getSource(OpPC),
2457 diag::err_invalid_is_within_lifetime)
2458 << (CalledFromStd ? "std::is_within_lifetime"
2459 : "__builtin_is_within_lifetime")
2460 << Diag;
2461 return false;
2462 };
2463
2464 if (Ptr.isZero())
2465 return Error(0);
2466 if (Ptr.isOnePastEnd())
2467 return Error(1);
2468
2469 bool Result = Ptr.getLifetime() != Lifetime::Ended;
2470 if (!Ptr.isActive()) {
 // Inactive union members are simply "not within lifetime".
2471 Result = false;
2472 } else {
2473 if (!CheckLive(S, OpPC, Ptr, AK_Read))
2474 return false;
2475 if (!CheckMutable(S, OpPC, Ptr))
2476 return false;
2477 if (!CheckDummy(S, OpPC, Ptr.block(), AK_Read))
2478 return false;
2479 }
2480
2481 // Check if we're currently running an initializer.
2482 if (llvm::is_contained(S.InitializingBlocks, Ptr.block()))
2483 return Error(2);
2484 if (S.EvaluatingDecl && Ptr.getDeclDesc()->asVarDecl() == S.EvaluatingDecl)
2485 return Error(2);
2486
2487 pushInteger(S, Result, Call->getType());
2488 return true;
2489}
2490
/// Shared helper for elementwise integer unary builtins: applies \p Fn to a
/// single scalar integer argument and pushes the result. Vector arguments
/// are not handled yet (returns false, see TODO below).
/// NOTE(review): the signature's first line (2491, carrying the function
/// name) was dropped by the doc extraction.
2492 InterpState &S, CodePtr OpPC, const CallExpr *Call,
2493 llvm::function_ref<APInt(const APSInt &)> Fn) {
2494 assert(Call->getNumArgs() == 1);
2495 assert(Call->getType()->isIntegerType());
2496
2497 // Single integer case.
2498 if (!Call->getArg(0)->getType()->isVectorType()) {
2499 APSInt Src = popToAPSInt(S, Call->getArg(0));
2500 APInt Result = Fn(Src);
 // Preserve the source's signedness on the pushed result.
2501 pushInteger(S, APSInt(std::move(Result), !Src.isSigned()), Call->getType());
2502 return true;
2503 }
2504
2505 // TODO: Add vector integer handling.
2506 return false;
2507}
2508
/// Shared helper for elementwise integer binary builtins: applies \p Fn
/// either to two scalars (result pushed), to a vector and a scalar, or to
/// two equally-shaped vectors (results written into the destination vector
/// on top of the stack).
/// NOTE(review): the signature's first line (2509, carrying the function
/// name) was dropped by the doc extraction.
2510 InterpState &S, CodePtr OpPC, const CallExpr *Call,
2511 llvm::function_ref<APInt(const APSInt &, const APSInt &)> Fn) {
2512 assert(Call->getNumArgs() == 2);
2513
2514 // Single integer case.
2515 if (!Call->getArg(0)->getType()->isVectorType()) {
2516 assert(!Call->getArg(1)->getType()->isVectorType());
2517 APSInt RHS = popToAPSInt(S, Call->getArg(1));
2518 APSInt LHS = popToAPSInt(S, Call->getArg(0));
2519 APInt Result = Fn(LHS, RHS);
2520 pushInteger(S, APSInt(std::move(Result), !LHS.isSigned()), Call->getType());
2521 return true;
2522 }
2523
2524 const auto *VT = Call->getArg(0)->getType()->castAs<VectorType>();
2525 assert(VT->getElementType()->isIntegralOrEnumerationType());
2526 PrimType ElemT = *S.getContext().classify(VT->getElementType());
2527 unsigned NumElems = VT->getNumElements();
2528 bool DestUnsigned = Call->getType()->isUnsignedIntegerOrEnumerationType();
2529
2530 // Vector + Scalar case.
2531 if (!Call->getArg(1)->getType()->isVectorType()) {
2532 assert(Call->getArg(1)->getType()->isIntegralOrEnumerationType());
2533
2534 APSInt RHS = popToAPSInt(S, Call->getArg(1));
2535 const Pointer &LHS = S.Stk.pop<Pointer>();
2536 const Pointer &Dst = S.Stk.peek<Pointer>();
2537
2538 for (unsigned I = 0; I != NumElems; ++I) {
 // NOTE(review): an INT_TYPE_SWITCH-style macro line (2539) was elided
 // by the extraction; the statements below run inside its lambda.
2540 Dst.elem<T>(I) = static_cast<T>(
2541 APSInt(Fn(LHS.elem<T>(I).toAPSInt(), RHS), DestUnsigned));
2542 });
2543 }
 // NOTE(review): line 2544 (presumably `Dst.initializeAllElements();`)
 // was elided by the doc extraction.
2545 return true;
2546 }
2547
2548 // Vector case.
2549 assert(Call->getArg(0)->getType()->isVectorType() &&
2550 Call->getArg(1)->getType()->isVectorType());
2551 assert(VT->getElementType() ==
2552 Call->getArg(1)->getType()->castAs<VectorType>()->getElementType());
2553 assert(VT->getNumElements() ==
2554 Call->getArg(1)->getType()->castAs<VectorType>()->getNumElements());
2555 assert(VT->getElementType()->isIntegralOrEnumerationType());
2556
2557 const Pointer &RHS = S.Stk.pop<Pointer>();
2558 const Pointer &LHS = S.Stk.pop<Pointer>();
2559 const Pointer &Dst = S.Stk.peek<Pointer>();
2560 for (unsigned I = 0; I != NumElems; ++I) {
 // NOTE(review): an INT_TYPE_SWITCH-style macro line (2561) was elided
 // by the extraction; the statements below run inside its lambda.
2562 APSInt Elem1 = LHS.elem<T>(I).toAPSInt();
2563 APSInt Elem2 = RHS.elem<T>(I).toAPSInt();
2564 Dst.elem<T>(I) = static_cast<T>(APSInt(Fn(Elem1, Elem2), DestUnsigned));
2565 });
2566 }
 // NOTE(review): line 2567 (presumably `Dst.initializeAllElements();`)
 // was elided by the doc extraction.
2568
2569 return true;
2570}
2571
/// Helper for x86 pack-style builtins: for each 128-bit lane, applies
/// \p PackFn to the lane's elements of LHS then RHS, writing the narrowed
/// results into the (twice-as-many-element) destination vector.
2572static bool
 // NOTE(review): line 2573 (carrying the function name and leading
 // parameters) was elided by the doc extraction.
2574 llvm::function_ref<APInt(const APSInt &)> PackFn) {
2575 const auto *VT0 = E->getArg(0)->getType()->castAs<VectorType>();
2576 [[maybe_unused]] const auto *VT1 =
2577 E->getArg(1)->getType()->castAs<VectorType>();
2578 assert(VT0 && VT1 && "pack builtin VT0 and VT1 must be VectorType");
2579 assert(VT0->getElementType() == VT1->getElementType() &&
2580 VT0->getNumElements() == VT1->getNumElements() &&
2581 "pack builtin VT0 and VT1 ElementType must be same");
2582
2583 const Pointer &RHS = S.Stk.pop<Pointer>();
2584 const Pointer &LHS = S.Stk.pop<Pointer>();
2585 const Pointer &Dst = S.Stk.peek<Pointer>();
2586
 // Pack operates independently per 128-bit lane.
2587 const ASTContext &ASTCtx = S.getASTContext();
2588 const unsigned SrcBits = ASTCtx.getIntWidth(VT0->getElementType());
2589 const unsigned LHSVecLen = VT0->getNumElements();
2590 const unsigned SrcPerLane = 128 / SrcBits;
2591 const unsigned Lanes = LHSVecLen * SrcBits / 128;
2592
2593 PrimType SrcT = *S.getContext().classify(VT0->getElementType());
2594 PrimType DstT = *S.getContext().classify(getElemType(Dst));
2595 const bool IsUnsigend = getElemType(Dst)->isUnsignedIntegerType();
2596
2597 for (unsigned Lane = 0; Lane != Lanes; ++Lane) {
2598 const unsigned BaseSrc = Lane * SrcPerLane;
2599 const unsigned BaseDst = Lane * (2 * SrcPerLane);
2600
2601 for (unsigned I = 0; I != SrcPerLane; ++I) {
 // NOTE(review): an INT_TYPE_SWITCH-style macro line (2602) was elided
 // by the extraction; the statements below run inside its lambda.
2603 APSInt A = LHS.elem<T>(BaseSrc + I).toAPSInt();
2604 APSInt B = RHS.elem<T>(BaseSrc + I).toAPSInt();
2605
 // LHS's packed values fill the lane's first half, RHS's the second.
2606 assignInteger(S, Dst.atIndex(BaseDst + I), DstT,
2607 APSInt(PackFn(A), IsUnsigend));
2608 assignInteger(S, Dst.atIndex(BaseDst + SrcPerLane + I), DstT,
2609 APSInt(PackFn(B), IsUnsigend));
2610 });
2611 }
2612 }
2613
2614 Dst.initializeAllElements();
2615 return true;
2616}
2617
/// Implements __builtin_elementwise_max / __builtin_elementwise_min for
/// integer scalars and integer vectors (floating-point support is still
/// TODO, see below).
/// NOTE(review): the signature's first line (2618) was dropped by the doc
/// extraction; the lines below are the remaining parameters plus the body.
2619 const CallExpr *Call,
2620 unsigned BuiltinID) {
2621 assert(Call->getNumArgs() == 2);
2622
2623 QualType Arg0Type = Call->getArg(0)->getType();
2624
2625 // TODO: Support floating-point types.
2626 if (!(Arg0Type->isIntegerType() ||
2627 (Arg0Type->isVectorType() &&
2628 Arg0Type->castAs<VectorType>()->getElementType()->isIntegerType())))
2629 return false;
2630
2631 if (!Arg0Type->isVectorType()) {
2632 assert(!Call->getArg(1)->getType()->isVectorType());
2633 APSInt RHS = popToAPSInt(S, Call->getArg(1));
2634 APSInt LHS = popToAPSInt(S, Arg0Type);
2635 APInt Result;
2636 if (BuiltinID == Builtin::BI__builtin_elementwise_max) {
2637 Result = std::max(LHS, RHS);
2638 } else if (BuiltinID == Builtin::BI__builtin_elementwise_min) {
2639 Result = std::min(LHS, RHS);
2640 } else {
2641 llvm_unreachable("Wrong builtin ID");
2642 }
2643
2644 pushInteger(S, APSInt(Result, !LHS.isSigned()), Call->getType());
2645 return true;
2646 }
2647
2648 // Vector case.
2649 assert(Call->getArg(0)->getType()->isVectorType() &&
2650 Call->getArg(1)->getType()->isVectorType());
2651 const auto *VT = Call->getArg(0)->getType()->castAs<VectorType>();
2652 assert(VT->getElementType() ==
2653 Call->getArg(1)->getType()->castAs<VectorType>()->getElementType());
2654 assert(VT->getNumElements() ==
2655 Call->getArg(1)->getType()->castAs<VectorType>()->getNumElements());
2656 assert(VT->getElementType()->isIntegralOrEnumerationType());
2657
2658 const Pointer &RHS = S.Stk.pop<Pointer>();
2659 const Pointer &LHS = S.Stk.pop<Pointer>();
2660 const Pointer &Dst = S.Stk.peek<Pointer>();
2661 PrimType ElemT = *S.getContext().classify(VT->getElementType());
2662 unsigned NumElems = VT->getNumElements();
2663 for (unsigned I = 0; I != NumElems; ++I) {
2664 APSInt Elem1;
2665 APSInt Elem2;
 // NOTE(review): an INT_TYPE_SWITCH-style macro line (2666) was elided
 // by the extraction; the statements below run inside its lambda.
2667 Elem1 = LHS.elem<T>(I).toAPSInt();
2668 Elem2 = RHS.elem<T>(I).toAPSInt();
2669 });
2670
2671 APSInt Result;
2672 if (BuiltinID == Builtin::BI__builtin_elementwise_max) {
2673 Result = APSInt(std::max(Elem1, Elem2),
2674 Call->getType()->isUnsignedIntegerOrEnumerationType());
2675 } else if (BuiltinID == Builtin::BI__builtin_elementwise_min) {
2676 Result = APSInt(std::min(Elem1, Elem2),
2677 Call->getType()->isUnsignedIntegerOrEnumerationType());
2678 } else {
2679 llvm_unreachable("Wrong builtin ID");
2680 }
2681
 // NOTE(review): a macro invocation line (2682) was elided by the
 // extraction; the line below is its trailing argument and lambda.
2683 { Dst.elem<T>(I) = static_cast<T>(Result); });
2684 }
2685 Dst.initializeAllElements();
2686
2687 return true;
2688}
2689
/// Implements the x86 pmuludq/pmuldq builtins: multiplies the even-indexed
/// 32-bit elements of the two source vectors with full (widening) 64-bit
/// precision — unsigned for pmuludq, signed for pmuldq — and writes one
/// destination element per source pair.
/// NOTE(review): the signature's first line (2690) was dropped by the doc
/// extraction; the lines below are the remaining parameters plus the body.
2691 const CallExpr *Call,
2692 unsigned BuiltinID) {
2693 assert(Call->getArg(0)->getType()->isVectorType() &&
2694 Call->getArg(1)->getType()->isVectorType());
2695 const Pointer &RHS = S.Stk.pop<Pointer>();
2696 const Pointer &LHS = S.Stk.pop<Pointer>();
2697 const Pointer &Dst = S.Stk.peek<Pointer>();
2698
2699 const auto *VT = Call->getArg(0)->getType()->castAs<VectorType>();
2700 PrimType ElemT = *S.getContext().classify(VT->getElementType());
2701 unsigned SourceLen = VT->getNumElements();
2702
2703 PrimType DstElemT = *S.getContext().classify(
2704 Call->getType()->castAs<VectorType>()->getElementType());
2705 unsigned DstElem = 0;
 // Only even source indices participate (I += 2), matching the hardware
 // semantics of multiplying the low 32-bit half of each 64-bit pair.
2706 for (unsigned I = 0; I != SourceLen; I += 2) {
2707 APSInt Elem1;
2708 APSInt Elem2;
 // NOTE(review): an INT_TYPE_SWITCH-style macro line (2709) was elided
 // by the extraction; the statements below run inside its lambda.
2710 Elem1 = LHS.elem<T>(I).toAPSInt();
2711 Elem2 = RHS.elem<T>(I).toAPSInt();
2712 });
2713
2714 APSInt Result;
2715 switch (BuiltinID) {
2716 case clang::X86::BI__builtin_ia32_pmuludq128:
2717 case clang::X86::BI__builtin_ia32_pmuludq256:
2718 case clang::X86::BI__builtin_ia32_pmuludq512:
2719 Result = APSInt(llvm::APIntOps::muluExtended(Elem1, Elem2),
2720 /*IsUnsigned=*/true);
2721 break;
2722 case clang::X86::BI__builtin_ia32_pmuldq128:
2723 case clang::X86::BI__builtin_ia32_pmuldq256:
2724 case clang::X86::BI__builtin_ia32_pmuldq512:
2725 Result = APSInt(llvm::APIntOps::mulsExtended(Elem1, Elem2),
2726 /*IsUnsigned=*/false);
2727 break;
2728 }
2729 INT_TYPE_SWITCH_NO_BOOL(DstElemT,
2730 { Dst.elem<T>(DstElem) = static_cast<T>(Result); });
2731 ++DstElem;
2732 }
2733
2734 Dst.initializeAllElements();
2735 return true;
2736}
2737
/// Shared helper for three-operand floating-point builtins (fma-style):
/// applies \p Fn (which receives the current rounding mode) to three
/// scalars or, elementwise, to three equally-shaped float vectors.
/// NOTE(review): the signature's first line (2738, carrying the function
/// name) was dropped by the doc extraction.
2739 InterpState &S, CodePtr OpPC, const CallExpr *Call,
2740 llvm::function_ref<APFloat(const APFloat &, const APFloat &,
2741 const APFloat &, llvm::RoundingMode)>
2742 Fn) {
2743 assert(Call->getNumArgs() == 3);
2744
 // Honor the FP options (rounding mode) in effect at the call site.
2745 FPOptions FPO = Call->getFPFeaturesInEffect(S.Ctx.getLangOpts());
2746 llvm::RoundingMode RM = getRoundingMode(FPO);
2747 const QualType Arg1Type = Call->getArg(0)->getType();
2748 const QualType Arg2Type = Call->getArg(1)->getType();
2749 const QualType Arg3Type = Call->getArg(2)->getType();
2750
2751 // Non-vector floating point types.
2752 if (!Arg1Type->isVectorType()) {
2753 assert(!Arg2Type->isVectorType());
2754 assert(!Arg3Type->isVectorType());
2755 (void)Arg2Type;
2756 (void)Arg3Type;
2757
 // Operands were pushed left-to-right; pop in reverse.
2758 const Floating &Z = S.Stk.pop<Floating>();
2759 const Floating &Y = S.Stk.pop<Floating>();
2760 const Floating &X = S.Stk.pop<Floating>();
2761 APFloat F = Fn(X.getAPFloat(), Y.getAPFloat(), Z.getAPFloat(), RM);
2762 Floating Result = S.allocFloat(X.getSemantics());
2763 Result.copy(F);
2764 S.Stk.push<Floating>(Result);
2765 return true;
2766 }
2767
2768 // Vector type.
2769 assert(Arg1Type->isVectorType() && Arg2Type->isVectorType() &&
2770 Arg3Type->isVectorType());
2771
2772 const VectorType *VecT = Arg1Type->castAs<VectorType>();
2773 const QualType ElemT = VecT->getElementType();
2774 unsigned NumElems = VecT->getNumElements();
2775
2776 assert(ElemT == Arg2Type->castAs<VectorType>()->getElementType() &&
2777 ElemT == Arg3Type->castAs<VectorType>()->getElementType());
2778 assert(NumElems == Arg2Type->castAs<VectorType>()->getNumElements() &&
2779 NumElems == Arg3Type->castAs<VectorType>()->getNumElements());
2780 assert(ElemT->isRealFloatingType());
2781 (void)ElemT;
2782
2783 const Pointer &VZ = S.Stk.pop<Pointer>();
2784 const Pointer &VY = S.Stk.pop<Pointer>();
2785 const Pointer &VX = S.Stk.pop<Pointer>();
2786 const Pointer &Dst = S.Stk.peek<Pointer>();
2787 for (unsigned I = 0; I != NumElems; ++I) {
2788 using T = PrimConv<PT_Float>::T;
2789 APFloat X = VX.elem<T>(I).getAPFloat();
2790 APFloat Y = VY.elem<T>(I).getAPFloat();
2791 APFloat Z = VZ.elem<T>(I).getAPFloat();
2792 APFloat F = Fn(X, Y, Z, RM);
2793 Dst.elem<Floating>(I) = Floating(F);
2794 }
 // NOTE(review): line 2795 (presumably `Dst.initializeAllElements();`)
 // was elided by the doc extraction.
2796 return true;
2797}
2798
2799/// AVX512 predicated move: "Result = Mask[] ? LHS[] : RHS[]".
/// Each destination element is taken from LHS when the corresponding Mask
/// bit is set and from RHS otherwise; float and integer element types are
/// handled separately.
/// NOTE(review): the signature's first line (2800) was dropped by the doc
/// extraction; the lines below are the remaining parameter plus the body.
2801 const CallExpr *Call) {
2802 const Pointer &RHS = S.Stk.pop<Pointer>();
2803 const Pointer &LHS = S.Stk.pop<Pointer>();
2804 APSInt Mask = popToAPSInt(S, Call->getArg(0));
2805 const Pointer &Dst = S.Stk.peek<Pointer>();
2806
2807 assert(LHS.getNumElems() == RHS.getNumElems());
2808 assert(LHS.getNumElems() == Dst.getNumElems());
2809 unsigned NumElems = LHS.getNumElems();
2810 PrimType ElemT = LHS.getFieldDesc()->getPrimType();
2811 PrimType DstElemT = Dst.getFieldDesc()->getPrimType();
2812
2813 for (unsigned I = 0; I != NumElems; ++I) {
 // Mask bit I selects the source for destination element I.
2814 if (ElemT == PT_Float) {
2815 assert(DstElemT == PT_Float);
2816 Dst.elem<Floating>(I) =
2817 Mask[I] ? LHS.elem<Floating>(I) : RHS.elem<Floating>(I);
2818 } else {
2819 APSInt Elem;
2820 INT_TYPE_SWITCH(ElemT, {
2821 Elem = Mask[I] ? LHS.elem<T>(I).toAPSInt() : RHS.elem<T>(I).toAPSInt();
2822 });
2823 INT_TYPE_SWITCH_NO_BOOL(DstElemT,
2824 { Dst.elem<T>(I) = static_cast<T>(Elem); });
2825 }
2826 }
 // NOTE(review): line 2827 (presumably `Dst.initializeAllElements();`)
 // was elided by the doc extraction.
2828
2829 return true;
2830}
2831
/// Blend-style builtin: each destination element comes from TrueVec when
/// the corresponding mask bit is set and from FalseVec otherwise. Only the
/// low 8 mask bits are used, repeating for vectors longer than 8 elements
/// (see `Mask[I % 8]` below).
/// NOTE(review): the signature's first line (2832) was dropped by the doc
/// extraction; the lines below are the remaining parameter plus the body.
2833 const CallExpr *Call) {
2834 APSInt Mask = popToAPSInt(S, Call->getArg(2));
2835 const Pointer &TrueVec = S.Stk.pop<Pointer>();
2836 const Pointer &FalseVec = S.Stk.pop<Pointer>();
2837 const Pointer &Dst = S.Stk.peek<Pointer>();
2838
2839 assert(FalseVec.getNumElems() == TrueVec.getNumElems());
2840 assert(FalseVec.getNumElems() == Dst.getNumElems());
2841 unsigned NumElems = FalseVec.getNumElems();
2842 PrimType ElemT = FalseVec.getFieldDesc()->getPrimType();
2843 PrimType DstElemT = Dst.getFieldDesc()->getPrimType();
2844
2845 for (unsigned I = 0; I != NumElems; ++I) {
2846 bool MaskBit = Mask[I % 8];
2847 if (ElemT == PT_Float) {
2848 assert(DstElemT == PT_Float);
2849 Dst.elem<Floating>(I) =
2850 MaskBit ? TrueVec.elem<Floating>(I) : FalseVec.elem<Floating>(I);
2851 } else {
2852 assert(DstElemT == ElemT);
2853 INT_TYPE_SWITCH_NO_BOOL(DstElemT, {
2854 Dst.elem<T>(I) =
2855 static_cast<T>(MaskBit ? TrueVec.elem<T>(I).toAPSInt()
2856 : FalseVec.elem<T>(I).toAPSInt());
2857 });
2858 }
2859 }
2860 Dst.initializeAllElements();
2861
2862 return true;
2863}
2864
/// Shared helper for three-operand elementwise integer builtins: applies
/// \p Fn to three scalars, to two vectors plus a scalar, or to three
/// equally-shaped vectors (results written into the destination vector on
/// top of the stack).
/// NOTE(review): the signature's first line (2865, carrying the function
/// name) was dropped by the doc extraction.
2866 InterpState &S, CodePtr OpPC, const CallExpr *Call,
2867 llvm::function_ref<APInt(const APSInt &, const APSInt &, const APSInt &)>
2868 Fn) {
2869 assert(Call->getNumArgs() == 3);
2870
2871 QualType Arg0Type = Call->getArg(0)->getType();
2872 QualType Arg2Type = Call->getArg(2)->getType();
2873 // Non-vector integer types.
2874 if (!Arg0Type->isVectorType()) {
 // Operands were pushed left-to-right; pop in reverse.
2875 const APSInt &Op2 = popToAPSInt(S, Arg2Type);
2876 const APSInt &Op1 = popToAPSInt(S, Call->getArg(1));
2877 const APSInt &Op0 = popToAPSInt(S, Arg0Type);
2878 APSInt Result = APSInt(Fn(Op0, Op1, Op2), Op0.isUnsigned());
2879 pushInteger(S, Result, Call->getType());
2880 return true;
2881 }
2882
2883 const auto *VecT = Arg0Type->castAs<VectorType>();
2884 const PrimType &ElemT = *S.getContext().classify(VecT->getElementType());
2885 unsigned NumElems = VecT->getNumElements();
2886 bool DestUnsigned = Call->getType()->isUnsignedIntegerOrEnumerationType();
2887
2888 // Vector + Vector + Scalar case.
2889 if (!Arg2Type->isVectorType()) {
2890 APSInt Op2 = popToAPSInt(S, Arg2Type);
2891
2892 const Pointer &Op1 = S.Stk.pop<Pointer>();
2893 const Pointer &Op0 = S.Stk.pop<Pointer>();
2894 const Pointer &Dst = S.Stk.peek<Pointer>();
2895 for (unsigned I = 0; I != NumElems; ++I) {
 // NOTE(review): an INT_TYPE_SWITCH-style macro line (2896) was elided
 // by the extraction; the statements below run inside its lambda.
2897 Dst.elem<T>(I) = static_cast<T>(APSInt(
2898 Fn(Op0.elem<T>(I).toAPSInt(), Op1.elem<T>(I).toAPSInt(), Op2),
2899 DestUnsigned));
2900 });
2901 }
 // NOTE(review): line 2902 (presumably `Dst.initializeAllElements();`)
 // was elided by the doc extraction.
2903
2904 return true;
2905 }
2906
2907 // Vector type.
2908 const Pointer &Op2 = S.Stk.pop<Pointer>();
2909 const Pointer &Op1 = S.Stk.pop<Pointer>();
2910 const Pointer &Op0 = S.Stk.pop<Pointer>();
2911 const Pointer &Dst = S.Stk.peek<Pointer>();
2912 for (unsigned I = 0; I != NumElems; ++I) {
2913 APSInt Val0, Val1, Val2;
 // NOTE(review): an INT_TYPE_SWITCH-style macro line (2914) was elided
 // by the extraction; the statements below run inside its lambda.
2915 Val0 = Op0.elem<T>(I).toAPSInt();
2916 Val1 = Op1.elem<T>(I).toAPSInt();
2917 Val2 = Op2.elem<T>(I).toAPSInt();
2918 });
2919 APSInt Result = APSInt(Fn(Val0, Val1, Val2), Val0.isUnsigned());
 // NOTE(review): a macro invocation line (2920) was elided by the
 // extraction; the line below is its trailing argument and lambda.
2921 { Dst.elem<T>(I) = static_cast<T>(Result); });
2922 }
 // NOTE(review): line 2923 (presumably `Dst.initializeAllElements();`)
 // was elided by the doc extraction.
2924
2925 return true;
2926}
2927
/// Implements lane-insert style vector builtins (insert a sub-vector into
/// one 128-bit-style lane of a base vector): copies BaseVec into the
/// destination, then overwrites the lane selected by the immediate with
/// SubVec's elements.
/// NOTE(review): the signature's first line (2928) was dropped by the doc
/// extraction; the lines below are the remaining parameters plus the body.
2929 const CallExpr *Call,
2930 unsigned ID) {
2931 assert(Call->getNumArgs() == 3);
2932
2933 APSInt ImmAPS = popToAPSInt(S, Call->getArg(2));
2934 uint64_t Index = ImmAPS.getZExtValue();
2935
2936 const Pointer &SubVec = S.Stk.pop<Pointer>();
2937 if (!SubVec.getFieldDesc()->isPrimitiveArray())
2938 return false;
2939
2940 const Pointer &BaseVec = S.Stk.pop<Pointer>();
2941 if (!BaseVec.getFieldDesc()->isPrimitiveArray())
2942 return false;
2943
2944 const Pointer &Dst = S.Stk.peek<Pointer>();
2945
2946 unsigned BaseElements = BaseVec.getNumElems();
2947 unsigned SubElements = SubVec.getNumElems();
2948
2949 assert(SubElements != 0 && BaseElements != 0 &&
2950 (BaseElements % SubElements) == 0);
2951
 // The immediate is taken modulo the number of lanes, matching hardware
 // behavior of ignoring out-of-range immediate bits.
2952 unsigned NumLanes = BaseElements / SubElements;
2953 unsigned Lane = static_cast<unsigned>(Index % NumLanes);
2954 unsigned InsertPos = Lane * SubElements;
2955
2956 PrimType ElemPT = BaseVec.getFieldDesc()->getPrimType();
2957
 // Copy the whole base first, then overwrite the selected lane.
2958 TYPE_SWITCH(ElemPT, {
2959 for (unsigned I = 0; I != BaseElements; ++I)
2960 Dst.elem<T>(I) = BaseVec.elem<T>(I);
2961 for (unsigned I = 0; I != SubElements; ++I)
2962 Dst.elem<T>(InsertPos + I) = SubVec.elem<T>(I);
2963 });
2964
 // NOTE(review): line 2965 (presumably `Dst.initializeAllElements();`)
 // was elided by the doc extraction.
2966
2967 return true;
2968}
2969
2971 uint32_t BuiltinID) {
2972 if (!S.getASTContext().BuiltinInfo.isConstantEvaluated(BuiltinID))
2973 return Invalid(S, OpPC);
2974
2975 const InterpFrame *Frame = S.Current;
2976 switch (BuiltinID) {
2977 case Builtin::BI__builtin_is_constant_evaluated:
2979
2980 case Builtin::BI__builtin_assume:
2981 case Builtin::BI__assume:
2982 return interp__builtin_assume(S, OpPC, Frame, Call);
2983
2984 case Builtin::BI__builtin_strcmp:
2985 case Builtin::BIstrcmp:
2986 case Builtin::BI__builtin_strncmp:
2987 case Builtin::BIstrncmp:
2988 case Builtin::BI__builtin_wcsncmp:
2989 case Builtin::BIwcsncmp:
2990 case Builtin::BI__builtin_wcscmp:
2991 case Builtin::BIwcscmp:
2992 return interp__builtin_strcmp(S, OpPC, Frame, Call, BuiltinID);
2993
2994 case Builtin::BI__builtin_strlen:
2995 case Builtin::BIstrlen:
2996 case Builtin::BI__builtin_wcslen:
2997 case Builtin::BIwcslen:
2998 return interp__builtin_strlen(S, OpPC, Frame, Call, BuiltinID);
2999
3000 case Builtin::BI__builtin_nan:
3001 case Builtin::BI__builtin_nanf:
3002 case Builtin::BI__builtin_nanl:
3003 case Builtin::BI__builtin_nanf16:
3004 case Builtin::BI__builtin_nanf128:
3005 return interp__builtin_nan(S, OpPC, Frame, Call, /*Signaling=*/false);
3006
3007 case Builtin::BI__builtin_nans:
3008 case Builtin::BI__builtin_nansf:
3009 case Builtin::BI__builtin_nansl:
3010 case Builtin::BI__builtin_nansf16:
3011 case Builtin::BI__builtin_nansf128:
3012 return interp__builtin_nan(S, OpPC, Frame, Call, /*Signaling=*/true);
3013
3014 case Builtin::BI__builtin_huge_val:
3015 case Builtin::BI__builtin_huge_valf:
3016 case Builtin::BI__builtin_huge_vall:
3017 case Builtin::BI__builtin_huge_valf16:
3018 case Builtin::BI__builtin_huge_valf128:
3019 case Builtin::BI__builtin_inf:
3020 case Builtin::BI__builtin_inff:
3021 case Builtin::BI__builtin_infl:
3022 case Builtin::BI__builtin_inff16:
3023 case Builtin::BI__builtin_inff128:
3024 return interp__builtin_inf(S, OpPC, Frame, Call);
3025
3026 case Builtin::BI__builtin_copysign:
3027 case Builtin::BI__builtin_copysignf:
3028 case Builtin::BI__builtin_copysignl:
3029 case Builtin::BI__builtin_copysignf128:
3030 return interp__builtin_copysign(S, OpPC, Frame);
3031
3032 case Builtin::BI__builtin_fmin:
3033 case Builtin::BI__builtin_fminf:
3034 case Builtin::BI__builtin_fminl:
3035 case Builtin::BI__builtin_fminf16:
3036 case Builtin::BI__builtin_fminf128:
3037 return interp__builtin_fmin(S, OpPC, Frame, /*IsNumBuiltin=*/false);
3038
3039 case Builtin::BI__builtin_fminimum_num:
3040 case Builtin::BI__builtin_fminimum_numf:
3041 case Builtin::BI__builtin_fminimum_numl:
3042 case Builtin::BI__builtin_fminimum_numf16:
3043 case Builtin::BI__builtin_fminimum_numf128:
3044 return interp__builtin_fmin(S, OpPC, Frame, /*IsNumBuiltin=*/true);
3045
3046 case Builtin::BI__builtin_fmax:
3047 case Builtin::BI__builtin_fmaxf:
3048 case Builtin::BI__builtin_fmaxl:
3049 case Builtin::BI__builtin_fmaxf16:
3050 case Builtin::BI__builtin_fmaxf128:
3051 return interp__builtin_fmax(S, OpPC, Frame, /*IsNumBuiltin=*/false);
3052
3053 case Builtin::BI__builtin_fmaximum_num:
3054 case Builtin::BI__builtin_fmaximum_numf:
3055 case Builtin::BI__builtin_fmaximum_numl:
3056 case Builtin::BI__builtin_fmaximum_numf16:
3057 case Builtin::BI__builtin_fmaximum_numf128:
3058 return interp__builtin_fmax(S, OpPC, Frame, /*IsNumBuiltin=*/true);
3059
3060 case Builtin::BI__builtin_isnan:
3061 return interp__builtin_isnan(S, OpPC, Frame, Call);
3062
3063 case Builtin::BI__builtin_issignaling:
3064 return interp__builtin_issignaling(S, OpPC, Frame, Call);
3065
3066 case Builtin::BI__builtin_isinf:
3067 return interp__builtin_isinf(S, OpPC, Frame, /*Sign=*/false, Call);
3068
3069 case Builtin::BI__builtin_isinf_sign:
3070 return interp__builtin_isinf(S, OpPC, Frame, /*Sign=*/true, Call);
3071
3072 case Builtin::BI__builtin_isfinite:
3073 return interp__builtin_isfinite(S, OpPC, Frame, Call);
3074
3075 case Builtin::BI__builtin_isnormal:
3076 return interp__builtin_isnormal(S, OpPC, Frame, Call);
3077
3078 case Builtin::BI__builtin_issubnormal:
3079 return interp__builtin_issubnormal(S, OpPC, Frame, Call);
3080
3081 case Builtin::BI__builtin_iszero:
3082 return interp__builtin_iszero(S, OpPC, Frame, Call);
3083
3084 case Builtin::BI__builtin_signbit:
3085 case Builtin::BI__builtin_signbitf:
3086 case Builtin::BI__builtin_signbitl:
3087 return interp__builtin_signbit(S, OpPC, Frame, Call);
3088
3089 case Builtin::BI__builtin_isgreater:
3090 case Builtin::BI__builtin_isgreaterequal:
3091 case Builtin::BI__builtin_isless:
3092 case Builtin::BI__builtin_islessequal:
3093 case Builtin::BI__builtin_islessgreater:
3094 case Builtin::BI__builtin_isunordered:
3095 return interp_floating_comparison(S, OpPC, Call, BuiltinID);
3096
3097 case Builtin::BI__builtin_isfpclass:
3098 return interp__builtin_isfpclass(S, OpPC, Frame, Call);
3099
3100 case Builtin::BI__builtin_fpclassify:
3101 return interp__builtin_fpclassify(S, OpPC, Frame, Call);
3102
3103 case Builtin::BI__builtin_fabs:
3104 case Builtin::BI__builtin_fabsf:
3105 case Builtin::BI__builtin_fabsl:
3106 case Builtin::BI__builtin_fabsf128:
3107 return interp__builtin_fabs(S, OpPC, Frame);
3108
3109 case Builtin::BI__builtin_abs:
3110 case Builtin::BI__builtin_labs:
3111 case Builtin::BI__builtin_llabs:
3112 return interp__builtin_abs(S, OpPC, Frame, Call);
3113
3114 case Builtin::BI__builtin_popcount:
3115 case Builtin::BI__builtin_popcountl:
3116 case Builtin::BI__builtin_popcountll:
3117 case Builtin::BI__builtin_popcountg:
3118 case Builtin::BI__popcnt16: // Microsoft variants of popcount
3119 case Builtin::BI__popcnt:
3120 case Builtin::BI__popcnt64:
3121 return interp__builtin_popcount(S, OpPC, Frame, Call);
3122
3123 case Builtin::BI__builtin_parity:
3124 case Builtin::BI__builtin_parityl:
3125 case Builtin::BI__builtin_parityll:
3126 return interp__builtin_parity(S, OpPC, Frame, Call);
3127
3128 case Builtin::BI__builtin_clrsb:
3129 case Builtin::BI__builtin_clrsbl:
3130 case Builtin::BI__builtin_clrsbll:
3131 return interp__builtin_clrsb(S, OpPC, Frame, Call);
3132
3133 case Builtin::BI__builtin_bitreverse8:
3134 case Builtin::BI__builtin_bitreverse16:
3135 case Builtin::BI__builtin_bitreverse32:
3136 case Builtin::BI__builtin_bitreverse64:
3137 return interp__builtin_bitreverse(S, OpPC, Frame, Call);
3138
3139 case Builtin::BI__builtin_classify_type:
3140 return interp__builtin_classify_type(S, OpPC, Frame, Call);
3141
3142 case Builtin::BI__builtin_expect:
3143 case Builtin::BI__builtin_expect_with_probability:
3144 return interp__builtin_expect(S, OpPC, Frame, Call);
3145
3146 case Builtin::BI__builtin_rotateleft8:
3147 case Builtin::BI__builtin_rotateleft16:
3148 case Builtin::BI__builtin_rotateleft32:
3149 case Builtin::BI__builtin_rotateleft64:
3150 case Builtin::BI_rotl8: // Microsoft variants of rotate left
3151 case Builtin::BI_rotl16:
3152 case Builtin::BI_rotl:
3153 case Builtin::BI_lrotl:
3154 case Builtin::BI_rotl64:
3155 return interp__builtin_rotate(S, OpPC, Frame, Call, /*Right=*/false);
3156
3157 case Builtin::BI__builtin_rotateright8:
3158 case Builtin::BI__builtin_rotateright16:
3159 case Builtin::BI__builtin_rotateright32:
3160 case Builtin::BI__builtin_rotateright64:
3161 case Builtin::BI_rotr8: // Microsoft variants of rotate right
3162 case Builtin::BI_rotr16:
3163 case Builtin::BI_rotr:
3164 case Builtin::BI_lrotr:
3165 case Builtin::BI_rotr64:
3166 return interp__builtin_rotate(S, OpPC, Frame, Call, /*Right=*/true);
3167
3168 case Builtin::BI__builtin_ffs:
3169 case Builtin::BI__builtin_ffsl:
3170 case Builtin::BI__builtin_ffsll:
3171 return interp__builtin_ffs(S, OpPC, Frame, Call);
3172
3173 case Builtin::BIaddressof:
3174 case Builtin::BI__addressof:
3175 case Builtin::BI__builtin_addressof:
3176 assert(isNoopBuiltin(BuiltinID));
3177 return interp__builtin_addressof(S, OpPC, Frame, Call);
3178
3179 case Builtin::BIas_const:
3180 case Builtin::BIforward:
3181 case Builtin::BIforward_like:
3182 case Builtin::BImove:
3183 case Builtin::BImove_if_noexcept:
3184 assert(isNoopBuiltin(BuiltinID));
3185 return interp__builtin_move(S, OpPC, Frame, Call);
3186
3187 case Builtin::BI__builtin_eh_return_data_regno:
3189
3190 case Builtin::BI__builtin_launder:
3191 assert(isNoopBuiltin(BuiltinID));
3192 return true;
3193
3194 case Builtin::BI__builtin_add_overflow:
3195 case Builtin::BI__builtin_sub_overflow:
3196 case Builtin::BI__builtin_mul_overflow:
3197 case Builtin::BI__builtin_sadd_overflow:
3198 case Builtin::BI__builtin_uadd_overflow:
3199 case Builtin::BI__builtin_uaddl_overflow:
3200 case Builtin::BI__builtin_uaddll_overflow:
3201 case Builtin::BI__builtin_usub_overflow:
3202 case Builtin::BI__builtin_usubl_overflow:
3203 case Builtin::BI__builtin_usubll_overflow:
3204 case Builtin::BI__builtin_umul_overflow:
3205 case Builtin::BI__builtin_umull_overflow:
3206 case Builtin::BI__builtin_umulll_overflow:
3207 case Builtin::BI__builtin_saddl_overflow:
3208 case Builtin::BI__builtin_saddll_overflow:
3209 case Builtin::BI__builtin_ssub_overflow:
3210 case Builtin::BI__builtin_ssubl_overflow:
3211 case Builtin::BI__builtin_ssubll_overflow:
3212 case Builtin::BI__builtin_smul_overflow:
3213 case Builtin::BI__builtin_smull_overflow:
3214 case Builtin::BI__builtin_smulll_overflow:
3215 return interp__builtin_overflowop(S, OpPC, Call, BuiltinID);
3216
3217 case Builtin::BI__builtin_addcb:
3218 case Builtin::BI__builtin_addcs:
3219 case Builtin::BI__builtin_addc:
3220 case Builtin::BI__builtin_addcl:
3221 case Builtin::BI__builtin_addcll:
3222 case Builtin::BI__builtin_subcb:
3223 case Builtin::BI__builtin_subcs:
3224 case Builtin::BI__builtin_subc:
3225 case Builtin::BI__builtin_subcl:
3226 case Builtin::BI__builtin_subcll:
3227 return interp__builtin_carryop(S, OpPC, Frame, Call, BuiltinID);
3228
3229 case Builtin::BI__builtin_clz:
3230 case Builtin::BI__builtin_clzl:
3231 case Builtin::BI__builtin_clzll:
3232 case Builtin::BI__builtin_clzs:
3233 case Builtin::BI__builtin_clzg:
3234 case Builtin::BI__lzcnt16: // Microsoft variants of count leading-zeroes
3235 case Builtin::BI__lzcnt:
3236 case Builtin::BI__lzcnt64:
3237 return interp__builtin_clz(S, OpPC, Frame, Call, BuiltinID);
3238
3239 case Builtin::BI__builtin_ctz:
3240 case Builtin::BI__builtin_ctzl:
3241 case Builtin::BI__builtin_ctzll:
3242 case Builtin::BI__builtin_ctzs:
3243 case Builtin::BI__builtin_ctzg:
3244 return interp__builtin_ctz(S, OpPC, Frame, Call, BuiltinID);
3245
3246 case Builtin::BI__builtin_elementwise_clzg:
3247 case Builtin::BI__builtin_elementwise_ctzg:
3249 BuiltinID);
3250
3251 case Builtin::BI__builtin_bswap16:
3252 case Builtin::BI__builtin_bswap32:
3253 case Builtin::BI__builtin_bswap64:
3254 return interp__builtin_bswap(S, OpPC, Frame, Call);
3255
3256 case Builtin::BI__atomic_always_lock_free:
3257 case Builtin::BI__atomic_is_lock_free:
3258 return interp__builtin_atomic_lock_free(S, OpPC, Frame, Call, BuiltinID);
3259
3260 case Builtin::BI__c11_atomic_is_lock_free:
3262
3263 case Builtin::BI__builtin_complex:
3264 return interp__builtin_complex(S, OpPC, Frame, Call);
3265
3266 case Builtin::BI__builtin_is_aligned:
3267 case Builtin::BI__builtin_align_up:
3268 case Builtin::BI__builtin_align_down:
3269 return interp__builtin_is_aligned_up_down(S, OpPC, Frame, Call, BuiltinID);
3270
3271 case Builtin::BI__builtin_assume_aligned:
3272 return interp__builtin_assume_aligned(S, OpPC, Frame, Call);
3273
3274 case clang::X86::BI__builtin_ia32_bextr_u32:
3275 case clang::X86::BI__builtin_ia32_bextr_u64:
3276 case clang::X86::BI__builtin_ia32_bextri_u32:
3277 case clang::X86::BI__builtin_ia32_bextri_u64:
3278 return interp__builtin_ia32_bextr(S, OpPC, Frame, Call);
3279
3280 case clang::X86::BI__builtin_ia32_bzhi_si:
3281 case clang::X86::BI__builtin_ia32_bzhi_di:
3282 return interp__builtin_ia32_bzhi(S, OpPC, Frame, Call);
3283
3284 case clang::X86::BI__builtin_ia32_lzcnt_u16:
3285 case clang::X86::BI__builtin_ia32_lzcnt_u32:
3286 case clang::X86::BI__builtin_ia32_lzcnt_u64:
3288 S, OpPC, Call, [](const APSInt &Src) {
3289 return APInt(Src.getBitWidth(), Src.countLeadingZeros());
3290 });
3291
3292 case clang::X86::BI__builtin_ia32_tzcnt_u16:
3293 case clang::X86::BI__builtin_ia32_tzcnt_u32:
3294 case clang::X86::BI__builtin_ia32_tzcnt_u64:
3296 S, OpPC, Call, [](const APSInt &Src) {
3297 return APInt(Src.getBitWidth(), Src.countTrailingZeros());
3298 });
3299
3300 case clang::X86::BI__builtin_ia32_pdep_si:
3301 case clang::X86::BI__builtin_ia32_pdep_di:
3302 return interp__builtin_ia32_pdep(S, OpPC, Frame, Call);
3303
3304 case clang::X86::BI__builtin_ia32_pext_si:
3305 case clang::X86::BI__builtin_ia32_pext_di:
3306 return interp__builtin_ia32_pext(S, OpPC, Frame, Call);
3307
3308 case clang::X86::BI__builtin_ia32_addcarryx_u32:
3309 case clang::X86::BI__builtin_ia32_addcarryx_u64:
3310 case clang::X86::BI__builtin_ia32_subborrow_u32:
3311 case clang::X86::BI__builtin_ia32_subborrow_u64:
3313 BuiltinID);
3314
3315 case Builtin::BI__builtin_os_log_format_buffer_size:
3317
3318 case Builtin::BI__builtin_ptrauth_string_discriminator:
3320
3321 case Builtin::BI__noop:
3322 pushInteger(S, 0, Call->getType());
3323 return true;
3324
3325 case Builtin::BI__builtin_operator_new:
3326 return interp__builtin_operator_new(S, OpPC, Frame, Call);
3327
3328 case Builtin::BI__builtin_operator_delete:
3329 return interp__builtin_operator_delete(S, OpPC, Frame, Call);
3330
3331 case Builtin::BI__arithmetic_fence:
3333
3334 case Builtin::BI__builtin_reduce_add:
3335 case Builtin::BI__builtin_reduce_mul:
3336 case Builtin::BI__builtin_reduce_and:
3337 case Builtin::BI__builtin_reduce_or:
3338 case Builtin::BI__builtin_reduce_xor:
3339 case Builtin::BI__builtin_reduce_min:
3340 case Builtin::BI__builtin_reduce_max:
3341 return interp__builtin_vector_reduce(S, OpPC, Call, BuiltinID);
3342
3343 case Builtin::BI__builtin_elementwise_popcount:
3344 case Builtin::BI__builtin_elementwise_bitreverse:
3346 BuiltinID);
3347
3348 case Builtin::BI__builtin_elementwise_abs:
3349 return interp__builtin_elementwise_abs(S, OpPC, Frame, Call, BuiltinID);
3350
3351 case Builtin::BI__builtin_memcpy:
3352 case Builtin::BImemcpy:
3353 case Builtin::BI__builtin_wmemcpy:
3354 case Builtin::BIwmemcpy:
3355 case Builtin::BI__builtin_memmove:
3356 case Builtin::BImemmove:
3357 case Builtin::BI__builtin_wmemmove:
3358 case Builtin::BIwmemmove:
3359 return interp__builtin_memcpy(S, OpPC, Frame, Call, BuiltinID);
3360
3361 case Builtin::BI__builtin_memcmp:
3362 case Builtin::BImemcmp:
3363 case Builtin::BI__builtin_bcmp:
3364 case Builtin::BIbcmp:
3365 case Builtin::BI__builtin_wmemcmp:
3366 case Builtin::BIwmemcmp:
3367 return interp__builtin_memcmp(S, OpPC, Frame, Call, BuiltinID);
3368
3369 case Builtin::BImemchr:
3370 case Builtin::BI__builtin_memchr:
3371 case Builtin::BIstrchr:
3372 case Builtin::BI__builtin_strchr:
3373 case Builtin::BIwmemchr:
3374 case Builtin::BI__builtin_wmemchr:
3375 case Builtin::BIwcschr:
3376 case Builtin::BI__builtin_wcschr:
3377 case Builtin::BI__builtin_char_memchr:
3378 return interp__builtin_memchr(S, OpPC, Call, BuiltinID);
3379
3380 case Builtin::BI__builtin_object_size:
3381 case Builtin::BI__builtin_dynamic_object_size:
3382 return interp__builtin_object_size(S, OpPC, Frame, Call);
3383
3384 case Builtin::BI__builtin_is_within_lifetime:
3386
3387 case Builtin::BI__builtin_elementwise_add_sat:
3389 S, OpPC, Call, [](const APSInt &LHS, const APSInt &RHS) {
3390 return LHS.isSigned() ? LHS.sadd_sat(RHS) : LHS.uadd_sat(RHS);
3391 });
3392
3393 case Builtin::BI__builtin_elementwise_sub_sat:
3395 S, OpPC, Call, [](const APSInt &LHS, const APSInt &RHS) {
3396 return LHS.isSigned() ? LHS.ssub_sat(RHS) : LHS.usub_sat(RHS);
3397 });
3398
3399 case clang::X86::BI__builtin_ia32_pavgb128:
3400 case clang::X86::BI__builtin_ia32_pavgw128:
3401 case clang::X86::BI__builtin_ia32_pavgb256:
3402 case clang::X86::BI__builtin_ia32_pavgw256:
3403 case clang::X86::BI__builtin_ia32_pavgb512:
3404 case clang::X86::BI__builtin_ia32_pavgw512:
3406 llvm::APIntOps::avgCeilU);
3407
3408 case clang::X86::BI__builtin_ia32_pmulhuw128:
3409 case clang::X86::BI__builtin_ia32_pmulhuw256:
3410 case clang::X86::BI__builtin_ia32_pmulhuw512:
3412 llvm::APIntOps::mulhu);
3413
3414 case clang::X86::BI__builtin_ia32_pmulhw128:
3415 case clang::X86::BI__builtin_ia32_pmulhw256:
3416 case clang::X86::BI__builtin_ia32_pmulhw512:
3418 llvm::APIntOps::mulhs);
3419
3420 case clang::X86::BI__builtin_ia32_psllv2di:
3421 case clang::X86::BI__builtin_ia32_psllv4di:
3422 case clang::X86::BI__builtin_ia32_psllv4si:
3423 case clang::X86::BI__builtin_ia32_psllv8di:
3424 case clang::X86::BI__builtin_ia32_psllv8hi:
3425 case clang::X86::BI__builtin_ia32_psllv8si:
3426 case clang::X86::BI__builtin_ia32_psllv16hi:
3427 case clang::X86::BI__builtin_ia32_psllv16si:
3428 case clang::X86::BI__builtin_ia32_psllv32hi:
3429 case clang::X86::BI__builtin_ia32_psllwi128:
3430 case clang::X86::BI__builtin_ia32_psllwi256:
3431 case clang::X86::BI__builtin_ia32_psllwi512:
3432 case clang::X86::BI__builtin_ia32_pslldi128:
3433 case clang::X86::BI__builtin_ia32_pslldi256:
3434 case clang::X86::BI__builtin_ia32_pslldi512:
3435 case clang::X86::BI__builtin_ia32_psllqi128:
3436 case clang::X86::BI__builtin_ia32_psllqi256:
3437 case clang::X86::BI__builtin_ia32_psllqi512:
3439 S, OpPC, Call, [](const APSInt &LHS, const APSInt &RHS) {
3440 if (RHS.uge(LHS.getBitWidth())) {
3441 return APInt::getZero(LHS.getBitWidth());
3442 }
3443 return LHS.shl(RHS.getZExtValue());
3444 });
3445
3446 case clang::X86::BI__builtin_ia32_psrav4si:
3447 case clang::X86::BI__builtin_ia32_psrav8di:
3448 case clang::X86::BI__builtin_ia32_psrav8hi:
3449 case clang::X86::BI__builtin_ia32_psrav8si:
3450 case clang::X86::BI__builtin_ia32_psrav16hi:
3451 case clang::X86::BI__builtin_ia32_psrav16si:
3452 case clang::X86::BI__builtin_ia32_psrav32hi:
3453 case clang::X86::BI__builtin_ia32_psravq128:
3454 case clang::X86::BI__builtin_ia32_psravq256:
3455 case clang::X86::BI__builtin_ia32_psrawi128:
3456 case clang::X86::BI__builtin_ia32_psrawi256:
3457 case clang::X86::BI__builtin_ia32_psrawi512:
3458 case clang::X86::BI__builtin_ia32_psradi128:
3459 case clang::X86::BI__builtin_ia32_psradi256:
3460 case clang::X86::BI__builtin_ia32_psradi512:
3461 case clang::X86::BI__builtin_ia32_psraqi128:
3462 case clang::X86::BI__builtin_ia32_psraqi256:
3463 case clang::X86::BI__builtin_ia32_psraqi512:
3465 S, OpPC, Call, [](const APSInt &LHS, const APSInt &RHS) {
3466 if (RHS.uge(LHS.getBitWidth())) {
3467 return LHS.ashr(LHS.getBitWidth() - 1);
3468 }
3469 return LHS.ashr(RHS.getZExtValue());
3470 });
3471
3472 case clang::X86::BI__builtin_ia32_psrlv2di:
3473 case clang::X86::BI__builtin_ia32_psrlv4di:
3474 case clang::X86::BI__builtin_ia32_psrlv4si:
3475 case clang::X86::BI__builtin_ia32_psrlv8di:
3476 case clang::X86::BI__builtin_ia32_psrlv8hi:
3477 case clang::X86::BI__builtin_ia32_psrlv8si:
3478 case clang::X86::BI__builtin_ia32_psrlv16hi:
3479 case clang::X86::BI__builtin_ia32_psrlv16si:
3480 case clang::X86::BI__builtin_ia32_psrlv32hi:
3481 case clang::X86::BI__builtin_ia32_psrlwi128:
3482 case clang::X86::BI__builtin_ia32_psrlwi256:
3483 case clang::X86::BI__builtin_ia32_psrlwi512:
3484 case clang::X86::BI__builtin_ia32_psrldi128:
3485 case clang::X86::BI__builtin_ia32_psrldi256:
3486 case clang::X86::BI__builtin_ia32_psrldi512:
3487 case clang::X86::BI__builtin_ia32_psrlqi128:
3488 case clang::X86::BI__builtin_ia32_psrlqi256:
3489 case clang::X86::BI__builtin_ia32_psrlqi512:
3491 S, OpPC, Call, [](const APSInt &LHS, const APSInt &RHS) {
3492 if (RHS.uge(LHS.getBitWidth())) {
3493 return APInt::getZero(LHS.getBitWidth());
3494 }
3495 return LHS.lshr(RHS.getZExtValue());
3496 });
3497 case clang::X86::BI__builtin_ia32_packsswb128:
3498 case clang::X86::BI__builtin_ia32_packsswb256:
3499 case clang::X86::BI__builtin_ia32_packsswb512:
3500 case clang::X86::BI__builtin_ia32_packssdw128:
3501 case clang::X86::BI__builtin_ia32_packssdw256:
3502 case clang::X86::BI__builtin_ia32_packssdw512:
3503 return interp__builtin_x86_pack(S, OpPC, Call, [](const APSInt &Src) {
3504 return APInt(Src).truncSSat(Src.getBitWidth() / 2);
3505 });
3506 case clang::X86::BI__builtin_ia32_packusdw128:
3507 case clang::X86::BI__builtin_ia32_packusdw256:
3508 case clang::X86::BI__builtin_ia32_packusdw512:
3509 case clang::X86::BI__builtin_ia32_packuswb128:
3510 case clang::X86::BI__builtin_ia32_packuswb256:
3511 case clang::X86::BI__builtin_ia32_packuswb512:
3512 return interp__builtin_x86_pack(S, OpPC, Call, [](const APSInt &Src) {
3513 unsigned DstBits = Src.getBitWidth() / 2;
3514 if (Src.isNegative())
3515 return APInt::getZero(DstBits);
3516 if (Src.isIntN(DstBits))
3517 return APInt(Src).trunc(DstBits);
3518 return APInt::getAllOnes(DstBits);
3519 });
3520
3521 case clang::X86::BI__builtin_ia32_vprotbi:
3522 case clang::X86::BI__builtin_ia32_vprotdi:
3523 case clang::X86::BI__builtin_ia32_vprotqi:
3524 case clang::X86::BI__builtin_ia32_vprotwi:
3525 case clang::X86::BI__builtin_ia32_prold128:
3526 case clang::X86::BI__builtin_ia32_prold256:
3527 case clang::X86::BI__builtin_ia32_prold512:
3528 case clang::X86::BI__builtin_ia32_prolq128:
3529 case clang::X86::BI__builtin_ia32_prolq256:
3530 case clang::X86::BI__builtin_ia32_prolq512:
3532 S, OpPC, Call,
3533 [](const APSInt &LHS, const APSInt &RHS) { return LHS.rotl(RHS); });
3534
3535 case clang::X86::BI__builtin_ia32_prord128:
3536 case clang::X86::BI__builtin_ia32_prord256:
3537 case clang::X86::BI__builtin_ia32_prord512:
3538 case clang::X86::BI__builtin_ia32_prorq128:
3539 case clang::X86::BI__builtin_ia32_prorq256:
3540 case clang::X86::BI__builtin_ia32_prorq512:
3542 S, OpPC, Call,
3543 [](const APSInt &LHS, const APSInt &RHS) { return LHS.rotr(RHS); });
3544
3545 case Builtin::BI__builtin_elementwise_max:
3546 case Builtin::BI__builtin_elementwise_min:
3547 return interp__builtin_elementwise_maxmin(S, OpPC, Call, BuiltinID);
3548
3549 case clang::X86::BI__builtin_ia32_pmuldq128:
3550 case clang::X86::BI__builtin_ia32_pmuldq256:
3551 case clang::X86::BI__builtin_ia32_pmuldq512:
3552 case clang::X86::BI__builtin_ia32_pmuludq128:
3553 case clang::X86::BI__builtin_ia32_pmuludq256:
3554 case clang::X86::BI__builtin_ia32_pmuludq512:
3555 return interp__builtin_ia32_pmul(S, OpPC, Call, BuiltinID);
3556
3557 case Builtin::BI__builtin_elementwise_fma:
3559 S, OpPC, Call,
3560 [](const APFloat &X, const APFloat &Y, const APFloat &Z,
3561 llvm::RoundingMode RM) {
3562 APFloat F = X;
3563 F.fusedMultiplyAdd(Y, Z, RM);
3564 return F;
3565 });
3566
3567 case X86::BI__builtin_ia32_vpshldd128:
3568 case X86::BI__builtin_ia32_vpshldd256:
3569 case X86::BI__builtin_ia32_vpshldd512:
3570 case X86::BI__builtin_ia32_vpshldq128:
3571 case X86::BI__builtin_ia32_vpshldq256:
3572 case X86::BI__builtin_ia32_vpshldq512:
3573 case X86::BI__builtin_ia32_vpshldw128:
3574 case X86::BI__builtin_ia32_vpshldw256:
3575 case X86::BI__builtin_ia32_vpshldw512:
3577 S, OpPC, Call,
3578 [](const APSInt &Hi, const APSInt &Lo, const APSInt &Amt) {
3579 return llvm::APIntOps::fshl(Hi, Lo, Amt);
3580 });
3581
3582 case X86::BI__builtin_ia32_vpshrdd128:
3583 case X86::BI__builtin_ia32_vpshrdd256:
3584 case X86::BI__builtin_ia32_vpshrdd512:
3585 case X86::BI__builtin_ia32_vpshrdq128:
3586 case X86::BI__builtin_ia32_vpshrdq256:
3587 case X86::BI__builtin_ia32_vpshrdq512:
3588 case X86::BI__builtin_ia32_vpshrdw128:
3589 case X86::BI__builtin_ia32_vpshrdw256:
3590 case X86::BI__builtin_ia32_vpshrdw512:
3591 // NOTE: Reversed Hi/Lo operands.
3593 S, OpPC, Call,
3594 [](const APSInt &Lo, const APSInt &Hi, const APSInt &Amt) {
3595 return llvm::APIntOps::fshr(Hi, Lo, Amt);
3596 });
3597
3598 case clang::X86::BI__builtin_ia32_blendpd:
3599 case clang::X86::BI__builtin_ia32_blendpd256:
3600 case clang::X86::BI__builtin_ia32_blendps:
3601 case clang::X86::BI__builtin_ia32_blendps256:
3602 case clang::X86::BI__builtin_ia32_pblendw128:
3603 case clang::X86::BI__builtin_ia32_pblendw256:
3604 case clang::X86::BI__builtin_ia32_pblendd128:
3605 case clang::X86::BI__builtin_ia32_pblendd256:
3606 return interp__builtin_blend(S, OpPC, Call);
3607
3608 case clang::X86::BI__builtin_ia32_blendvpd:
3609 case clang::X86::BI__builtin_ia32_blendvpd256:
3610 case clang::X86::BI__builtin_ia32_blendvps:
3611 case clang::X86::BI__builtin_ia32_blendvps256:
3613 S, OpPC, Call,
3614 [](const APFloat &F, const APFloat &T, const APFloat &C,
3615 llvm::RoundingMode) { return C.isNegative() ? T : F; });
3616
3617 case clang::X86::BI__builtin_ia32_pblendvb128:
3618 case clang::X86::BI__builtin_ia32_pblendvb256:
3620 S, OpPC, Call, [](const APSInt &F, const APSInt &T, const APSInt &C) {
3621 return ((APInt)C).isNegative() ? T : F;
3622 });
3623
3624 case X86::BI__builtin_ia32_selectb_128:
3625 case X86::BI__builtin_ia32_selectb_256:
3626 case X86::BI__builtin_ia32_selectb_512:
3627 case X86::BI__builtin_ia32_selectw_128:
3628 case X86::BI__builtin_ia32_selectw_256:
3629 case X86::BI__builtin_ia32_selectw_512:
3630 case X86::BI__builtin_ia32_selectd_128:
3631 case X86::BI__builtin_ia32_selectd_256:
3632 case X86::BI__builtin_ia32_selectd_512:
3633 case X86::BI__builtin_ia32_selectq_128:
3634 case X86::BI__builtin_ia32_selectq_256:
3635 case X86::BI__builtin_ia32_selectq_512:
3636 case X86::BI__builtin_ia32_selectph_128:
3637 case X86::BI__builtin_ia32_selectph_256:
3638 case X86::BI__builtin_ia32_selectph_512:
3639 case X86::BI__builtin_ia32_selectpbf_128:
3640 case X86::BI__builtin_ia32_selectpbf_256:
3641 case X86::BI__builtin_ia32_selectpbf_512:
3642 case X86::BI__builtin_ia32_selectps_128:
3643 case X86::BI__builtin_ia32_selectps_256:
3644 case X86::BI__builtin_ia32_selectps_512:
3645 case X86::BI__builtin_ia32_selectpd_128:
3646 case X86::BI__builtin_ia32_selectpd_256:
3647 case X86::BI__builtin_ia32_selectpd_512:
3648 return interp__builtin_select(S, OpPC, Call);
3649
3650 case X86::BI__builtin_ia32_kandqi:
3651 case X86::BI__builtin_ia32_kandhi:
3652 case X86::BI__builtin_ia32_kandsi:
3653 case X86::BI__builtin_ia32_kanddi:
3655 S, OpPC, Call,
3656 [](const APSInt &LHS, const APSInt &RHS) { return LHS & RHS; });
3657
3658 case X86::BI__builtin_ia32_kandnqi:
3659 case X86::BI__builtin_ia32_kandnhi:
3660 case X86::BI__builtin_ia32_kandnsi:
3661 case X86::BI__builtin_ia32_kandndi:
3663 S, OpPC, Call,
3664 [](const APSInt &LHS, const APSInt &RHS) { return ~LHS & RHS; });
3665
3666 case X86::BI__builtin_ia32_korqi:
3667 case X86::BI__builtin_ia32_korhi:
3668 case X86::BI__builtin_ia32_korsi:
3669 case X86::BI__builtin_ia32_kordi:
3671 S, OpPC, Call,
3672 [](const APSInt &LHS, const APSInt &RHS) { return LHS | RHS; });
3673
3674 case X86::BI__builtin_ia32_kxnorqi:
3675 case X86::BI__builtin_ia32_kxnorhi:
3676 case X86::BI__builtin_ia32_kxnorsi:
3677 case X86::BI__builtin_ia32_kxnordi:
3679 S, OpPC, Call,
3680 [](const APSInt &LHS, const APSInt &RHS) { return ~(LHS ^ RHS); });
3681
3682 case X86::BI__builtin_ia32_kxorqi:
3683 case X86::BI__builtin_ia32_kxorhi:
3684 case X86::BI__builtin_ia32_kxorsi:
3685 case X86::BI__builtin_ia32_kxordi:
3687 S, OpPC, Call,
3688 [](const APSInt &LHS, const APSInt &RHS) { return LHS ^ RHS; });
3689
3690 case X86::BI__builtin_ia32_knotqi:
3691 case X86::BI__builtin_ia32_knothi:
3692 case X86::BI__builtin_ia32_knotsi:
3693 case X86::BI__builtin_ia32_knotdi:
3695 S, OpPC, Call, [](const APSInt &Src) { return ~Src; });
3696
3697 case X86::BI__builtin_ia32_kaddqi:
3698 case X86::BI__builtin_ia32_kaddhi:
3699 case X86::BI__builtin_ia32_kaddsi:
3700 case X86::BI__builtin_ia32_kadddi:
3702 S, OpPC, Call,
3703 [](const APSInt &LHS, const APSInt &RHS) { return LHS + RHS; });
3704
3705 case Builtin::BI__builtin_elementwise_fshl:
3707 llvm::APIntOps::fshl);
3708 case Builtin::BI__builtin_elementwise_fshr:
3710 llvm::APIntOps::fshr);
3711
3712 case X86::BI__builtin_ia32_insertf32x4_256:
3713 case X86::BI__builtin_ia32_inserti32x4_256:
3714 case X86::BI__builtin_ia32_insertf64x2_256:
3715 case X86::BI__builtin_ia32_inserti64x2_256:
3716 case X86::BI__builtin_ia32_insertf32x4:
3717 case X86::BI__builtin_ia32_inserti32x4:
3718 case X86::BI__builtin_ia32_insertf64x2_512:
3719 case X86::BI__builtin_ia32_inserti64x2_512:
3720 case X86::BI__builtin_ia32_insertf32x8:
3721 case X86::BI__builtin_ia32_inserti32x8:
3722 case X86::BI__builtin_ia32_insertf64x4:
3723 case X86::BI__builtin_ia32_inserti64x4:
3724 case X86::BI__builtin_ia32_vinsertf128_ps256:
3725 case X86::BI__builtin_ia32_vinsertf128_pd256:
3726 case X86::BI__builtin_ia32_vinsertf128_si256:
3727 case X86::BI__builtin_ia32_insert128i256:
3728 return interp__builtin_x86_insert_subvector(S, OpPC, Call, BuiltinID);
3729
3730 default:
3731 S.FFDiag(S.Current->getLocation(OpPC),
3732 diag::note_invalid_subexpr_in_const_expr)
3733 << S.Current->getRange(OpPC);
3734
3735 return false;
3736 }
3737
3738 llvm_unreachable("Unhandled builtin ID");
3739}
3740
3742 ArrayRef<int64_t> ArrayIndices, int64_t &IntResult) {
3744 unsigned N = E->getNumComponents();
3745 assert(N > 0);
3746
3747 unsigned ArrayIndex = 0;
3748 QualType CurrentType = E->getTypeSourceInfo()->getType();
3749 for (unsigned I = 0; I != N; ++I) {
3750 const OffsetOfNode &Node = E->getComponent(I);
3751 switch (Node.getKind()) {
3752 case OffsetOfNode::Field: {
3753 const FieldDecl *MemberDecl = Node.getField();
3754 const auto *RD = CurrentType->getAsRecordDecl();
3755 if (!RD || RD->isInvalidDecl())
3756 return false;
3758 unsigned FieldIndex = MemberDecl->getFieldIndex();
3759 assert(FieldIndex < RL.getFieldCount() && "offsetof field in wrong type");
3760 Result +=
3762 CurrentType = MemberDecl->getType().getNonReferenceType();
3763 break;
3764 }
3765 case OffsetOfNode::Array: {
3766 // When generating bytecode, we put all the index expressions as Sint64 on
3767 // the stack.
3768 int64_t Index = ArrayIndices[ArrayIndex];
3769 const ArrayType *AT = S.getASTContext().getAsArrayType(CurrentType);
3770 if (!AT)
3771 return false;
3772 CurrentType = AT->getElementType();
3773 CharUnits ElementSize = S.getASTContext().getTypeSizeInChars(CurrentType);
3774 Result += Index * ElementSize;
3775 ++ArrayIndex;
3776 break;
3777 }
3778 case OffsetOfNode::Base: {
3779 const CXXBaseSpecifier *BaseSpec = Node.getBase();
3780 if (BaseSpec->isVirtual())
3781 return false;
3782
3783 // Find the layout of the class whose base we are looking into.
3784 const auto *RD = CurrentType->getAsCXXRecordDecl();
3785 if (!RD || RD->isInvalidDecl())
3786 return false;
3788
3789 // Find the base class itself.
3790 CurrentType = BaseSpec->getType();
3791 const auto *BaseRD = CurrentType->getAsCXXRecordDecl();
3792 if (!BaseRD)
3793 return false;
3794
3795 // Add the offset to the base.
3796 Result += RL.getBaseClassOffset(BaseRD);
3797 break;
3798 }
3800 llvm_unreachable("Dependent OffsetOfExpr?");
3801 }
3802 }
3803
3804 IntResult = Result.getQuantity();
3805
3806 return true;
3807}
3808
3810 const Pointer &Ptr, const APSInt &IntValue) {
3811
3812 const Record *R = Ptr.getRecord();
3813 assert(R);
3814 assert(R->getNumFields() == 1);
3815
3816 unsigned FieldOffset = R->getField(0u)->Offset;
3817 const Pointer &FieldPtr = Ptr.atField(FieldOffset);
3818 PrimType FieldT = *S.getContext().classify(FieldPtr.getType());
3819
3820 INT_TYPE_SWITCH(FieldT,
3821 FieldPtr.deref<T>() = T::from(IntValue.getSExtValue()));
3822 FieldPtr.initialize();
3823 return true;
3824}
3825
/// Value-initializes ("zeroes") the object \p Dest points to, recursing
/// through records and arrays down to primitive leaves. Primitive values are
/// re-constructed in place via explicit destroy + placement-new of a
/// default-constructed T.
static void zeroAll(Pointer &Dest) {
  const Descriptor *Desc = Dest.getFieldDesc();

  // Single primitive value: reset it to its default-constructed state.
  if (Desc->isPrimitive()) {
    TYPE_SWITCH(Desc->getPrimType(), {
      Dest.deref<T>().~T();
      new (&Dest.deref<T>()) T();
    });
    return;
  }

  // Record: recurse into every field.
  if (Desc->isRecord()) {
    const Record *R = Desc->ElemRecord;
    for (const Record::Field &F : R->fields()) {
      Pointer FieldPtr = Dest.atField(F.Offset);
      zeroAll(FieldPtr);
    }
    return;
  }

  if (Desc->isPrimitiveArray()) {
    for (unsigned I = 0, N = Desc->getNumElems(); I != N; ++I) {
      // NOTE(review): the loop body never uses I -- Dest.deref<T>() names the
      // same location on every iteration. Presumably this should address the
      // I-th element (e.g. via atIndex(I)); confirm against Pointer::deref
      // semantics.
      TYPE_SWITCH(Desc->getPrimType(), {
        Dest.deref<T>().~T();
        new (&Dest.deref<T>()) T();
      });
    }
    return;
  }

  // Array of composites: recurse into each (narrowed) element.
  if (Desc->isCompositeArray()) {
    for (unsigned I = 0, N = Desc->getNumElems(); I != N; ++I) {
      Pointer ElemPtr = Dest.atIndex(I).narrow();
      zeroAll(ElemPtr);
    }
    return;
  }
}
3864
3865static bool copyComposite(InterpState &S, CodePtr OpPC, const Pointer &Src,
3866 Pointer &Dest, bool Activate);
3867static bool copyRecord(InterpState &S, CodePtr OpPC, const Pointer &Src,
3868 Pointer &Dest, bool Activate = false) {
3869 [[maybe_unused]] const Descriptor *SrcDesc = Src.getFieldDesc();
3870 const Descriptor *DestDesc = Dest.getFieldDesc();
3871
3872 auto copyField = [&](const Record::Field &F, bool Activate) -> bool {
3873 Pointer DestField = Dest.atField(F.Offset);
3874 if (OptPrimType FT = S.Ctx.classify(F.Decl->getType())) {
3875 TYPE_SWITCH(*FT, {
3876 DestField.deref<T>() = Src.atField(F.Offset).deref<T>();
3877 if (Src.atField(F.Offset).isInitialized())
3878 DestField.initialize();
3879 if (Activate)
3880 DestField.activate();
3881 });
3882 return true;
3883 }
3884 // Composite field.
3885 return copyComposite(S, OpPC, Src.atField(F.Offset), DestField, Activate);
3886 };
3887
3888 assert(SrcDesc->isRecord());
3889 assert(SrcDesc->ElemRecord == DestDesc->ElemRecord);
3890 const Record *R = DestDesc->ElemRecord;
3891 for (const Record::Field &F : R->fields()) {
3892 if (R->isUnion()) {
3893 // For unions, only copy the active field. Zero all others.
3894 const Pointer &SrcField = Src.atField(F.Offset);
3895 if (SrcField.isActive()) {
3896 if (!copyField(F, /*Activate=*/true))
3897 return false;
3898 } else {
3899 if (!CheckMutable(S, OpPC, Src.atField(F.Offset)))
3900 return false;
3901 Pointer DestField = Dest.atField(F.Offset);
3902 zeroAll(DestField);
3903 }
3904 } else {
3905 if (!copyField(F, Activate))
3906 return false;
3907 }
3908 }
3909
3910 for (const Record::Base &B : R->bases()) {
3911 Pointer DestBase = Dest.atField(B.Offset);
3912 if (!copyRecord(S, OpPC, Src.atField(B.Offset), DestBase, Activate))
3913 return false;
3914 }
3915
3916 Dest.initialize();
3917 return true;
3918}
3919
3920static bool copyComposite(InterpState &S, CodePtr OpPC, const Pointer &Src,
3921 Pointer &Dest, bool Activate = false) {
3922 assert(Src.isLive() && Dest.isLive());
3923
3924 [[maybe_unused]] const Descriptor *SrcDesc = Src.getFieldDesc();
3925 const Descriptor *DestDesc = Dest.getFieldDesc();
3926
3927 assert(!DestDesc->isPrimitive() && !SrcDesc->isPrimitive());
3928
3929 if (DestDesc->isPrimitiveArray()) {
3930 assert(SrcDesc->isPrimitiveArray());
3931 assert(SrcDesc->getNumElems() == DestDesc->getNumElems());
3932 PrimType ET = DestDesc->getPrimType();
3933 for (unsigned I = 0, N = DestDesc->getNumElems(); I != N; ++I) {
3934 Pointer DestElem = Dest.atIndex(I);
3935 TYPE_SWITCH(ET, {
3936 DestElem.deref<T>() = Src.elem<T>(I);
3937 DestElem.initialize();
3938 });
3939 }
3940 return true;
3941 }
3942
3943 if (DestDesc->isCompositeArray()) {
3944 assert(SrcDesc->isCompositeArray());
3945 assert(SrcDesc->getNumElems() == DestDesc->getNumElems());
3946 for (unsigned I = 0, N = DestDesc->getNumElems(); I != N; ++I) {
3947 const Pointer &SrcElem = Src.atIndex(I).narrow();
3948 Pointer DestElem = Dest.atIndex(I).narrow();
3949 if (!copyComposite(S, OpPC, SrcElem, DestElem, Activate))
3950 return false;
3951 }
3952 return true;
3953 }
3954
3955 if (DestDesc->isRecord())
3956 return copyRecord(S, OpPC, Src, Dest, Activate);
3957 return Invalid(S, OpPC);
3958}
3959
/// Copies the composite value pointed to by \p Src into \p Dest. Thin public
/// entry point that forwards to copyComposite() with activation disabled
/// (its Activate parameter defaulted to false).
bool DoMemcpy(InterpState &S, CodePtr OpPC, const Pointer &Src, Pointer &Dest) {
  return copyComposite(S, OpPC, Src, Dest);
}
3963
3964} // namespace interp
3965} // namespace clang
#define V(N, I)
Defines enum values for all the target-independent builtin functions.
llvm::APSInt APSInt
Definition Compiler.cpp:23
GCCTypeClass
Values returned by __builtin_classify_type, chosen to match the values produced by GCC's builtin.
CharUnits GetAlignOfExpr(const ASTContext &Ctx, const Expr *E, UnaryExprOrTypeTrait ExprKind)
GCCTypeClass EvaluateBuiltinClassifyType(QualType T, const LangOptions &LangOpts)
EvaluateBuiltinClassifyType - Evaluate __builtin_classify_type the same way as GCC.
static bool isOneByteCharacterType(QualType T)
static bool isUserWritingOffTheEnd(const ASTContext &Ctx, const LValue &LVal)
Attempts to detect a user writing into a piece of memory that's impossible to figure out the size of ...
TokenType getType() const
Returns the token's type, e.g.
#define X(type, name)
Definition Value.h:97
static DiagnosticBuilder Diag(DiagnosticsEngine *Diags, const LangOptions &Features, FullSourceLoc TokLoc, const char *TokBegin, const char *TokRangeBegin, const char *TokRangeEnd, unsigned DiagID)
Produce a diagnostic highlighting some portion of a literal.
#define INT_TYPE_SWITCH_NO_BOOL(Expr, B)
Definition PrimType.h:247
#define INT_TYPE_SWITCH(Expr, B)
Definition PrimType.h:228
#define TYPE_SWITCH(Expr, B)
Definition PrimType.h:207
static std::string toString(const clang::SanitizerSet &Sanitizers)
Produce a string containing comma-separated names of sanitizers in Sanitizers set.
static QualType getPointeeType(const MemRegion *R)
Enumerates target-specific builtins in their own namespaces within namespace clang.
APValue - This class implements a discriminated union of [uninitialized] [APSInt] [APFloat],...
Definition APValue.h:122
CharUnits & getLValueOffset()
Definition APValue.cpp:993
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition ASTContext.h:220
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
unsigned getIntWidth(QualType T) const
const llvm::fltSemantics & getFloatTypeSemantics(QualType T) const
Return the APFloat 'semantics' for the specified scalar floating point type.
const ASTRecordLayout & getASTRecordLayout(const RecordDecl *D) const
Get or compute information about the layout of the specified record (struct/union/class) D,...
Builtin::Context & BuiltinInfo
Definition ASTContext.h:774
QualType getConstantArrayType(QualType EltTy, const llvm::APInt &ArySize, const Expr *SizeExpr, ArraySizeModifier ASM, unsigned IndexTypeQuals) const
Return the unique reference to the type for a constant array of the specified element type.
const LangOptions & getLangOpts() const
Definition ASTContext.h:926
CharUnits getDeclAlign(const Decl *D, bool ForAlignof=false) const
Return a conservative estimate of the alignment of the specified decl D.
QualType getWCharType() const
Return the unique wchar_t type available in C++ (and available as __wchar_t as a Microsoft extension)...
bool hasSameUnqualifiedType(QualType T1, QualType T2) const
Determine whether the given types are equivalent after cvr-qualifiers have been removed.
const ArrayType * getAsArrayType(QualType T) const
Type Query functions.
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
const TargetInfo & getTargetInfo() const
Definition ASTContext.h:891
CharUnits toCharUnitsFromBits(int64_t BitSize) const
Convert a size in bits to a size in characters.
CanQualType getCanonicalTagType(const TagDecl *TD) const
uint64_t getCharWidth() const
Return the size of the character type, in bits.
ASTRecordLayout - This class contains layout information for one RecordDecl, which is a struct/union/...
unsigned getFieldCount() const
getFieldCount - Get the number of fields in the layout.
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
CharUnits getBaseClassOffset(const CXXRecordDecl *Base) const
getBaseClassOffset - Get the offset, in chars, for the given base class.
CharUnits getVBaseClassOffset(const CXXRecordDecl *VBase) const
getVBaseClassOffset - Get the offset, in chars, for the given base class.
Represents an array type, per C99 6.7.5.2 - Array Declarators.
Definition TypeBase.h:3722
QualType getElementType() const
Definition TypeBase.h:3734
std::string getQuotedName(unsigned ID) const
Return the identifier name for the specified builtin inside single quotes for a diagnostic,...
Definition Builtins.cpp:85
bool isConstantEvaluated(unsigned ID) const
Return true if this function can be constant evaluated by Clang frontend.
Definition Builtins.h:431
Represents a base class of a C++ class.
Definition DeclCXX.h:146
bool isVirtual() const
Determines whether the base class is a virtual base class (or not).
Definition DeclCXX.h:203
QualType getType() const
Retrieves the type of the base class.
Definition DeclCXX.h:249
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition Expr.h:2877
Expr * getArg(unsigned Arg)
getArg - Return the specified argument.
Definition Expr.h:3081
CharUnits - This is an opaque type for sizes expressed in character units.
Definition CharUnits.h:38
CharUnits alignmentAtOffset(CharUnits offset) const
Given that this is a non-zero alignment value, what is the alignment at the given offset?
Definition CharUnits.h:207
bool isZero() const
isZero - Test whether the quantity equals zero.
Definition CharUnits.h:122
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition CharUnits.h:185
static CharUnits One()
One - Construct a CharUnits quantity of one.
Definition CharUnits.h:58
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
Definition CharUnits.h:63
CharUnits alignTo(const CharUnits &Align) const
alignTo - Returns the next integer (mod 2**64) that is greater than or equal to this quantity and is ...
Definition CharUnits.h:201
static unsigned getMaxSizeBits(const ASTContext &Context)
Determine the maximum number of active bits that an array's size can require, which limits the maximu...
Definition Type.cpp:254
This represents one expression.
Definition Expr.h:112
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition Expr.cpp:273
QualType getType() const
Definition Expr.h:144
Represents a member of a struct/union/class.
Definition Decl.h:3157
unsigned getFieldIndex() const
Returns the index of this field within its record, as appropriate for passing to ASTRecordLayout::get...
Definition Decl.h:3242
const RecordDecl * getParent() const
Returns the parent of this field declaration, which is the struct in which this field is defined.
Definition Decl.h:3393
Represents a function declaration or definition.
Definition Decl.h:1999
One of these records is kept for each identifier that is lexed.
bool isStr(const char(&Str)[StrLen]) const
Return true if this is the identifier for the specified string.
OffsetOfExpr - [C99 7.17] - This represents an expression of the form offsetof(record-type,...
Definition Expr.h:2527
const OffsetOfNode & getComponent(unsigned Idx) const
Definition Expr.h:2574
TypeSourceInfo * getTypeSourceInfo() const
Definition Expr.h:2567
unsigned getNumComponents() const
Definition Expr.h:2582
Helper class for OffsetOfExpr.
Definition Expr.h:2421
FieldDecl * getField() const
For a field offsetof node, returns the field.
Definition Expr.h:2485
@ Array
An index into an array.
Definition Expr.h:2426
@ Identifier
A field in a dependent type, known only by its name.
Definition Expr.h:2430
@ Field
A field.
Definition Expr.h:2428
@ Base
An implicit indirection through a C++ base class, when the field found is in a base class.
Definition Expr.h:2433
Kind getKind() const
Determine what kind of offsetof node this is.
Definition Expr.h:2475
CXXBaseSpecifier * getBase() const
For a base class node, returns the base specifier.
Definition Expr.h:2495
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition TypeBase.h:3328
A (possibly-)qualified type.
Definition TypeBase.h:937
bool isTriviallyCopyableType(const ASTContext &Context) const
Return true if this is a trivially copyable type (C++0x [basic.types]p9)
Definition Type.cpp:2867
const Type * getTypePtr() const
Retrieves a pointer to the underlying (unqualified) type.
Definition TypeBase.h:8287
QualType getNonReferenceType() const
If Type is a reference type (e.g., const int&), returns the type that the reference refers to ("const...
Definition TypeBase.h:8472
SourceRange getSourceRange() const LLVM_READONLY
SourceLocation tokens are not useful in isolation - they are low level value objects created/interpre...
Definition Stmt.cpp:334
unsigned getMaxAtomicInlineWidth() const
Return the maximum width lock-free atomic operation which can be inlined given the supported features...
Definition TargetInfo.h:853
bool isBigEndian() const
virtual int getEHDataRegisterNumber(unsigned RegNo) const
Return the register number that __builtin_eh_return_regno would return with the specified argument.
virtual bool isNan2008() const
Returns true if NaN encoding is IEEE 754-2008.
QualType getType() const
Return the type wrapped by this type source info.
Definition TypeBase.h:8269
bool isBooleanType() const
Definition TypeBase.h:9010
bool isSignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is signed or an enumeration types whose underlying ty...
Definition Type.cpp:2225
bool isUnsignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is unsigned or an enumeration types whose underlying ...
Definition Type.cpp:2273
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
Definition Type.h:26
RecordDecl * getAsRecordDecl() const
Retrieves the RecordDecl this type refers to.
Definition Type.h:41
bool isPointerType() const
Definition TypeBase.h:8524
bool isIntegerType() const
isIntegerType() does not include complex integers (a GCC extension).
Definition TypeBase.h:8924
const T * castAs() const
Member-template castAs<specific type>.
Definition TypeBase.h:9167
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition Type.cpp:752
bool isIncompleteType(NamedDecl **Def=nullptr) const
Types are partitioned into 3 broad categories (C99 6.2.5p1): object types, function types,...
Definition Type.cpp:2436
bool isVectorType() const
Definition TypeBase.h:8663
bool isFloatingType() const
Definition Type.cpp:2304
const T * getAs() const
Member-template getAs<specific type>'.
Definition TypeBase.h:9100
QualType getType() const
Definition Decl.h:722
Represents a GCC generic vector type.
Definition TypeBase.h:4175
unsigned getNumElements() const
Definition TypeBase.h:4190
QualType getElementType() const
Definition TypeBase.h:4189
A memory block, either on the stack or in the heap.
Definition InterpBlock.h:44
const Descriptor * getDescriptor() const
Returns the block's descriptor.
Definition InterpBlock.h:73
bool isDynamic() const
Definition InterpBlock.h:83
Wrapper around boolean types.
Definition Boolean.h:25
static Boolean from(T Value)
Definition Boolean.h:97
Pointer into the code segment.
Definition Source.h:30
const LangOptions & getLangOpts() const
Returns the language options.
Definition Context.cpp:326
OptPrimType classify(QualType T) const
Classifies a type.
Definition Context.cpp:360
unsigned getEvalID() const
Definition Context.h:145
Manages dynamic memory allocations done during bytecode interpretation.
bool deallocate(const Expr *Source, const Block *BlockToDelete, InterpState &S)
Deallocate the given source+block combination.
std::optional< Form > getAllocationForm(const Expr *Source) const
Checks whether the allocation done at the given source is an array allocation.
Block * allocate(const Descriptor *D, unsigned EvalID, Form AllocForm)
Allocate ONE element of the given descriptor.
If a Floating is constructed from Memory, it DOES NOT OWN THAT MEMORY.
Definition Floating.h:35
void copy(const APFloat &F)
Definition Floating.h:122
llvm::FPClassTest classify() const
Definition Floating.h:153
bool isSignaling() const
Definition Floating.h:148
bool isNormal() const
Definition Floating.h:151
ComparisonCategoryResult compare(const Floating &RHS) const
Definition Floating.h:156
bool isZero() const
Definition Floating.h:143
bool isNegative() const
Definition Floating.h:142
bool isFinite() const
Definition Floating.h:150
bool isDenormal() const
Definition Floating.h:152
APFloat::fltCategory getCategory() const
Definition Floating.h:154
APFloat getAPFloat() const
Definition Floating.h:63
Base class for stack frames, shared between VM and walker.
Definition Frame.h:25
virtual const FunctionDecl * getCallee() const =0
Returns the called function's declaration.
If an IntegralAP is constructed from Memory, it DOES NOT OWN THAT MEMORY.
Definition IntegralAP.h:36
Frame storing local variables.
Definition InterpFrame.h:27
const Expr * getExpr(CodePtr PC) const
InterpFrame * Caller
The frame of the previous function.
Definition InterpFrame.h:30
SourceInfo getSource(CodePtr PC) const
Map a location to a source.
CodePtr getRetPC() const
Returns the return address of the frame.
SourceLocation getLocation(CodePtr PC) const
SourceRange getRange(CodePtr PC) const
unsigned getDepth() const
const FunctionDecl * getCallee() const override
Returns the caller.
Stack frame storing temporaries and parameters.
Definition InterpStack.h:25
T pop()
Returns the value from the top of the stack and removes it.
Definition InterpStack.h:39
void push(Tys &&...Args)
Constructs a value in place on the top of the stack.
Definition InterpStack.h:33
void discard()
Discards the top value from the stack.
Definition InterpStack.h:50
T & peek() const
Returns a reference to the value on the top of the stack.
Definition InterpStack.h:62
Interpreter context.
Definition InterpState.h:43
Expr::EvalStatus & getEvalStatus() const override
Definition InterpState.h:67
Context & getContext() const
DynamicAllocator & getAllocator()
Context & Ctx
Interpreter Context.
Floating allocFloat(const llvm::fltSemantics &Sem)
llvm::SmallVector< const Block * > InitializingBlocks
List of blocks we're currently running either constructors or destructors for.
ASTContext & getASTContext() const override
Definition InterpState.h:70
InterpStack & Stk
Temporary stack.
const VarDecl * EvaluatingDecl
Declaration we're initializing/evaluating, if any.
InterpFrame * Current
The current frame.
T allocAP(unsigned BitWidth)
const LangOptions & getLangOpts() const
Definition InterpState.h:71
StdAllocatorCaller getStdAllocatorCaller(StringRef Name) const
Program & P
Reference to the module containing all bytecode.
PrimType value_or(PrimType PT) const
Definition PrimType.h:68
A pointer to a memory block, live or dead.
Definition Pointer.h:91
Pointer narrow() const
Restricts the scope of an array element pointer.
Definition Pointer.h:188
bool isInitialized() const
Checks if an object was initialized.
Definition Pointer.cpp:440
Pointer atIndex(uint64_t Idx) const
Offsets a pointer inside an array.
Definition Pointer.h:156
bool isDummy() const
Checks if the pointer points to a dummy value.
Definition Pointer.h:546
int64_t getIndex() const
Returns the index into an array.
Definition Pointer.h:611
bool isActive() const
Checks if the object is active.
Definition Pointer.h:535
Pointer atField(unsigned Off) const
Creates a pointer to a field.
Definition Pointer.h:173
T & deref() const
Dereferences the pointer, if it's live.
Definition Pointer.h:662
unsigned getNumElems() const
Returns the number of elements.
Definition Pointer.h:595
Pointer getArray() const
Returns the parent array.
Definition Pointer.h:315
bool isUnknownSizeArray() const
Checks if the structure is an array of unknown size.
Definition Pointer.h:414
void activate() const
Activates a field.
Definition Pointer.cpp:576
bool isIntegralPointer() const
Definition Pointer.h:468
QualType getType() const
Returns the type of the innermost field.
Definition Pointer.h:335
bool isArrayElement() const
Checks if the pointer points to an array.
Definition Pointer.h:420
void initializeAllElements() const
Initialize all elements of a primitive array at once.
Definition Pointer.cpp:545
bool isLive() const
Checks if the pointer is live.
Definition Pointer.h:267
T & elem(unsigned I) const
Dereferences the element at index I.
Definition Pointer.h:678
Pointer getBase() const
Returns a pointer to the object of which this pointer is a field.
Definition Pointer.h:306
std::string toDiagnosticString(const ASTContext &Ctx) const
Converts the pointer to a string usable in diagnostics.
Definition Pointer.cpp:427
bool isZero() const
Checks if the pointer is null.
Definition Pointer.h:254
bool isRoot() const
Pointer points directly to a block.
Definition Pointer.h:436
const Descriptor * getDeclDesc() const
Accessor for information about the declaration site.
Definition Pointer.h:281
static bool pointToSameBlock(const Pointer &A, const Pointer &B)
Checks if both given pointers point to the same block.
Definition Pointer.cpp:652
APValue toAPValue(const ASTContext &ASTCtx) const
Converts the pointer to an APValue.
Definition Pointer.cpp:171
bool isOnePastEnd() const
Checks if the index is one past end.
Definition Pointer.h:628
uint64_t getIntegerRepresentation() const
Definition Pointer.h:143
const FieldDecl * getField() const
Returns the field information.
Definition Pointer.h:480
Pointer expand() const
Expands a pointer to the containing array, undoing narrowing.
Definition Pointer.h:221
bool isBlockPointer() const
Definition Pointer.h:467
const Block * block() const
Definition Pointer.h:601
const Descriptor * getFieldDesc() const
Accessors for information about the innermost field.
Definition Pointer.h:325
bool isVirtualBaseClass() const
Definition Pointer.h:542
bool isBaseClass() const
Checks if a structure is a base class.
Definition Pointer.h:541
size_t elemSize() const
Returns the element size of the innermost field.
Definition Pointer.h:357
bool canBeInitialized() const
If this pointer has an InlineDescriptor we can use to initialize.
Definition Pointer.h:443
Lifetime getLifetime() const
Definition Pointer.h:723
void initialize() const
Initializes a field.
Definition Pointer.cpp:493
bool isField() const
Checks if the item is a field in an object.
Definition Pointer.h:273
const Record * getRecord() const
Returns the record descriptor of a class.
Definition Pointer.h:473
Descriptor * createDescriptor(const DeclTy &D, PrimType T, const Type *SourceTy=nullptr, Descriptor::MetadataSize MDSize=std::nullopt, bool IsConst=false, bool IsTemporary=false, bool IsMutable=false, bool IsVolatile=false)
Creates a descriptor for a primitive type.
Definition Program.h:119
Structure/Class descriptor.
Definition Record.h:25
const RecordDecl * getDecl() const
Returns the underlying declaration.
Definition Record.h:53
bool isUnion() const
Checks if the record is a union.
Definition Record.h:57
const Field * getField(const FieldDecl *FD) const
Returns a field.
Definition Record.cpp:47
llvm::iterator_range< const_base_iter > bases() const
Definition Record.h:92
unsigned getNumFields() const
Definition Record.h:88
llvm::iterator_range< const_field_iter > fields() const
Definition Record.h:84
Describes the statement/declaration an opcode was generated from.
Definition Source.h:73
OptionalDiagnostic Note(SourceLocation Loc, diag::kind DiagId)
Add a note to a prior diagnostic.
Definition State.cpp:63
DiagnosticBuilder report(SourceLocation Loc, diag::kind DiagId)
Directly reports a diagnostic message.
Definition State.cpp:74
OptionalDiagnostic FFDiag(SourceLocation Loc, diag::kind DiagId=diag::note_invalid_subexpr_in_const_expr, unsigned ExtraNotes=0)
Diagnose that the evaluation could not be folded (FF => FoldFailure)
Definition State.cpp:21
OptionalDiagnostic CCEDiag(SourceLocation Loc, diag::kind DiagId=diag::note_invalid_subexpr_in_const_expr, unsigned ExtraNotes=0)
Diagnose that the evaluation does not produce a C++11 core constant expression.
Definition State.cpp:42
bool checkingPotentialConstantExpression() const
Are we checking whether the expression is a potential constant expression?
Definition State.h:99
Defines the clang::TargetInfo interface.
bool computeOSLogBufferLayout(clang::ASTContext &Ctx, const clang::CallExpr *E, OSLogBufferLayout &layout)
Definition OSLog.cpp:192
static bool interp__builtin_is_within_lifetime(InterpState &S, CodePtr OpPC, const CallExpr *Call)
static void assignInteger(InterpState &S, const Pointer &Dest, PrimType ValueT, const APSInt &Value)
bool readPointerToBuffer(const Context &Ctx, const Pointer &FromPtr, BitcastBuffer &Buffer, bool ReturnOnUninit)
static Floating abs(InterpState &S, const Floating &In)
static bool interp__builtin_ia32_pdep(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_fmax(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, bool IsNumBuiltin)
static bool interp__builtin_elementwise_maxmin(InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned BuiltinID)
static bool interp__builtin_bswap(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_elementwise_triop(InterpState &S, CodePtr OpPC, const CallExpr *Call, llvm::function_ref< APInt(const APSInt &, const APSInt &, const APSInt &)> Fn)
static bool interp__builtin_assume(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
bool CheckNewDeleteForms(InterpState &S, CodePtr OpPC, DynamicAllocator::Form AllocForm, DynamicAllocator::Form DeleteForm, const Descriptor *D, const Expr *NewExpr)
Diagnose mismatched new[]/delete or new/delete[] pairs.
Definition Interp.cpp:1103
static bool interp__builtin_isnan(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
Defined as __builtin_isnan(...), to accommodate the fact that it can take a float,...
static llvm::RoundingMode getRoundingMode(FPOptions FPO)
Definition Interp.h:406
static bool interp__builtin_elementwise_countzeroes(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned BuiltinID)
Can be called with an integer or vector as the first and only parameter.
bool Call(InterpState &S, CodePtr OpPC, const Function *Func, uint32_t VarArgSize)
Definition Interp.cpp:1553
static bool interp__builtin_classify_type(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_fmin(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, bool IsNumBuiltin)
static bool interp__builtin_blend(InterpState &S, CodePtr OpPC, const CallExpr *Call)
bool SetThreeWayComparisonField(InterpState &S, CodePtr OpPC, const Pointer &Ptr, const APSInt &IntValue)
Sets the given integral value to the pointer, which is of a std::{weak,partial,strong}...
static bool interp__builtin_operator_delete(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_fabs(InterpState &S, CodePtr OpPC, const InterpFrame *Frame)
static bool interp__builtin_memcmp(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned ID)
static bool interp__builtin_atomic_lock_free(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned BuiltinOp)
bool __atomic_always_lock_free(size_t, void const volatile*) bool __atomic_is_lock_free(size_t,...
static llvm::APSInt convertBoolVectorToInt(const Pointer &Val)
static bool interp__builtin_move(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_clz(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned BuiltinOp)
bool CheckMutable(InterpState &S, CodePtr OpPC, const Pointer &Ptr)
Checks if a pointer points to a mutable field.
Definition Interp.cpp:594
static bool interp__builtin_parity(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_is_aligned_up_down(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned BuiltinOp)
__builtin_is_aligned() __builtin_align_up() __builtin_align_down() The first parameter is either an i...
static bool interp__builtin_bitreverse(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_rotate(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, bool Right)
rotateleft(value, amount)
static bool Activate(InterpState &S, CodePtr OpPC)
Definition Interp.h:1984
static bool interp__builtin_ia32_addcarry_subborrow(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned BuiltinOp)
(CarryIn, LHS, RHS, Result)
static bool interp__builtin_ia32_bextr(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool isOneByteCharacterType(QualType T)
Determine if T is a character type for which we guarantee that sizeof(T) == 1.
static unsigned computePointerOffset(const ASTContext &ASTCtx, const Pointer &Ptr)
Compute the byte offset of Ptr in the full declaration.
static bool interp__builtin_ia32_bzhi(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_strcmp(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned ID)
bool CheckLoad(InterpState &S, CodePtr OpPC, const Pointer &Ptr, AccessKinds AK)
Checks if a value can be loaded from a block.
Definition Interp.cpp:793
static bool interp__builtin_overflowop(InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned BuiltinOp)
static bool interp__builtin_inf(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_isinf(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, bool CheckSign, const CallExpr *Call)
static bool interp__builtin_os_log_format_buffer_size(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
bool InterpretOffsetOf(InterpState &S, CodePtr OpPC, const OffsetOfExpr *E, ArrayRef< int64_t > ArrayIndices, int64_t &Result)
Interpret an offsetof operation.
static bool interp__builtin_x86_insert_subvector(InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned ID)
bool CheckRange(InterpState &S, CodePtr OpPC, const Pointer &Ptr, AccessKinds AK)
Checks if a pointer is in range.
Definition Interp.cpp:519
static bool pointsToLastObject(const Pointer &Ptr)
Does Ptr point to the last subobject?
static bool interp__builtin_select(InterpState &S, CodePtr OpPC, const CallExpr *Call)
AVX512 predicated move: "Result = Mask[] ? LHS[] : RHS[]".
static bool interp__builtin_ia32_pmul(InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned BuiltinID)
llvm::APFloat APFloat
Definition Floating.h:27
static void discard(InterpStack &Stk, PrimType T)
bool CheckLive(InterpState &S, CodePtr OpPC, const Pointer &Ptr, AccessKinds AK)
Checks if a pointer is live and accessible.
Definition Interp.cpp:414
static bool interp__builtin_fpclassify(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
Five int values followed by one floating value.
static bool interp__builtin_abs(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp_floating_comparison(InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned ID)
static bool handleOverflow(InterpState &S, CodePtr OpPC, const T &SrcValue)
Definition Interp.h:153
llvm::APInt APInt
Definition FixedPoint.h:19
static bool copyComposite(InterpState &S, CodePtr OpPC, const Pointer &Src, Pointer &Dest, bool Activate)
static bool copyRecord(InterpState &S, CodePtr OpPC, const Pointer &Src, Pointer &Dest, bool Activate=false)
static bool interp__builtin_c11_atomic_is_lock_free(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
bool __c11_atomic_is_lock_free(size_t)
static void zeroAll(Pointer &Dest)
static bool interp__builtin_elementwise_int_binop(InterpState &S, CodePtr OpPC, const CallExpr *Call, llvm::function_ref< APInt(const APSInt &, const APSInt &)> Fn)
static bool interp__builtin_issubnormal(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_arithmetic_fence(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
PrimType
Enumeration of the primitive types of the VM.
Definition PrimType.h:34
static bool interp__builtin_isfinite(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call, uint32_t BuiltinID)
Interpret a builtin function.
static bool interp__builtin_expect(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_complex(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
__builtin_complex(Float A, float B);
bool CheckDummy(InterpState &S, CodePtr OpPC, const Block *B, AccessKinds AK)
Checks if a pointer is a dummy pointer.
Definition Interp.cpp:1154
static bool interp__builtin_assume_aligned(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
__builtin_assume_aligned(Ptr, Alignment[, ExtraOffset])
static bool interp__builtin_ptrauth_string_discriminator(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_ffs(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_memchr(InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned ID)
static bool interp__builtin_x86_pack(InterpState &S, CodePtr, const CallExpr *E, llvm::function_ref< APInt(const APSInt &)> PackFn)
static void pushInteger(InterpState &S, const APSInt &Val, QualType QT)
Pushes Val on the stack as the type given by QT.
static bool interp__builtin_operator_new(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_strlen(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned ID)
bool CheckArray(InterpState &S, CodePtr OpPC, const Pointer &Ptr)
Checks if the array is offsetable.
Definition Interp.cpp:406
static bool interp__builtin_elementwise_abs(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned BuiltinID)
static bool interp__builtin_copysign(InterpState &S, CodePtr OpPC, const InterpFrame *Frame)
static bool interp__builtin_iszero(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_elementwise_popcount(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned BuiltinID)
Can be called with an integer or vector as the first and only parameter.
static bool interp__builtin_addressof(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_signbit(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
bool Error(InterpState &S, CodePtr OpPC)
Do nothing and just abort execution.
Definition Interp.h:3309
static bool interp__builtin_vector_reduce(InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned ID)
static bool interp__builtin_memcpy(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned ID)
static bool interp__builtin_elementwise_triop_fp(InterpState &S, CodePtr OpPC, const CallExpr *Call, llvm::function_ref< APFloat(const APFloat &, const APFloat &, const APFloat &, llvm::RoundingMode)> Fn)
static bool interp__builtin_popcount(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_object_size(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_carryop(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned BuiltinOp)
Three integral values followed by a pointer (lhs, rhs, carry, carryOut).
bool CheckArraySize(InterpState &S, CodePtr OpPC, uint64_t NumElems)
Definition Interp.h:3690
static bool interp__builtin_ctz(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned BuiltinID)
static bool interp__builtin_is_constant_evaluated(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static APSInt popToAPSInt(InterpStack &Stk, PrimType T)
static std::optional< unsigned > computeFullDescSize(const ASTContext &ASTCtx, const Descriptor *Desc)
static bool interp__builtin_isfpclass(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
First parameter to __builtin_isfpclass is the floating value, the second one is an integral value.
static bool interp__builtin_issignaling(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_nan(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, bool Signaling)
static bool interp__builtin_clrsb(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
bool DoMemcpy(InterpState &S, CodePtr OpPC, const Pointer &Src, Pointer &Dest)
Copy the contents of Src into Dest.
static bool interp__builtin_elementwise_int_unaryop(InterpState &S, CodePtr OpPC, const CallExpr *Call, llvm::function_ref< APInt(const APSInt &)> Fn)
constexpr bool isIntegralType(PrimType T)
Definition PrimType.h:124
static bool interp__builtin_eh_return_data_regno(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static LLVM_ATTRIBUTE_UNUSED bool isNoopBuiltin(unsigned ID)
static void diagnoseNonConstexprBuiltin(InterpState &S, CodePtr OpPC, unsigned ID)
llvm::APSInt APSInt
Definition FixedPoint.h:20
static QualType getElemType(const Pointer &P)
static bool interp__builtin_isnormal(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_ia32_pext(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static void swapBytes(std::byte *M, size_t N)
The JSON file list parser is used to communicate input to InstallAPI.
if (T->getSizeExpr()) TRY_TO(TraverseStmt(const_cast< Expr * >(T->getSizeExpr())))
ComparisonCategoryResult
An enumeration representing the possible results of a three-way comparison.
@ Result
The result type of a method or function.
Definition TypeBase.h:905
@ AK_Read
Definition State.h:27
const FunctionProtoType * T
U cast(CodeGen::Address addr)
Definition Address.h:327
SmallVectorImpl< PartialDiagnosticAt > * Diag
Diag - If this is non-null, it will be filled in with a stack of notes indicating why evaluation failed.
Definition Expr.h:633
Track what bits have been initialized to known values and which ones have indeterminate value.
std::unique_ptr< std::byte[]> Data
A quantity in bits.
A quantity in bytes.
size_t getQuantity() const
Describes a memory block created by an allocation site.
Definition Descriptor.h:122
unsigned getNumElems() const
Returns the number of elements stored in the block.
Definition Descriptor.h:249
bool isPrimitive() const
Checks if the descriptor is of a primitive.
Definition Descriptor.h:263
QualType getElemQualType() const
bool isCompositeArray() const
Checks if the descriptor is of an array of composites.
Definition Descriptor.h:256
const ValueDecl * asValueDecl() const
Definition Descriptor.h:214
static constexpr unsigned MaxArrayElemBytes
Maximum number of bytes to be used for array elements.
Definition Descriptor.h:148
QualType getType() const
const Decl * asDecl() const
Definition Descriptor.h:210
static constexpr MetadataSize InlineDescMD
Definition Descriptor.h:144
unsigned getElemSize() const
Returns the size of an element when the structure is viewed as an array.
Definition Descriptor.h:244
bool isPrimitiveArray() const
Checks if the descriptor is of an array of primitives.
Definition Descriptor.h:254
const VarDecl * asVarDecl() const
Definition Descriptor.h:218
PrimType getPrimType() const
Definition Descriptor.h:236
bool isRecord() const
Checks if the descriptor is of a record.
Definition Descriptor.h:268
const Record *const ElemRecord
Pointer to the record, if block contains records.
Definition Descriptor.h:153
const Expr * asExpr() const
Definition Descriptor.h:211
bool isArray() const
Checks if the descriptor is of an array.
Definition Descriptor.h:266
Mapping from primitive types to their representation.
Definition PrimType.h:134