Thanks for visiting codestin.com.
Credit goes to clang.llvm.org.

clang 22.0.0git
SemaAMDGPU.cpp
Go to the documentation of this file.
1//===------ SemaAMDGPU.cpp ------- AMDGPU target-specific routines --------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements semantic analysis functions specific to AMDGPU.
10//
11//===----------------------------------------------------------------------===//
12
#include "clang/Sema/SemaAMDGPU.h"
#include "clang/Basic/DiagnosticSema.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/Sema.h"
#include "llvm/Support/AMDGPUAddrSpace.h"
#include "llvm/Support/AtomicOrdering.h"
#include <cstdint>
21
22namespace clang {
23
25
27 CallExpr *TheCall) {
28 // position of memory order and scope arguments in the builtin
29 unsigned OrderIndex, ScopeIndex;
30
31 const auto *FD = SemaRef.getCurFunctionDecl(/*AllowLambda=*/true);
32 assert(FD && "AMDGPU builtins should not be used outside of a function");
33 llvm::StringMap<bool> CallerFeatureMap;
34 getASTContext().getFunctionFeatureMap(CallerFeatureMap, FD);
35 bool HasGFX950Insts =
36 Builtin::evaluateRequiredTargetFeatures("gfx950-insts", CallerFeatureMap);
37
38 switch (BuiltinID) {
39 case AMDGPU::BI__builtin_amdgcn_raw_ptr_buffer_load_lds:
40 case AMDGPU::BI__builtin_amdgcn_struct_ptr_buffer_load_lds:
41 case AMDGPU::BI__builtin_amdgcn_load_to_lds:
42 case AMDGPU::BI__builtin_amdgcn_global_load_lds: {
43 constexpr const int SizeIdx = 2;
44 llvm::APSInt Size;
45 Expr *ArgExpr = TheCall->getArg(SizeIdx);
46 [[maybe_unused]] ExprResult R =
47 SemaRef.VerifyIntegerConstantExpression(ArgExpr, &Size);
48 assert(!R.isInvalid());
49 switch (Size.getSExtValue()) {
50 case 1:
51 case 2:
52 case 4:
53 return false;
54 case 12:
55 case 16: {
56 if (HasGFX950Insts)
57 return false;
58 [[fallthrough]];
59 }
60 default:
61 SemaRef.targetDiag(ArgExpr->getExprLoc(),
62 diag::err_amdgcn_load_lds_size_invalid_value)
63 << ArgExpr->getSourceRange();
64 SemaRef.targetDiag(ArgExpr->getExprLoc(),
65 diag::note_amdgcn_load_lds_size_valid_value)
66 << HasGFX950Insts << ArgExpr->getSourceRange();
67 return true;
68 }
69 }
70 case AMDGPU::BI__builtin_amdgcn_get_fpenv:
71 case AMDGPU::BI__builtin_amdgcn_set_fpenv:
72 return false;
73 case AMDGPU::BI__builtin_amdgcn_atomic_inc32:
74 case AMDGPU::BI__builtin_amdgcn_atomic_inc64:
75 case AMDGPU::BI__builtin_amdgcn_atomic_dec32:
76 case AMDGPU::BI__builtin_amdgcn_atomic_dec64:
77 OrderIndex = 2;
78 ScopeIndex = 3;
79 break;
80 case AMDGPU::BI__builtin_amdgcn_fence:
81 OrderIndex = 0;
82 ScopeIndex = 1;
83 break;
84 case AMDGPU::BI__builtin_amdgcn_mov_dpp:
85 return checkMovDPPFunctionCall(TheCall, 5, 1);
86 case AMDGPU::BI__builtin_amdgcn_mov_dpp8:
87 return checkMovDPPFunctionCall(TheCall, 2, 1);
88 case AMDGPU::BI__builtin_amdgcn_update_dpp:
89 return checkMovDPPFunctionCall(TheCall, 6, 2);
90 case AMDGPU::BI__builtin_amdgcn_cvt_scale_pk8_f16_fp8:
91 case AMDGPU::BI__builtin_amdgcn_cvt_scale_pk8_bf16_fp8:
92 case AMDGPU::BI__builtin_amdgcn_cvt_scale_pk8_f16_bf8:
93 case AMDGPU::BI__builtin_amdgcn_cvt_scale_pk8_bf16_bf8:
94 case AMDGPU::BI__builtin_amdgcn_cvt_scale_pk8_f16_fp4:
95 case AMDGPU::BI__builtin_amdgcn_cvt_scale_pk8_bf16_fp4:
96 case AMDGPU::BI__builtin_amdgcn_cvt_scale_pk8_f32_fp8:
97 case AMDGPU::BI__builtin_amdgcn_cvt_scale_pk8_f32_bf8:
98 case AMDGPU::BI__builtin_amdgcn_cvt_scale_pk8_f32_fp4:
99 case AMDGPU::BI__builtin_amdgcn_cvt_scale_pk16_f16_fp6:
100 case AMDGPU::BI__builtin_amdgcn_cvt_scale_pk16_bf16_fp6:
101 case AMDGPU::BI__builtin_amdgcn_cvt_scale_pk16_f16_bf6:
102 case AMDGPU::BI__builtin_amdgcn_cvt_scale_pk16_bf16_bf6:
103 case AMDGPU::BI__builtin_amdgcn_cvt_scale_pk16_f32_fp6:
104 case AMDGPU::BI__builtin_amdgcn_cvt_scale_pk16_f32_bf6:
105 return SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 15);
106 case AMDGPU::BI__builtin_amdgcn_cooperative_atomic_load_32x4B:
107 case AMDGPU::BI__builtin_amdgcn_cooperative_atomic_load_16x8B:
108 case AMDGPU::BI__builtin_amdgcn_cooperative_atomic_load_8x16B:
109 return checkCoopAtomicFunctionCall(TheCall, /*IsStore=*/false);
110 case AMDGPU::BI__builtin_amdgcn_cooperative_atomic_store_32x4B:
111 case AMDGPU::BI__builtin_amdgcn_cooperative_atomic_store_16x8B:
112 case AMDGPU::BI__builtin_amdgcn_cooperative_atomic_store_8x16B:
113 return checkCoopAtomicFunctionCall(TheCall, /*IsStore=*/true);
114 default:
115 return false;
116 }
117
118 ExprResult Arg = TheCall->getArg(OrderIndex);
119 auto ArgExpr = Arg.get();
120 Expr::EvalResult ArgResult;
121
122 if (!ArgExpr->EvaluateAsInt(ArgResult, getASTContext()))
123 return Diag(ArgExpr->getExprLoc(), diag::err_typecheck_expect_int)
124 << ArgExpr->getType();
125 auto Ord = ArgResult.Val.getInt().getZExtValue();
126
127 // Check validity of memory ordering as per C11 / C++11's memory model.
128 // Only fence needs check. Atomic dec/inc allow all memory orders.
129 if (!llvm::isValidAtomicOrderingCABI(Ord))
130 return Diag(ArgExpr->getBeginLoc(),
131 diag::warn_atomic_op_has_invalid_memory_order)
132 << 0 << ArgExpr->getSourceRange();
133 switch (static_cast<llvm::AtomicOrderingCABI>(Ord)) {
134 case llvm::AtomicOrderingCABI::relaxed:
135 case llvm::AtomicOrderingCABI::consume:
136 if (BuiltinID == AMDGPU::BI__builtin_amdgcn_fence)
137 return Diag(ArgExpr->getBeginLoc(),
138 diag::warn_atomic_op_has_invalid_memory_order)
139 << 0 << ArgExpr->getSourceRange();
140 break;
141 case llvm::AtomicOrderingCABI::acquire:
142 case llvm::AtomicOrderingCABI::release:
143 case llvm::AtomicOrderingCABI::acq_rel:
144 case llvm::AtomicOrderingCABI::seq_cst:
145 break;
146 }
147
148 Arg = TheCall->getArg(ScopeIndex);
149 ArgExpr = Arg.get();
150 Expr::EvalResult ArgResult1;
151 // Check that sync scope is a constant literal
152 if (!ArgExpr->EvaluateAsConstantExpr(ArgResult1, getASTContext()))
153 return Diag(ArgExpr->getExprLoc(), diag::err_expr_not_string_literal)
154 << ArgExpr->getType();
155
156 return false;
157}
158
160 bool Fail = false;
161
162 // First argument is a global or generic pointer.
163 Expr *PtrArg = TheCall->getArg(0);
164 QualType PtrTy = PtrArg->getType()->getPointeeType();
165 unsigned AS = getASTContext().getTargetAddressSpace(PtrTy.getAddressSpace());
166 if (AS != llvm::AMDGPUAS::FLAT_ADDRESS &&
167 AS != llvm::AMDGPUAS::GLOBAL_ADDRESS) {
168 Fail = true;
169 Diag(TheCall->getBeginLoc(), diag::err_amdgcn_coop_atomic_invalid_as)
170 << PtrArg->getSourceRange();
171 }
172
173 // Check atomic ordering
174 Expr *AtomicOrdArg = TheCall->getArg(IsStore ? 2 : 1);
175 Expr::EvalResult AtomicOrdArgRes;
176 if (!AtomicOrdArg->EvaluateAsInt(AtomicOrdArgRes, getASTContext()))
177 llvm_unreachable("Intrinsic requires imm for atomic ordering argument!");
178 auto Ord =
179 llvm::AtomicOrderingCABI(AtomicOrdArgRes.Val.getInt().getZExtValue());
180
181 // Atomic ordering cannot be acq_rel in any case, acquire for stores or
182 // release for loads.
183 if (!llvm::isValidAtomicOrderingCABI((unsigned)Ord) ||
184 (Ord == llvm::AtomicOrderingCABI::acq_rel) ||
185 Ord == (IsStore ? llvm::AtomicOrderingCABI::acquire
186 : llvm::AtomicOrderingCABI::release)) {
187 return Diag(AtomicOrdArg->getBeginLoc(),
188 diag::warn_atomic_op_has_invalid_memory_order)
189 << 0 << AtomicOrdArg->getSourceRange();
190 }
191
192 // Last argument is a string literal
193 Expr *Arg = TheCall->getArg(TheCall->getNumArgs() - 1);
195 Fail = true;
196 Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal)
197 << Arg->getSourceRange();
198 }
199
200 return Fail;
201}
202
203bool SemaAMDGPU::checkMovDPPFunctionCall(CallExpr *TheCall, unsigned NumArgs,
204 unsigned NumDataArgs) {
205 assert(NumDataArgs <= 2);
206 if (SemaRef.checkArgCountRange(TheCall, NumArgs, NumArgs))
207 return true;
208 Expr *Args[2];
209 QualType ArgTys[2];
210 for (unsigned I = 0; I != NumDataArgs; ++I) {
211 Args[I] = TheCall->getArg(I);
212 ArgTys[I] = Args[I]->getType();
213 // TODO: Vectors can also be supported.
214 if (!ArgTys[I]->isArithmeticType() || ArgTys[I]->isAnyComplexType()) {
215 SemaRef.Diag(Args[I]->getBeginLoc(),
216 diag::err_typecheck_cond_expect_int_float)
217 << ArgTys[I] << Args[I]->getSourceRange();
218 return true;
219 }
220 }
221 if (NumDataArgs < 2)
222 return false;
223
224 if (getASTContext().hasSameUnqualifiedType(ArgTys[0], ArgTys[1]))
225 return false;
226
227 if (((ArgTys[0]->isUnsignedIntegerType() &&
228 ArgTys[1]->isSignedIntegerType()) ||
229 (ArgTys[0]->isSignedIntegerType() &&
230 ArgTys[1]->isUnsignedIntegerType())) &&
231 getASTContext().getTypeSize(ArgTys[0]) ==
232 getASTContext().getTypeSize(ArgTys[1]))
233 return false;
234
235 SemaRef.Diag(Args[1]->getBeginLoc(),
236 diag::err_typecheck_call_different_arg_types)
237 << ArgTys[0] << ArgTys[1];
238 return true;
239}
240
241static bool
243 const AMDGPUFlatWorkGroupSizeAttr &Attr) {
244 // Accept template arguments for now as they depend on something else.
245 // We'll get to check them when they eventually get instantiated.
246 if (MinExpr->isValueDependent() || MaxExpr->isValueDependent())
247 return false;
248
249 uint32_t Min = 0;
250 if (!S.checkUInt32Argument(Attr, MinExpr, Min, 0))
251 return true;
252
253 uint32_t Max = 0;
254 if (!S.checkUInt32Argument(Attr, MaxExpr, Max, 1))
255 return true;
256
257 if (Min == 0 && Max != 0) {
258 S.Diag(Attr.getLocation(), diag::err_attribute_argument_invalid)
259 << &Attr << 0;
260 return true;
261 }
262 if (Min > Max) {
263 S.Diag(Attr.getLocation(), diag::err_attribute_argument_invalid)
264 << &Attr << 1;
265 return true;
266 }
267
268 return false;
269}
270
271AMDGPUFlatWorkGroupSizeAttr *
273 Expr *MinExpr, Expr *MaxExpr) {
274 ASTContext &Context = getASTContext();
275 AMDGPUFlatWorkGroupSizeAttr TmpAttr(Context, CI, MinExpr, MaxExpr);
276
277 if (checkAMDGPUFlatWorkGroupSizeArguments(SemaRef, MinExpr, MaxExpr, TmpAttr))
278 return nullptr;
279 return ::new (Context)
280 AMDGPUFlatWorkGroupSizeAttr(Context, CI, MinExpr, MaxExpr);
281}
282
284 const AttributeCommonInfo &CI,
285 Expr *MinExpr, Expr *MaxExpr) {
286 if (auto *Attr = CreateAMDGPUFlatWorkGroupSizeAttr(CI, MinExpr, MaxExpr))
287 D->addAttr(Attr);
288}
289
291 const ParsedAttr &AL) {
292 Expr *MinExpr = AL.getArgAsExpr(0);
293 Expr *MaxExpr = AL.getArgAsExpr(1);
294
295 addAMDGPUFlatWorkGroupSizeAttr(D, AL, MinExpr, MaxExpr);
296}
297
298static bool checkAMDGPUWavesPerEUArguments(Sema &S, Expr *MinExpr,
299 Expr *MaxExpr,
300 const AMDGPUWavesPerEUAttr &Attr) {
301 if (S.DiagnoseUnexpandedParameterPack(MinExpr) ||
302 (MaxExpr && S.DiagnoseUnexpandedParameterPack(MaxExpr)))
303 return true;
304
305 // Accept template arguments for now as they depend on something else.
306 // We'll get to check them when they eventually get instantiated.
307 if (MinExpr->isValueDependent() || (MaxExpr && MaxExpr->isValueDependent()))
308 return false;
309
310 uint32_t Min = 0;
311 if (!S.checkUInt32Argument(Attr, MinExpr, Min, 0))
312 return true;
313
314 uint32_t Max = 0;
315 if (MaxExpr && !S.checkUInt32Argument(Attr, MaxExpr, Max, 1))
316 return true;
317
318 if (Min == 0 && Max != 0) {
319 S.Diag(Attr.getLocation(), diag::err_attribute_argument_invalid)
320 << &Attr << 0;
321 return true;
322 }
323 if (Max != 0 && Min > Max) {
324 S.Diag(Attr.getLocation(), diag::err_attribute_argument_invalid)
325 << &Attr << 1;
326 return true;
327 }
328
329 return false;
330}
331
332AMDGPUWavesPerEUAttr *
334 Expr *MinExpr, Expr *MaxExpr) {
335 ASTContext &Context = getASTContext();
336 AMDGPUWavesPerEUAttr TmpAttr(Context, CI, MinExpr, MaxExpr);
337
338 if (checkAMDGPUWavesPerEUArguments(SemaRef, MinExpr, MaxExpr, TmpAttr))
339 return nullptr;
340
341 return ::new (Context) AMDGPUWavesPerEUAttr(Context, CI, MinExpr, MaxExpr);
342}
343
345 Expr *MinExpr, Expr *MaxExpr) {
346 if (auto *Attr = CreateAMDGPUWavesPerEUAttr(CI, MinExpr, MaxExpr))
347 D->addAttr(Attr);
348}
349
352 return;
353
354 Expr *MinExpr = AL.getArgAsExpr(0);
355 Expr *MaxExpr = (AL.getNumArgs() > 1) ? AL.getArgAsExpr(1) : nullptr;
356
357 addAMDGPUWavesPerEUAttr(D, AL, MinExpr, MaxExpr);
358}
359
361 uint32_t NumSGPR = 0;
362 Expr *NumSGPRExpr = AL.getArgAsExpr(0);
363 if (!SemaRef.checkUInt32Argument(AL, NumSGPRExpr, NumSGPR))
364 return;
365
366 D->addAttr(::new (getASTContext())
367 AMDGPUNumSGPRAttr(getASTContext(), AL, NumSGPR));
368}
369
371 uint32_t NumVGPR = 0;
372 Expr *NumVGPRExpr = AL.getArgAsExpr(0);
373 if (!SemaRef.checkUInt32Argument(AL, NumVGPRExpr, NumVGPR))
374 return;
375
376 D->addAttr(::new (getASTContext())
377 AMDGPUNumVGPRAttr(getASTContext(), AL, NumVGPR));
378}
379
380static bool
382 Expr *ZExpr,
383 const AMDGPUMaxNumWorkGroupsAttr &Attr) {
384 if (S.DiagnoseUnexpandedParameterPack(XExpr) ||
385 (YExpr && S.DiagnoseUnexpandedParameterPack(YExpr)) ||
386 (ZExpr && S.DiagnoseUnexpandedParameterPack(ZExpr)))
387 return true;
388
389 // Accept template arguments for now as they depend on something else.
390 // We'll get to check them when they eventually get instantiated.
391 if (XExpr->isValueDependent() || (YExpr && YExpr->isValueDependent()) ||
392 (ZExpr && ZExpr->isValueDependent()))
393 return false;
394
395 uint32_t NumWG = 0;
396 Expr *Exprs[3] = {XExpr, YExpr, ZExpr};
397 for (int i = 0; i < 3; i++) {
398 if (Exprs[i]) {
399 if (!S.checkUInt32Argument(Attr, Exprs[i], NumWG, i,
400 /*StrictlyUnsigned=*/true))
401 return true;
402 if (NumWG == 0) {
403 S.Diag(Attr.getLoc(), diag::err_attribute_argument_is_zero)
404 << &Attr << Exprs[i]->getSourceRange();
405 return true;
406 }
407 }
408 }
409
410 return false;
411}
412
414 const AttributeCommonInfo &CI, Expr *XExpr, Expr *YExpr, Expr *ZExpr) {
415 ASTContext &Context = getASTContext();
416 AMDGPUMaxNumWorkGroupsAttr TmpAttr(Context, CI, XExpr, YExpr, ZExpr);
417
418 if (checkAMDGPUMaxNumWorkGroupsArguments(SemaRef, XExpr, YExpr, ZExpr,
419 TmpAttr))
420 return nullptr;
421
422 return ::new (Context)
423 AMDGPUMaxNumWorkGroupsAttr(Context, CI, XExpr, YExpr, ZExpr);
424}
425
427 const AttributeCommonInfo &CI,
428 Expr *XExpr, Expr *YExpr,
429 Expr *ZExpr) {
430 if (auto *Attr = CreateAMDGPUMaxNumWorkGroupsAttr(CI, XExpr, YExpr, ZExpr))
431 D->addAttr(Attr);
432}
433
435 const ParsedAttr &AL) {
436 Expr *YExpr = (AL.getNumArgs() > 1) ? AL.getArgAsExpr(1) : nullptr;
437 Expr *ZExpr = (AL.getNumArgs() > 2) ? AL.getArgAsExpr(2) : nullptr;
438 addAMDGPUMaxNumWorkGroupsAttr(D, AL, AL.getArgAsExpr(0), YExpr, ZExpr);
439}
440
441} // namespace clang
This file declares semantic analysis functions specific to AMDGPU.
Enumerates target-specific builtins in their own namespaces within namespace clang.
APSInt & getInt()
Definition APValue.h:489
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition ASTContext.h:188
void getFunctionFeatureMap(llvm::StringMap< bool > &FeatureMap, const FunctionDecl *) const
unsigned getTargetAddressSpace(LangAS AS) const
PtrTy get() const
Definition Ownership.h:171
bool isInvalid() const
Definition Ownership.h:167
Attr - This represents one attribute.
Definition Attr.h:44
SourceLocation getLocation() const
Definition Attr.h:97
SourceLocation getLoc() const
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition Expr.h:2877
Expr * getArg(unsigned Arg)
getArg - Return the specified argument.
Definition Expr.h:3081
SourceLocation getBeginLoc() const
Definition Expr.h:3211
unsigned getNumArgs() const
getNumArgs - Return the number of actual arguments to this call.
Definition Expr.h:3068
Decl - This represents one declaration (or definition), e.g.
Definition DeclBase.h:86
void addAttr(Attr *A)
This represents one expression.
Definition Expr.h:112
bool EvaluateAsInt(EvalResult &Result, const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects, bool InConstantContext=false) const
EvaluateAsInt - Return true if this is a constant which we can fold and convert to an integer,...
bool isValueDependent() const
Determines whether the value of this expression depends on a template parameter (i.e. is value-dependent).
Definition Expr.h:177
Expr * IgnoreParenImpCasts() LLVM_READONLY
Skip past any parentheses and implicit casts which might surround this expression until reaching a fi...
Definition Expr.cpp:3085
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition Expr.cpp:273
QualType getType() const
Definition Expr.h:144
ParsedAttr - Represents a syntactic attribute.
Definition ParsedAttr.h:119
unsigned getNumArgs() const
getNumArgs - Return the number of actual arguments to this attribute.
Definition ParsedAttr.h:371
Expr * getArgAsExpr(unsigned Arg) const
Definition ParsedAttr.h:383
bool checkAtLeastNumArgs(class Sema &S, unsigned Num) const
Check if the attribute has at least as many args as Num.
bool checkAtMostNumArgs(class Sema &S, unsigned Num) const
Check if the attribute has at most as many args as Num.
A (possibly-)qualified type.
Definition TypeBase.h:937
void handleAMDGPUMaxNumWorkGroupsAttr(Decl *D, const ParsedAttr &AL)
void addAMDGPUFlatWorkGroupSizeAttr(Decl *D, const AttributeCommonInfo &CI, Expr *Min, Expr *Max)
addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size attribute to a particular declar...
bool checkCoopAtomicFunctionCall(CallExpr *TheCall, bool IsStore)
void handleAMDGPUFlatWorkGroupSizeAttr(Decl *D, const ParsedAttr &AL)
void handleAMDGPUNumSGPRAttr(Decl *D, const ParsedAttr &AL)
AMDGPUMaxNumWorkGroupsAttr * CreateAMDGPUMaxNumWorkGroupsAttr(const AttributeCommonInfo &CI, Expr *XExpr, Expr *YExpr, Expr *ZExpr)
Create an AMDGPUMaxNumWorkGroupsAttr attribute.
AMDGPUWavesPerEUAttr * CreateAMDGPUWavesPerEUAttr(const AttributeCommonInfo &CI, Expr *Min, Expr *Max)
Create an AMDGPUWavesPerEUAttr attribute.
void handleAMDGPUNumVGPRAttr(Decl *D, const ParsedAttr &AL)
AMDGPUFlatWorkGroupSizeAttr * CreateAMDGPUFlatWorkGroupSizeAttr(const AttributeCommonInfo &CI, Expr *Min, Expr *Max)
Create an AMDGPUFlatWorkGroupSizeAttr attribute.
bool checkMovDPPFunctionCall(CallExpr *TheCall, unsigned NumArgs, unsigned NumDataArgs)
void handleAMDGPUWavesPerEUAttr(Decl *D, const ParsedAttr &AL)
bool CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall)
void addAMDGPUWavesPerEUAttr(Decl *D, const AttributeCommonInfo &CI, Expr *Min, Expr *Max)
addAMDGPUWavePersEUAttr - Adds an amdgpu_waves_per_eu attribute to a particular declaration.
void addAMDGPUMaxNumWorkGroupsAttr(Decl *D, const AttributeCommonInfo &CI, Expr *XExpr, Expr *YExpr, Expr *ZExpr)
addAMDGPUMaxNumWorkGroupsAttr - Adds an amdgpu_max_num_work_groups attribute to a particular declarat...
SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID, bool DeferHint=false)
Emit a diagnostic.
Definition SemaBase.cpp:61
SemaBase(Sema &S)
Definition SemaBase.cpp:7
ASTContext & getASTContext() const
Definition SemaBase.cpp:9
Sema & SemaRef
Definition SemaBase.h:40
Sema - This implements semantic analysis and AST building for C.
Definition Sema.h:853
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T, UnexpandedParameterPackContext UPPC)
If the given type contains an unexpanded parameter pack, diagnose the error.
bool checkUInt32Argument(const AttrInfo &AI, const Expr *Expr, uint32_t &Val, unsigned Idx=UINT_MAX, bool StrictlyUnsigned=false)
If Expr is a valid integer constant, get the value of the integer expression and return success or fa...
Definition Sema.h:4826
SourceRange getSourceRange() const LLVM_READONLY
SourceLocation tokens are not useful in isolation - they are low level value objects created/interpre...
Definition Stmt.cpp:334
SourceLocation getBeginLoc() const LLVM_READONLY
Definition Stmt.cpp:346
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition Type.cpp:752
bool evaluateRequiredTargetFeatures(llvm::StringRef RequiredFeatures, const llvm::StringMap< bool > &TargetFeatureMap)
Returns true if the required target features of a builtin function are enabled.
The JSON file list parser is used to communicate input to InstallAPI.
bool isa(CodeGen::Address addr)
Definition Address.h:330
static bool checkAMDGPUMaxNumWorkGroupsArguments(Sema &S, Expr *XExpr, Expr *YExpr, Expr *ZExpr, const AMDGPUMaxNumWorkGroupsAttr &Attr)
static bool checkAMDGPUFlatWorkGroupSizeArguments(Sema &S, Expr *MinExpr, Expr *MaxExpr, const AMDGPUFlatWorkGroupSizeAttr &Attr)
ActionResult< Expr * > ExprResult
Definition Ownership.h:249
static bool checkAMDGPUWavesPerEUArguments(Sema &S, Expr *MinExpr, Expr *MaxExpr, const AMDGPUWavesPerEUAttr &Attr)
EvalResult is a struct with detailed info about an evaluated expression.
Definition Expr.h:645
APValue Val
Val - This is the value the expression can be folded to.
Definition Expr.h:647