author    Dimitry Andric <dim@FreeBSD.org>  2023-12-18 20:30:12 +0000
committer Dimitry Andric <dim@FreeBSD.org>  2024-04-06 20:11:55 +0000
commit    5f757f3ff9144b609b3c433dfd370cc6bdc191ad (patch)
tree      1b4e980b866cd26a00af34c0a653eb640bd09caf /contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.cpp
parent    3e1c8a35f741a5d114d0ba670b15191355711fe9 (diff)
parent    312c0ed19cc5276a17bacf2120097bec4515b0f1 (diff)
Diffstat (limited to 'contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.cpp')
-rw-r--r--  contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.cpp | 89
1 file changed, 46 insertions(+), 43 deletions(-)
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.cpp b/contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.cpp
index 7ef893cb1a2d..2199d7b58fb9 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.cpp
@@ -495,12 +495,12 @@ void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
if (CurFnInfo->getMaxVectorWidth() > LargestVectorWidth)
LargestVectorWidth = CurFnInfo->getMaxVectorWidth();
- // Add the required-vector-width attribute. This contains the max width from:
+ // Add the min-legal-vector-width attribute. This contains the max width from:
// 1. min-vector-width attribute used in the source program.
// 2. Any builtins used that have a vector width specified.
// 3. Values passed in and out of inline assembly.
// 4. Width of vector arguments and return types for this function.
- // 5. Width of vector aguments and return types for functions called by this
+ // 5. Width of vector arguments and return types for functions called by this
// function.
if (getContext().getTargetInfo().getTriple().isX86())
CurFn->addFnAttr("min-legal-vector-width",
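
The comment above enumerates everything that feeds "min-legal-vector-width". A minimal sketch of source constructs that raise it, assuming an x86 target compiled with -mavx (the function and identifiers are illustrative, not from this diff):

    #include <immintrin.h>

    // (1) explicit min_vector_width attribute in the source program
    __attribute__((min_vector_width(256)))
    // (4) 256-bit vector argument and return type
    __m256 scale(__m256 v) {
      // (2) a builtin with a known 256-bit vector width
      return _mm256_add_ps(v, v);
    }

Inline-assembly operands (3) and the signatures of functions this one calls (5) widen the recorded value the same way.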
@@ -576,7 +576,7 @@ CodeGenFunction::getUBSanFunctionTypeHash(QualType Ty) const {
Ty = getContext().getFunctionTypeWithExceptionSpec(Ty, EST_None);
std::string Mangled;
llvm::raw_string_ostream Out(Mangled);
- CGM.getCXXABI().getMangleContext().mangleTypeName(Ty, Out, false);
+ CGM.getCXXABI().getMangleContext().mangleCanonicalTypeName(Ty, Out, false);
return llvm::ConstantInt::get(
CGM.Int32Ty, static_cast<uint32_t>(llvm::xxh3_64bits(Mangled)));
}
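
mangleCanonicalTypeName hashes the canonical function type, so type sugar such as typedefs cannot make an -fsanitize=function check disagree between caller and callee. A hedged illustration (names are hypothetical):

    typedef int MyInt;
    int callee(MyInt x) { return x; }           // canonical type: int (int)

    int call_indirect(int (*fp)(int), int arg) {
      return fp(arg);  // hash of int (int) matches; no spurious trap
    }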
@@ -1121,11 +1121,9 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
Address(&*AI, ConvertType(RetTy),
CurFnInfo->getReturnInfo().getIndirectAlign(), KnownNonNull);
if (!CurFnInfo->getReturnInfo().getIndirectByVal()) {
- ReturnValuePointer =
- CreateDefaultAlignTempAlloca(Int8PtrTy, "result.ptr");
- Builder.CreateStore(Builder.CreatePointerBitCastOrAddrSpaceCast(
- ReturnValue.getPointer(), Int8PtrTy),
- ReturnValuePointer);
+ ReturnValuePointer = CreateDefaultAlignTempAlloca(
+ ReturnValue.getPointer()->getType(), "result.ptr");
+ Builder.CreateStore(ReturnValue.getPointer(), ReturnValuePointer);
}
} else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::InAlloca &&
!hasScalarEvaluationKind(CurFnInfo->getReturnType())) {
@@ -1167,12 +1165,13 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
EmitFunctionProlog(*CurFnInfo, CurFn, Args);
- if (isa_and_nonnull<CXXMethodDecl>(D) &&
- cast<CXXMethodDecl>(D)->isInstance()) {
- CGM.getCXXABI().EmitInstanceFunctionProlog(*this);
- const CXXMethodDecl *MD = cast<CXXMethodDecl>(D);
- if (MD->getParent()->isLambda() &&
- MD->getOverloadedOperator() == OO_Call) {
+ if (const CXXMethodDecl *MD = dyn_cast_if_present<CXXMethodDecl>(D);
+ MD && !MD->isStatic()) {
+ bool IsInLambda =
+ MD->getParent()->isLambda() && MD->getOverloadedOperator() == OO_Call;
+ if (MD->isImplicitObjectMemberFunction())
+ CGM.getCXXABI().EmitInstanceFunctionProlog(*this);
+ if (IsInLambda) {
// We're in a lambda; figure out the captures.
MD->getParent()->getCaptureFields(LambdaCaptureFields,
LambdaThisCaptureField);
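
The rewritten test distinguishes C++23 explicit object member functions ("deducing this"): they are non-static, so they reach this branch, but they carry no implicit 'this' and must skip EmitInstanceFunctionProlog, while an explicit-object lambda call operator still needs its captures mapped. A sketch of both cases (types are hypothetical):

    struct Counter {
      int n = 0;
      // non-static, but no implicit 'this': the instance prolog is skipped
      void bump(this Counter &self) { ++self.n; }
    };

    auto make_adder(int k) {
      // explicit-object call operator: the IsInLambda path still maps k
      return [k](this auto &&self, int x) { return x + k; };
    }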
@@ -1202,7 +1201,7 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
VLASizeMap[VAT->getSizeExpr()] = ExprArg;
}
}
- } else {
+ } else if (MD->isImplicitObjectMemberFunction()) {
// Not in a lambda; just use 'this' from the method.
// FIXME: Should we generate a new load for each use of 'this'? The
// fast register allocator would be happier...
@@ -1215,11 +1214,10 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
SkippedChecks.set(SanitizerKind::ObjectSize, true);
QualType ThisTy = MD->getThisType();
- // If this is the call operator of a lambda with no capture-default, it
+ // If this is the call operator of a lambda with no captures, it
// may have a static invoker function, which may call this operator with
// a null 'this' pointer.
- if (isLambdaCallOperator(MD) &&
- MD->getParent()->getLambdaCaptureDefault() == LCD_None)
+ if (isLambdaCallOperator(MD) && MD->getParent()->isCapturelessLambda())
SkippedChecks.set(SanitizerKind::Null, true);
EmitTypeCheck(
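
Only a truly captureless lambda converts to a plain function pointer, and that conversion's static invoker calls operator() with a null 'this'. The old LCD_None test also matched lambdas with explicit captures such as [y], which have no static invoker, so their null check was being skipped needlessly. For example:

    auto captureless = [](int x) { return x * 2; };
    int (*fp)(int) = captureless;   // via the static invoker; 'this' is null

    int y = 7;
    auto capturing = [y](int x) { return x + y; };
    // int (*fp2)(int) = capturing;  // ill-formed: no conversion exists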
@@ -1262,11 +1260,6 @@ void CodeGenFunction::EmitFunctionBody(const Stmt *Body) {
EmitCompoundStmtWithoutScope(*S);
else
EmitStmt(Body);
-
- // This is checked after emitting the function body so we know if there
- // are any permitted infinite loops.
- if (checkIfFunctionMustProgress())
- CurFn->addFnAttr(llvm::Attribute::MustProgress);
}
/// When instrumenting to collect profile data, the counts for some blocks
@@ -1313,7 +1306,7 @@ QualType CodeGenFunction::BuildFunctionArgList(GlobalDecl GD,
QualType ResTy = FD->getReturnType();
const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD);
- if (MD && MD->isInstance()) {
+ if (MD && MD->isImplicitObjectMemberFunction()) {
if (CGM.getCXXABI().HasThisReturn(GD))
ResTy = MD->getThisType();
else if (CGM.getCXXABI().hasMostDerivedReturn(GD))
@@ -1338,7 +1331,7 @@ QualType CodeGenFunction::BuildFunctionArgList(GlobalDecl GD,
auto *Implicit = ImplicitParamDecl::Create(
getContext(), Param->getDeclContext(), Param->getLocation(),
- /*Id=*/nullptr, getContext().getSizeType(), ImplicitParamDecl::Other);
+ /*Id=*/nullptr, getContext().getSizeType(), ImplicitParamKind::Other);
SizeArguments[Param] = Implicit;
Args.push_back(Implicit);
}
@@ -1445,6 +1438,11 @@ void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
if (Body && isa_and_nonnull<CoroutineBodyStmt>(Body))
llvm::append_range(FnArgs, FD->parameters());
+ // Ensure that the function adheres to the forward progress guarantee, which
+ // is required by certain optimizations.
+ if (checkIfFunctionMustProgress())
+ CurFn->addFnAttr(llvm::Attribute::MustProgress);
+
// Generate the body of the function.
PGO.assignRegionCounters(GD, CurFn);
if (isa<CXXDestructorDecl>(FD))
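
The mustprogress attribute encodes C++'s forward-progress guarantee ([intro.progress]): every thread must eventually perform I/O, a volatile access, an atomic, or a synchronization operation, so optimizers may assume side-effect-free loops terminate. A minimal sketch of what that licenses:

    int spin() {
      while (true) { }  // UB in C++: the loop performs none of the above
      return 0;         // under mustprogress, LLVM may fold spin() to 'ret 0'
    }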
@@ -2049,8 +2047,7 @@ CodeGenFunction::EmitNullInitialization(Address DestPtr, QualType Ty) {
NullConstant, Twine());
CharUnits NullAlign = DestPtr.getAlignment();
NullVariable->setAlignment(NullAlign.getAsAlign());
- Address SrcPtr(Builder.CreateBitCast(NullVariable, Builder.getInt8PtrTy()),
- Builder.getInt8Ty(), NullAlign);
+ Address SrcPtr(NullVariable, Builder.getInt8Ty(), NullAlign);
if (vla) return emitNonZeroVLAInit(*this, Ty, DestPtr, SrcPtr, SizeVal);
@@ -2489,10 +2486,8 @@ llvm::Value *CodeGenFunction::EmitAnnotationCall(llvm::Function *AnnotationFn,
const AnnotateAttr *Attr) {
SmallVector<llvm::Value *, 5> Args = {
AnnotatedVal,
- Builder.CreateBitCast(CGM.EmitAnnotationString(AnnotationStr),
- ConstGlobalsPtrTy),
- Builder.CreateBitCast(CGM.EmitAnnotationUnit(Location),
- ConstGlobalsPtrTy),
+ CGM.EmitAnnotationString(AnnotationStr),
+ CGM.EmitAnnotationUnit(Location),
CGM.EmitAnnotationLineNo(Location),
};
if (Attr)
@@ -2502,15 +2497,10 @@ llvm::Value *CodeGenFunction::EmitAnnotationCall(llvm::Function *AnnotationFn,
void CodeGenFunction::EmitVarAnnotations(const VarDecl *D, llvm::Value *V) {
assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
- // FIXME We create a new bitcast for every annotation because that's what
- // llvm-gcc was doing.
- unsigned AS = V->getType()->getPointerAddressSpace();
- llvm::Type *I8PtrTy = Builder.getInt8PtrTy(AS);
for (const auto *I : D->specific_attrs<AnnotateAttr>())
EmitAnnotationCall(CGM.getIntrinsic(llvm::Intrinsic::var_annotation,
- {I8PtrTy, CGM.ConstGlobalsPtrTy}),
- Builder.CreateBitCast(V, I8PtrTy, V->getName()),
- I->getAnnotation(), D->getLocation(), I);
+ {V->getType(), CGM.ConstGlobalsPtrTy}),
+ V, I->getAnnotation(), D->getLocation(), I);
}
Address CodeGenFunction::EmitFieldAnnotations(const FieldDecl *D,
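
With opaque pointers the annotation intrinsics are simply overloaded on the annotated value's pointer type, so the old i8* bitcasts become dead weight. For reference, the source constructs that reach these paths (identifiers are illustrative):

    struct Packet {
      int len __attribute__((annotate("bounds-checked")));  // EmitFieldAnnotations
    };

    void f() {
      int local __attribute__((annotate("watch"))) = 0;     // EmitVarAnnotations
      (void)local;
    }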
@@ -2595,10 +2585,15 @@ void CodeGenFunction::checkTargetFeatures(SourceLocation Loc,
std::string MissingFeature;
llvm::StringMap<bool> CallerFeatureMap;
CGM.getContext().getFunctionFeatureMap(CallerFeatureMap, FD);
+ // When compiling in HipStdPar mode we have to be conservative in rejecting
+ // target specific features in the FE, and defer the possible error to the
+ // AcceleratorCodeSelection pass, wherein iff an unsupported target builtin is
+ // referenced by an accelerator executable function, we emit an error.
+ bool IsHipStdPar = getLangOpts().HIPStdPar && getLangOpts().CUDAIsDevice;
if (BuiltinID) {
StringRef FeatureList(CGM.getContext().BuiltinInfo.getRequiredFeatures(BuiltinID));
if (!Builtin::evaluateRequiredTargetFeatures(
- FeatureList, CallerFeatureMap)) {
+ FeatureList, CallerFeatureMap) && !IsHipStdPar) {
CGM.getDiags().Report(Loc, diag::err_builtin_needs_feature)
<< TargetDecl->getDeclName()
<< FeatureList;
@@ -2631,7 +2626,7 @@ void CodeGenFunction::checkTargetFeatures(SourceLocation Loc,
return false;
}
return true;
- }))
+ }) && !IsHipStdPar)
CGM.getDiags().Report(Loc, diag::err_function_needs_feature)
<< FD->getDeclName() << TargetDecl->getDeclName() << MissingFeature;
} else if (!FD->isMultiVersion() && FD->hasAttr<TargetAttr>()) {
@@ -2640,7 +2635,8 @@ void CodeGenFunction::checkTargetFeatures(SourceLocation Loc,
for (const auto &F : CalleeFeatureMap) {
if (F.getValue() && (!CallerFeatureMap.lookup(F.getKey()) ||
- !CallerFeatureMap.find(F.getKey())->getValue()))
+ !CallerFeatureMap.find(F.getKey())->getValue()) &&
+ !IsHipStdPar)
CGM.getDiags().Report(Loc, diag::err_function_needs_feature)
<< FD->getDeclName() << TargetDecl->getDeclName() << F.getKey();
}
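
Under HIP Standard Parallelism (--hipstdpar) host code is speculatively compiled for the accelerator, so the frontend cannot yet know whether a feature-gated builtin will survive into offloaded code. A hedged sketch of the situation being deferred (the builtin is real; the scenario is illustrative):

    unsigned host_only_crc32(unsigned crc, unsigned data) {
      // requires the "sse4.2" feature; normally err_builtin_needs_feature
      // fires here, but under HipStdPar device compilation the diagnosis is
      // left to the AcceleratorCodeSelection pass.
      return __builtin_ia32_crc32si(crc, data);
    }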
@@ -2682,8 +2678,15 @@ llvm::Value *CodeGenFunction::FormX86ResolverCondition(
const MultiVersionResolverOption &RO) {
llvm::Value *Condition = nullptr;
- if (!RO.Conditions.Architecture.empty())
- Condition = EmitX86CpuIs(RO.Conditions.Architecture);
+ if (!RO.Conditions.Architecture.empty()) {
+ StringRef Arch = RO.Conditions.Architecture;
+ // If arch= specifies an x86-64 micro-architecture level, test the feature
+ // with __builtin_cpu_supports, otherwise use __builtin_cpu_is.
+ if (Arch.starts_with("x86-64"))
+ Condition = EmitX86CpuSupports({Arch});
+ else
+ Condition = EmitX86CpuIs(Arch);
+ }
if (!RO.Conditions.Features.empty()) {
llvm::Value *FeatureCond = EmitX86CpuSupports(RO.Conditions.Features);
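
arch= can now name an x86-64 micro-architecture level (x86-64, x86-64-v2, x86-64-v3, x86-64-v4). A level describes a feature set rather than a single CPU, so the resolver must test it with __builtin_cpu_supports instead of __builtin_cpu_is. A sketch of a declaration that produces such a resolver (the function is hypothetical):

    __attribute__((target_clones("arch=x86-64-v3", "default")))
    long dot(const int *a, const int *b, int n) {
      long s = 0;
      for (int i = 0; i < n; ++i) s += (long)a[i] * b[i];
      return s;  // resolver dispatches on __builtin_cpu_supports("x86-64-v3")
    }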