aboutsummaryrefslogtreecommitdiff
path: root/clang/lib/CodeGen/CGExprScalar.cpp
diff options
context:
space:
mode:
authorDimitry Andric <dim@FreeBSD.org>2023-07-26 19:03:47 +0000
committerDimitry Andric <dim@FreeBSD.org>2023-07-26 19:04:23 +0000
commit7fa27ce4a07f19b07799a767fc29416f3b625afb (patch)
tree27825c83636c4de341eb09a74f49f5d38a15d165 /clang/lib/CodeGen/CGExprScalar.cpp
parente3b557809604d036af6e00c60f012c2025b59a5e (diff)
Diffstat (limited to 'clang/lib/CodeGen/CGExprScalar.cpp')
-rw-r--r--clang/lib/CodeGen/CGExprScalar.cpp150
1 file changed, 96 insertions, 54 deletions
diff --git a/clang/lib/CodeGen/CGExprScalar.cpp b/clang/lib/CodeGen/CGExprScalar.cpp
index a0dcb978b1ac..fe1a59b21f38 100644
--- a/clang/lib/CodeGen/CGExprScalar.cpp
+++ b/clang/lib/CodeGen/CGExprScalar.cpp
@@ -814,13 +814,21 @@ public:
Value *(ScalarExprEmitter::*F)(const BinOpInfo &));
QualType getPromotionType(QualType Ty) {
+ const auto &Ctx = CGF.getContext();
if (auto *CT = Ty->getAs<ComplexType>()) {
QualType ElementType = CT->getElementType();
- if (ElementType.UseExcessPrecision(CGF.getContext()))
- return CGF.getContext().getComplexType(CGF.getContext().FloatTy);
+ if (ElementType.UseExcessPrecision(Ctx))
+ return Ctx.getComplexType(Ctx.FloatTy);
}
- if (Ty.UseExcessPrecision(CGF.getContext()))
- return CGF.getContext().FloatTy;
+
+ if (Ty.UseExcessPrecision(Ctx)) {
+ if (auto *VT = Ty->getAs<VectorType>()) {
+ unsigned NumElements = VT->getNumElements();
+ return Ctx.getVectorType(Ctx.FloatTy, NumElements, VT->getVectorKind());
+ }
+ return Ctx.FloatTy;
+ }
+
return QualType();
}
@@ -1861,6 +1869,23 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
return Visit(E->getInit(0));
}
+ if (isa<llvm::ScalableVectorType>(VType)) {
+ if (NumInitElements == 0) {
+ // C++11 value-initialization for the vector.
+ return EmitNullValue(E->getType());
+ }
+
+ if (NumInitElements == 1) {
+ Expr *InitVector = E->getInit(0);
+
+ // Initialize from another scalable vector of the same type.
+ if (InitVector->getType() == E->getType())
+ return Visit(InitVector);
+ }
+
+ llvm_unreachable("Unexpected initialization of a scalable vector!");
+ }
+
unsigned ResElts = cast<llvm::FixedVectorType>(VType)->getNumElements();
// Loop over initializers collecting the Value for each, and remembering
@@ -2038,15 +2063,15 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
case CK_LValueBitCast:
case CK_ObjCObjectLValueCast: {
Address Addr = EmitLValue(E).getAddress(CGF);
- Addr = Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(DestTy));
+ Addr = Addr.withElementType(CGF.ConvertTypeForMem(DestTy));
LValue LV = CGF.MakeAddrLValue(Addr, DestTy);
return EmitLoadOfLValue(LV, CE->getExprLoc());
}
case CK_LValueToRValueBitCast: {
LValue SourceLVal = CGF.EmitLValue(E);
- Address Addr = Builder.CreateElementBitCast(SourceLVal.getAddress(CGF),
- CGF.ConvertTypeForMem(DestTy));
+ Address Addr = SourceLVal.getAddress(CGF).withElementType(
+ CGF.ConvertTypeForMem(DestTy));
LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy);
DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
return EmitLoadOfLValue(DestLV, CE->getExprLoc());
@@ -2098,7 +2123,8 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
// Update heapallocsite metadata when there is an explicit pointer cast.
if (auto *CI = dyn_cast<llvm::CallBase>(Src)) {
- if (CI->getMetadata("heapallocsite") && isa<ExplicitCastExpr>(CE)) {
+ if (CI->getMetadata("heapallocsite") && isa<ExplicitCastExpr>(CE) &&
+ !isa<CastExpr>(E)) {
QualType PointeeType = DestTy->getPointeeType();
if (!PointeeType.isNull())
CGF.getDebugInfo()->addHeapAllocSiteMetadata(CI, PointeeType,
@@ -2126,7 +2152,7 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
llvm::Value *UndefVec = llvm::UndefValue::get(DstTy);
llvm::Value *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty);
llvm::Value *Result = Builder.CreateInsertVector(
- DstTy, UndefVec, Src, Zero, "castScalableSve");
+ DstTy, UndefVec, Src, Zero, "cast.scalable");
if (NeedsBitCast)
Result = Builder.CreateBitCast(Result, OrigType);
return Result;
@@ -2150,7 +2176,7 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
}
if (ScalableSrc->getElementType() == FixedDst->getElementType()) {
llvm::Value *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty);
- return Builder.CreateExtractVector(DstTy, Src, Zero, "castFixedSve");
+ return Builder.CreateExtractVector(DstTy, Src, Zero, "cast.fixed");
}
}
}
@@ -2168,8 +2194,7 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
Address Addr = CGF.CreateDefaultAlignTempAlloca(SrcTy, "saved-value");
LValue LV = CGF.MakeAddrLValue(Addr, E->getType());
CGF.EmitStoreOfScalar(Src, LV);
- Addr = Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(DestTy),
- "castFixedSve");
+ Addr = Addr.withElementType(CGF.ConvertTypeForMem(DestTy));
LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy);
DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
return EmitLoadOfLValue(DestLV, CE->getExprLoc());
@@ -2681,15 +2706,13 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
} else if (type->isFunctionType()) {
llvm::Value *amt = Builder.getInt32(amount);
- value = CGF.EmitCastToVoidPtr(value);
if (CGF.getLangOpts().isSignedOverflowDefined())
value = Builder.CreateGEP(CGF.Int8Ty, value, amt, "incdec.funcptr");
else
- value = CGF.EmitCheckedInBoundsGEP(CGF.Int8Ty, value, amt,
- /*SignedIndices=*/false,
- isSubtraction, E->getExprLoc(),
- "incdec.funcptr");
- value = Builder.CreateBitCast(value, input->getType());
+ value =
+ CGF.EmitCheckedInBoundsGEP(CGF.Int8Ty, value, amt,
+ /*SignedIndices=*/false, isSubtraction,
+ E->getExprLoc(), "incdec.funcptr");
// For everything else, we can just do a simple increment.
} else {
@@ -2800,7 +2823,6 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
// Objective-C pointer types.
} else {
const ObjCObjectPointerType *OPT = type->castAs<ObjCObjectPointerType>();
- value = CGF.EmitCastToVoidPtr(value);
CharUnits size = CGF.getContext().getTypeSizeInChars(OPT->getObjectType());
if (!isInc) size = -size;
@@ -3456,21 +3478,7 @@ Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
llvm::Value *Val;
CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
Val = Builder.CreateFDiv(Ops.LHS, Ops.RHS, "div");
- if ((CGF.getLangOpts().OpenCL &&
- !CGF.CGM.getCodeGenOpts().OpenCLCorrectlyRoundedDivSqrt) ||
- (CGF.getLangOpts().HIP && CGF.getLangOpts().CUDAIsDevice &&
- !CGF.CGM.getCodeGenOpts().HIPCorrectlyRoundedDivSqrt)) {
- // OpenCL v1.1 s7.4: minimum accuracy of single precision / is 2.5ulp
- // OpenCL v1.2 s5.6.4.2: The -cl-fp32-correctly-rounded-divide-sqrt
- // build option allows an application to specify that single precision
- // floating-point divide (x/y and 1/x) and sqrt used in the program
- // source are correctly rounded.
- llvm::Type *ValTy = Val->getType();
- if (ValTy->isFloatTy() ||
- (isa<llvm::VectorType>(ValTy) &&
- cast<llvm::VectorType>(ValTy)->getElementType()->isFloatTy()))
- CGF.SetFPAccuracy(Val, 2.5);
- }
+ CGF.SetDivFPAccuracy(Val);
return Val;
}
else if (Ops.isFixedPointOp())
@@ -3711,11 +3719,8 @@ static Value *emitPointerArithmetic(CodeGenFunction &CGF,
// Explicitly handle GNU void* and function pointer arithmetic extensions. The
// GNU void* casts amount to no-ops since our void* type is i8*, but this is
// future proof.
- if (elementType->isVoidType() || elementType->isFunctionType()) {
- Value *result = CGF.EmitCastToVoidPtr(pointer);
- result = CGF.Builder.CreateGEP(CGF.Int8Ty, result, index, "add.ptr");
- return CGF.Builder.CreateBitCast(result, pointer->getType());
- }
+ if (elementType->isVoidType() || elementType->isFunctionType())
+ return CGF.Builder.CreateGEP(CGF.Int8Ty, pointer, index, "add.ptr");
llvm::Type *elemTy = CGF.ConvertTypeForMem(elementType);
if (CGF.getLangOpts().isSignedOverflowDefined())
@@ -3734,8 +3739,6 @@ static Value *emitPointerArithmetic(CodeGenFunction &CGF,
static Value* buildFMulAdd(llvm::Instruction *MulOp, Value *Addend,
const CodeGenFunction &CGF, CGBuilderTy &Builder,
bool negMul, bool negAdd) {
- assert(!(negMul && negAdd) && "Only one of negMul and negAdd should be set.");
-
Value *MulOp0 = MulOp->getOperand(0);
Value *MulOp1 = MulOp->getOperand(1);
if (negMul)
@@ -3780,31 +3783,70 @@ static Value* tryEmitFMulAdd(const BinOpInfo &op,
if (!op.FPFeatures.allowFPContractWithinStatement())
return nullptr;
+ Value *LHS = op.LHS;
+ Value *RHS = op.RHS;
+
+ // Peek through fneg to look for fmul. Make sure fneg has no users, and that
+ // it is the only use of its operand.
+ bool NegLHS = false;
+ if (auto *LHSUnOp = dyn_cast<llvm::UnaryOperator>(LHS)) {
+ if (LHSUnOp->getOpcode() == llvm::Instruction::FNeg &&
+ LHSUnOp->use_empty() && LHSUnOp->getOperand(0)->hasOneUse()) {
+ LHS = LHSUnOp->getOperand(0);
+ NegLHS = true;
+ }
+ }
+
+ bool NegRHS = false;
+ if (auto *RHSUnOp = dyn_cast<llvm::UnaryOperator>(RHS)) {
+ if (RHSUnOp->getOpcode() == llvm::Instruction::FNeg &&
+ RHSUnOp->use_empty() && RHSUnOp->getOperand(0)->hasOneUse()) {
+ RHS = RHSUnOp->getOperand(0);
+ NegRHS = true;
+ }
+ }
+
// We have a potentially fusable op. Look for a mul on one of the operands.
// Also, make sure that the mul result isn't used directly. In that case,
// there's no point creating a muladd operation.
- if (auto *LHSBinOp = dyn_cast<llvm::BinaryOperator>(op.LHS)) {
+ if (auto *LHSBinOp = dyn_cast<llvm::BinaryOperator>(LHS)) {
if (LHSBinOp->getOpcode() == llvm::Instruction::FMul &&
- LHSBinOp->use_empty())
- return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, false, isSub);
+ (LHSBinOp->use_empty() || NegLHS)) {
+ // If we looked through fneg, erase it.
+ if (NegLHS)
+ cast<llvm::Instruction>(op.LHS)->eraseFromParent();
+ return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, NegLHS, isSub);
+ }
}
- if (auto *RHSBinOp = dyn_cast<llvm::BinaryOperator>(op.RHS)) {
+ if (auto *RHSBinOp = dyn_cast<llvm::BinaryOperator>(RHS)) {
if (RHSBinOp->getOpcode() == llvm::Instruction::FMul &&
- RHSBinOp->use_empty())
- return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub, false);
+ (RHSBinOp->use_empty() || NegRHS)) {
+ // If we looked through fneg, erase it.
+ if (NegRHS)
+ cast<llvm::Instruction>(op.RHS)->eraseFromParent();
+ return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub ^ NegRHS, false);
+ }
}
- if (auto *LHSBinOp = dyn_cast<llvm::CallBase>(op.LHS)) {
+ if (auto *LHSBinOp = dyn_cast<llvm::CallBase>(LHS)) {
if (LHSBinOp->getIntrinsicID() ==
llvm::Intrinsic::experimental_constrained_fmul &&
- LHSBinOp->use_empty())
- return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, false, isSub);
+ (LHSBinOp->use_empty() || NegLHS)) {
+ // If we looked through fneg, erase it.
+ if (NegLHS)
+ cast<llvm::Instruction>(op.LHS)->eraseFromParent();
+ return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, NegLHS, isSub);
+ }
}
- if (auto *RHSBinOp = dyn_cast<llvm::CallBase>(op.RHS)) {
+ if (auto *RHSBinOp = dyn_cast<llvm::CallBase>(RHS)) {
if (RHSBinOp->getIntrinsicID() ==
llvm::Intrinsic::experimental_constrained_fmul &&
- RHSBinOp->use_empty())
- return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub, false);
+ (RHSBinOp->use_empty() || NegRHS)) {
+ // If we looked through fneg, erase it.
+ if (NegRHS)
+ cast<llvm::Instruction>(op.RHS)->eraseFromParent();
+ return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub ^ NegRHS, false);
+ }
}
return nullptr;
@@ -5098,7 +5140,7 @@ LValue CodeGenFunction::EmitObjCIsaExpr(const ObjCIsaExpr *E) {
}
// Cast the address to Class*.
- Addr = Builder.CreateElementBitCast(Addr, ConvertType(E->getType()));
+ Addr = Addr.withElementType(ConvertType(E->getType()));
return MakeAddrLValue(Addr, E->getType());
}