author    Dimitry Andric <dim@FreeBSD.org>  2023-12-09 13:28:42 +0000
committer Dimitry Andric <dim@FreeBSD.org>  2023-12-09 13:28:42 +0000
commit    b1c73532ee8997fe5dfbeb7d223027bdf99758a0 (patch)
tree      7d6e51c294ab6719475d660217aa0c0ad0526292 /clang/lib/CodeGen/CGExprScalar.cpp
parent    7fa27ce4a07f19b07799a767fc29416f3b625afb (diff)
Diffstat (limited to 'clang/lib/CodeGen/CGExprScalar.cpp')
-rw-r--r--  clang/lib/CodeGen/CGExprScalar.cpp | 80
1 file changed, 44 insertions(+), 36 deletions(-)
diff --git a/clang/lib/CodeGen/CGExprScalar.cpp b/clang/lib/CodeGen/CGExprScalar.cpp
index fe1a59b21f38..41ad2ddac30d 100644
--- a/clang/lib/CodeGen/CGExprScalar.cpp
+++ b/clang/lib/CodeGen/CGExprScalar.cpp
@@ -1798,7 +1798,7 @@ Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
// careful, because the base of a vector subscript is occasionally an rvalue,
// so we can't get it as an lvalue.
if (!E->getBase()->getType()->isVectorType() &&
- !E->getBase()->getType()->isVLSTBuiltinType())
+ !E->getBase()->getType()->isSveVLSBuiltinType())
return EmitLoadOfLValue(E);
// Handle the vector case. The base must be a vector, the index must be an
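
[Note: a minimal sketch of source that reaches this fixed-length SVE path; assumes AArch64 with -msve-vector-bits=256, and the names are illustrative:]

    #include <arm_sve.h>
    typedef svint32_t fixed_i32 __attribute__((arm_sve_vector_bits(256)));
    // Subscripting a fixed-length SVE vector (isSveVLSBuiltinType) is
    // lowered like a generic vector subscript: an extractelement, not a
    // load through an lvalue.
    int first_lane(fixed_i32 v) { return v[0]; }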
@@ -2084,11 +2084,10 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
Value *Src = Visit(const_cast<Expr*>(E));
llvm::Type *SrcTy = Src->getType();
llvm::Type *DstTy = ConvertType(DestTy);
- if (SrcTy->isPtrOrPtrVectorTy() && DstTy->isPtrOrPtrVectorTy() &&
- SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace()) {
- llvm_unreachable("wrong cast for pointers in different address spaces"
- "(must be an address space cast)!");
- }
+ assert(
+ (!SrcTy->isPtrOrPtrVectorTy() || !DstTy->isPtrOrPtrVectorTy() ||
+ SrcTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace()) &&
+ "Address-space cast must be used to convert address spaces");
if (CGF.SanOpts.has(SanitizerKind::CFIUnrelatedCast)) {
if (auto *PT = DestTy->getAs<PointerType>()) {
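
[Note: the new assert is the logical negation of the removed if-condition; a shorthand sketch of the equivalence:]

    // Let A = SrcTy->isPtrOrPtrVectorTy(), B = DstTy->isPtrOrPtrVectorTy(),
    // and C = (source and destination address spaces are equal).
    // The old code trapped when  A && B && !C.
    // The assert requires the negation of that:
    //   !(A && B && !C)  ==  !A || !B || C
    // which is exactly the condition written in the hunk above.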
@@ -2225,16 +2224,8 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
return Visit(const_cast<Expr*>(E));
case CK_NoOp: {
- llvm::Value *V = Visit(const_cast<Expr *>(E));
- if (V) {
- // CK_NoOp can model a pointer qualification conversion, which can remove
- // an array bound and change the IR type.
- // FIXME: Once pointee types are removed from IR, remove this.
- llvm::Type *T = ConvertType(DestTy);
- if (T != V->getType())
- V = Builder.CreateBitCast(V, T);
- }
- return V;
+ return CE->changesVolatileQualification() ? EmitLoadOfLValue(CE)
+ : Visit(const_cast<Expr *>(E));
}
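
[Note: a hedged reading of this hunk, not taken verbatim from the commit. With opaque pointers, ConvertType(DestTy) can no longer differ from the operand's IR type, so the old bitcast is dead code:]

    // Sketch: under typed pointers a CK_NoOp qualification conversion
    // could change the IR type, e.g. dropping an array bound:
    //   %q = bitcast [4 x i32]* %p to i32*
    // With opaque pointers both sides are plain 'ptr', so the only case
    // needing real work is a cast that changes volatile qualification,
    // which must be re-emitted as a genuine (volatile) load, hence
    // EmitLoadOfLValue(CE).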
case CK_BaseToDerived: {
@@ -2580,7 +2571,7 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
// For atomic bool increment, we just store true and return it for
// preincrement, do an atomic swap with true for postincrement
return Builder.CreateAtomicRMW(
- llvm::AtomicRMWInst::Xchg, LV.getPointer(CGF), True,
+ llvm::AtomicRMWInst::Xchg, LV.getAddress(CGF), True,
llvm::AtomicOrdering::SequentiallyConsistent);
}
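
[Note: a minimal C sketch of the source pattern this branch lowers; the ordering shown is the seq_cst default used here:]

    _Atomic _Bool b;
    _Bool old = b++;  // post-increment: atomic swap with true, roughly
                      //   atomicrmw xchg ptr %b.addr, i8 1 seq_cst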
// Special case for atomic increment / decrement on integers, emit
@@ -2598,7 +2589,7 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
llvm::Value *amt = CGF.EmitToMemory(
llvm::ConstantInt::get(ConvertType(type), 1, true), type);
llvm::Value *old =
- Builder.CreateAtomicRMW(aop, LV.getPointer(CGF), amt,
+ Builder.CreateAtomicRMW(aop, LV.getAddress(CGF), amt,
llvm::AtomicOrdering::SequentiallyConsistent);
return isPre ? Builder.CreateBinOp(op, old, amt) : old;
}
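
[Note: the matching sketch for atomic integer increment/decrement; a single RMW instruction, with the pre-increment result derived from the returned old value:]

    _Atomic int n;
    int post = n++;  // atomicrmw add ptr %n.addr, i32 1 seq_cst -> old value
    int pre  = ++n;  // same RMW; the emitter then computes old + 1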
@@ -2764,8 +2755,8 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
amt = llvm::ConstantFP::get(VMContext,
llvm::APFloat(static_cast<double>(amount)));
else {
- // Remaining types are Half, LongDouble, __ibm128 or __float128. Convert
- // from float.
+ // Remaining types are Half, Bfloat16, LongDouble, __ibm128 or __float128.
+ // Convert from float.
llvm::APFloat F(static_cast<float>(amount));
bool ignored;
const llvm::fltSemantics *FS;
@@ -2775,6 +2766,8 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
FS = &CGF.getTarget().getFloat128Format();
else if (value->getType()->isHalfTy())
FS = &CGF.getTarget().getHalfFormat();
+ else if (value->getType()->isBFloatTy())
+ FS = &CGF.getTarget().getBFloat16Format();
else if (value->getType()->isPPC_FP128Ty())
FS = &CGF.getTarget().getIbm128Format();
else
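
[Note: a hedged sketch of the conversion this enables for __bf16 increments, mirroring the APFloat usage in the surrounding code:]

    #include "llvm/ADT/APFloat.h"
    // Convert the step value 1.0f into bfloat16 semantics, as the
    // surrounding code does once FS points at getBFloat16Format():
    llvm::APFloat F(1.0f);
    bool ignored;
    F.convert(llvm::APFloat::BFloat(), llvm::APFloat::rmTowardZero, &ignored);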
@@ -2928,7 +2921,7 @@ Value *ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) {
// Perform vector logical not on comparison with zero vector.
if (E->getType()->isVectorType() &&
E->getType()->castAs<VectorType>()->getVectorKind() ==
- VectorType::GenericVector) {
+ VectorKind::Generic) {
Value *Oper = Visit(E->getSubExpr());
Value *Zero = llvm::Constant::getNullValue(Oper->getType());
Value *Result;
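
[Note: a minimal sketch of a generic-vector logical not, using the GCC vector extension this branch covers:]

    typedef int v4si __attribute__((vector_size(16)));
    v4si lnot(v4si x) {
      return !x;  // emitted as an elementwise compare against the zero
                  // vector, then sign-extended back to the vector type
    }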
@@ -3050,9 +3043,10 @@ Value *
ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr(
const UnaryExprOrTypeTraitExpr *E) {
QualType TypeToSize = E->getTypeOfArgument();
- if (E->getKind() == UETT_SizeOf) {
+ if (auto Kind = E->getKind();
+ Kind == UETT_SizeOf || Kind == UETT_DataSizeOf) {
if (const VariableArrayType *VAT =
- CGF.getContext().getAsVariableArrayType(TypeToSize)) {
+ CGF.getContext().getAsVariableArrayType(TypeToSize)) {
if (E->isArgumentType()) {
// sizeof(type) - make sure to emit the VLA size.
CGF.EmitVariablyModifiedType(TypeToSize);
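
[Note: UETT_DataSizeOf backs Clang's __datasizeof extension; a hedged sketch, exact values depend on the ABI:]

    struct S { int i; char c; };
    // Typically sizeof(struct S) == 8 while __datasizeof(struct S) == 5:
    // the data size excludes tail padding. It goes through the same
    // constant/VLA logic as sizeof, per the hunk above.
    unsigned long ds = __datasizeof(struct S);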
@@ -3079,6 +3073,9 @@ ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr(
E->getTypeOfArgument()->getPointeeType()))
.getQuantity();
return llvm::ConstantInt::get(CGF.SizeTy, Alignment);
+ } else if (E->getKind() == UETT_VectorElements) {
+ auto *VecTy = cast<llvm::VectorType>(ConvertType(E->getTypeOfArgument()));
+ return Builder.CreateElementCount(CGF.SizeTy, VecTy->getElementCount());
}
// If this isn't sizeof(vla), the result must be constant; use the constant
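
[Note: a sketch of __builtin_vectorelements, which UETT_VectorElements models; CreateElementCount folds to a constant for fixed vectors and emits vscale math for scalable ones:]

    typedef int v4si __attribute__((vector_size(16)));
    int n = __builtin_vectorelements(v4si);  // constant 4
    // For a scalable type such as svint32_t the result is vscale x 4,
    // computed at run time via llvm.vscale.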
@@ -3317,7 +3314,7 @@ LValue ScalarExprEmitter::EmitCompoundAssignLValue(
E->getExprLoc()),
LHSTy);
Value *OldVal = Builder.CreateAtomicRMW(
- AtomicOp, LHSLV.getPointer(CGF), Amt,
+ AtomicOp, LHSLV.getAddress(CGF), Amt,
llvm::AtomicOrdering::SequentiallyConsistent);
// Since operation is atomic, the result type is guaranteed to be the
@@ -3688,8 +3685,8 @@ static Value *emitPointerArithmetic(CodeGenFunction &CGF,
index = CGF.Builder.CreateMul(index, objectSize);
- Value *result = CGF.Builder.CreateBitCast(pointer, CGF.VoidPtrTy);
- result = CGF.Builder.CreateGEP(CGF.Int8Ty, result, index, "add.ptr");
+ Value *result =
+ CGF.Builder.CreateGEP(CGF.Int8Ty, pointer, index, "add.ptr");
return CGF.Builder.CreateBitCast(result, pointer->getType());
}
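
[Note: under opaque pointers the casts around the i8 GEP are no-ops, so the GEP can take the pointer operand directly; an IR sketch in comments:]

    // Before: %0       = bitcast %T* %p to i8*
    //         %add.ptr = getelementptr i8, i8* %0, i64 %scaled
    // After:  %add.ptr = getelementptr i8, ptr %p, i64 %scaled
    // The trailing CreateBitCast back to pointer->getType() likewise
    // folds away once all pointers are 'ptr'.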
@@ -3719,10 +3716,12 @@ static Value *emitPointerArithmetic(CodeGenFunction &CGF,
// Explicitly handle GNU void* and function pointer arithmetic extensions. The
// GNU void* casts amount to no-ops since our void* type is i8*, but this is
// future proof.
+ llvm::Type *elemTy;
if (elementType->isVoidType() || elementType->isFunctionType())
- return CGF.Builder.CreateGEP(CGF.Int8Ty, pointer, index, "add.ptr");
+ elemTy = CGF.Int8Ty;
+ else
+ elemTy = CGF.ConvertTypeForMem(elementType);
- llvm::Type *elemTy = CGF.ConvertTypeForMem(elementType);
if (CGF.getLangOpts().isSignedOverflowDefined())
return CGF.Builder.CreateGEP(elemTy, pointer, index, "add.ptr");
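
[Note: a minimal C sketch of the GNU extension kept future-proof here; void* and function-pointer arithmetic advance one byte per unit:]

    void *bump(void *p, long n) {
      return p + n;  // GNU C extension: getelementptr i8, ptr %p, i64 %n
    }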
@@ -3872,6 +3871,14 @@ Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &op) {
}
}
+ // For vector and matrix adds, try to fold into a fmuladd.
+ if (op.LHS->getType()->isFPOrFPVectorTy()) {
+ CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
+ // Try to form an fmuladd.
+ if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder))
+ return FMulAdd;
+ }
+
if (op.Ty->isConstantMatrixType()) {
llvm::MatrixBuilder MB(Builder);
CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
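
[Note: hoisting the check above the matrix block lets vector and matrix adds contract into fmuladd too; a hedged scalar sketch, assuming contraction is enabled (e.g. -ffp-contract=on):]

    float madd(float a, float b, float c) {
      return a * b + c;  // may lower to: call float @llvm.fmuladd.f32(a, b, c)
    }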
@@ -3885,10 +3892,6 @@ Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &op) {
if (op.LHS->getType()->isFPOrFPVectorTy()) {
CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
- // Try to form an fmuladd.
- if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder))
- return FMulAdd;
-
return Builder.CreateFAdd(op.LHS, op.RHS, "add");
}
@@ -4022,6 +4025,14 @@ Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) {
}
}
+ // For vector and matrix subs, try to fold into a fmuladd.
+ if (op.LHS->getType()->isFPOrFPVectorTy()) {
+ CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
+ // Try to form an fmuladd.
+ if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder, true))
+ return FMulAdd;
+ }
+
if (op.Ty->isConstantMatrixType()) {
llvm::MatrixBuilder MB(Builder);
CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
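
[Note: the subtraction counterpart passes isSub=true so tryEmitFMulAdd negates the addend; same contraction assumption as above:]

    float msub(float a, float b, float c) {
      return a * b - c;  // may lower to: call float @llvm.fmuladd.f32(a, b, -c)
    }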
@@ -4035,9 +4046,6 @@ Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) {
if (op.LHS->getType()->isFPOrFPVectorTy()) {
CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
- // Try to form an fmuladd.
- if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder, true))
- return FMulAdd;
return Builder.CreateFSub(op.LHS, op.RHS, "sub");
}
@@ -4856,7 +4864,7 @@ VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
}
if (condExpr->getType()->isVectorType() ||
- condExpr->getType()->isVLSTBuiltinType()) {
+ condExpr->getType()->isSveVLSBuiltinType()) {
CGF.incrementProfileCounter(E);
llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
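
[Note: a sketch of the vector conditional this branch handles; the condition is tested elementwise and used as a select mask. The same path now also covers fixed-length SVE (isSveVLSBuiltinType) operands:]

    typedef int v4si __attribute__((vector_size(16)));
    v4si pick(v4si c, v4si a, v4si b) {
      return c ? a : b;  // elementwise: select (c != 0) ? a : b
    }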