Diffstat (limited to 'contrib/llvm/lib/Analysis/ConstantFolding.cpp')
-rw-r--r--  contrib/llvm/lib/Analysis/ConstantFolding.cpp  149
1 file changed, 89 insertions(+), 60 deletions(-)
diff --git a/contrib/llvm/lib/Analysis/ConstantFolding.cpp b/contrib/llvm/lib/Analysis/ConstantFolding.cpp
index e88b8f14d54e..c5281c57bc19 100644
--- a/contrib/llvm/lib/Analysis/ConstantFolding.cpp
+++ b/contrib/llvm/lib/Analysis/ConstantFolding.cpp
@@ -286,7 +286,7 @@ bool llvm::IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV,
APInt &Offset, const DataLayout &DL) {
// Trivial case, constant is the global.
if ((GV = dyn_cast<GlobalValue>(C))) {
- unsigned BitWidth = DL.getPointerTypeSizeInBits(GV->getType());
+ unsigned BitWidth = DL.getIndexTypeSizeInBits(GV->getType());
Offset = APInt(BitWidth, 0);
return true;
}
@@ -305,7 +305,7 @@ bool llvm::IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV,
if (!GEP)
return false;
- unsigned BitWidth = DL.getPointerTypeSizeInBits(GEP->getType());
+ unsigned BitWidth = DL.getIndexTypeSizeInBits(GEP->getType());
APInt TmpOffset(BitWidth, 0);
// If the base isn't a global+constant, we aren't either.
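[Note on the two hunks above] The offset width used for IsConstantOffsetFromGlobal changes from the pointer's storage size to its index width. On most targets the two agree, but a datalayout may declare a narrower index type (e.g. fat-pointer targets), and GEP offset arithmetic must use that width. A minimal sketch of the distinction; the datalayout string and helper name are illustrative, not part of the patch:

    #include "llvm/IR/DataLayout.h"
    #include "llvm/IR/LLVMContext.h"
    #include "llvm/IR/Type.h"
    #include <cassert>

    using namespace llvm;

    // Hypothetical target: 64-bit pointers, 32-bit address arithmetic.
    unsigned offsetWidthFor(LLVMContext &Ctx) {
      DataLayout DL("p:64:64:64:32");         // size:abi:pref:index
      Type *PtrTy = Type::getInt8PtrTy(Ctx);
      assert(DL.getPointerTypeSizeInBits(PtrTy) == 64); // storage width
      return DL.getIndexTypeSizeInBits(PtrTy);          // 32: offset width
    }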
@@ -320,6 +320,41 @@ bool llvm::IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV,
return true;
}
+Constant *llvm::ConstantFoldLoadThroughBitcast(Constant *C, Type *DestTy,
+ const DataLayout &DL) {
+ do {
+ Type *SrcTy = C->getType();
+
+ // If the type sizes are the same and a cast is legal, just directly
+ // cast the constant.
+ if (DL.getTypeSizeInBits(DestTy) == DL.getTypeSizeInBits(SrcTy)) {
+ Instruction::CastOps Cast = Instruction::BitCast;
+ // If we are going from a pointer to int or vice versa, we spell the cast
+ // differently.
+ if (SrcTy->isIntegerTy() && DestTy->isPointerTy())
+ Cast = Instruction::IntToPtr;
+ else if (SrcTy->isPointerTy() && DestTy->isIntegerTy())
+ Cast = Instruction::PtrToInt;
+
+ if (CastInst::castIsValid(Cast, C, DestTy))
+ return ConstantExpr::getCast(Cast, C, DestTy);
+ }
+
+ // If this isn't an aggregate type, there is nothing we can do to drill down
+ // and find a bitcastable constant.
+ if (!SrcTy->isAggregateType())
+ return nullptr;
+
+ // We're simulating a load through a pointer that was bitcast to point to
+ // a different type, so we can try to walk down through the initial
+ // elements of an aggregate to see if some part of the aggregate is
+ // castable to implement the "load" semantic model.
+ C = C->getAggregateElement(0u);
+ } while (C);
+
+ return nullptr;
+}
+
namespace {
/// Recursive helper to read bits out of global. C is the constant being copied
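[Note] The hoisted llvm::ConstantFoldLoadThroughBitcast above walks into element 0 of nested aggregates until it finds a leaf whose size matches the destination type, then emits the appropriately spelled cast. A usage sketch, assuming the header exports the helper as in this patch; the constant values are made up for illustration:

    #include "llvm/Analysis/ConstantFolding.h"
    #include "llvm/IR/Constants.h"
    #include "llvm/IR/DataLayout.h"
    #include "llvm/IR/LLVMContext.h"

    using namespace llvm;

    // Simulate loading an i32 through a constant of type { { i32, i8 }, i64 }:
    // the walk is Outer -> element 0 -> element 0 -> i32.
    Constant *foldExample(LLVMContext &Ctx, const DataLayout &DL) {
      Type *I32 = Type::getInt32Ty(Ctx);
      Type *I8  = Type::getInt8Ty(Ctx);
      Type *I64 = Type::getInt64Ty(Ctx);
      Constant *Inner = ConstantStruct::get(StructType::get(I32, I8),
                                            ConstantInt::get(I32, 42),
                                            ConstantInt::get(I8, 7));
      Constant *Outer = ConstantStruct::get(
          StructType::get(Inner->getType(), I64), Inner,
          ConstantInt::get(I64, 0));
      return ConstantFoldLoadThroughBitcast(Outer, I32, DL); // i32 42
    }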
@@ -537,8 +572,8 @@ Constant *FoldReinterpretLoadFromConstPtr(Constant *C, Type *LoadTy,
return ConstantInt::get(IntType->getContext(), ResultVal);
}
-Constant *ConstantFoldLoadThroughBitcast(ConstantExpr *CE, Type *DestTy,
- const DataLayout &DL) {
+Constant *ConstantFoldLoadThroughBitcastExpr(ConstantExpr *CE, Type *DestTy,
+ const DataLayout &DL) {
auto *SrcPtr = CE->getOperand(0);
auto *SrcPtrTy = dyn_cast<PointerType>(SrcPtr->getType());
if (!SrcPtrTy)
@@ -549,37 +584,7 @@ Constant *ConstantFoldLoadThroughBitcast(ConstantExpr *CE, Type *DestTy,
if (!C)
return nullptr;
- do {
- Type *SrcTy = C->getType();
-
- // If the type sizes are the same and a cast is legal, just directly
- // cast the constant.
- if (DL.getTypeSizeInBits(DestTy) == DL.getTypeSizeInBits(SrcTy)) {
- Instruction::CastOps Cast = Instruction::BitCast;
- // If we are going from a pointer to int or vice versa, we spell the cast
- // differently.
- if (SrcTy->isIntegerTy() && DestTy->isPointerTy())
- Cast = Instruction::IntToPtr;
- else if (SrcTy->isPointerTy() && DestTy->isIntegerTy())
- Cast = Instruction::PtrToInt;
-
- if (CastInst::castIsValid(Cast, C, DestTy))
- return ConstantExpr::getCast(Cast, C, DestTy);
- }
-
- // If this isn't an aggregate type, there is nothing we can do to drill down
- // and find a bitcastable constant.
- if (!SrcTy->isAggregateType())
- return nullptr;
-
- // We're simulating a load through a pointer that was bitcast to point to
- // a different type, so we can try to walk down through the initial
- // elements of an aggregate to see if some part of the aggregate is
- // castable to implement the "load" semantic model.
- C = C->getAggregateElement(0u);
- } while (C);
-
- return nullptr;
+ return llvm::ConstantFoldLoadThroughBitcast(C, DestTy, DL);
}
} // end anonymous namespace
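[Note] After the rename, ConstantFoldLoadThroughBitcastExpr is only a thin wrapper: it resolves the bitcast's source pointer to the global's constant initializer and delegates to the shared helper. An end-to-end sketch of the path it feeds (the module setup and names are illustrative):

    #include "llvm/Analysis/ConstantFolding.h"
    #include "llvm/IR/Constants.h"
    #include "llvm/IR/GlobalVariable.h"
    #include "llvm/IR/Module.h"

    using namespace llvm;

    // With @g = internal constant {i32, i32} {i32 1, i32 2}, a load of i32
    // through bitcast({i32,i32}* @g to i32*) folds to i32 1.
    Constant *loadThroughBitcast(Module &M) {
      LLVMContext &Ctx = M.getContext();
      Type *I32 = Type::getInt32Ty(Ctx);
      StructType *STy = StructType::get(I32, I32);
      Constant *Init = ConstantStruct::get(STy, ConstantInt::get(I32, 1),
                                           ConstantInt::get(I32, 2));
      auto *GV = new GlobalVariable(M, STy, /*isConstant=*/true,
                                    GlobalValue::InternalLinkage, Init, "g");
      Constant *Cast = ConstantExpr::getBitCast(GV, I32->getPointerTo());
      return ConstantFoldLoadFromConstPtr(Cast, I32, M.getDataLayout());
    }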
@@ -611,7 +616,7 @@ Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty,
}
if (CE->getOpcode() == Instruction::BitCast)
- if (Constant *LoadedC = ConstantFoldLoadThroughBitcast(CE, Ty, DL))
+ if (Constant *LoadedC = ConstantFoldLoadThroughBitcastExpr(CE, Ty, DL))
return LoadedC;
// Instead of loading constant c string, use corresponding integer value
@@ -808,26 +813,26 @@ Constant *SymbolicallyEvaluateGEP(const GEPOperator *GEP,
// If this is a constant expr gep that is effectively computing an
// "offsetof", fold it into 'cast int Size to T*' instead of 'gep 0, 0, 12'
for (unsigned i = 1, e = Ops.size(); i != e; ++i)
-    if (!isa<ConstantInt>(Ops[i])) {
-
-      // If this is "gep i8* Ptr, (sub 0, V)", fold this as:
-      // "inttoptr (sub (ptrtoint Ptr), V)"
-      if (Ops.size() == 2 && ResElemTy->isIntegerTy(8)) {
-        auto *CE = dyn_cast<ConstantExpr>(Ops[1]);
-        assert((!CE || CE->getType() == IntPtrTy) &&
-               "CastGEPIndices didn't canonicalize index types!");
-        if (CE && CE->getOpcode() == Instruction::Sub &&
-            CE->getOperand(0)->isNullValue()) {
-          Constant *Res = ConstantExpr::getPtrToInt(Ptr, CE->getType());
-          Res = ConstantExpr::getSub(Res, CE->getOperand(1));
-          Res = ConstantExpr::getIntToPtr(Res, ResTy);
-          if (auto *FoldedRes = ConstantFoldConstant(Res, DL, TLI))
-            Res = FoldedRes;
-          return Res;
+    if (!isa<ConstantInt>(Ops[i])) {
+
+      // If this is "gep i8* Ptr, (sub 0, V)", fold this as:
+      // "inttoptr (sub (ptrtoint Ptr), V)"
+      if (Ops.size() == 2 && ResElemTy->isIntegerTy(8)) {
+        auto *CE = dyn_cast<ConstantExpr>(Ops[1]);
+        assert((!CE || CE->getType() == IntPtrTy) &&
+               "CastGEPIndices didn't canonicalize index types!");
+        if (CE && CE->getOpcode() == Instruction::Sub &&
+            CE->getOperand(0)->isNullValue()) {
+          Constant *Res = ConstantExpr::getPtrToInt(Ptr, CE->getType());
+          Res = ConstantExpr::getSub(Res, CE->getOperand(1));
+          Res = ConstantExpr::getIntToPtr(Res, ResTy);
+          if (auto *FoldedRes = ConstantFoldConstant(Res, DL, TLI))
+            Res = FoldedRes;
+          return Res;
+        }
      }
+      return nullptr;
    }
-      return nullptr;
-    }
unsigned BitWidth = DL.getTypeSizeInBits(IntPtrTy);
APInt Offset =
@@ -1387,6 +1392,8 @@ bool llvm::canConstantFoldCallTo(ImmutableCallSite CS, const Function *F) {
case Intrinsic::fma:
case Intrinsic::fmuladd:
case Intrinsic::copysign:
+ case Intrinsic::launder_invariant_group:
+ case Intrinsic::strip_invariant_group:
case Intrinsic::round:
case Intrinsic::masked_load:
case Intrinsic::sadd_with_overflow:
@@ -1582,16 +1589,37 @@ double getValueAsDouble(ConstantFP *Op) {
Constant *ConstantFoldScalarCall(StringRef Name, unsigned IntrinsicID, Type *Ty,
ArrayRef<Constant *> Operands,
- const TargetLibraryInfo *TLI) {
+ const TargetLibraryInfo *TLI,
+ ImmutableCallSite CS) {
if (Operands.size() == 1) {
if (isa<UndefValue>(Operands[0])) {
// cosine(arg) is between -1 and 1. cosine(invalid arg) is NaN
if (IntrinsicID == Intrinsic::cos)
return Constant::getNullValue(Ty);
if (IntrinsicID == Intrinsic::bswap ||
- IntrinsicID == Intrinsic::bitreverse)
+ IntrinsicID == Intrinsic::bitreverse ||
+ IntrinsicID == Intrinsic::launder_invariant_group ||
+ IntrinsicID == Intrinsic::strip_invariant_group)
return Operands[0];
}
+
+ if (isa<ConstantPointerNull>(Operands[0])) {
+ // launder(null) == null == strip(null) iff in addrspace 0
+ if (IntrinsicID == Intrinsic::launder_invariant_group ||
+ IntrinsicID == Intrinsic::strip_invariant_group) {
+ // If instruction is not yet put in a basic block (e.g. when cloning
+ // a function during inlining), CS caller may not be available.
+ // So check CS's BB first before querying CS.getCaller.
+ const Function *Caller = CS.getParent() ? CS.getCaller() : nullptr;
+ if (Caller &&
+ !NullPointerIsDefined(
+ Caller, Operands[0]->getType()->getPointerAddressSpace())) {
+ return Operands[0];
+ }
+ return nullptr;
+ }
+ }
+
if (auto *Op = dyn_cast<ConstantFP>(Operands[0])) {
if (IntrinsicID == Intrinsic::convert_to_fp16) {
APFloat Val(Op->getValueAPF());
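[Note on the hunk above] Two folding rules are added for the invariant-group intrinsics: launder/strip of undef stays undef, and launder/strip of null folds to null only when a null dereference would be undefined behavior for the caller (address space 0, no "null-pointer-is-valid" attribute); otherwise the fold conservatively bails. A minimal restatement of the null rule, with an illustrative helper name:

    #include "llvm/IR/Function.h"

    using namespace llvm;

    // Folding launder/strip(null) to null is sound exactly when null is
    // *not* a defined pointer for the caller in that address space.
    bool canFoldInvariantGroupNull(const Function &Caller, unsigned AS) {
      return !NullPointerIsDefined(&Caller, AS);
    }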
@@ -1988,7 +2016,8 @@ Constant *ConstantFoldScalarCall(StringRef Name, unsigned IntrinsicID, Type *Ty,
Constant *ConstantFoldVectorCall(StringRef Name, unsigned IntrinsicID,
VectorType *VTy, ArrayRef<Constant *> Operands,
const DataLayout &DL,
- const TargetLibraryInfo *TLI) {
+ const TargetLibraryInfo *TLI,
+ ImmutableCallSite CS) {
SmallVector<Constant *, 4> Result(VTy->getNumElements());
SmallVector<Constant *, 4> Lane(Operands.size());
Type *Ty = VTy->getElementType();
@@ -2051,7 +2080,7 @@ Constant *ConstantFoldVectorCall(StringRef Name, unsigned IntrinsicID,
}
// Use the regular scalar folding to simplify this column.
- Constant *Folded = ConstantFoldScalarCall(Name, IntrinsicID, Ty, Lane, TLI);
+ Constant *Folded = ConstantFoldScalarCall(Name, IntrinsicID, Ty, Lane, TLI, CS);
if (!Folded)
return nullptr;
Result[I] = Folded;
@@ -2076,9 +2105,9 @@ llvm::ConstantFoldCall(ImmutableCallSite CS, Function *F,
if (auto *VTy = dyn_cast<VectorType>(Ty))
return ConstantFoldVectorCall(Name, F->getIntrinsicID(), VTy, Operands,
- F->getParent()->getDataLayout(), TLI);
+ F->getParent()->getDataLayout(), TLI, CS);
- return ConstantFoldScalarCall(Name, F->getIntrinsicID(), Ty, Operands, TLI);
+ return ConstantFoldScalarCall(Name, F->getIntrinsicID(), Ty, Operands, TLI, CS);
}
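[Note] The new ImmutableCallSite parameter is threaded down from llvm::ConstantFoldCall through the vector and scalar folders solely so the null-pointer check above can reach the caller's function attributes. A hedged usage sketch of the public entry point; the helper name and setup are illustrative, not part of the patch:

    #include "llvm/ADT/SmallVector.h"
    #include "llvm/Analysis/ConstantFolding.h"
    #include "llvm/IR/CallSite.h"
    #include "llvm/IR/Instructions.h"

    using namespace llvm;

    // Try to fold a direct call whose arguments are all constants.
    Constant *foldIfConstant(CallInst *I, const TargetLibraryInfo *TLI) {
      ImmutableCallSite CS(I);
      Function *F = I->getCalledFunction();
      if (!F)
        return nullptr; // indirect call: nothing to fold
      SmallVector<Constant *, 4> Args;
      for (Value *Op : I->arg_operands()) {
        auto *C = dyn_cast<Constant>(Op);
        if (!C)
          return nullptr; // non-constant argument
        Args.push_back(C);
      }
      return canConstantFoldCallTo(CS, F) ? ConstantFoldCall(CS, F, Args, TLI)
                                          : nullptr;
    }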
bool llvm::isMathLibCallNoop(CallSite CS, const TargetLibraryInfo *TLI) {