Diffstat (limited to 'lib/IR/ConstantFold.cpp')
-rw-r--r--  lib/IR/ConstantFold.cpp  134
1 file changed, 105 insertions(+), 29 deletions(-)
diff --git a/lib/IR/ConstantFold.cpp b/lib/IR/ConstantFold.cpp
index 57de6b042303..835fbb3443b8 100644
--- a/lib/IR/ConstantFold.cpp
+++ b/lib/IR/ConstantFold.cpp
@@ -1,9 +1,8 @@
//===- ConstantFold.cpp - LLVM constant folder ----------------------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -27,6 +26,7 @@
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/ErrorHandling.h"
@@ -268,19 +268,20 @@ static Constant *ExtractConstantBytes(Constant *C, unsigned ByteStart,
ConstantInt *Amt = dyn_cast<ConstantInt>(CE->getOperand(1));
if (!Amt)
return nullptr;
- unsigned ShAmt = Amt->getZExtValue();
+ APInt ShAmt = Amt->getValue();
// Cannot analyze non-byte shifts.
if ((ShAmt & 7) != 0)
return nullptr;
- ShAmt >>= 3;
+ ShAmt.lshrInPlace(3);
// If the extract is known to be all zeros, return zero.
- if (ByteStart >= CSize-ShAmt)
- return Constant::getNullValue(IntegerType::get(CE->getContext(),
- ByteSize*8));
+ if (ShAmt.uge(CSize - ByteStart))
+ return Constant::getNullValue(
+ IntegerType::get(CE->getContext(), ByteSize * 8));
// If the extract is known to be fully in the input, extract it.
- if (ByteStart+ByteSize+ShAmt <= CSize)
- return ExtractConstantBytes(CE->getOperand(0), ByteStart+ShAmt, ByteSize);
+ if (ShAmt.ule(CSize - (ByteStart + ByteSize)))
+ return ExtractConstantBytes(CE->getOperand(0),
+ ByteStart + ShAmt.getZExtValue(), ByteSize);
// TODO: Handle the 'partially zero' case.
return nullptr;
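The switch from ConstantInt::getZExtValue() to working on the APInt directly is what lets this path tolerate shift amounts wider than 64 bits: getZExtValue() asserts when the value does not fit in a uint64_t, while APInt comparisons such as uge()/ule() work at any width. A minimal sketch of the pattern, using a hypothetical i128 shift amount and assuming an LLVMContext Ctx is in scope:

    ConstantInt *Amt = ConstantInt::get(Ctx, APInt(128, 1).shl(100));
    APInt ShAmt = Amt->getValue();   // safe at any bit width
    if ((ShAmt & 7) == 0) {
      ShAmt.lshrInPlace(3);          // bits -> bytes without truncation
      // With uge()/ule(), an oversized amount simply falls into the
      // "extract is all zeros" case instead of asserting.
    }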
@@ -290,19 +291,20 @@ static Constant *ExtractConstantBytes(Constant *C, unsigned ByteStart,
ConstantInt *Amt = dyn_cast<ConstantInt>(CE->getOperand(1));
if (!Amt)
return nullptr;
- unsigned ShAmt = Amt->getZExtValue();
+ APInt ShAmt = Amt->getValue();
// Cannot analyze non-byte shifts.
if ((ShAmt & 7) != 0)
return nullptr;
- ShAmt >>= 3;
+ ShAmt.lshrInPlace(3);
// If the extract is known to be all zeros, return zero.
- if (ByteStart+ByteSize <= ShAmt)
- return Constant::getNullValue(IntegerType::get(CE->getContext(),
- ByteSize*8));
+ if (ShAmt.uge(ByteStart + ByteSize))
+ return Constant::getNullValue(
+ IntegerType::get(CE->getContext(), ByteSize * 8));
// If the extract is known to be fully in the input, extract it.
- if (ByteStart >= ShAmt)
- return ExtractConstantBytes(CE->getOperand(0), ByteStart-ShAmt, ByteSize);
+ if (ShAmt.ule(ByteStart))
+ return ExtractConstantBytes(CE->getOperand(0),
+ ByteStart - ShAmt.getZExtValue(), ByteSize);
// TODO: Handle the 'partially zero' case.
return nullptr;
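To make the byte arithmetic of the shl case concrete, a small worked example (values chosen only for illustration): with CSize = 8 bytes and a shift of 16 bits, ShAmt becomes 2 bytes after lshrInPlace(3). Extracting bytes [0, 2) then satisfies ShAmt.uge(ByteStart + ByteSize) and folds to zero, since those low bytes were shifted in as zeros; extracting bytes [4, 6) satisfies ShAmt.ule(ByteStart) and recurses into the shifted operand at bytes [2, 4).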
@@ -916,6 +918,52 @@ Constant *llvm::ConstantFoldInsertValueInstruction(Constant *Agg,
return ConstantVector::get(Result);
}
+Constant *llvm::ConstantFoldUnaryInstruction(unsigned Opcode, Constant *C) {
+ assert(Instruction::isUnaryOp(Opcode) && "Non-unary instruction detected");
+
+ // Handle scalar UndefValue. Vectors are always evaluated per element.
+ bool HasScalarUndef = !C->getType()->isVectorTy() && isa<UndefValue>(C);
+
+ if (HasScalarUndef) {
+ switch (static_cast<Instruction::UnaryOps>(Opcode)) {
+ case Instruction::FNeg:
+ return C; // -undef -> undef
+ case Instruction::UnaryOpsEnd:
+ llvm_unreachable("Invalid UnaryOp");
+ }
+ }
+
+ // Constant should not be UndefValue, unless these are vector constants.
+ assert(!HasScalarUndef && "Unexpected UndefValue");
+ // We only have FP UnaryOps right now.
+ assert(!isa<ConstantInt>(C) && "Unexpected Integer UnaryOp");
+
+ if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
+ const APFloat &CV = CFP->getValueAPF();
+ switch (Opcode) {
+ default:
+ break;
+ case Instruction::FNeg:
+ return ConstantFP::get(C->getContext(), neg(CV));
+ }
+ } else if (VectorType *VTy = dyn_cast<VectorType>(C->getType())) {
+ // Fold each element and create a vector constant from those constants.
+ SmallVector<Constant*, 16> Result;
+ Type *Ty = IntegerType::get(VTy->getContext(), 32);
+ for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) {
+ Constant *ExtractIdx = ConstantInt::get(Ty, i);
+ Constant *Elt = ConstantExpr::getExtractElement(C, ExtractIdx);
+
+ Result.push_back(ConstantExpr::get(Opcode, Elt));
+ }
+
+ return ConstantVector::get(Result);
+ }
+
+ // We don't know how to fold this.
+ return nullptr;
+}
+
Constant *llvm::ConstantFoldBinaryInstruction(unsigned Opcode, Constant *C1,
Constant *C2) {
assert(Instruction::isBinaryOp(Opcode) && "Non-binary instruction detected");
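A hedged usage sketch of the new unary folder above (assuming an LLVMContext Ctx; the function itself is internal to lib/IR):

    Constant *C = ConstantFP::get(Type::getFloatTy(Ctx), 2.5);
    Constant *Folded = ConstantFoldUnaryInstruction(Instruction::FNeg, C);
    // Folded is the ConstantFP -2.5. For a vector operand the loop above
    // folds each lane via extractelement and rebuilds a ConstantVector;
    // a scalar undef folds to undef, and anything else returns nullptr
    // ("don't know how to fold this").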
@@ -1077,10 +1125,29 @@ Constant *llvm::ConstantFoldBinaryInstruction(unsigned Opcode, Constant *C1,
isa<GlobalValue>(CE1->getOperand(0))) {
GlobalValue *GV = cast<GlobalValue>(CE1->getOperand(0));
- // Functions are at least 4-byte aligned.
- unsigned GVAlign = GV->getAlignment();
- if (isa<Function>(GV))
- GVAlign = std::max(GVAlign, 4U);
+ unsigned GVAlign;
+
+ if (Module *TheModule = GV->getParent()) {
+ GVAlign = GV->getPointerAlignment(TheModule->getDataLayout());
+
+ // If the function alignment is not specified then assume that it
+ // is 4.
+ // This is dangerous; on x86, the alignment of the pointer
+ // corresponds to the alignment of the function, but might be less
+ // than 4 if it isn't explicitly specified.
+ // However, a fix for this behaviour was reverted because it
+ // increased code size (see https://reviews.llvm.org/D55115)
+ // FIXME: This code should be deleted once existing targets have
+ // appropriate defaults
+ if (GVAlign == 0U && isa<Function>(GV))
+ GVAlign = 4U;
+ } else if (isa<Function>(GV)) {
+ // Without a datalayout we have to assume the worst case: that the
+ // function pointer isn't aligned at all.
+ GVAlign = 0U;
+ } else {
+ GVAlign = GV->getAlignment();
+ }
if (GVAlign > 1) {
unsigned DstWidth = CI2->getType()->getBitWidth();
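This hunk sits in the fold of and'ing a ptrtoint of a global with a constant: when the pointer is known to be 2^k-byte aligned, its low k bits are zero, so a mask confined to those bits folds to zero. A hedged sketch of the alignment query, assuming a GlobalValue *GV with a parent Module:

    const DataLayout &DL = GV->getParent()->getDataLayout();
    unsigned GVAlign = GV->getPointerAlignment(DL); // 0 means unknown
    if (GVAlign > 1) {
      unsigned KnownZeroLowBits = Log2_32(GVAlign);
      // and(ptrtoint GV, Mask) folds to 0 whenever Mask fits entirely
      // within those KnownZeroLowBits low bits.
    }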
@@ -1360,8 +1427,9 @@ static FCmpInst::Predicate evaluateFCmpRelation(Constant *V1, Constant *V2) {
assert(V1->getType() == V2->getType() &&
"Cannot compare values of different types!");
- // Handle degenerate case quickly
- if (V1 == V2) return FCmpInst::FCMP_OEQ;
+ // We do not know if a constant expression will evaluate to a number or NaN.
+ // Therefore, we can only say that the relation is unordered or equal.
+ if (V1 == V2) return FCmpInst::FCMP_UEQ;
if (!isa<ConstantExpr>(V1)) {
if (!isa<ConstantExpr>(V2)) {
@@ -1552,7 +1620,7 @@ static ICmpInst::Predicate evaluateICmpRelation(Constant *V1, Constant *V2,
case Instruction::ZExt:
case Instruction::SExt:
// We can't evaluate floating point casts or truncations.
- if (CE1Op0->getType()->isFloatingPointTy())
+ if (CE1Op0->getType()->isFPOrFPVectorTy())
break;
// If the cast is not actually changing bits, and the second operand is a
@@ -1856,7 +1924,6 @@ Constant *llvm::ConstantFoldCompareInstruction(unsigned short pred,
default: llvm_unreachable("Unknown relation!");
case FCmpInst::FCMP_UNO:
case FCmpInst::FCMP_ORD:
- case FCmpInst::FCMP_UEQ:
case FCmpInst::FCMP_UNE:
case FCmpInst::FCMP_ULT:
case FCmpInst::FCMP_UGT:
@@ -1902,6 +1969,13 @@ Constant *llvm::ConstantFoldCompareInstruction(unsigned short pred,
else if (pred == FCmpInst::FCMP_ONE || pred == FCmpInst::FCMP_UNE)
Result = 1;
break;
+ case FCmpInst::FCMP_UEQ: // We know that C1 == C2 || isUnordered(C1, C2).
+ // We can only partially decide this relation.
+ if (pred == FCmpInst::FCMP_ONE)
+ Result = 0;
+ else if (pred == FCmpInst::FCMP_UEQ)
+ Result = 1;
+ break;
}
// If we evaluated the result, return it now.
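Given only "the relation is UEQ", the predicate can be decided in exactly two directions: FCMP_UEQ itself must be true, and its negation FCMP_ONE must be false. Every other predicate (OEQ, OLT, UGT, ...) still depends on whether the operands turn out to be ordered, so Result is left unset and no fold happens.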
@@ -1981,11 +2055,13 @@ Constant *llvm::ConstantFoldCompareInstruction(unsigned short pred,
// If the right hand side is a bitcast, try using its inverse to simplify
// it by moving it to the left hand side. We can't do this if it would turn
- // a vector compare into a scalar compare or visa versa.
+ // a vector compare into a scalar compare or vice versa, or if it would turn
+ // the operands into FP values.
if (ConstantExpr *CE2 = dyn_cast<ConstantExpr>(C2)) {
Constant *CE2Op0 = CE2->getOperand(0);
if (CE2->getOpcode() == Instruction::BitCast &&
- CE2->getType()->isVectorTy() == CE2Op0->getType()->isVectorTy()) {
+ CE2->getType()->isVectorTy() == CE2Op0->getType()->isVectorTy() &&
+ !CE2Op0->getType()->isFPOrFPVectorTy()) {
Constant *Inverse = ConstantExpr::getBitCast(C1, CE2Op0->getType());
return ConstantExpr::getICmp(pred, Inverse, CE2Op0);
}
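The added isFPOrFPVectorTy() check matters because the rewrite builds an icmp over CE2Op0's type, and icmp is only defined on integer and pointer operands (or vectors of them). A hedged illustration of the pattern being declined (types are illustrative only):

    // C2 = bitcast <2 x float> ... to <2 x i32>
    // Moving the bitcast to the other side would require
    //   icmp pred <2 x float> ..., <2 x float> ...
    // which is not a valid icmp, so the fold is simply skipped.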
@@ -2072,7 +2148,7 @@ Constant *llvm::ConstantFoldGetElementPtr(Type *PointeeTy, Constant *C,
if (Idxs.empty()) return C;
Type *GEPTy = GetElementPtrInst::getGEPReturnType(
- C, makeArrayRef((Value *const *)Idxs.data(), Idxs.size()));
+ PointeeTy, C, makeArrayRef((Value *const *)Idxs.data(), Idxs.size()));
if (isa<UndefValue>(C))
return UndefValue::get(GEPTy);
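Passing PointeeTy explicitly means the GEP's result type is computed from the type actually being indexed rather than re-derived from C's pointer type. A minimal caller-side sketch (Idxs here is assumed to be an ArrayRef<Value *> of the indices):

    Type *GEPTy = GetElementPtrInst::getGEPReturnType(PointeeTy, C, Idxs);
    // GEPTy is a pointer to the indexed element type, or a vector of such
    // pointers when C or any index is vector-typed.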