path: root/contrib/llvm-project/llvm/lib/CodeGen/CodeGenPrepare.cpp
author    Dimitry Andric <dim@FreeBSD.org>  2022-03-20 11:40:34 +0000
committer Dimitry Andric <dim@FreeBSD.org>  2022-05-14 11:43:05 +0000
commit    349cc55c9796c4596a5b9904cd3281af295f878f (patch)
tree      410c5a785075730a35f1272ca6a7adf72222ad03 /contrib/llvm-project/llvm/lib/CodeGen/CodeGenPrepare.cpp
parent    cb2ae6163174b90e999326ecec3699ee093a5d43 (diff)
parent    c0981da47d5696fe36474fcf86b4ce03ae3ff818 (diff)
Diffstat (limited to 'contrib/llvm-project/llvm/lib/CodeGen/CodeGenPrepare.cpp')
-rw-r--r--  contrib/llvm-project/llvm/lib/CodeGen/CodeGenPrepare.cpp  92
1 file changed, 42 insertions, 50 deletions
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/CodeGenPrepare.cpp b/contrib/llvm-project/llvm/lib/CodeGen/CodeGenPrepare.cpp
index 77ce3d2fb563..ac4180c4c3ab 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/CodeGenPrepare.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/CodeGenPrepare.cpp
@@ -530,10 +530,9 @@ bool CodeGenPrepare::runOnFunction(Function &F) {
while (MadeChange) {
MadeChange = false;
DT.reset();
- for (Function::iterator I = F.begin(); I != F.end(); ) {
- BasicBlock *BB = &*I++;
+ for (BasicBlock &BB : llvm::make_early_inc_range(F)) {
bool ModifiedDTOnIteration = false;
- MadeChange |= optimizeBlock(*BB, ModifiedDTOnIteration);
+ MadeChange |= optimizeBlock(BB, ModifiedDTOnIteration);
// Restart BB iteration if the dominator tree of the Function was changed
if (ModifiedDTOnIteration)
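
A minimal sketch of the llvm::make_early_inc_range idiom adopted in the hunk above: the range adapter advances the iterator before handing the current element to the loop body, so that element may be erased or split without invalidating the traversal. The helper below is illustrative only and is not part of CodeGenPrepare.

#include "llvm/ADT/STLExtras.h"
#include "llvm/IR/Function.h"
using namespace llvm;

// Run a callback over every block; the callback may erase the block it is given.
static bool forEachBlockEarlyInc(Function &F,
                                 function_ref<bool(BasicBlock &)> Process) {
  bool Changed = false;
  for (BasicBlock &BB : make_early_inc_range(F))
    Changed |= Process(BB); // BB may have been erased inside Process
  return Changed;
}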
@@ -660,12 +659,8 @@ void CodeGenPrepare::removeAllAssertingVHReferences(Value *V) {
return;
auto &GEPVector = VecI->second;
- const auto &I =
- llvm::find_if(GEPVector, [=](auto &Elt) { return Elt.first == GEP; });
- if (I == GEPVector.end())
- return;
+ llvm::erase_if(GEPVector, [=](auto &Elt) { return Elt.first == GEP; });
- GEPVector.erase(I);
if (GEPVector.empty())
LargeOffsetGEPMap.erase(VecI);
}
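
The llvm::erase_if call above folds the previous find_if/erase pair into one step; note that it removes every matching entry rather than only the first (harmless here if each GEP occurs at most once per vector). A simplified sketch, with plain types standing in for the AssertingVH pairs of the real map:

#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include <utility>

// Erase all (key, offset)-style entries whose key matches Key.
static void removeEntriesFor(llvm::SmallVectorImpl<std::pair<int, int64_t>> &Vec,
                             int Key) {
  llvm::erase_if(Vec, [=](auto &Elt) { return Elt.first == Key; });
}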
@@ -2037,7 +2032,7 @@ static bool despeculateCountZeros(IntrinsicInst *CountZeros,
// Only handle legal scalar cases. Anything else requires too much work.
Type *Ty = CountZeros->getType();
- unsigned SizeInBits = Ty->getPrimitiveSizeInBits();
+ unsigned SizeInBits = Ty->getScalarSizeInBits();
if (Ty->isVectorTy() || SizeInBits > DL->getLargestLegalIntTypeSizeInBits())
return false;
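
The switch above from getPrimitiveSizeInBits() to getScalarSizeInBits() matters because, for vector types, the former reports the width of the whole vector while the latter reports the element width that the legality check actually cares about. A hypothetical standalone version of the check, assuming the same DataLayout query:

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Type.h"
using namespace llvm;

// Accept only scalar types no wider than the largest legal integer type.
static bool isLegalScalarWidth(Type *Ty, const DataLayout &DL) {
  unsigned SizeInBits = Ty->getScalarSizeInBits();
  return !Ty->isVectorTy() && SizeInBits <= DL.getLargestLegalIntTypeSizeInBits();
}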
@@ -2108,7 +2103,7 @@ bool CodeGenPrepare::optimizeCallInst(CallInst *CI, bool &ModifiedDT) {
// idea
unsigned MinSize, PrefAlign;
if (TLI->shouldAlignPointerArgs(CI, MinSize, PrefAlign)) {
- for (auto &Arg : CI->arg_operands()) {
+ for (auto &Arg : CI->args()) {
// We want to align both objects whose address is used directly and
// objects whose address is used in casts and GEPs, though it only makes
// sense for GEPs if the offset is a multiple of the desired alignment and
@@ -2159,7 +2154,7 @@ bool CodeGenPrepare::optimizeCallInst(CallInst *CI, bool &ModifiedDT) {
// into their uses. TODO: generalize this to work over profiling data
if (CI->hasFnAttr(Attribute::Cold) &&
!OptSize && !llvm::shouldOptimizeForSize(BB, PSI, BFI.get()))
- for (auto &Arg : CI->arg_operands()) {
+ for (auto &Arg : CI->args()) {
if (!Arg->getType()->isPointerTy())
continue;
unsigned AS = Arg->getType()->getPointerAddressSpace();
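
Both hunks above migrate from the older CallBase::arg_operands() accessor to CallBase::args(), which yields the call's argument operands as Uses. A small sketch of iterating it; the counting helper is illustrative only:

#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Type.h"
using namespace llvm;

// Count how many call arguments are pointers.
static unsigned countPointerArgs(const CallBase &CB) {
  unsigned NumPtrArgs = 0;
  for (const Use &Arg : CB.args())
    if (Arg->getType()->isPointerTy())
      ++NumPtrArgs;
  return NumPtrArgs;
}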
@@ -3718,7 +3713,8 @@ private:
// Traverse all Phis until we found equivalent or fail to do that.
bool IsMatched = false;
for (auto &P : PHI->getParent()->phis()) {
- if (&P == PHI)
+ // Skip new Phi nodes.
+ if (PhiNodesToMatch.count(&P))
continue;
if ((IsMatched = MatchPhiNode(PHI, &P, Matched, PhiNodesToMatch)))
break;
@@ -4187,7 +4183,7 @@ bool TypePromotionHelper::canGetThrough(const Instruction *Inst,
if (Inst->getOpcode() == Instruction::Xor) {
const ConstantInt *Cst = dyn_cast<ConstantInt>(Inst->getOperand(1));
// Make sure it is not a NOT.
- if (Cst && !Cst->getValue().isAllOnesValue())
+ if (Cst && !Cst->getValue().isAllOnes())
return true;
}
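
isAllOnes() is the newer APInt spelling of isAllOnesValue(); the check above still asks whether the xor mask is all ones, i.e. whether the xor is really a NOT. A trivial sketch:

#include "llvm/ADT/APInt.h"

// xor X, -1 is a canonical NOT; any other mask leaves the xor promotable.
static bool xorMaskIsNot(const llvm::APInt &Mask) {
  return Mask.isAllOnes();
}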
@@ -4858,10 +4854,9 @@ static constexpr int MaxMemoryUsesToScan = 20;
/// Recursively walk all the uses of I until we find a memory use.
/// If we find an obviously non-foldable instruction, return true.
-/// Add the ultimately found memory instructions to MemoryUses.
+/// Add accessed addresses and types to MemoryUses.
static bool FindAllMemoryUses(
- Instruction *I,
- SmallVectorImpl<std::pair<Instruction *, unsigned>> &MemoryUses,
+ Instruction *I, SmallVectorImpl<std::pair<Value *, Type *>> &MemoryUses,
SmallPtrSetImpl<Instruction *> &ConsideredInsts, const TargetLowering &TLI,
const TargetRegisterInfo &TRI, bool OptSize, ProfileSummaryInfo *PSI,
BlockFrequencyInfo *BFI, int SeenInsts = 0) {
@@ -4882,31 +4877,28 @@ static bool FindAllMemoryUses(
Instruction *UserI = cast<Instruction>(U.getUser());
if (LoadInst *LI = dyn_cast<LoadInst>(UserI)) {
- MemoryUses.push_back(std::make_pair(LI, U.getOperandNo()));
+ MemoryUses.push_back({U.get(), LI->getType()});
continue;
}
if (StoreInst *SI = dyn_cast<StoreInst>(UserI)) {
- unsigned opNo = U.getOperandNo();
- if (opNo != StoreInst::getPointerOperandIndex())
+ if (U.getOperandNo() != StoreInst::getPointerOperandIndex())
return true; // Storing addr, not into addr.
- MemoryUses.push_back(std::make_pair(SI, opNo));
+ MemoryUses.push_back({U.get(), SI->getValueOperand()->getType()});
continue;
}
if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(UserI)) {
- unsigned opNo = U.getOperandNo();
- if (opNo != AtomicRMWInst::getPointerOperandIndex())
+ if (U.getOperandNo() != AtomicRMWInst::getPointerOperandIndex())
return true; // Storing addr, not into addr.
- MemoryUses.push_back(std::make_pair(RMW, opNo));
+ MemoryUses.push_back({U.get(), RMW->getValOperand()->getType()});
continue;
}
if (AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(UserI)) {
- unsigned opNo = U.getOperandNo();
- if (opNo != AtomicCmpXchgInst::getPointerOperandIndex())
+ if (U.getOperandNo() != AtomicCmpXchgInst::getPointerOperandIndex())
return true; // Storing addr, not into addr.
- MemoryUses.push_back(std::make_pair(CmpX, opNo));
+ MemoryUses.push_back({U.get(), CmpX->getCompareOperand()->getType()});
continue;
}
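
The rewritten FindAllMemoryUses records the accessed address together with the accessed type, in keeping with the opaque-pointer transition where the pointee type can no longer be derived from the pointer type (see the removal of getElementType() in the next hunk). A reduced sketch of the collection step, covering only the load and store cases:

#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Instructions.h"
#include <utility>
using namespace llvm;

// Record (address, accessed type) for a memory-accessing use, if it is one.
static void collectMemoryUse(const Use &U,
                             SmallVectorImpl<std::pair<Value *, Type *>> &Out) {
  Instruction *UserI = cast<Instruction>(U.getUser());
  if (auto *LI = dyn_cast<LoadInst>(UserI)) {
    Out.push_back({U.get(), LI->getType()});
  } else if (auto *SI = dyn_cast<StoreInst>(UserI)) {
    if (U.getOperandNo() == StoreInst::getPointerOperandIndex())
      Out.push_back({U.get(), SI->getValueOperand()->getType()});
  }
}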
@@ -5016,7 +5008,7 @@ isProfitableToFoldIntoAddressingMode(Instruction *I, ExtAddrMode &AMBefore,
// we can remove the addressing mode and effectively trade one live register
// for another (at worst.) In this context, folding an addressing mode into
// the use is just a particularly nice way of sinking it.
- SmallVector<std::pair<Instruction*,unsigned>, 16> MemoryUses;
+ SmallVector<std::pair<Value *, Type *>, 16> MemoryUses;
SmallPtrSet<Instruction*, 16> ConsideredInsts;
if (FindAllMemoryUses(I, MemoryUses, ConsideredInsts, TLI, TRI, OptSize,
PSI, BFI))
@@ -5032,18 +5024,10 @@ isProfitableToFoldIntoAddressingMode(Instruction *I, ExtAddrMode &AMBefore,
// growth since most architectures have some reasonable small and fast way to
// compute an effective address. (i.e LEA on x86)
SmallVector<Instruction*, 32> MatchedAddrModeInsts;
- for (unsigned i = 0, e = MemoryUses.size(); i != e; ++i) {
- Instruction *User = MemoryUses[i].first;
- unsigned OpNo = MemoryUses[i].second;
-
- // Get the access type of this use. If the use isn't a pointer, we don't
- // know what it accesses.
- Value *Address = User->getOperand(OpNo);
- PointerType *AddrTy = dyn_cast<PointerType>(Address->getType());
- if (!AddrTy)
- return false;
- Type *AddressAccessTy = AddrTy->getElementType();
- unsigned AS = AddrTy->getAddressSpace();
+ for (const std::pair<Value *, Type *> &Pair : MemoryUses) {
+ Value *Address = Pair.first;
+ Type *AddressAccessTy = Pair.second;
+ unsigned AS = Address->getType()->getPointerAddressSpace();
// Do a match against the root of this address, ignoring profitability. This
// will tell us if the addressing mode for the memory operation will
@@ -5124,8 +5108,7 @@ bool CodeGenPrepare::optimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
TypePromotionTransaction::ConstRestorationPt LastKnownGood =
TPT.getRestorationPoint();
while (!worklist.empty()) {
- Value *V = worklist.back();
- worklist.pop_back();
+ Value *V = worklist.pop_back_val();
// We allow traversing cyclic Phi nodes.
// In case of success after this loop we ensure that traversing through
@@ -6477,8 +6460,7 @@ bool CodeGenPrepare::optimizeLoadExt(LoadInst *Load) {
APInt WidestAndBits(BitWidth, 0);
while (!WorkList.empty()) {
- Instruction *I = WorkList.back();
- WorkList.pop_back();
+ Instruction *I = WorkList.pop_back_val();
// Break use-def graph loops.
if (!Visited.insert(I).second)
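
Both hunks above replace the back()/pop_back() pair with SmallVector's pop_back_val(), which removes the last element and returns it in one call. A minimal worklist sketch, with the visit step left as a placeholder comment:

#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Value.h"
using namespace llvm;

// Drain a worklist of values, popping one element per iteration.
static void drainWorklist(SmallVector<Value *, 8> &Worklist) {
  while (!Worklist.empty()) {
    Value *V = Worklist.pop_back_val();
    (void)V; // visit V here
  }
}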
@@ -6950,16 +6932,26 @@ bool CodeGenPrepare::tryToSinkFreeOperands(Instruction *I) {
BasicBlock *TargetBB = I->getParent();
bool Changed = false;
SmallVector<Use *, 4> ToReplace;
+ Instruction *InsertPoint = I;
+ DenseMap<const Instruction *, unsigned long> InstOrdering;
+ unsigned long InstNumber = 0;
+ for (const auto &I : *TargetBB)
+ InstOrdering[&I] = InstNumber++;
+
for (Use *U : reverse(OpsToSink)) {
auto *UI = cast<Instruction>(U->get());
- if (UI->getParent() == TargetBB || isa<PHINode>(UI))
+ if (isa<PHINode>(UI))
continue;
+ if (UI->getParent() == TargetBB) {
+ if (InstOrdering[UI] < InstOrdering[InsertPoint])
+ InsertPoint = UI;
+ continue;
+ }
ToReplace.push_back(U);
}
SetVector<Instruction *> MaybeDead;
DenseMap<Instruction *, Instruction *> NewInstructions;
- Instruction *InsertPoint = I;
for (Use *U : ToReplace) {
auto *UI = cast<Instruction>(U->get());
Instruction *NI = UI->clone();
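
The new code above numbers every instruction in the target block once and then uses that ordering to pull the insertion point up to the earliest operand definition that already lives in the block. A reduced sketch of the numbering idea, assuming both instructions belong to the same basic block:

#include "llvm/ADT/DenseMap.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Instruction.h"
using namespace llvm;

// Return whichever of A and B appears first in their (shared) basic block.
static Instruction *earliestInBlock(Instruction *A, Instruction *B) {
  DenseMap<const Instruction *, unsigned long> InstOrdering;
  unsigned long InstNumber = 0;
  for (const Instruction &I : *A->getParent())
    InstOrdering[&I] = InstNumber++;
  return InstOrdering[A] < InstOrdering[B] ? A : B;
}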
@@ -7863,8 +7855,9 @@ bool CodeGenPrepare::optimizeInst(Instruction *I, bool &ModifiedDT) {
BinaryOperator *BinOp = dyn_cast<BinaryOperator>(I);
- if (BinOp && (BinOp->getOpcode() == Instruction::And) && EnableAndCmpSinking)
- return sinkAndCmp0Expression(BinOp, *TLI, InsertedInsts);
+ if (BinOp && BinOp->getOpcode() == Instruction::And && EnableAndCmpSinking &&
+ sinkAndCmp0Expression(BinOp, *TLI, InsertedInsts))
+ return true;
// TODO: Move this into the switch on opcode - it handles shifts already.
if (BinOp && (BinOp->getOpcode() == Instruction::AShr ||
@@ -8030,9 +8023,8 @@ bool CodeGenPrepare::placeDbgValues(Function &F) {
DominatorTree DT(F);
for (BasicBlock &BB : F) {
- for (BasicBlock::iterator BI = BB.begin(), BE = BB.end(); BI != BE;) {
- Instruction *Insn = &*BI++;
- DbgValueInst *DVI = dyn_cast<DbgValueInst>(Insn);
+ for (Instruction &Insn : llvm::make_early_inc_range(BB)) {
+ DbgValueInst *DVI = dyn_cast<DbgValueInst>(&Insn);
if (!DVI)
continue;