author     Dimitry Andric <dim@FreeBSD.org>    2023-07-26 19:03:47 +0000
committer  Dimitry Andric <dim@FreeBSD.org>    2023-07-26 19:04:23 +0000
commit     7fa27ce4a07f19b07799a767fc29416f3b625afb (patch)
tree       27825c83636c4de341eb09a74f49f5d38a15d165 /llvm/lib/Transforms/Utils/Local.cpp
parent     e3b557809604d036af6e00c60f012c2025b59a5e (diff)
Diffstat (limited to 'llvm/lib/Transforms/Utils/Local.cpp')
-rw-r--r--  llvm/lib/Transforms/Utils/Local.cpp  |  293
1 file changed, 186 insertions(+), 107 deletions(-)
diff --git a/llvm/lib/Transforms/Utils/Local.cpp b/llvm/lib/Transforms/Utils/Local.cpp
index 31cdd2ee56b9..f153ace5d3fc 100644
--- a/llvm/lib/Transforms/Utils/Local.cpp
+++ b/llvm/lib/Transforms/Utils/Local.cpp
@@ -25,7 +25,6 @@
#include "llvm/Analysis/AssumeBundleQueries.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/DomTreeUpdater.h"
-#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
@@ -47,6 +46,7 @@
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
+#include "llvm/IR/EHPersonalities.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalObject.h"
@@ -201,16 +201,16 @@ bool llvm::ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions,
bool Changed = false;
// Figure out which case it goes to.
- for (auto i = SI->case_begin(), e = SI->case_end(); i != e;) {
+ for (auto It = SI->case_begin(), End = SI->case_end(); It != End;) {
// Found case matching a constant operand?
- if (i->getCaseValue() == CI) {
- TheOnlyDest = i->getCaseSuccessor();
+ if (It->getCaseValue() == CI) {
+ TheOnlyDest = It->getCaseSuccessor();
break;
}
// Check to see if this branch is going to the same place as the default
// dest. If so, eliminate it as an explicit compare.
- if (i->getCaseSuccessor() == DefaultDest) {
+ if (It->getCaseSuccessor() == DefaultDest) {
MDNode *MD = getValidBranchWeightMDNode(*SI);
unsigned NCases = SI->getNumCases();
// Fold the case metadata into the default if there will be any branches
@@ -221,11 +221,11 @@ bool llvm::ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions,
extractBranchWeights(MD, Weights);
// Merge weight of this case to the default weight.
- unsigned idx = i->getCaseIndex();
+ unsigned Idx = It->getCaseIndex();
// TODO: Add overflow check.
- Weights[0] += Weights[idx+1];
+ Weights[0] += Weights[Idx + 1];
// Remove weight for this case.
- std::swap(Weights[idx+1], Weights.back());
+ std::swap(Weights[Idx + 1], Weights.back());
Weights.pop_back();
SI->setMetadata(LLVMContext::MD_prof,
MDBuilder(BB->getContext()).
@@ -234,14 +234,14 @@ bool llvm::ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions,
// Remove this entry.
BasicBlock *ParentBB = SI->getParent();
DefaultDest->removePredecessor(ParentBB);
- i = SI->removeCase(i);
- e = SI->case_end();
+ It = SI->removeCase(It);
+ End = SI->case_end();
// Removing this case may have made the condition constant. In that
// case, update CI and restart iteration through the cases.
if (auto *NewCI = dyn_cast<ConstantInt>(SI->getCondition())) {
CI = NewCI;
- i = SI->case_begin();
+ It = SI->case_begin();
}
Changed = true;
@@ -251,11 +251,11 @@ bool llvm::ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions,
// Otherwise, check to see if the switch only branches to one destination.
// We do this by resetting "TheOnlyDest" to null when we find two non-equal
// destinations.
- if (i->getCaseSuccessor() != TheOnlyDest)
+ if (It->getCaseSuccessor() != TheOnlyDest)
TheOnlyDest = nullptr;
// Increment this iterator as we haven't removed the case.
- ++i;
+ ++It;
}
if (CI && !TheOnlyDest) {
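As an aside, the branch-weight bookkeeping in the hunk above is easy to model in isolation: when a case that already jumps to the default destination is deleted, its profile weight is folded into the default weight (slot 0) and its entry is removed by swap-and-pop. A minimal standalone sketch, using a plain std::vector instead of the real SwitchInst/MDNode plumbing (foldCaseWeightIntoDefault is an illustrative name, not an LLVM API):

#include <cstdint>
#include <iostream>
#include <utility>
#include <vector>

// Simplified model of the weight update performed when a switch case that
// branches to the default destination is removed. Weights[0] is the default
// weight; Weights[CaseIdx + 1] belongs to the removed case.
static void foldCaseWeightIntoDefault(std::vector<uint64_t> &Weights,
                                      unsigned CaseIdx) {
  Weights[0] += Weights[CaseIdx + 1];              // merge into the default weight
  std::swap(Weights[CaseIdx + 1], Weights.back()); // swap-and-pop removal
  Weights.pop_back();
}

int main() {
  std::vector<uint64_t> Weights = {10, 5, 7, 3}; // default, case0, case1, case2
  foldCaseWeightIntoDefault(Weights, 1);         // case1 folds into the default
  for (uint64_t W : Weights)
    std::cout << W << ' ';                       // prints: 17 5 3
  std::cout << '\n';
}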
@@ -424,18 +424,10 @@ bool llvm::wouldInstructionBeTriviallyDead(Instruction *I,
if (I->isEHPad())
return false;
- // We don't want debug info removed by anything this general, unless
- // debug info is empty.
- if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(I)) {
- if (DDI->getAddress())
- return false;
- return true;
- }
- if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(I)) {
- if (DVI->hasArgList() || DVI->getValue(0))
- return false;
- return true;
- }
+ // We don't want debug info removed by anything this general.
+ if (isa<DbgVariableIntrinsic>(I))
+ return false;
+
if (DbgLabelInst *DLI = dyn_cast<DbgLabelInst>(I)) {
if (DLI->getLabel())
return false;
@@ -555,7 +547,7 @@ bool llvm::RecursivelyDeleteTriviallyDeadInstructionsPermissive(
std::function<void(Value *)> AboutToDeleteCallback) {
unsigned S = 0, E = DeadInsts.size(), Alive = 0;
for (; S != E; ++S) {
- auto *I = dyn_cast<Instruction>(DeadInsts[S]);
+ auto *I = dyn_cast_or_null<Instruction>(DeadInsts[S]);
if (!I || !isInstructionTriviallyDead(I)) {
DeadInsts[S] = nullptr;
++Alive;
@@ -1231,12 +1223,10 @@ bool llvm::TryToSimplifyUncondBranchFromEmptyBlock(BasicBlock *BB,
// If the unconditional branch we replaced contains llvm.loop metadata, we
// add the metadata to the branch instructions in the predecessors.
- unsigned LoopMDKind = BB->getContext().getMDKindID("llvm.loop");
- Instruction *TI = BB->getTerminator();
- if (TI)
- if (MDNode *LoopMD = TI->getMetadata(LoopMDKind))
+ if (Instruction *TI = BB->getTerminator())
+ if (MDNode *LoopMD = TI->getMetadata(LLVMContext::MD_loop))
for (BasicBlock *Pred : predecessors(BB))
- Pred->getTerminator()->setMetadata(LoopMDKind, LoopMD);
+ Pred->getTerminator()->setMetadata(LLVMContext::MD_loop, LoopMD);
// Everything that jumped to BB now goes to Succ.
BB->replaceAllUsesWith(Succ);
@@ -1423,6 +1413,12 @@ static Align tryEnforceAlignment(Value *V, Align PrefAlign,
if (!GO->canIncreaseAlignment())
return CurrentAlign;
+ if (GO->isThreadLocal()) {
+ unsigned MaxTLSAlign = GO->getParent()->getMaxTLSAlignment() / CHAR_BIT;
+ if (MaxTLSAlign && PrefAlign > Align(MaxTLSAlign))
+ PrefAlign = Align(MaxTLSAlign);
+ }
+
GO->setAlignment(PrefAlign);
return PrefAlign;
}
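The new thread-local clause can likewise be sketched on its own: a preferred alignment is clamped to the module's maximum TLS alignment, which is reported in bits and converted to bytes. A rough model under the assumption that MaxTLSAlignBits plays the role of the module's reported limit; clampTLSAlignment is an illustrative helper, not the real GlobalObject API:

#include <climits>
#include <cstdint>
#include <iostream>

// Simplified model: MaxTLSAlignBits stands in for what the module would
// report as its maximum TLS alignment (0 meaning "no limit"); PrefAlign is
// the requested alignment in bytes.
static uint64_t clampTLSAlignment(uint64_t PrefAlign, unsigned MaxTLSAlignBits) {
  unsigned MaxTLSAlign = MaxTLSAlignBits / CHAR_BIT; // bits -> bytes
  if (MaxTLSAlign && PrefAlign > MaxTLSAlign)
    PrefAlign = MaxTLSAlign;
  return PrefAlign;
}

int main() {
  // A target capping TLS variables at 256-bit (32-byte) alignment.
  std::cout << clampTLSAlignment(64, 256) << '\n'; // 32: clamped
  std::cout << clampTLSAlignment(16, 256) << '\n'; // 16: already within limit
  std::cout << clampTLSAlignment(64, 0) << '\n';   // 64: no limit configured
}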
@@ -1480,19 +1476,16 @@ static bool PhiHasDebugValue(DILocalVariable *DIVar,
/// (or fragment of the variable) described by \p DII.
///
/// This is primarily intended as a helper for the different
-/// ConvertDebugDeclareToDebugValue functions. The dbg.declare/dbg.addr that is
-/// converted describes an alloca'd variable, so we need to use the
-/// alloc size of the value when doing the comparison. E.g. an i1 value will be
-/// identified as covering an n-bit fragment, if the store size of i1 is at
-/// least n bits.
+/// ConvertDebugDeclareToDebugValue functions. The dbg.declare that is converted
+/// describes an alloca'd variable, so we need to use the alloc size of the
+/// value when doing the comparison. E.g. an i1 value will be identified as
+/// covering an n-bit fragment, if the store size of i1 is at least n bits.
static bool valueCoversEntireFragment(Type *ValTy, DbgVariableIntrinsic *DII) {
const DataLayout &DL = DII->getModule()->getDataLayout();
TypeSize ValueSize = DL.getTypeAllocSizeInBits(ValTy);
- if (std::optional<uint64_t> FragmentSize = DII->getFragmentSizeInBits()) {
- assert(!ValueSize.isScalable() &&
- "Fragments don't work on scalable types.");
- return ValueSize.getFixedValue() >= *FragmentSize;
- }
+ if (std::optional<uint64_t> FragmentSize = DII->getFragmentSizeInBits())
+ return TypeSize::isKnownGE(ValueSize, TypeSize::getFixed(*FragmentSize));
+
// We can't always calculate the size of the DI variable (e.g. if it is a
// VLA). Try to use the size of the alloca that the dbg intrinsic describes
// instead.
@@ -1513,7 +1506,7 @@ static bool valueCoversEntireFragment(Type *ValTy, DbgVariableIntrinsic *DII) {
}
/// Inserts a llvm.dbg.value intrinsic before a store to an alloca'd value
-/// that has an associated llvm.dbg.declare or llvm.dbg.addr intrinsic.
+/// that has an associated llvm.dbg.declare intrinsic.
void llvm::ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII,
StoreInst *SI, DIBuilder &Builder) {
assert(DII->isAddressOfVariable() || isa<DbgAssignIntrinsic>(DII));
@@ -1524,24 +1517,39 @@ void llvm::ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII,
DebugLoc NewLoc = getDebugValueLoc(DII);
- if (!valueCoversEntireFragment(DV->getType(), DII)) {
- // FIXME: If storing to a part of the variable described by the dbg.declare,
- // then we want to insert a dbg.value for the corresponding fragment.
- LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to dbg.value: "
- << *DII << '\n');
- // For now, when there is a store to parts of the variable (but we do not
- // know which part) we insert an dbg.value intrinsic to indicate that we
- // know nothing about the variable's content.
- DV = UndefValue::get(DV->getType());
+ // If the alloca describes the variable itself, i.e. the expression in the
+ // dbg.declare doesn't start with a dereference, we can perform the
+ // conversion if the value covers the entire fragment of DII.
+ // If the alloca describes the *address* of DIVar, i.e. DIExpr is
+ // *just* a DW_OP_deref, we use DV as is for the dbg.value.
+ // We conservatively ignore other dereferences, because the following two are
+ // not equivalent:
+ // dbg.declare(alloca, ..., !Expr(deref, plus_uconstant, 2))
+ // dbg.value(DV, ..., !Expr(deref, plus_uconstant, 2))
+ // The former is adding 2 to the address of the variable, whereas the latter
+ // is adding 2 to the value of the variable. As such, we insist on just a
+ // deref expression.
+ bool CanConvert =
+ DIExpr->isDeref() || (!DIExpr->startsWithDeref() &&
+ valueCoversEntireFragment(DV->getType(), DII));
+ if (CanConvert) {
Builder.insertDbgValueIntrinsic(DV, DIVar, DIExpr, NewLoc, SI);
return;
}
+ // FIXME: If storing to a part of the variable described by the dbg.declare,
+ // then we want to insert a dbg.value for the corresponding fragment.
+ LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to dbg.value: " << *DII
+ << '\n');
+ // For now, when there is a store to parts of the variable (but we do not
+ // know which part) we insert a dbg.value intrinsic to indicate that we
+ // know nothing about the variable's content.
+ DV = UndefValue::get(DV->getType());
Builder.insertDbgValueIntrinsic(DV, DIVar, DIExpr, NewLoc, SI);
}
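The convertibility rule spelled out in the new comment block reduces to a small predicate: an expression that is exactly one DW_OP_deref converts directly, any other leading deref blocks the conversion, and otherwise the stored value must cover the whole fragment. A hedged standalone model follows, with plain enums standing in for DIExpression and canConvert/isJustDeref/startsWithDeref as illustrative names:

#include <iostream>
#include <vector>

enum Op { Deref, PlusUconst };

// Stand-ins for DIExpression::isDeref() and startsWithDeref().
static bool isJustDeref(const std::vector<Op> &E) {
  return E.size() == 1 && E[0] == Deref;
}
static bool startsWithDeref(const std::vector<Op> &E) {
  return !E.empty() && E[0] == Deref;
}

// Mirrors the CanConvert condition above: the dbg.declare may be rewritten to
// a dbg.value either when the expression is exactly one deref, or when it has
// no leading deref and the stored value covers the entire described fragment.
static bool canConvert(const std::vector<Op> &Expr, bool ValueCoversFragment) {
  return isJustDeref(Expr) || (!startsWithDeref(Expr) && ValueCoversFragment);
}

int main() {
  std::cout << canConvert({Deref}, false) << '\n';            // 1: lone deref
  std::cout << canConvert({Deref, PlusUconst}, true) << '\n'; // 0: deref + offset
  std::cout << canConvert({}, true) << '\n';                  // 1: direct value
  std::cout << canConvert({}, false) << '\n';                 // 0: partial store
}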
/// Inserts a llvm.dbg.value intrinsic before a load of an alloca'd value
-/// that has an associated llvm.dbg.declare or llvm.dbg.addr intrinsic.
+/// that has an associated llvm.dbg.declare intrinsic.
void llvm::ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII,
LoadInst *LI, DIBuilder &Builder) {
auto *DIVar = DII->getVariable();
@@ -1569,7 +1577,7 @@ void llvm::ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII,
}
/// Inserts a llvm.dbg.value intrinsic after a phi that has an associated
-/// llvm.dbg.declare or llvm.dbg.addr intrinsic.
+/// llvm.dbg.declare intrinsic.
void llvm::ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII,
PHINode *APN, DIBuilder &Builder) {
auto *DIVar = DII->getVariable();
@@ -1752,8 +1760,8 @@ void llvm::insertDebugValuesForPHIs(BasicBlock *BB,
bool llvm::replaceDbgDeclare(Value *Address, Value *NewAddress,
DIBuilder &Builder, uint8_t DIExprFlags,
int Offset) {
- auto DbgAddrs = FindDbgAddrUses(Address);
- for (DbgVariableIntrinsic *DII : DbgAddrs) {
+ auto DbgDeclares = FindDbgDeclareUses(Address);
+ for (DbgVariableIntrinsic *DII : DbgDeclares) {
const DebugLoc &Loc = DII->getDebugLoc();
auto *DIVar = DII->getVariable();
auto *DIExpr = DII->getExpression();
@@ -1764,7 +1772,7 @@ bool llvm::replaceDbgDeclare(Value *Address, Value *NewAddress,
Builder.insertDeclare(NewAddress, DIVar, DIExpr, Loc, DII);
DII->eraseFromParent();
}
- return !DbgAddrs.empty();
+ return !DbgDeclares.empty();
}
static void replaceOneDbgValueForAlloca(DbgValueInst *DVI, Value *NewAddress,
@@ -1860,9 +1868,8 @@ void llvm::salvageDebugInfoForDbgValues(
continue;
}
- // Do not add DW_OP_stack_value for DbgDeclare and DbgAddr, because they
- // are implicitly pointing out the value as a DWARF memory location
- // description.
+ // Do not add DW_OP_stack_value for DbgDeclare, because they are implicitly
+ // pointing out the value as a DWARF memory location description.
bool StackValue = isa<DbgValueInst>(DII);
auto DIILocation = DII->location_ops();
assert(
@@ -1896,17 +1903,14 @@ void llvm::salvageDebugInfoForDbgValues(
bool IsValidSalvageExpr = SalvagedExpr->getNumElements() <= MaxExpressionSize;
if (AdditionalValues.empty() && IsValidSalvageExpr) {
DII->setExpression(SalvagedExpr);
- } else if (isa<DbgValueInst>(DII) && !isa<DbgAssignIntrinsic>(DII) &&
- IsValidSalvageExpr &&
+ } else if (isa<DbgValueInst>(DII) && IsValidSalvageExpr &&
DII->getNumVariableLocationOps() + AdditionalValues.size() <=
MaxDebugArgs) {
DII->addVariableLocationOps(AdditionalValues, SalvagedExpr);
} else {
- // Do not salvage using DIArgList for dbg.addr/dbg.declare, as it is
- // not currently supported in those instructions. Do not salvage using
- // DIArgList for dbg.assign yet. FIXME: support this.
- // Also do not salvage if the resulting DIArgList would contain an
- // unreasonably large number of values.
+ // Do not salvage using DIArgList for dbg.declare, as it is not currently
+ // supported in those instructions. Also do not salvage if the resulting
+ // DIArgList would contain an unreasonably large number of values.
DII->setKillLocation();
}
LLVM_DEBUG(dbgs() << "SALVAGE: " << *DII << '\n');
@@ -1934,7 +1938,7 @@ Value *getSalvageOpsForGEP(GetElementPtrInst *GEP, const DataLayout &DL,
Opcodes.insert(Opcodes.begin(), {dwarf::DW_OP_LLVM_arg, 0});
CurrentLocOps = 1;
}
- for (auto Offset : VariableOffsets) {
+ for (const auto &Offset : VariableOffsets) {
AdditionalValues.push_back(Offset.first);
assert(Offset.second.isStrictlyPositive() &&
"Expected strictly positive multiplier for offset.");
@@ -1976,6 +1980,18 @@ uint64_t getDwarfOpForBinOp(Instruction::BinaryOps Opcode) {
}
}
+static void handleSSAValueOperands(uint64_t CurrentLocOps,
+ SmallVectorImpl<uint64_t> &Opcodes,
+ SmallVectorImpl<Value *> &AdditionalValues,
+ Instruction *I) {
+ if (!CurrentLocOps) {
+ Opcodes.append({dwarf::DW_OP_LLVM_arg, 0});
+ CurrentLocOps = 1;
+ }
+ Opcodes.append({dwarf::DW_OP_LLVM_arg, CurrentLocOps});
+ AdditionalValues.push_back(I->getOperand(1));
+}
+
Value *getSalvageOpsForBinOp(BinaryOperator *BI, uint64_t CurrentLocOps,
SmallVectorImpl<uint64_t> &Opcodes,
SmallVectorImpl<Value *> &AdditionalValues) {
@@ -1998,12 +2014,7 @@ Value *getSalvageOpsForBinOp(BinaryOperator *BI, uint64_t CurrentLocOps,
}
Opcodes.append({dwarf::DW_OP_constu, Val});
} else {
- if (!CurrentLocOps) {
- Opcodes.append({dwarf::DW_OP_LLVM_arg, 0});
- CurrentLocOps = 1;
- }
- Opcodes.append({dwarf::DW_OP_LLVM_arg, CurrentLocOps});
- AdditionalValues.push_back(BI->getOperand(1));
+ handleSSAValueOperands(CurrentLocOps, Opcodes, AdditionalValues, BI);
}
// Add salvaged binary operator to expression stack, if it has a valid
@@ -2015,6 +2026,60 @@ Value *getSalvageOpsForBinOp(BinaryOperator *BI, uint64_t CurrentLocOps,
return BI->getOperand(0);
}
+uint64_t getDwarfOpForIcmpPred(CmpInst::Predicate Pred) {
+ // The signedness of the operation is implicit in the typed stack, signed and
+ // unsigned instructions map to the same DWARF opcode.
+ switch (Pred) {
+ case CmpInst::ICMP_EQ:
+ return dwarf::DW_OP_eq;
+ case CmpInst::ICMP_NE:
+ return dwarf::DW_OP_ne;
+ case CmpInst::ICMP_UGT:
+ case CmpInst::ICMP_SGT:
+ return dwarf::DW_OP_gt;
+ case CmpInst::ICMP_UGE:
+ case CmpInst::ICMP_SGE:
+ return dwarf::DW_OP_ge;
+ case CmpInst::ICMP_ULT:
+ case CmpInst::ICMP_SLT:
+ return dwarf::DW_OP_lt;
+ case CmpInst::ICMP_ULE:
+ case CmpInst::ICMP_SLE:
+ return dwarf::DW_OP_le;
+ default:
+ return 0;
+ }
+}
+
+Value *getSalvageOpsForIcmpOp(ICmpInst *Icmp, uint64_t CurrentLocOps,
+ SmallVectorImpl<uint64_t> &Opcodes,
+ SmallVectorImpl<Value *> &AdditionalValues) {
+ // Handle icmp operations with constant integer operands as a special case.
+ auto *ConstInt = dyn_cast<ConstantInt>(Icmp->getOperand(1));
+ // Values wider than 64 bits cannot be represented within a DIExpression.
+ if (ConstInt && ConstInt->getBitWidth() > 64)
+ return nullptr;
+ // Push any Constant Int operand onto the expression stack.
+ if (ConstInt) {
+ if (Icmp->isSigned())
+ Opcodes.push_back(dwarf::DW_OP_consts);
+ else
+ Opcodes.push_back(dwarf::DW_OP_constu);
+ uint64_t Val = ConstInt->getSExtValue();
+ Opcodes.push_back(Val);
+ } else {
+ handleSSAValueOperands(CurrentLocOps, Opcodes, AdditionalValues, Icmp);
+ }
+
+ // Add salvaged binary operator to expression stack, if it has a valid
+ // representation in a DIExpression.
+ uint64_t DwarfIcmpOp = getDwarfOpForIcmpPred(Icmp->getPredicate());
+ if (!DwarfIcmpOp)
+ return nullptr;
+ Opcodes.push_back(DwarfIcmpOp);
+ return Icmp->getOperand(0);
+}
+
Value *llvm::salvageDebugInfoImpl(Instruction &I, uint64_t CurrentLocOps,
SmallVectorImpl<uint64_t> &Ops,
SmallVectorImpl<Value *> &AdditionalValues) {
@@ -2054,6 +2119,8 @@ Value *llvm::salvageDebugInfoImpl(Instruction &I, uint64_t CurrentLocOps,
return getSalvageOpsForGEP(GEP, DL, CurrentLocOps, Ops, AdditionalValues);
if (auto *BI = dyn_cast<BinaryOperator>(&I))
return getSalvageOpsForBinOp(BI, CurrentLocOps, Ops, AdditionalValues);
+ if (auto *IC = dyn_cast<ICmpInst>(&I))
+ return getSalvageOpsForIcmpOp(IC, CurrentLocOps, Ops, AdditionalValues);
// *Not* to do: we should not attempt to salvage load instructions,
// because the validity and lifetime of a dbg.value containing
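To see what the new icmp salvage path produces, consider a compare against a small constant: the constant is pushed (DW_OP_consts for signed predicates, DW_OP_constu otherwise) and the matching comparison operator is appended, with signed and unsigned predicates sharing one DWARF operator. A minimal sketch with locally defined stand-ins for the dwarf:: constants and CmpInst predicates (the numeric values are arbitrary, not real DWARF encodings):

#include <cstdint>
#include <iostream>
#include <vector>

// Local stand-ins; the real code uses dwarf::DW_OP_* and CmpInst::Predicate.
enum DwOp : uint64_t { OpConsts = 1, OpConstu, OpEq, OpNe, OpGt, OpGe, OpLt, OpLe };
enum Pred { ICMP_EQ, ICMP_NE, ICMP_SGT, ICMP_UGT, ICMP_SGE, ICMP_UGE,
            ICMP_SLT, ICMP_ULT, ICMP_SLE, ICMP_ULE };

// Mirrors getDwarfOpForIcmpPred: signedness lives in the typed stack, so
// signed and unsigned predicates map to the same comparison operator.
static uint64_t dwarfOpFor(Pred P) {
  switch (P) {
  case ICMP_EQ:                 return OpEq;
  case ICMP_NE:                 return OpNe;
  case ICMP_SGT: case ICMP_UGT: return OpGt;
  case ICMP_SGE: case ICMP_UGE: return OpGe;
  case ICMP_SLT: case ICMP_ULT: return OpLt;
  case ICMP_SLE: case ICMP_ULE: return OpLe;
  }
  return 0;
}

int main() {
  // Salvaging `%c = icmp slt i32 %x, 42`: push the signed constant, then the
  // comparison operator; %x remains the location operand of the dbg.value.
  std::vector<uint64_t> Ops;
  Ops.push_back(OpConsts); // signed predicate -> constant pushed with DW_OP_consts
  Ops.push_back(42);       // the constant right-hand side
  Ops.push_back(dwarfOpFor(ICMP_SLT));
  for (uint64_t O : Ops)
    std::cout << O << ' '; // prints the stand-in encoding: 1 42 7
  std::cout << '\n';
}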
@@ -2661,43 +2728,52 @@ void llvm::combineMetadata(Instruction *K, const Instruction *J,
intersectAccessGroups(K, J));
break;
case LLVMContext::MD_range:
-
- // If K does move, use most generic range. Otherwise keep the range of
- // K.
- if (DoesKMove)
- // FIXME: If K does move, we should drop the range info and nonnull.
- // Currently this function is used with DoesKMove in passes
- // doing hoisting/sinking and the current behavior of using the
- // most generic range is correct in those cases.
+ if (DoesKMove || !K->hasMetadata(LLVMContext::MD_noundef))
K->setMetadata(Kind, MDNode::getMostGenericRange(JMD, KMD));
break;
case LLVMContext::MD_fpmath:
K->setMetadata(Kind, MDNode::getMostGenericFPMath(JMD, KMD));
break;
case LLVMContext::MD_invariant_load:
- // Only set the !invariant.load if it is present in both instructions.
- K->setMetadata(Kind, JMD);
+ // If K moves, only set the !invariant.load if it is present in both
+ // instructions.
+ if (DoesKMove)
+ K->setMetadata(Kind, JMD);
break;
case LLVMContext::MD_nonnull:
- // If K does move, keep nonull if it is present in both instructions.
- if (DoesKMove)
+ if (DoesKMove || !K->hasMetadata(LLVMContext::MD_noundef))
K->setMetadata(Kind, JMD);
break;
case LLVMContext::MD_invariant_group:
// Preserve !invariant.group in K.
break;
case LLVMContext::MD_align:
- K->setMetadata(Kind,
- MDNode::getMostGenericAlignmentOrDereferenceable(JMD, KMD));
+ if (DoesKMove || !K->hasMetadata(LLVMContext::MD_noundef))
+ K->setMetadata(
+ Kind, MDNode::getMostGenericAlignmentOrDereferenceable(JMD, KMD));
break;
case LLVMContext::MD_dereferenceable:
case LLVMContext::MD_dereferenceable_or_null:
- K->setMetadata(Kind,
- MDNode::getMostGenericAlignmentOrDereferenceable(JMD, KMD));
+ if (DoesKMove)
+ K->setMetadata(Kind,
+ MDNode::getMostGenericAlignmentOrDereferenceable(JMD, KMD));
break;
case LLVMContext::MD_preserve_access_index:
// Preserve !preserve.access.index in K.
break;
+ case LLVMContext::MD_noundef:
+ // If K does move, keep noundef if it is present in both instructions.
+ if (DoesKMove)
+ K->setMetadata(Kind, JMD);
+ break;
+ case LLVMContext::MD_nontemporal:
+ // Preserve !nontemporal if it is present on both instructions.
+ K->setMetadata(Kind, JMD);
+ break;
+ case LLVMContext::MD_prof:
+ if (DoesKMove)
+ K->setMetadata(Kind, MDNode::getMergedProfMetadata(KMD, JMD, K, J));
+ break;
}
}
// Set !invariant.group from J if J has it. If both instructions have it
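A compact way to read the reworked conditions above, for kinds such as !range, !nonnull and !align: the merged or most-generic form is applied either when K is being moved or when K does not also carry !noundef; otherwise K keeps its existing metadata. A tiny model of just that guard, with applyMergedMetadata as an illustrative name:

#include <iostream>

// Simplified guard for !range, !nonnull and !align in the updated
// combineMetadata: take the merged metadata when K is being moved, or when K
// is not also annotated !noundef; otherwise leave K's metadata untouched.
static bool applyMergedMetadata(bool DoesKMove, bool KHasNoundef) {
  return DoesKMove || !KHasNoundef;
}

int main() {
  std::cout << applyMergedMetadata(true, true) << '\n';   // 1: hoisting/sinking
  std::cout << applyMergedMetadata(false, false) << '\n'; // 1: K lacks !noundef
  std::cout << applyMergedMetadata(false, true) << '\n';  // 0: keep K's own MD
}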
@@ -2713,14 +2789,22 @@ void llvm::combineMetadata(Instruction *K, const Instruction *J,
void llvm::combineMetadataForCSE(Instruction *K, const Instruction *J,
bool KDominatesJ) {
- unsigned KnownIDs[] = {
- LLVMContext::MD_tbaa, LLVMContext::MD_alias_scope,
- LLVMContext::MD_noalias, LLVMContext::MD_range,
- LLVMContext::MD_invariant_load, LLVMContext::MD_nonnull,
- LLVMContext::MD_invariant_group, LLVMContext::MD_align,
- LLVMContext::MD_dereferenceable,
- LLVMContext::MD_dereferenceable_or_null,
- LLVMContext::MD_access_group, LLVMContext::MD_preserve_access_index};
+ unsigned KnownIDs[] = {LLVMContext::MD_tbaa,
+ LLVMContext::MD_alias_scope,
+ LLVMContext::MD_noalias,
+ LLVMContext::MD_range,
+ LLVMContext::MD_fpmath,
+ LLVMContext::MD_invariant_load,
+ LLVMContext::MD_nonnull,
+ LLVMContext::MD_invariant_group,
+ LLVMContext::MD_align,
+ LLVMContext::MD_dereferenceable,
+ LLVMContext::MD_dereferenceable_or_null,
+ LLVMContext::MD_access_group,
+ LLVMContext::MD_preserve_access_index,
+ LLVMContext::MD_prof,
+ LLVMContext::MD_nontemporal,
+ LLVMContext::MD_noundef};
combineMetadata(K, J, KnownIDs, KDominatesJ);
}
@@ -2799,13 +2883,7 @@ void llvm::patchReplacementInstruction(Instruction *I, Value *Repl) {
// In general, GVN unifies expressions over different control-flow
// regions, and so we need a conservative combination of the noalias
// scopes.
- static const unsigned KnownIDs[] = {
- LLVMContext::MD_tbaa, LLVMContext::MD_alias_scope,
- LLVMContext::MD_noalias, LLVMContext::MD_range,
- LLVMContext::MD_fpmath, LLVMContext::MD_invariant_load,
- LLVMContext::MD_invariant_group, LLVMContext::MD_nonnull,
- LLVMContext::MD_access_group, LLVMContext::MD_preserve_access_index};
- combineMetadata(ReplInst, I, KnownIDs, false);
+ combineMetadataForCSE(ReplInst, I, false);
}
template <typename RootType, typename DominatesFn>
@@ -2930,7 +3008,8 @@ void llvm::copyRangeMetadata(const DataLayout &DL, const LoadInst &OldLI,
return;
unsigned BitWidth = DL.getPointerTypeSizeInBits(NewTy);
- if (!getConstantRangeFromMetadata(*N).contains(APInt(BitWidth, 0))) {
+ if (BitWidth == OldLI.getType()->getScalarSizeInBits() &&
+ !getConstantRangeFromMetadata(*N).contains(APInt(BitWidth, 0))) {
MDNode *NN = MDNode::get(OldLI.getContext(), std::nullopt);
NewLI.setMetadata(LLVMContext::MD_nonnull, NN);
}
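The added width check only lets the old load's !range be rewritten into !nonnull on the new pointer load when the pointer's bit width matches the width of the integer that was loaded, so the zero-exclusion test happens in the right width. A simplified sketch, assuming a non-wrapping [Lo, Hi) range rather than the MDNode-encoded one:

#include <cstdint>
#include <iostream>

// Simplified model: the old integer load carried !range [Lo, Hi), assumed
// non-wrapping here. It may become !nonnull on the new pointer load only if
// the pointer width matches the old load's width and the range excludes zero.
static bool rangeImpliesNonNull(unsigned PtrBitWidth, unsigned OldBitWidth,
                                uint64_t Lo, uint64_t Hi) {
  if (PtrBitWidth != OldBitWidth)
    return false; // widths differ: the zero test would be in the wrong width
  return Lo != 0 && Lo < Hi; // non-wrapping range that cannot contain zero
}

int main() {
  std::cout << rangeImpliesNonNull(64, 64, 1, 4096) << '\n'; // 1
  std::cout << rangeImpliesNonNull(64, 32, 1, 4096) << '\n'; // 0: width mismatch
  std::cout << rangeImpliesNonNull(64, 64, 0, 4096) << '\n'; // 0: zero is possible
}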
@@ -2969,7 +3048,7 @@ void llvm::hoistAllInstructionsInto(BasicBlock *DomBlock, Instruction *InsertPt,
for (BasicBlock::iterator II = BB->begin(), IE = BB->end(); II != IE;) {
Instruction *I = &*II;
- I->dropUndefImplyingAttrsAndUnknownMetadata();
+ I->dropUBImplyingAttrsAndMetadata();
if (I->isUsedByMetadata())
dropDebugUsers(*I);
if (I->isDebugOrPseudoInst()) {
@@ -3125,7 +3204,7 @@ collectBitParts(Value *V, bool MatchBSwaps, bool MatchBitReversals,
// Check that the mask allows a multiple of 8 bits for a bswap, for an
// early exit.
- unsigned NumMaskedBits = AndMask.countPopulation();
+ unsigned NumMaskedBits = AndMask.popcount();
if (!MatchBitReversals && (NumMaskedBits % 8) != 0)
return Result;
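The countPopulation to popcount rename matches C++20's std::popcount; the surrounding early exit simply requires the and-mask to select a whole number of bytes before a bswap (without bit-reversal matching) can be recognised. A small standalone version of that test, with maskAllowsBswap as an illustrative name:

#include <bit>
#include <cstdint>
#include <iostream>

// Mirrors the early exit above: when bit-reversals are not being matched,
// the and-mask must cover a whole number of bytes for a bswap to be possible.
static bool maskAllowsBswap(uint64_t AndMask) {
  unsigned NumMaskedBits = std::popcount(AndMask); // formerly APInt::countPopulation
  return NumMaskedBits % 8 == 0;
}

int main() {
  std::cout << maskAllowsBswap(0x00FF00FFu) << '\n'; // 1: 16 bits set
  std::cout << maskAllowsBswap(0x0000000Fu) << '\n'; // 0: only 4 bits set
}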