path: root/contrib/llvm-project/llvm/lib/CodeGen/ExpandVectorPredication.cpp
Diffstat (limited to 'contrib/llvm-project/llvm/lib/CodeGen/ExpandVectorPredication.cpp')
-rw-r--r--  contrib/llvm-project/llvm/lib/CodeGen/ExpandVectorPredication.cpp  27
1 file changed, 17 insertions, 10 deletions
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/ExpandVectorPredication.cpp b/contrib/llvm-project/llvm/lib/CodeGen/ExpandVectorPredication.cpp
index bb8d2b3e9a78..7883a48d121c 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/ExpandVectorPredication.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/ExpandVectorPredication.cpp
@@ -23,13 +23,11 @@
 #include "llvm/IR/Instructions.h"
 #include "llvm/IR/IntrinsicInst.h"
 #include "llvm/IR/Intrinsics.h"
-#include "llvm/IR/Module.h"
 #include "llvm/InitializePasses.h"
 #include "llvm/Pass.h"
 #include "llvm/Support/CommandLine.h"
 #include "llvm/Support/Compiler.h"
 #include "llvm/Support/Debug.h"
-#include "llvm/Support/MathExtras.h"
 
 using namespace llvm;
 
@@ -115,6 +113,17 @@ static void replaceOperation(Value &NewOp, VPIntrinsic &OldOp) {
   OldOp.eraseFromParent();
 }
 
+static bool maySpeculateLanes(VPIntrinsic &VPI) {
+  // The result of VP reductions depends on the mask and evl.
+  if (isa<VPReductionIntrinsic>(VPI))
+    return false;
+  // Fallback to whether the intrinsic is speculatable.
+  Optional<unsigned> OpcOpt = VPI.getFunctionalOpcode();
+  unsigned FunctionalOpc = OpcOpt.value_or((unsigned)Instruction::Call);
+  return isSafeToSpeculativelyExecuteWithOpcode(FunctionalOpc,
+                                                cast<Operator>(&VPI));
+}
+
 //// } Helpers
 
 namespace {
@@ -218,8 +227,7 @@ Value *CachingVPExpander::convertEVLToMask(IRBuilder<> &Builder,
 Value *
 CachingVPExpander::expandPredicationInBinaryOperator(IRBuilder<> &Builder,
                                                      VPIntrinsic &VPI) {
-  assert((isSafeToSpeculativelyExecute(&VPI) ||
-          VPI.canIgnoreVectorLengthParam()) &&
+  assert((maySpeculateLanes(VPI) || VPI.canIgnoreVectorLengthParam()) &&
          "Implicitly dropping %evl in non-speculatable operator!");
 
   auto OC = static_cast<Instruction::BinaryOps>(*VPI.getFunctionalOpcode());
@@ -298,8 +306,7 @@ static Value *getNeutralReductionElement(const VPReductionIntrinsic &VPI,
 Value *
 CachingVPExpander::expandPredicationInReduction(IRBuilder<> &Builder,
                                                 VPReductionIntrinsic &VPI) {
-  assert((isSafeToSpeculativelyExecute(&VPI) ||
-          VPI.canIgnoreVectorLengthParam()) &&
+  assert((maySpeculateLanes(VPI) || VPI.canIgnoreVectorLengthParam()) &&
          "Implicitly dropping %evl in non-speculatable operator!");
 
   Value *Mask = VPI.getMaskParam();
@@ -473,9 +480,9 @@ struct TransformJob {
   bool isDone() const { return Strategy.shouldDoNothing(); }
 };
 
-void sanitizeStrategy(Instruction &I, VPLegalization &LegalizeStrat) {
-  // Speculatable instructions do not strictly need predication.
-  if (isSafeToSpeculativelyExecute(&I)) {
+void sanitizeStrategy(VPIntrinsic &VPI, VPLegalization &LegalizeStrat) {
+  // Operations with speculatable lanes do not strictly need predication.
+  if (maySpeculateLanes(VPI)) {
     // Converting a speculatable VP intrinsic means dropping %mask and %evl.
     // No need to expand %evl into the %mask only to ignore that code.
     if (LegalizeStrat.OpStrategy == VPLegalization::Convert)
@@ -520,7 +527,7 @@ bool CachingVPExpander::expandVectorPredication() {
     if (!VPI)
       continue;
     auto VPStrat = getVPLegalizationStrategy(*VPI);
-    sanitizeStrategy(I, VPStrat);
+    sanitizeStrategy(*VPI, VPStrat);
     if (!VPStrat.shouldDoNothing())
       Worklist.emplace_back(VPI, VPStrat);
   }
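
A minimal, self-contained C++ sketch (not part of the change above) of the kind of call this patch is concerned with: it uses the public IRBuilder/Intrinsic API to build an llvm.vp.reduce.fadd call, whose result depends on %mask and %evl and which the new maySpeculateLanes() helper therefore refuses to treat as speculatable. The module, function, and variable names are made up for illustration, and an LLVM development setup is assumed for the headers.

// Hypothetical illustration only; builds a VP reduction whose lanes must not
// be speculated because its result depends on %mask and %evl.
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

int main() {
  LLVMContext Ctx;
  Module M("vp-example", Ctx);

  auto *VecTy = FixedVectorType::get(Type::getFloatTy(Ctx), 4);
  auto *MaskTy = FixedVectorType::get(Type::getInt1Ty(Ctx), 4);

  // float @f(float %start, <4 x float> %v, <4 x i1> %mask, i32 %evl)
  auto *FnTy = FunctionType::get(
      Type::getFloatTy(Ctx),
      {Type::getFloatTy(Ctx), VecTy, MaskTy, Type::getInt32Ty(Ctx)},
      /*isVarArg=*/false);
  Function *F = Function::Create(FnTy, Function::ExternalLinkage, "f", M);
  IRBuilder<> Builder(BasicBlock::Create(Ctx, "entry", F));

  // llvm.vp.reduce.fadd(%start, %v, %mask, %evl): masked-off lanes and lanes
  // beyond %evl do not take part in the reduction, so dropping either
  // parameter would change the result.
  Function *VPReduce =
      Intrinsic::getDeclaration(&M, Intrinsic::vp_reduce_fadd, {VecTy});
  Value *Red = Builder.CreateCall(
      VPReduce, {F->getArg(0), F->getArg(1), F->getArg(2), F->getArg(3)});
  Builder.CreateRet(Red);

  M.print(outs(), nullptr);
  return 0;
}

A plain binary VP operation such as llvm.vp.add, by contrast, falls through to the functional opcode's ordinary speculatability check (isSafeToSpeculativelyExecuteWithOpcode), so sanitizeStrategy may still drop its %mask and %evl when the lanes are otherwise safe to compute.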