Diffstat (limited to 'contrib/llvm-project/llvm/lib/Target/PowerPC/PPCISelLowering.cpp')
-rw-r--r--  contrib/llvm-project/llvm/lib/Target/PowerPC/PPCISelLowering.cpp | 27
 1 file changed, 19 insertions(+), 8 deletions(-)
diff --git a/contrib/llvm-project/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/contrib/llvm-project/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index 11454841cab7..5c1a4cb16568 100644
--- a/contrib/llvm-project/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -9111,13 +9111,15 @@ SDValue PPCTargetLowering::LowerBITCAST(SDValue Op, SelectionDAG &DAG) const {
Op0.getOperand(1));
}
-static const SDValue *getNormalLoadInput(const SDValue &Op) {
+static const SDValue *getNormalLoadInput(const SDValue &Op, bool &IsPermuted) {
const SDValue *InputLoad = &Op;
if (InputLoad->getOpcode() == ISD::BITCAST)
InputLoad = &InputLoad->getOperand(0);
if (InputLoad->getOpcode() == ISD::SCALAR_TO_VECTOR ||
- InputLoad->getOpcode() == PPCISD::SCALAR_TO_VECTOR_PERMUTED)
+ InputLoad->getOpcode() == PPCISD::SCALAR_TO_VECTOR_PERMUTED) {
+ IsPermuted = InputLoad->getOpcode() == PPCISD::SCALAR_TO_VECTOR_PERMUTED;
InputLoad = &InputLoad->getOperand(0);
+ }
if (InputLoad->getOpcode() != ISD::LOAD)
return nullptr;
LoadSDNode *LD = cast<LoadSDNode>(*InputLoad);
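For orientation, below is a standalone sketch (not LLVM code; the node kinds and single-operand structure are simplified stand-ins for the SelectionDAG types) of the lookup pattern this hunk implements: peel a possible bitcast, then a possibly permuted scalar-to-vector node while recording which form was seen, and only accept an underlying load. Note that the out-parameter is written only on the scalar-to-vector path, which is why the callers in the later hunks initialize IsPermutedLoad to false before the call.

    // Toy model of the updated helper; types and names here are hypothetical.
    #include <cstdio>

    enum class Kind { Bitcast, ScalarToVector, ScalarToVectorPermuted, Load, Other };

    struct Node {
      Kind K;
      const Node *Operand; // one operand is enough for this model
    };

    static const Node *findNormalLoadInput(const Node *N, bool &IsPermuted) {
      // Look through a bitcast wrapper, if any.
      if (N->K == Kind::Bitcast)
        N = N->Operand;
      // Look through (possibly permuted) scalar-to-vector and record which form.
      if (N->K == Kind::ScalarToVector || N->K == Kind::ScalarToVectorPermuted) {
        IsPermuted = (N->K == Kind::ScalarToVectorPermuted);
        N = N->Operand;
      }
      // Only an underlying load is interesting to the callers.
      return N->K == Kind::Load ? N : nullptr;
    }

    int main() {
      Node Load{Kind::Load, nullptr};
      Node Permuted{Kind::ScalarToVectorPermuted, &Load};
      Node Cast{Kind::Bitcast, &Permuted};

      bool IsPermuted = false; // callers initialize to false, as the later hunks do
      const Node *Found = findNormalLoadInput(&Cast, IsPermuted);
      std::printf("found load: %s, permuted: %s\n",
                  Found ? "yes" : "no", IsPermuted ? "yes" : "no");
      return 0;
    }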
@@ -9289,7 +9291,9 @@ SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
if (!BVNIsConstantSplat || SplatBitSize > 32) {
- const SDValue *InputLoad = getNormalLoadInput(Op.getOperand(0));
+ bool IsPermutedLoad = false;
+ const SDValue *InputLoad =
+ getNormalLoadInput(Op.getOperand(0), IsPermutedLoad);
// Handle load-and-splat patterns as we have instructions that will do this
// in one go.
if (InputLoad && DAG.isSplatValue(Op, true)) {
@@ -9912,7 +9916,8 @@ SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
// If this is a load-and-splat, we can do that with a single instruction
// in some cases. However if the load has multiple uses, we don't want to
// combine it because that will just produce multiple loads.
- const SDValue *InputLoad = getNormalLoadInput(V1);
+ bool IsPermutedLoad = false;
+ const SDValue *InputLoad = getNormalLoadInput(V1, IsPermutedLoad);
if (InputLoad && Subtarget.hasVSX() && V2.isUndef() &&
(PPC::isSplatShuffleMask(SVOp, 4) || PPC::isSplatShuffleMask(SVOp, 8)) &&
InputLoad->hasOneUse()) {
@@ -9920,6 +9925,16 @@ SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
int SplatIdx =
PPC::getSplatIdxForPPCMnemonics(SVOp, IsFourByte ? 4 : 8, DAG);
+ // The splat index for permuted loads will be in the left half of the vector
+ // which is strictly wider than the loaded value by 8 bytes. So we need to
+ // adjust the splat index to point to the correct address in memory.
+ if (IsPermutedLoad) {
+ assert(isLittleEndian && "Unexpected permuted load on big endian target");
+ SplatIdx += IsFourByte ? 2 : 1;
+ assert((SplatIdx < (IsFourByte ? 4 : 2)) &&
+ "Splat of a value outside of the loaded memory");
+ }
+
LoadSDNode *LD = cast<LoadSDNode>(*InputLoad);
// For 4-byte load-and-splat, we need Power9.
if ((IsFourByte && Subtarget.hasP9Vector()) || !IsFourByte) {
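As a worked example of the adjustment above: in the doubleword case (IsFourByte false), the assert requires the adjusted index to stay below 2, so the index reported by getSplatIdxForPPCMnemonics must have been 0 and becomes 1 after the bump. A minimal standalone sketch of just that arithmetic (hypothetical values, not LLVM code):

    #include <cassert>
    #include <cstdio>

    int main() {
      const bool IsFourByte = false;    // assume the 8-byte (doubleword) case
      const bool IsPermutedLoad = true; // input came via SCALAR_TO_VECTOR_PERMUTED
      int SplatIdx = 0;                 // index reported in the left half

      // Bump the index out of the left half, per the comment in the patch.
      if (IsPermutedLoad) {
        SplatIdx += IsFourByte ? 2 : 1;
        assert(SplatIdx < (IsFourByte ? 4 : 2) &&
               "Splat of a value outside of the loaded memory");
      }
      std::printf("adjusted SplatIdx = %d\n", SplatIdx); // prints 1
      return 0;
    }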
@@ -9929,10 +9944,6 @@ SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
else
Offset = isLittleEndian ? (1 - SplatIdx) * 8 : SplatIdx * 8;
- // If we are loading a partial vector, it does not make sense to adjust
- // the base pointer. This happens with (splat (s_to_v_permuted (ld))).
- if (LD->getMemoryVT().getSizeInBits() == (IsFourByte ? 32 : 64))
- Offset = 0;
SDValue BasePtr = LD->getBasePtr();
if (Offset != 0)
BasePtr = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
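To finish the doubleword example: the "(1 - SplatIdx) * 8" line kept in this hunk maps the adjusted index to a byte offset from the load's base pointer, and a non-zero offset is folded into the base pointer with an ISD::ADD. For the permuted case above (adjusted index 1) the computed offset is already 0, consistent with the Offset = 0 special case removed here. A standalone sketch of that arithmetic (hypothetical values, not LLVM code):

    #include <cstdio>

    // Byte offset for a little-endian doubleword load-and-splat, mirroring
    // the "(1 - SplatIdx) * 8" expression kept in the hunk above.
    static unsigned doublewordOffsetLE(int SplatIdx) { return (1 - SplatIdx) * 8; }

    int main() {
      unsigned long long BasePtr = 0x1000; // stand-in for the load's base pointer

      // Permuted case from the previous sketch: adjusted index 1 -> offset 0,
      // so the base pointer is used unchanged.
      unsigned OffPermuted = doublewordOffsetLE(1);

      // Ordinary splat of element 0: offset 8, added to the base pointer
      // (modelling the ISD::ADD at the end of the hunk).
      unsigned OffPlain = doublewordOffsetLE(0);
      unsigned long long Adjusted = OffPlain != 0 ? BasePtr + OffPlain : BasePtr;

      std::printf("permuted offset = %u, plain offset = %u, adjusted base = %#llx\n",
                  OffPermuted, OffPlain, Adjusted);
      return 0;
    }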