| Field | Value | Date |
|---|---|---|
| author | Dimitry Andric <dim@FreeBSD.org> | 2024-05-04 10:19:32 +0000 |
| committer | Dimitry Andric <dim@FreeBSD.org> | 2024-05-04 15:44:49 +0000 |
| commit | 5678d1d98a348f315453555377ccb28821a2ffcd (patch) | |
| tree | 9dda9eea3f362a43cae6ec4fa369567efa7002fa /contrib/llvm-project/llvm | |
| parent | 0ad9b235e1eaef36e07247c8c7635a8eac98f4b1 (diff) | |
| parent | 9a7cb8417a2a13bcb7a300c65c43960389b85456 (diff) | |
Diffstat (limited to 'contrib/llvm-project/llvm')
15 files changed, 95 insertions, 63 deletions
```diff
diff --git a/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/LoadStoreOpt.h b/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/LoadStoreOpt.h
index 0f20a33f3a75..7990997835d0 100644
--- a/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/LoadStoreOpt.h
+++ b/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/LoadStoreOpt.h
@@ -35,11 +35,23 @@ struct LegalityQuery;
 class MachineRegisterInfo;
 namespace GISelAddressing {
 /// Helper struct to store a base, index and offset that forms an address
-struct BaseIndexOffset {
+class BaseIndexOffset {
+private:
   Register BaseReg;
   Register IndexReg;
-  int64_t Offset = 0;
-  bool IsIndexSignExt = false;
+  std::optional<int64_t> Offset;
+
+public:
+  BaseIndexOffset() = default;
+  Register getBase() { return BaseReg; }
+  Register getBase() const { return BaseReg; }
+  Register getIndex() { return IndexReg; }
+  Register getIndex() const { return IndexReg; }
+  void setBase(Register NewBase) { BaseReg = NewBase; }
+  void setIndex(Register NewIndex) { IndexReg = NewIndex; }
+  void setOffset(std::optional<int64_t> NewOff) { Offset = NewOff; }
+  bool hasValidOffset() const { return Offset.has_value(); }
+  int64_t getOffset() const { return *Offset; }
 };
 
 /// Returns a BaseIndexOffset which describes the pointer in \p Ptr.
@@ -89,7 +101,7 @@ private:
   // order stores are writing to incremeneting consecutive addresses. So when
   // we walk the block in reverse order, the next eligible store must write to
   // an offset one store width lower than CurrentLowestOffset.
-  uint64_t CurrentLowestOffset;
+  int64_t CurrentLowestOffset;
   SmallVector<GStore *> Stores;
   // A vector of MachineInstr/unsigned pairs to denote potential aliases that
   // need to be checked before the candidate is considered safe to merge. The
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/CodeGenPrepare.cpp b/contrib/llvm-project/llvm/lib/CodeGen/CodeGenPrepare.cpp
index 8ee1f19e083e..1cca56fc19cf 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/CodeGenPrepare.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/CodeGenPrepare.cpp
@@ -8154,6 +8154,7 @@ static bool optimizeBranch(BranchInst *Branch, const TargetLowering &TLI,
     IRBuilder<> Builder(Branch);
     if (UI->getParent() != Branch->getParent())
       UI->moveBefore(Branch);
+    UI->dropPoisonGeneratingFlags();
     Value *NewCmp = Builder.CreateCmp(ICmpInst::ICMP_EQ, UI,
                                       ConstantInt::get(UI->getType(), 0));
     LLVM_DEBUG(dbgs() << "Converting " << *Cmp << "\n");
@@ -8167,6 +8168,7 @@ static bool optimizeBranch(BranchInst *Branch, const TargetLowering &TLI,
     IRBuilder<> Builder(Branch);
     if (UI->getParent() != Branch->getParent())
       UI->moveBefore(Branch);
+    UI->dropPoisonGeneratingFlags();
     Value *NewCmp = Builder.CreateCmp(Cmp->getPredicate(), UI,
                                       ConstantInt::get(UI->getType(), 0));
     LLVM_DEBUG(dbgs() << "Converting " << *Cmp << "\n");
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
index 772229215e79..61ddc858ba44 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -591,8 +591,8 @@ bool CombinerHelper::matchCombineExtendingLoads(MachineInstr &MI,
         UseMI.getOpcode() == TargetOpcode::G_ZEXT ||
         (UseMI.getOpcode() == TargetOpcode::G_ANYEXT)) {
       const auto &MMO = LoadMI->getMMO();
-      // For atomics, only form anyextending loads.
-      if (MMO.isAtomic() && UseMI.getOpcode() != TargetOpcode::G_ANYEXT)
+      // Don't do anything for atomics.
+      if (MMO.isAtomic())
         continue;
       // Check for legality.
       if (!isPreLegalize()) {
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp b/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
index c0c22e36004f..47d045ac4817 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
@@ -4180,6 +4180,10 @@ LegalizerHelper::fewerElementsVectorPhi(GenericMachineInstr &MI,
     }
   }
 
+  // Set the insert point after the existing PHIs
+  MachineBasicBlock &MBB = *MI.getParent();
+  MIRBuilder.setInsertPt(MBB, MBB.getFirstNonPHI());
+
   // Merge small outputs into MI's def.
   if (NumLeftovers) {
     mergeMixedSubvectors(MI.getReg(0), OutputRegs);
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/LoadStoreOpt.cpp b/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/LoadStoreOpt.cpp
index 246aa88b09ac..ee499c41c558 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/LoadStoreOpt.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/LoadStoreOpt.cpp
@@ -84,21 +84,20 @@ BaseIndexOffset GISelAddressing::getPointerInfo(Register Ptr,
                                                 MachineRegisterInfo &MRI) {
   BaseIndexOffset Info;
   Register PtrAddRHS;
-  if (!mi_match(Ptr, MRI, m_GPtrAdd(m_Reg(Info.BaseReg), m_Reg(PtrAddRHS)))) {
-    Info.BaseReg = Ptr;
-    Info.IndexReg = Register();
-    Info.IsIndexSignExt = false;
+  Register BaseReg;
+  if (!mi_match(Ptr, MRI, m_GPtrAdd(m_Reg(BaseReg), m_Reg(PtrAddRHS)))) {
+    Info.setBase(Ptr);
+    Info.setOffset(0);
     return Info;
   }
-
+  Info.setBase(BaseReg);
   auto RHSCst = getIConstantVRegValWithLookThrough(PtrAddRHS, MRI);
   if (RHSCst)
-    Info.Offset = RHSCst->Value.getSExtValue();
+    Info.setOffset(RHSCst->Value.getSExtValue());
 
   // Just recognize a simple case for now. In future we'll need to match
   // indexing patterns for base + index + constant.
-  Info.IndexReg = PtrAddRHS;
-  Info.IsIndexSignExt = false;
+  Info.setIndex(PtrAddRHS);
   return Info;
 }
 
@@ -114,15 +113,16 @@ bool GISelAddressing::aliasIsKnownForLoadStore(const MachineInstr &MI1,
   BaseIndexOffset BasePtr0 = getPointerInfo(LdSt1->getPointerReg(), MRI);
   BaseIndexOffset BasePtr1 = getPointerInfo(LdSt2->getPointerReg(), MRI);
 
-  if (!BasePtr0.BaseReg.isValid() || !BasePtr1.BaseReg.isValid())
+  if (!BasePtr0.getBase().isValid() || !BasePtr1.getBase().isValid())
     return false;
 
   int64_t Size1 = LdSt1->getMemSize();
   int64_t Size2 = LdSt2->getMemSize();
 
   int64_t PtrDiff;
-  if (BasePtr0.BaseReg == BasePtr1.BaseReg) {
-    PtrDiff = BasePtr1.Offset - BasePtr0.Offset;
+  if (BasePtr0.getBase() == BasePtr1.getBase() && BasePtr0.hasValidOffset() &&
+      BasePtr1.hasValidOffset()) {
+    PtrDiff = BasePtr1.getOffset() - BasePtr0.getOffset();
     // If the size of memory access is unknown, do not use it to do analysis.
     // One example of unknown size memory access is to load/store scalable
     // vector objects on the stack.
@@ -151,8 +151,8 @@ bool GISelAddressing::aliasIsKnownForLoadStore(const MachineInstr &MI1,
   // able to calculate their relative offset if at least one arises
   // from an alloca. However, these allocas cannot overlap and we
   // can infer there is no alias.
-  auto *Base0Def = getDefIgnoringCopies(BasePtr0.BaseReg, MRI);
-  auto *Base1Def = getDefIgnoringCopies(BasePtr1.BaseReg, MRI);
+  auto *Base0Def = getDefIgnoringCopies(BasePtr0.getBase(), MRI);
+  auto *Base1Def = getDefIgnoringCopies(BasePtr1.getBase(), MRI);
   if (!Base0Def || !Base1Def)
     return false; // Couldn't tell anything.
@@ -520,16 +520,20 @@ bool LoadStoreOpt::addStoreToCandidate(GStore &StoreMI,
   Register StoreAddr = StoreMI.getPointerReg();
   auto BIO = getPointerInfo(StoreAddr, *MRI);
-  Register StoreBase = BIO.BaseReg;
-  uint64_t StoreOffCst = BIO.Offset;
+  Register StoreBase = BIO.getBase();
   if (C.Stores.empty()) {
+    C.BasePtr = StoreBase;
+    if (!BIO.hasValidOffset()) {
+      C.CurrentLowestOffset = 0;
+    } else {
+      C.CurrentLowestOffset = BIO.getOffset();
+    }
     // This is the first store of the candidate.
     // If the offset can't possibly allow for a lower addressed store with the
     // same base, don't bother adding it.
-    if (StoreOffCst < ValueTy.getSizeInBytes())
+    if (BIO.hasValidOffset() &&
+        BIO.getOffset() < static_cast<int64_t>(ValueTy.getSizeInBytes()))
       return false;
-    C.BasePtr = StoreBase;
-    C.CurrentLowestOffset = StoreOffCst;
     C.Stores.emplace_back(&StoreMI);
     LLVM_DEBUG(dbgs() << "Starting a new merge candidate group with: "
                       << StoreMI);
@@ -549,8 +553,12 @@ bool LoadStoreOpt::addStoreToCandidate(GStore &StoreMI,
   // writes to the next lowest adjacent address.
   if (C.BasePtr != StoreBase)
     return false;
-  if ((C.CurrentLowestOffset - ValueTy.getSizeInBytes()) !=
-      static_cast<uint64_t>(StoreOffCst))
+  // If we don't have a valid offset, we can't guarantee to be an adjacent
+  // offset.
+  if (!BIO.hasValidOffset())
+    return false;
+  if ((C.CurrentLowestOffset -
+       static_cast<int64_t>(ValueTy.getSizeInBytes())) != BIO.getOffset())
     return false;
 
   // This writes to an adjacent address. Allow it.
diff --git a/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index e806e0f0731f..5038f8a1fc15 100644
--- a/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -9636,8 +9636,15 @@ static SDValue combineShiftOfShiftedLogic(SDNode *Shift, SelectionDAG &DAG) {
     if (ShiftAmtVal->getBitWidth() != C1Val.getBitWidth())
       return false;
 
+    // The fold is not valid if the sum of the shift values doesn't fit in the
+    // given shift amount type.
+    bool Overflow = false;
+    APInt NewShiftAmt = C1Val.uadd_ov(*ShiftAmtVal, Overflow);
+    if (Overflow)
+      return false;
+
     // The fold is not valid if the sum of the shift values exceeds bitwidth.
-    if ((*ShiftAmtVal + C1Val).uge(V.getScalarValueSizeInBits()))
+    if (NewShiftAmt.uge(V.getScalarValueSizeInBits()))
       return false;
 
     return true;
diff --git a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64InstrInfo.td
index 03baa7497615..ac61dd8745d4 100644
--- a/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64InstrInfo.td
+++ b/contrib/llvm-project/llvm/lib/Target/AArch64/AArch64InstrInfo.td
@@ -4885,19 +4885,9 @@ defm UABDL   : SIMDLongThreeVectorBHSabdl<1, 0b0111, "uabdl",
 def : Pat<(abs (v8i16 (sub (zext (v8i8 V64:$opA)),
                            (zext (v8i8 V64:$opB))))),
           (UABDLv8i8_v8i16 V64:$opA, V64:$opB)>;
-def : Pat<(xor (v8i16 (AArch64vashr v8i16:$src, (i32 15))),
-               (v8i16 (add (sub (zext (v8i8 V64:$opA)),
-                                (zext (v8i8 V64:$opB))),
-                           (AArch64vashr v8i16:$src, (i32 15))))),
-          (UABDLv8i8_v8i16 V64:$opA, V64:$opB)>;
 def : Pat<(abs (v8i16 (sub (zext (extract_high_v16i8 (v16i8 V128:$opA))),
                            (zext (extract_high_v16i8 (v16i8 V128:$opB)))))),
           (UABDLv16i8_v8i16 V128:$opA, V128:$opB)>;
-def : Pat<(xor (v8i16 (AArch64vashr v8i16:$src, (i32 15))),
-               (v8i16 (add (sub (zext (extract_high_v16i8 (v16i8 V128:$opA))),
-                                (zext (extract_high_v16i8 (v16i8 V128:$opB)))),
-                           (AArch64vashr v8i16:$src, (i32 15))))),
-          (UABDLv16i8_v8i16 V128:$opA, V128:$opB)>;
 def : Pat<(abs (v4i32 (sub (zext (v4i16 V64:$opA)),
                            (zext (v4i16 V64:$opB))))),
           (UABDLv4i16_v4i32 V64:$opA, V64:$opB)>;
diff --git a/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp b/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp
index 84b9330ef963..50d8bfa87508 100644
--- a/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp
@@ -2358,6 +2358,11 @@ bool SIGfx12CacheControl::enableVolatileAndOrNonTemporal(
 
   bool Changed = false;
 
+  if (IsNonTemporal) {
+    // Set non-temporal hint for all cache levels.
+    Changed |= setTH(MI, AMDGPU::CPol::TH_NT);
+  }
+
   if (IsVolatile) {
     Changed |= setScope(MI, AMDGPU::CPol::SCOPE_SYS);
 
@@ -2370,11 +2375,6 @@ bool SIGfx12CacheControl::enableVolatileAndOrNonTemporal(
                                       Position::AFTER);
   }
 
-  if (IsNonTemporal) {
-    // Set non-temporal hint for all cache levels.
-    Changed |= setTH(MI, AMDGPU::CPol::TH_NT);
-  }
-
   return Changed;
 }
diff --git a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index a0cec426002b..d46093b9e260 100644
--- a/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -14559,7 +14559,7 @@ static SDValue tryFoldSelectIntoOp(SDNode *N, SelectionDAG &DAG,
   EVT VT = N->getValueType(0);
   SDLoc DL(N);
   SDValue OtherOp = TrueVal.getOperand(1 - OpToFold);
-  EVT OtherOpVT = OtherOp->getValueType(0);
+  EVT OtherOpVT = OtherOp.getValueType();
   SDValue IdentityOperand =
       DAG.getNeutralElement(Opc, DL, OtherOpVT, N->getFlags());
   if (!Commutative)
diff --git a/contrib/llvm-project/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp b/contrib/llvm-project/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
index 833f058253d8..553d338b7790 100644
--- a/contrib/llvm-project/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
@@ -2923,11 +2923,10 @@ bool X86DAGToDAGISel::selectAddr(SDNode *Parent, SDValue N, SDValue &Base,
 }
 
 bool X86DAGToDAGISel::selectMOV64Imm32(SDValue N, SDValue &Imm) {
-  // Cannot use 32 bit constants to reference objects in kernel code model.
-  // Cannot use 32 bit constants to reference objects in large PIC mode since
-  // GOTOFF is 64 bits.
+  // Cannot use 32 bit constants to reference objects in kernel/large code
+  // model.
   if (TM.getCodeModel() == CodeModel::Kernel ||
-      (TM.getCodeModel() == CodeModel::Large && TM.isPositionIndependent()))
+      TM.getCodeModel() == CodeModel::Large)
     return false;
 
   // In static codegen with small code model, we can get the address of a label
diff --git a/contrib/llvm-project/llvm/lib/Target/X86/X86ISelLowering.cpp b/contrib/llvm-project/llvm/lib/Target/X86/X86ISelLowering.cpp
index 96bbd981ff24..201dd8382536 100644
--- a/contrib/llvm-project/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/contrib/llvm-project/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -47038,10 +47038,13 @@ static SDValue combineShiftRightArithmetic(SDNode *N, SelectionDAG &DAG,
   if (SDValue V = combineShiftToPMULH(N, DAG, Subtarget))
     return V;
 
-  // fold (ashr (shl, a, [56,48,32,24,16]), SarConst)
-  // into (shl, (sext (a), [56,48,32,24,16] - SarConst)) or
-  // into (lshr, (sext (a), SarConst - [56,48,32,24,16]))
-  // depending on sign of (SarConst - [56,48,32,24,16])
+  // fold (SRA (SHL X, ShlConst), SraConst)
+  // into (SHL (sext_in_reg X), ShlConst - SraConst)
+  //   or (sext_in_reg X)
+  //   or (SRA (sext_in_reg X), SraConst - ShlConst)
+  // depending on relation between SraConst and ShlConst.
+  // We only do this if (Size - ShlConst) is equal to 8, 16 or 32. That allows
+  // us to do the sext_in_reg from corresponding bit.
 
   // sexts in X86 are MOVs. The MOVs have the same code size
   // as above SHIFTs (only SHIFT on 1 has lower code size).
@@ -47057,29 +47060,29 @@ static SDValue combineShiftRightArithmetic(SDNode *N, SelectionDAG &DAG,
   SDValue N00 = N0.getOperand(0);
   SDValue N01 = N0.getOperand(1);
   APInt ShlConst = N01->getAsAPIntVal();
-  APInt SarConst = N1->getAsAPIntVal();
+  APInt SraConst = N1->getAsAPIntVal();
   EVT CVT = N1.getValueType();
 
-  if (SarConst.isNegative())
+  if (CVT != N01.getValueType())
+    return SDValue();
+  if (SraConst.isNegative())
     return SDValue();
 
   for (MVT SVT : { MVT::i8, MVT::i16, MVT::i32 }) {
     unsigned ShiftSize = SVT.getSizeInBits();
-    // skipping types without corresponding sext/zext and
-    // ShlConst that is not one of [56,48,32,24,16]
+    // Only deal with (Size - ShlConst) being equal to 8, 16 or 32.
     if (ShiftSize >= Size || ShlConst != Size - ShiftSize)
       continue;
     SDLoc DL(N);
     SDValue NN =
         DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, N00, DAG.getValueType(SVT));
-    SarConst = SarConst - (Size - ShiftSize);
-    if (SarConst == 0)
+    if (SraConst.eq(ShlConst))
       return NN;
-    if (SarConst.isNegative())
+    if (SraConst.ult(ShlConst))
       return DAG.getNode(ISD::SHL, DL, VT, NN,
-                         DAG.getConstant(-SarConst, DL, CVT));
+                         DAG.getConstant(ShlConst - SraConst, DL, CVT));
     return DAG.getNode(ISD::SRA, DL, VT, NN,
-                       DAG.getConstant(SarConst, DL, CVT));
+                       DAG.getConstant(SraConst - ShlConst, DL, CVT));
   }
   return SDValue();
 }
diff --git a/contrib/llvm-project/llvm/lib/Target/X86/X86Subtarget.h b/contrib/llvm-project/llvm/lib/Target/X86/X86Subtarget.h
index a458b5f9ec8f..4d55a084b730 100644
--- a/contrib/llvm-project/llvm/lib/Target/X86/X86Subtarget.h
+++ b/contrib/llvm-project/llvm/lib/Target/X86/X86Subtarget.h
@@ -244,7 +244,8 @@ public:
   // TODO: Currently we're always allowing widening on CPUs without VLX,
   // because for many cases we don't have a better option.
   bool canExtendTo512DQ() const {
-    return hasAVX512() && (!hasVLX() || getPreferVectorWidth() >= 512);
+    return hasAVX512() && hasEVEX512() &&
+           (!hasVLX() || getPreferVectorWidth() >= 512);
   }
 
   bool canExtendTo512BW() const {
     return hasBWI() && canExtendTo512DQ();
diff --git a/contrib/llvm-project/llvm/lib/TargetParser/Host.cpp b/contrib/llvm-project/llvm/lib/TargetParser/Host.cpp
index 4466d50458e1..1adef15771fa 100644
--- a/contrib/llvm-project/llvm/lib/TargetParser/Host.cpp
+++ b/contrib/llvm-project/llvm/lib/TargetParser/Host.cpp
@@ -1266,8 +1266,10 @@ static void getAvailableFeatures(unsigned ECX, unsigned EDX, unsigned MaxLeaf,
     setFeature(X86::FEATURE_AVX2);
   if (HasLeaf7 && ((EBX >> 8) & 1))
     setFeature(X86::FEATURE_BMI2);
-  if (HasLeaf7 && ((EBX >> 16) & 1) && HasAVX512Save)
+  if (HasLeaf7 && ((EBX >> 16) & 1) && HasAVX512Save) {
     setFeature(X86::FEATURE_AVX512F);
+    setFeature(X86::FEATURE_EVEX512);
+  }
   if (HasLeaf7 && ((EBX >> 17) & 1) && HasAVX512Save)
     setFeature(X86::FEATURE_AVX512DQ);
   if (HasLeaf7 && ((EBX >> 19) & 1))
@@ -1772,6 +1774,7 @@ bool sys::getHostCPUFeatures(StringMap<bool> &Features) {
   Features["rtm"] = HasLeaf7 && ((EBX >> 11) & 1);
   // AVX512 is only supported if the OS supports the context save for it.
   Features["avx512f"] = HasLeaf7 && ((EBX >> 16) & 1) && HasAVX512Save;
+  Features["evex512"] = Features["avx512f"];
   Features["avx512dq"] = HasLeaf7 && ((EBX >> 17) & 1) && HasAVX512Save;
   Features["rdseed"] = HasLeaf7 && ((EBX >> 18) & 1);
   Features["adx"] = HasLeaf7 && ((EBX >> 19) & 1);
diff --git a/contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp b/contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
index 9f220ec003ec..8cc7901cbac7 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
@@ -2606,7 +2606,7 @@ static Instruction *foldSelectWithSRem(SelectInst &SI, InstCombinerImpl &IC,
   //   %cnd = icmp slt i32 %rem, 0
   //   %add = add i32 %rem, %n
   //   %sel = select i1 %cnd, i32 %add, i32 %rem
-  if (match(TrueVal, m_Add(m_Value(RemRes), m_Value(Remainder))) &&
+  if (match(TrueVal, m_Add(m_Specific(RemRes), m_Value(Remainder))) &&
       match(RemRes, m_SRem(m_Value(Op), m_Specific(Remainder))) &&
       IC.isKnownToBeAPowerOfTwo(Remainder, /*OrZero*/ true) &&
       FalseVal == RemRes)
diff --git a/contrib/llvm-project/llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp b/contrib/llvm-project/llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp
index 9df28747570c..104e8ceb7967 100644
--- a/contrib/llvm-project/llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp
+++ b/contrib/llvm-project/llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp
@@ -279,6 +279,9 @@ bool InductiveRangeCheck::parseRangeCheckICmp(Loop *L, ICmpInst *ICI,
   Value *LHS = ICI->getOperand(0);
   Value *RHS = ICI->getOperand(1);
 
+  if (!LHS->getType()->isIntegerTy())
+    return false;
+
   // Canonicalize to the `Index Pred Invariant` comparison
   if (IsLoopInvariant(LHS)) {
     std::swap(LHS, RHS);
```
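The LoadStoreOpt change is at heart a sentinel-elimination fix: an `Offset` defaulting to 0 could not distinguish "offset known to be zero" from "no constant offset known", so stores with unknown offsets could be treated as adjacent and merged incorrectly. Below is a minimal standalone sketch of the `std::optional` pattern (`Register` is stubbed as `unsigned`; this is not the actual LLVM class):

```cpp
#include <cstdint>
#include <iostream>
#include <optional>

using Register = unsigned; // stub standing in for llvm::Register

class BaseIndexOffset {
  Register BaseReg = 0;
  Register IndexReg = 0;
  std::optional<int64_t> Offset; // disengaged == no constant offset known

public:
  Register getBase() const { return BaseReg; }
  void setBase(Register R) { BaseReg = R; }
  void setIndex(Register R) { IndexReg = R; }
  void setOffset(std::optional<int64_t> O) { Offset = O; }
  bool hasValidOffset() const { return Offset.has_value(); }
  int64_t getOffset() const { return *Offset; }
};

int main() {
  BaseIndexOffset A, B;
  A.setOffset(0); // proven: base + 0
  // B's offset is deliberately left unset: base + <unknown>

  // Mirrors the fixed alias/merge checks: only reason about the distance
  // between two pointers when both offsets are actually known.
  if (A.hasValidOffset() && B.hasValidOffset())
    std::cout << "ptrdiff = " << (B.getOffset() - A.getOffset()) << '\n';
  else
    std::cout << "offsets not comparable; conservatively assume aliasing\n";
}
```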
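The DAGCombiner fix replaces a plain `APInt` addition with `uadd_ov` because `APInt` arithmetic wraps at its bit width: with a narrow shift-amount type, a wrapped sum can slip past the `uge(BitWidth)` test that is supposed to reject the fold. A small illustration, assuming LLVM's support headers and libraries are available to build against:

```cpp
// Build (assumption): clang++ demo.cpp $(llvm-config --cxxflags --ldflags --libs support)
#include "llvm/ADT/APInt.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

int main() {
  // An 8-bit shift-amount type, as for a vector-of-i8 shift.
  APInt C1(8, 250), C2(8, 10);

  // The old check computed (C1 + C2).uge(BitWidth), but APInt addition
  // wraps: 250 + 10 == 4 (mod 256), so a sum that should fail a ">= 16"
  // test passes it. uadd_ov reports the wrap explicitly.
  bool Overflow = false;
  APInt Sum = C1.uadd_ov(C2, Overflow);

  outs() << "sum = " << Sum << ", overflow = " << (Overflow ? "yes" : "no")
         << "\n"; // prints: sum = 4, overflow = yes -> reject the fold
}
```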
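The X86 `combineShiftRightArithmetic` rewrite stops mutating `SarConst` in place and instead compares `SraConst` against `ShlConst` directly, but the underlying identity is unchanged: `(X << ShlConst) >> SraConst` (arithmetic shift) equals a sign extension from the low `Size - ShlConst` bits followed by one residual shift. A plain C++ spot-check of the three cases for i64 with `ShlConst = 56`, i.e. sign extension from i8 (signed `>>` is assumed to be an arithmetic shift, as on all mainstream compilers):

```cpp
#include <cassert>
#include <cstdint>

// sext_in_reg from i8 within an i64, spelled in plain C++.
int64_t sext_in_reg_i8(int64_t X) { return static_cast<int8_t>(X); }

int main() {
  const int64_t X = 0x1234567890ABCDEF; // low byte 0xEF == -17 as i8

  // The left shift is done on uint64_t and converted back, which is
  // well-defined on two's-complement targets.
  const int64_t Shl56 = static_cast<int64_t>(static_cast<uint64_t>(X) << 56);

  // SraConst == ShlConst: the whole expression is just sext_in_reg.
  assert((Shl56 >> 56) == sext_in_reg_i8(X));
  // SraConst < ShlConst: SHL the extended value by ShlConst - SraConst.
  assert((Shl56 >> 50) == sext_in_reg_i8(X) * (int64_t{1} << 6)); // i.e. << 6
  // SraConst > ShlConst: SRA the extended value by SraConst - ShlConst.
  assert((Shl56 >> 61) == (sext_in_reg_i8(X) >> 5));
  return 0;
}
```

The new `CVT != N01.getValueType()` guard additionally bails out when the two shift-amount constants have different types, so the `ShlConst - SraConst` arithmetic is only done on like-typed values.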
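The InstCombineSelect one-token fix (`m_Value` to `m_Specific`) is a classic `PatternMatch` pitfall: `m_Value(RemRes)` re-binds `RemRes` to whatever operand it sees, while `m_Specific(RemRes)` requires the operand to be the value already captured. A small demo of the difference, assuming an LLVM development setup (the function `f` and its IR here are made up for illustration):

```cpp
// Build (assumption): clang++ demo.cpp $(llvm-config --cxxflags --ldflags --libs core)
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;
using namespace llvm::PatternMatch;

int main() {
  LLVMContext Ctx;
  Module M("demo", Ctx);
  IRBuilder<> B(Ctx);
  auto *FT = FunctionType::get(B.getInt32Ty(),
                               {B.getInt32Ty(), B.getInt32Ty()}, false);
  Function *F = Function::Create(FT, Function::ExternalLinkage, "f", M);
  B.SetInsertPoint(BasicBlock::Create(Ctx, "entry", F));

  Value *A = F->getArg(0), *N = F->getArg(1);
  Value *RemRes = B.CreateSRem(A, N, "rem");
  Value *Add = B.CreateAdd(A, N, "add"); // %add = add %a, %n -- NOT %rem + %n

  // Correct pattern: the add operand must be exactly the srem seen earlier.
  Value *Other = nullptr;
  bool Strict = match(Add, m_Add(m_Specific(RemRes), m_Value(Other)));
  // Buggy pre-fix pattern: m_Value() happily re-binds RemRes to %a.
  bool Loose = match(Add, m_Add(m_Value(RemRes), m_Value(Other)));

  outs() << "strict: " << (Strict ? "match" : "no match")
         << ", loose: " << (Loose ? "match" : "no match") << "\n";
  // Prints "strict: no match, loose: match".
}
```

With the loose pattern, the fold in `foldSelectWithSRem` could fire on a select whose true arm was an unrelated add, which is exactly the miscompile class the upstream one-liner closes.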