Diffstat (limited to 'lib/Target/X86/X86FastISel.cpp')
-rw-r--r--  lib/Target/X86/X86FastISel.cpp | 611
1 file changed, 328 insertions(+), 283 deletions(-)
diff --git a/lib/Target/X86/X86FastISel.cpp b/lib/Target/X86/X86FastISel.cpp
index ae3eea4943d7..5d71eac7c05a 100644
--- a/lib/Target/X86/X86FastISel.cpp
+++ b/lib/Target/X86/X86FastISel.cpp
@@ -64,7 +64,7 @@ public:
     X86ScalarSSEf32 = Subtarget->hasSSE1();
   }
 
-  bool TargetSelectInstruction(const Instruction *I) override;
+  bool fastSelectInstruction(const Instruction *I) override;
 
   /// \brief The specified machine instr operand is a vreg, and that
   /// vreg is being provided by the specified load instruction. If possible,
@@ -73,9 +73,9 @@ public:
   bool tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
                            const LoadInst *LI) override;
 
-  bool FastLowerArguments() override;
-  bool FastLowerCall(CallLoweringInfo &CLI) override;
-  bool FastLowerIntrinsicCall(const IntrinsicInst *II) override;
+  bool fastLowerArguments() override;
+  bool fastLowerCall(CallLoweringInfo &CLI) override;
+  bool fastLowerIntrinsicCall(const IntrinsicInst *II) override;
 
 #include "X86GenFastISel.inc"
 
@@ -127,7 +127,7 @@ private:
   bool X86SelectFPTrunc(const Instruction *I);
 
   const X86InstrInfo *getInstrInfo() const {
-    return getTargetMachine()->getInstrInfo();
+    return getTargetMachine()->getSubtargetImpl()->getInstrInfo();
   }
   const X86TargetMachine *getTargetMachine() const {
     return static_cast<const X86TargetMachine *>(&TM);
@@ -135,11 +135,14 @@ private:
 
   bool handleConstantAddresses(const Value *V, X86AddressMode &AM);
 
-  unsigned TargetMaterializeConstant(const Constant *C) override;
+  unsigned X86MaterializeInt(const ConstantInt *CI, MVT VT);
+  unsigned X86MaterializeFP(const ConstantFP *CFP, MVT VT);
+  unsigned X86MaterializeGV(const GlobalValue *GV, MVT VT);
+  unsigned fastMaterializeConstant(const Constant *C) override;
 
-  unsigned TargetMaterializeAlloca(const AllocaInst *C) override;
+  unsigned fastMaterializeAlloca(const AllocaInst *C) override;
 
-  unsigned TargetMaterializeFloatZero(const ConstantFP *CF) override;
+  unsigned fastMaterializeFloatZero(const ConstantFP *CF) override;
 
   /// isScalarFPTypeInSSEReg - Return true if the specified scalar FP type is
   /// computed in an SSE register, not on the X87 floating point stack.
@@ -161,46 +164,6 @@ private:
 
 } // end anonymous namespace.
 
-static CmpInst::Predicate optimizeCmpPredicate(const CmpInst *CI) {
-  // If both operands are the same, then try to optimize or fold the cmp.
-  CmpInst::Predicate Predicate = CI->getPredicate();
-  if (CI->getOperand(0) != CI->getOperand(1))
-    return Predicate;
-
-  switch (Predicate) {
-  default: llvm_unreachable("Invalid predicate!");
-  case CmpInst::FCMP_FALSE: Predicate = CmpInst::FCMP_FALSE; break;
-  case CmpInst::FCMP_OEQ: Predicate = CmpInst::FCMP_ORD; break;
-  case CmpInst::FCMP_OGT: Predicate = CmpInst::FCMP_FALSE; break;
-  case CmpInst::FCMP_OGE: Predicate = CmpInst::FCMP_ORD; break;
-  case CmpInst::FCMP_OLT: Predicate = CmpInst::FCMP_FALSE; break;
-  case CmpInst::FCMP_OLE: Predicate = CmpInst::FCMP_ORD; break;
-  case CmpInst::FCMP_ONE: Predicate = CmpInst::FCMP_FALSE; break;
-  case CmpInst::FCMP_ORD: Predicate = CmpInst::FCMP_ORD; break;
-  case CmpInst::FCMP_UNO: Predicate = CmpInst::FCMP_UNO; break;
-  case CmpInst::FCMP_UEQ: Predicate = CmpInst::FCMP_TRUE; break;
-  case CmpInst::FCMP_UGT: Predicate = CmpInst::FCMP_UNO; break;
-  case CmpInst::FCMP_UGE: Predicate = CmpInst::FCMP_TRUE; break;
-  case CmpInst::FCMP_ULT: Predicate = CmpInst::FCMP_UNO; break;
-  case CmpInst::FCMP_ULE: Predicate = CmpInst::FCMP_TRUE; break;
-  case CmpInst::FCMP_UNE: Predicate = CmpInst::FCMP_UNO; break;
-  case CmpInst::FCMP_TRUE: Predicate = CmpInst::FCMP_TRUE; break;
-
-  case CmpInst::ICMP_EQ: Predicate = CmpInst::FCMP_TRUE; break;
-  case CmpInst::ICMP_NE: Predicate = CmpInst::FCMP_FALSE; break;
-  case CmpInst::ICMP_UGT: Predicate = CmpInst::FCMP_FALSE; break;
-  case CmpInst::ICMP_UGE: Predicate = CmpInst::FCMP_TRUE; break;
-  case CmpInst::ICMP_ULT: Predicate = CmpInst::FCMP_FALSE; break;
-  case CmpInst::ICMP_ULE: Predicate = CmpInst::FCMP_TRUE; break;
-  case CmpInst::ICMP_SGT: Predicate = CmpInst::FCMP_FALSE; break;
-  case CmpInst::ICMP_SGE: Predicate = CmpInst::FCMP_TRUE; break;
-  case CmpInst::ICMP_SLT: Predicate = CmpInst::FCMP_FALSE; break;
-  case CmpInst::ICMP_SLE: Predicate = CmpInst::FCMP_TRUE; break;
-  }
-
-  return Predicate;
-}
-
 static std::pair<X86::CondCode, bool>
 getX86ConditionCode(CmpInst::Predicate Predicate) {
   X86::CondCode CC = X86::COND_INVALID;
@@ -529,7 +492,7 @@ bool X86FastISel::X86FastEmitStore(EVT VT, const Value *Val,
 bool X86FastISel::X86FastEmitExtend(ISD::NodeType Opc, EVT DstVT,
                                     unsigned Src, EVT SrcVT,
                                     unsigned &ResultReg) {
-  unsigned RR = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Opc,
+  unsigned RR = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Opc,
                            Src, /*TODO: Kill=*/false);
   if (RR == 0)
     return false;
@@ -581,7 +544,7 @@ bool X86FastISel::handleConstantAddresses(const Value *V, X86AddressMode &AM) {
 
     // Ok, we need to do a load from a stub. If we've already loaded from
     // this stub, reuse the loaded pointer, otherwise emit the load now.
-    DenseMap<const Value*, unsigned>::iterator I = LocalValueMap.find(V);
+    DenseMap<const Value *, unsigned>::iterator I = LocalValueMap.find(V);
     unsigned LoadReg;
     if (I != LocalValueMap.end() && I->second != 0) {
       LoadReg = I->second;
@@ -692,7 +655,7 @@ redo_gep:
   case Instruction::Alloca: {
     // Do static allocas.
     const AllocaInst *A = cast<AllocaInst>(V);
-    DenseMap<const AllocaInst*, int>::iterator SI =
+    DenseMap<const AllocaInst *, int>::iterator SI =
       FuncInfo.StaticAllocaMap.find(A);
     if (SI != FuncInfo.StaticAllocaMap.end()) {
       AM.BaseType = X86AddressMode::FrameIndexBase;
@@ -940,7 +903,7 @@ bool X86FastISel::X86SelectStore(const Instruction *I) {
 
   unsigned Alignment = S->getAlignment();
   unsigned ABIAlignment = DL.getABITypeAlignment(Val->getType());
-  if (Alignment == 0)  // Ensure that codegen never sees alignment 0
+  if (Alignment == 0) // Ensure that codegen never sees alignment 0
     Alignment = ABIAlignment;
   bool Aligned = Alignment >= ABIAlignment;
 
@@ -993,8 +956,7 @@ bool X86FastISel::X86SelectRet(const Instruction *I) {
 
   // Analyze operands of the call, assigning locations to each operand.
   SmallVector<CCValAssign, 16> ValLocs;
-  CCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, TM, ValLocs,
-                 I->getContext());
+  CCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, ValLocs, I->getContext());
   CCInfo.AnalyzeReturn(Outs, RetCC_X86);
 
   const Value *RV = Ret->getOperand(0);
@@ -1017,7 +979,7 @@ bool X86FastISel::X86SelectRet(const Instruction *I) {
 
     // The calling-convention tables for x87 returns don't tell
    // the whole story.
-    if (VA.getLocReg() == X86::ST0 || VA.getLocReg() == X86::ST1)
+    if (VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1)
       return false;
 
     unsigned SrcReg = Reg + VA.getValNo();
@@ -1036,23 +998,23 @@ bool X86FastISel::X86SelectRet(const Instruction *I) {
       if (SrcVT == MVT::i1) {
         if (Outs[0].Flags.isSExt())
           return false;
-        SrcReg = FastEmitZExtFromI1(MVT::i8, SrcReg, /*TODO: Kill=*/false);
+        SrcReg = fastEmitZExtFromI1(MVT::i8, SrcReg, /*TODO: Kill=*/false);
         SrcVT = MVT::i8;
       }
       unsigned Op = Outs[0].Flags.isZExt() ? ISD::ZERO_EXTEND :
                                              ISD::SIGN_EXTEND;
-      SrcReg = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Op,
+      SrcReg = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Op,
                           SrcReg, /*TODO: Kill=*/false);
     }
 
     // Make the copy.
     unsigned DstReg = VA.getLocReg();
-    const TargetRegisterClass* SrcRC = MRI.getRegClass(SrcReg);
+    const TargetRegisterClass *SrcRC = MRI.getRegClass(SrcReg);
     // Avoid a cross-class copy. This is very unlikely.
     if (!SrcRC->contains(DstReg))
       return false;
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY),
-            DstReg).addReg(SrcReg);
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+            TII.get(TargetOpcode::COPY), DstReg).addReg(SrcReg);
 
     // Add register to return instruction.
     RetRegs.push_back(VA.getLocReg());
@@ -1068,14 +1030,15 @@ bool X86FastISel::X86SelectRet(const Instruction *I) {
     assert(Reg &&
            "SRetReturnReg should have been set in LowerFormalArguments()!");
     unsigned RetReg = Subtarget->is64Bit() ? X86::RAX : X86::EAX;
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY),
-            RetReg).addReg(Reg);
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+            TII.get(TargetOpcode::COPY), RetReg).addReg(Reg);
     RetRegs.push_back(RetReg);
   }
 
   // Now emit the RET.
   MachineInstrBuilder MIB =
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Subtarget->is64Bit() ? X86::RETQ : X86::RETL));
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+            TII.get(Subtarget->is64Bit() ? X86::RETQ : X86::RETL));
   for (unsigned i = 0, e = RetRegs.size(); i != e; ++i)
     MIB.addReg(RetRegs[i], RegState::Implicit);
   return true;
@@ -1104,7 +1067,7 @@ bool X86FastISel::X86SelectLoad(const Instruction *I) {
   if (!X86FastEmitLoad(VT, AM, createMachineMemOperandFor(LI), ResultReg))
     return false;
 
-  UpdateValueMap(I, ResultReg);
+  updateValueMap(I, ResultReg);
   return true;
 }
 
@@ -1194,7 +1157,7 @@ bool X86FastISel::X86SelectCmp(const Instruction *I) {
       ResultReg = createResultReg(&X86::GR32RegClass);
       BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOV32r0),
               ResultReg);
-      ResultReg = FastEmitInst_extractsubreg(MVT::i8, ResultReg, /*Kill=*/true,
+      ResultReg = fastEmitInst_extractsubreg(MVT::i8, ResultReg, /*Kill=*/true,
                                              X86::sub_8bit);
       if (!ResultReg)
         return false;
@@ -1209,7 +1172,7 @@ bool X86FastISel::X86SelectCmp(const Instruction *I) {
     }
 
     if (ResultReg) {
-      UpdateValueMap(I, ResultReg);
+      updateValueMap(I, ResultReg);
       return true;
     }
 
@@ -1250,7 +1213,7 @@ bool X86FastISel::X86SelectCmp(const Instruction *I) {
                                            FlagReg2);
     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(SETFOpc[2]),
             ResultReg).addReg(FlagReg1).addReg(FlagReg2);
-    UpdateValueMap(I, ResultReg);
+    updateValueMap(I, ResultReg);
     return true;
   }
 
@@ -1268,7 +1231,7 @@ bool X86FastISel::X86SelectCmp(const Instruction *I) {
     return false;
 
   BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg);
-  UpdateValueMap(I, ResultReg);
+  updateValueMap(I, ResultReg);
   return true;
 }
 
@@ -1285,7 +1248,7 @@ bool X86FastISel::X86SelectZExt(const Instruction *I) {
   MVT SrcVT = TLI.getSimpleValueType(I->getOperand(0)->getType());
   if (SrcVT.SimpleTy == MVT::i1) {
     // Set the high bits to zero.
-    ResultReg = FastEmitZExtFromI1(MVT::i8, ResultReg, /*TODO: Kill=*/false);
+    ResultReg = fastEmitZExtFromI1(MVT::i8, ResultReg, /*TODO: Kill=*/false);
     SrcVT = MVT::i8;
     if (ResultReg == 0)
       return false;
@@ -1312,17 +1275,16 @@ bool X86FastISel::X86SelectZExt(const Instruction *I) {
             ResultReg)
       .addImm(0).addReg(Result32).addImm(X86::sub_32bit);
   } else if (DstVT != MVT::i8) {
-    ResultReg = FastEmit_r(MVT::i8, DstVT.getSimpleVT(), ISD::ZERO_EXTEND,
+    ResultReg = fastEmit_r(MVT::i8, DstVT.getSimpleVT(), ISD::ZERO_EXTEND,
                            ResultReg, /*Kill=*/true);
     if (ResultReg == 0)
       return false;
   }
 
-  UpdateValueMap(I, ResultReg);
+  updateValueMap(I, ResultReg);
   return true;
 }
 
-
 bool X86FastISel::X86SelectBranch(const Instruction *I) {
   // Unconditional branches are selected by tablegen-generated code.
   // Handle a conditional branch.
@@ -1342,8 +1304,8 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) {
     CmpInst::Predicate Predicate = optimizeCmpPredicate(CI);
     switch (Predicate) {
     default: break;
-    case CmpInst::FCMP_FALSE: FastEmitBranch(FalseMBB, DbgLoc); return true;
-    case CmpInst::FCMP_TRUE: FastEmitBranch(TrueMBB, DbgLoc); return true;
+    case CmpInst::FCMP_FALSE: fastEmitBranch(FalseMBB, DbgLoc); return true;
+    case CmpInst::FCMP_TRUE: fastEmitBranch(TrueMBB, DbgLoc); return true;
    }
 
     const Value *CmpLHS = CI->getOperand(0);
@@ -1400,7 +1362,7 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) {
     // X86 requires a second branch to handle UNE (and OEQ, which is mapped
     // to UNE above).
     if (NeedExtraBranch) {
-      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::JP_4))
+      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::JP_1))
         .addMBB(TrueMBB);
     }
 
@@ -1413,7 +1375,7 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) {
 
     // Emits an unconditional branch to the FalseBB, obtains the branch
     // weight, and adds it to the successor list.
-    FastEmitBranch(FalseMBB, DbgLoc);
+    fastEmitBranch(FalseMBB, DbgLoc);
 
     return true;
   }
@@ -1437,15 +1399,15 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) {
       BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TestOpc))
         .addReg(OpReg).addImm(1);
 
-      unsigned JmpOpc = X86::JNE_4;
+      unsigned JmpOpc = X86::JNE_1;
       if (FuncInfo.MBB->isLayoutSuccessor(TrueMBB)) {
         std::swap(TrueMBB, FalseMBB);
-        JmpOpc = X86::JE_4;
+        JmpOpc = X86::JE_1;
       }
 
       BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(JmpOpc))
         .addMBB(TrueMBB);
-      FastEmitBranch(FalseMBB, DbgLoc);
+      fastEmitBranch(FalseMBB, DbgLoc);
       uint32_t BranchWeight = 0;
       if (FuncInfo.BPI)
         BranchWeight = FuncInfo.BPI->getEdgeWeight(BI->getParent(),
@@ -1465,7 +1427,7 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) {
     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(BranchOpc))
       .addMBB(TrueMBB);
-    FastEmitBranch(FalseMBB, DbgLoc);
+    fastEmitBranch(FalseMBB, DbgLoc);
 
     uint32_t BranchWeight = 0;
     if (FuncInfo.BPI)
@@ -1482,9 +1444,9 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) {
   BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::TEST8ri))
     .addReg(OpReg).addImm(1);
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::JNE_4))
+  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::JNE_1))
     .addMBB(TrueMBB);
-  FastEmitBranch(FalseMBB, DbgLoc);
+  fastEmitBranch(FalseMBB, DbgLoc);
 
   uint32_t BranchWeight = 0;
   if (FuncInfo.BPI)
     BranchWeight = FuncInfo.BPI->getEdgeWeight(BI->getParent(),
@@ -1558,7 +1520,7 @@ bool X86FastISel::X86SelectShift(const Instruction *I) {
   unsigned ResultReg = createResultReg(RC);
   BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(OpReg), ResultReg)
     .addReg(Op0Reg);
-  UpdateValueMap(I, ResultReg);
+  updateValueMap(I, ResultReg);
   return true;
 }
 
@@ -1670,8 +1632,8 @@ bool X86FastISel::X86SelectDivRem(const Instruction *I) {
               TII.get(X86::MOV32r0), Zero32);
 
       // Copy the zero into the appropriate sub/super/identical physical
-      // register. Unfortunately the operations needed are not uniform enough to
-      // fit neatly into the table above.
+      // register. Unfortunately the operations needed are not uniform enough
+      // to fit neatly into the table above.
       if (VT.SimpleTy == MVT::i16) {
         BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                 TII.get(Copy), TypeEntry.HighInReg)
@@ -1712,7 +1674,7 @@ bool X86FastISel::X86SelectDivRem(const Instruction *I) {
               ResultSuperReg).addReg(SourceSuperReg).addImm(8);
 
       // Now reference the 8-bit subreg of the result.
-      ResultReg = FastEmitInst_extractsubreg(MVT::i8, ResultSuperReg,
+      ResultReg = fastEmitInst_extractsubreg(MVT::i8, ResultSuperReg,
                                              /*Kill=*/true, X86::sub_8bit);
     }
     // Copy the result out of the physreg if we haven't already.
@@ -1721,7 +1683,7 @@ bool X86FastISel::X86SelectDivRem(const Instruction *I) {
     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Copy), ResultReg)
       .addReg(OpEntry.DivRemResultReg);
   }
-  UpdateValueMap(I, ResultReg);
+  updateValueMap(I, ResultReg);
   return true;
 }
 
@@ -1779,7 +1741,7 @@ bool X86FastISel::X86FastEmitCMoveSelect(MVT RetVT, const Instruction *I) {
     EVT CmpVT = TLI.getValueType(CmpLHS->getType());
     // Emit a compare of the LHS and RHS, setting the flags.
     if (!X86FastEmitCompare(CmpLHS, CmpRHS, CmpVT))
-     return false;
+      return false;
 
     if (SETFOpc) {
       unsigned FlagReg1 = createResultReg(&X86::GR8RegClass);
@@ -1837,9 +1799,9 @@ bool X86FastISel::X86FastEmitCMoveSelect(MVT RetVT, const Instruction *I) {
     return false;
 
   unsigned Opc = X86::getCMovFromCond(CC, RC->getSize());
-  unsigned ResultReg = FastEmitInst_rr(Opc, RC, RHSReg, RHSIsKill,
+  unsigned ResultReg = fastEmitInst_rr(Opc, RC, RHSReg, RHSIsKill,
                                        LHSReg, LHSIsKill);
-  UpdateValueMap(I, ResultReg);
+  updateValueMap(I, ResultReg);
   return true;
 }
 
@@ -1858,7 +1820,7 @@ bool X86FastISel::X86FastEmitSSESelect(MVT RetVT, const Instruction *I) {
 
   if (I->getType() != CI->getOperand(0)->getType() ||
       !((Subtarget->hasSSE1() && RetVT == MVT::f32) ||
-        (Subtarget->hasSSE2() && RetVT == MVT::f64) ))
+        (Subtarget->hasSSE2() && RetVT == MVT::f64)))
     return false;
 
   const Value *CmpLHS = CI->getOperand(0);
@@ -1917,15 +1879,15 @@ bool X86FastISel::X86FastEmitSSESelect(MVT RetVT, const Instruction *I) {
     return false;
 
   const TargetRegisterClass *RC = TLI.getRegClassFor(RetVT);
-  unsigned CmpReg = FastEmitInst_rri(Opc[0], RC, CmpLHSReg, CmpLHSIsKill,
+  unsigned CmpReg = fastEmitInst_rri(Opc[0], RC, CmpLHSReg, CmpLHSIsKill,
                                      CmpRHSReg, CmpRHSIsKill, CC);
-  unsigned AndReg = FastEmitInst_rr(Opc[1], RC, CmpReg, /*IsKill=*/false,
+  unsigned AndReg = fastEmitInst_rr(Opc[1], RC, CmpReg, /*IsKill=*/false,
                                     LHSReg, LHSIsKill);
-  unsigned AndNReg = FastEmitInst_rr(Opc[2], RC, CmpReg, /*IsKill=*/true,
+  unsigned AndNReg = fastEmitInst_rr(Opc[2], RC, CmpReg, /*IsKill=*/true,
                                      RHSReg, RHSIsKill);
-  unsigned ResultReg = FastEmitInst_rr(Opc[3], RC, AndNReg, /*IsKill=*/true,
+  unsigned ResultReg = fastEmitInst_rr(Opc[3], RC, AndNReg, /*IsKill=*/true,
                                        AndReg, /*IsKill=*/true);
-  UpdateValueMap(I, ResultReg);
+  updateValueMap(I, ResultReg);
   return true;
 }
 
@@ -1988,8 +1950,8 @@ bool X86FastISel::X86FastEmitPseudoSelect(MVT RetVT, const Instruction *I) {
 
   const TargetRegisterClass *RC = TLI.getRegClassFor(RetVT);
   unsigned ResultReg =
-    FastEmitInst_rri(Opc, RC, RHSReg, RHSIsKill, LHSReg, LHSIsKill, CC);
-  UpdateValueMap(I, ResultReg);
+    fastEmitInst_rri(Opc, RC, RHSReg, RHSIsKill, LHSReg, LHSIsKill, CC);
+  updateValueMap(I, ResultReg);
   return true;
 }
 
@@ -2018,7 +1980,7 @@ bool X86FastISel::X86SelectSelect(const Instruction *I) {
       BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
               TII.get(TargetOpcode::COPY), ResultReg)
         .addReg(OpReg, getKillRegState(OpIsKill));
-      UpdateValueMap(I, ResultReg);
+      updateValueMap(I, ResultReg);
       return true;
     }
   }
@@ -2051,7 +2013,7 @@ bool X86FastISel::X86SelectFPExt(const Instruction *I) {
       BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
               TII.get(X86::CVTSS2SDrr), ResultReg)
         .addReg(OpReg);
-      UpdateValueMap(I, ResultReg);
+      updateValueMap(I, ResultReg);
       return true;
     }
   }
@@ -2070,7 +2032,7 @@ bool X86FastISel::X86SelectFPTrunc(const Instruction *I) {
       BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(X86::CVTSD2SSrr), ResultReg)
         .addReg(OpReg);
-      UpdateValueMap(I, ResultReg);
+      updateValueMap(I, ResultReg);
       return true;
     }
   }
@@ -2096,30 +2058,29 @@ bool X86FastISel::X86SelectTrunc(const Instruction *I) {
 
   if (SrcVT == MVT::i8) {
     // Truncate from i8 to i1; no code needed.
-    UpdateValueMap(I, InputReg);
+    updateValueMap(I, InputReg);
     return true;
   }
 
   if (!Subtarget->is64Bit()) {
     // If we're on x86-32; we can't extract an i8 from a general register.
     // First issue a copy to GR16_ABCD or GR32_ABCD.
-    const TargetRegisterClass *CopyRC = (SrcVT == MVT::i16) ?
-      (const TargetRegisterClass*)&X86::GR16_ABCDRegClass :
-      (const TargetRegisterClass*)&X86::GR32_ABCDRegClass;
+    const TargetRegisterClass *CopyRC =
+        (SrcVT == MVT::i16) ? &X86::GR16_ABCDRegClass : &X86::GR32_ABCDRegClass;
     unsigned CopyReg = createResultReg(CopyRC);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY),
-            CopyReg).addReg(InputReg);
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+            TII.get(TargetOpcode::COPY), CopyReg).addReg(InputReg);
     InputReg = CopyReg;
   }
 
   // Issue an extract_subreg.
-  unsigned ResultReg = FastEmitInst_extractsubreg(MVT::i8,
+  unsigned ResultReg = fastEmitInst_extractsubreg(MVT::i8,
                                                   InputReg, /*Kill=*/true,
                                                   X86::sub_8bit);
   if (!ResultReg)
     return false;
 
-  UpdateValueMap(I, ResultReg);
+  updateValueMap(I, ResultReg);
   return true;
 }
 
@@ -2145,9 +2106,8 @@ bool X86FastISel::TryEmitSmallMemcpy(X86AddressMode DestAM,
       VT = MVT::i32;
     else if (Len >= 2)
       VT = MVT::i16;
-    else {
+    else
       VT = MVT::i8;
-    }
 
     unsigned Reg;
     bool RV = X86FastEmitLoad(VT, SrcAM, nullptr, Reg);
@@ -2163,19 +2123,7 @@ bool X86FastISel::TryEmitSmallMemcpy(X86AddressMode DestAM,
   return true;
 }
 
-static bool isCommutativeIntrinsic(IntrinsicInst const *II) {
-  switch (II->getIntrinsicID()) {
-  case Intrinsic::sadd_with_overflow:
-  case Intrinsic::uadd_with_overflow:
-  case Intrinsic::smul_with_overflow:
-  case Intrinsic::umul_with_overflow:
-    return true;
-  default:
-    return false;
-  }
-}
-
-bool X86FastISel::FastLowerIntrinsicCall(const IntrinsicInst *II) {
+bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
   // FIXME: Handle more intrinsics.
   switch (II->getIntrinsicID()) {
   default: return false;
@@ -2195,14 +2143,14 @@ bool X86FastISel::FastLowerIntrinsicCall(const IntrinsicInst *II) {
     case MVT::i64: Opc = X86::MOV64rm; RC = &X86::GR64RegClass; break;
     }
 
-    // This needs to be set before we call getFrameRegister, otherwise we get
-    // the wrong frame register.
+    // This needs to be set before we call getPtrSizedFrameRegister, otherwise
+    // we get the wrong frame register.
    MachineFrameInfo *MFI = FuncInfo.MF->getFrameInfo();
     MFI->setFrameAddressIsTaken(true);
 
-    const X86RegisterInfo *RegInfo =
-      static_cast<const X86RegisterInfo*>(TM.getRegisterInfo());
-    unsigned FrameReg = RegInfo->getFrameRegister(*(FuncInfo.MF));
+    const X86RegisterInfo *RegInfo = static_cast<const X86RegisterInfo *>(
+        TM.getSubtargetImpl()->getRegisterInfo());
+    unsigned FrameReg = RegInfo->getPtrSizedFrameRegister(*(FuncInfo.MF));
     assert(((FrameReg == X86::RBP && VT == MVT::i64) ||
             (FrameReg == X86::EBP && VT == MVT::i32)) &&
            "Invalid Frame Register!");
@@ -2228,7 +2176,7 @@ bool X86FastISel::FastLowerIntrinsicCall(const IntrinsicInst *II) {
       SrcReg = DestReg;
     }
 
-    UpdateValueMap(II, SrcReg);
+    updateValueMap(II, SrcReg);
     return true;
   }
   case Intrinsic::memcpy: {
@@ -2258,7 +2206,7 @@ bool X86FastISel::FastLowerIntrinsicCall(const IntrinsicInst *II) {
     if (MCI->getSourceAddressSpace() > 255 || MCI->getDestAddressSpace() > 255)
       return false;
 
-    return LowerCallTo(II, "memcpy", II->getNumArgOperands() - 2);
+    return lowerCallTo(II, "memcpy", II->getNumArgOperands() - 2);
   }
   case Intrinsic::memset: {
     const MemSetInst *MSI = cast<MemSetInst>(II);
@@ -2273,7 +2221,7 @@ bool X86FastISel::FastLowerIntrinsicCall(const IntrinsicInst *II) {
     if (MSI->getDestAddressSpace() > 255)
       return false;
 
-    return LowerCallTo(II, "memset", II->getNumArgOperands() - 2);
+    return lowerCallTo(II, "memset", II->getNumArgOperands() - 2);
   }
   case Intrinsic::stackprotector: {
     // Emit code to store the stack guard onto the stack.
@@ -2299,8 +2247,10 @@ bool X86FastISel::FastLowerIntrinsicCall(const IntrinsicInst *II) {
     const MCInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
     // FIXME may need to add RegState::Debug to any registers produced,
     // although ESP/EBP should be the only ones at the moment.
-    addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II), AM).
-      addImm(0).addMetadata(DI->getVariable());
+    addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II), AM)
+        .addImm(0)
+        .addMetadata(DI->getVariable())
+        .addMetadata(DI->getExpression());
     return true;
   }
   case Intrinsic::trap: {
@@ -2317,7 +2267,7 @@ bool X86FastISel::FastLowerIntrinsicCall(const IntrinsicInst *II) {
     if (!isTypeLegal(RetTy, VT))
       return false;
 
-    // Unfortunately we can't use FastEmit_r, because the AVX version of FSQRT
+    // Unfortunately we can't use fastEmit_r, because the AVX version of FSQRT
     // is not generated by FastISel yet.
     // FIXME: Update this code once tablegen can handle it.
     static const unsigned SqrtOpc[2][2] = {
@@ -2356,7 +2306,7 @@ bool X86FastISel::FastLowerIntrinsicCall(const IntrinsicInst *II) {
 
     MIB.addReg(SrcReg);
 
-    UpdateValueMap(II, ResultReg);
+    updateValueMap(II, ResultReg);
     return true;
  }
   case Intrinsic::sadd_with_overflow:
@@ -2387,15 +2337,23 @@ bool X86FastISel::FastLowerIntrinsicCall(const IntrinsicInst *II) {
       isCommutativeIntrinsic(II))
     std::swap(LHS, RHS);
 
+  bool UseIncDec = false;
+  if (isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isOne())
+    UseIncDec = true;
+
   unsigned BaseOpc, CondOpc;
   switch (II->getIntrinsicID()) {
   default: llvm_unreachable("Unexpected intrinsic!");
   case Intrinsic::sadd_with_overflow:
-    BaseOpc = ISD::ADD; CondOpc = X86::SETOr; break;
+    BaseOpc = UseIncDec ? unsigned(X86ISD::INC) : unsigned(ISD::ADD);
+    CondOpc = X86::SETOr;
+    break;
   case Intrinsic::uadd_with_overflow:
     BaseOpc = ISD::ADD; CondOpc = X86::SETBr; break;
   case Intrinsic::ssub_with_overflow:
-    BaseOpc = ISD::SUB; CondOpc = X86::SETOr; break;
+    BaseOpc = UseIncDec ? unsigned(X86ISD::DEC) : unsigned(ISD::SUB);
+    CondOpc = X86::SETOr;
+    break;
  case Intrinsic::usub_with_overflow:
     BaseOpc = ISD::SUB; CondOpc = X86::SETBr; break;
   case Intrinsic::smul_with_overflow:
@@ -2411,9 +2369,21 @@ bool X86FastISel::FastLowerIntrinsicCall(const IntrinsicInst *II) {
 
   unsigned ResultReg = 0;
   // Check if we have an immediate version.
-  if (auto const *C = dyn_cast<ConstantInt>(RHS)) {
-    ResultReg = FastEmit_ri(VT, VT, BaseOpc, LHSReg, LHSIsKill,
-                            C->getZExtValue());
+  if (const auto *CI = dyn_cast<ConstantInt>(RHS)) {
+    static const unsigned Opc[2][4] = {
+      { X86::INC8r, X86::INC16r, X86::INC32r, X86::INC64r },
+      { X86::DEC8r, X86::DEC16r, X86::DEC32r, X86::DEC64r }
+    };
+
+    if (BaseOpc == X86ISD::INC || BaseOpc == X86ISD::DEC) {
+      ResultReg = createResultReg(TLI.getRegClassFor(VT));
+      bool IsDec = BaseOpc == X86ISD::DEC;
+      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+              TII.get(Opc[IsDec][VT.SimpleTy-MVT::i8]), ResultReg)
+        .addReg(LHSReg, getKillRegState(LHSIsKill));
+    } else
+      ResultReg = fastEmit_ri(VT, VT, BaseOpc, LHSReg, LHSIsKill,
+                              CI->getZExtValue());
   }
 
   unsigned RHSReg;
@@ -2423,7 +2393,7 @@ bool X86FastISel::FastLowerIntrinsicCall(const IntrinsicInst *II) {
     if (RHSReg == 0)
       return false;
     RHSIsKill = hasTrivialKill(RHS);
-    ResultReg = FastEmit_rr(VT, VT, BaseOpc, LHSReg, LHSIsKill, RHSReg,
+    ResultReg = fastEmit_rr(VT, VT, BaseOpc, LHSReg, LHSIsKill, RHSReg,
                             RHSIsKill);
   }
 
@@ -2438,7 +2408,7 @@ bool X86FastISel::FastLowerIntrinsicCall(const IntrinsicInst *II) {
     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
             TII.get(TargetOpcode::COPY), Reg[VT.SimpleTy-MVT::i8])
       .addReg(LHSReg, getKillRegState(LHSIsKill));
-    ResultReg = FastEmitInst_r(MULOpc[VT.SimpleTy-MVT::i8],
+    ResultReg = fastEmitInst_r(MULOpc[VT.SimpleTy-MVT::i8],
                                TLI.getRegClassFor(VT), RHSReg, RHSIsKill);
   } else if (BaseOpc == X86ISD::SMUL && !ResultReg) {
     static const unsigned MULOpc[] =
@@ -2449,10 +2419,10 @@ bool X86FastISel::FastLowerIntrinsicCall(const IntrinsicInst *II) {
       BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
               TII.get(TargetOpcode::COPY), X86::AL)
         .addReg(LHSReg, getKillRegState(LHSIsKill));
-      ResultReg = FastEmitInst_r(MULOpc[0], TLI.getRegClassFor(VT), RHSReg,
+      ResultReg = fastEmitInst_r(MULOpc[0], TLI.getRegClassFor(VT), RHSReg,
                                  RHSIsKill);
     } else
-      ResultReg = FastEmitInst_rr(MULOpc[VT.SimpleTy-MVT::i8],
+      ResultReg = fastEmitInst_rr(MULOpc[VT.SimpleTy-MVT::i8],
                                   TLI.getRegClassFor(VT), LHSReg, LHSIsKill,
                                   RHSReg, RHSIsKill);
   }
@@ -2465,7 +2435,7 @@ bool X86FastISel::FastLowerIntrinsicCall(const IntrinsicInst *II) {
   BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(CondOpc),
           ResultReg2);
 
-  UpdateValueMap(II, ResultReg, 2);
+  updateValueMap(II, ResultReg, 2);
   return true;
 }
 case Intrinsic::x86_sse_cvttss2si:
@@ -2531,13 +2501,13 @@ bool X86FastISel::FastLowerIntrinsicCall(const IntrinsicInst *II) {
     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
       .addReg(Reg);
 
-    UpdateValueMap(II, ResultReg);
+    updateValueMap(II, ResultReg);
     return true;
   }
   }
 }
 
-bool X86FastISel::FastLowerArguments() {
+bool X86FastISel::fastLowerArguments() {
   if (!FuncInfo.CanLowerReturn)
     return false;
 
@@ -2554,7 +2524,7 @@ bool X86FastISel::FastLowerArguments() {
 
   if (!Subtarget->is64Bit())
     return false;
-  
+
   // Only handle simple cases. i.e. Up to 6 i32/i64 scalar arguments.
   unsigned GPRCnt = 0;
   unsigned FPRCnt = 0;
@@ -2627,7 +2597,7 @@ bool X86FastISel::FastLowerArguments() {
     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
             TII.get(TargetOpcode::COPY), ResultReg)
       .addReg(DstReg, getKillRegState(true));
-    UpdateValueMap(&Arg, ResultReg);
+    updateValueMap(&Arg, ResultReg);
   }
   return true;
 }
@@ -2649,7 +2619,7 @@ static unsigned computeBytesPoppedByCallee(const X86Subtarget *Subtarget,
   return 4;
 }
 
-bool X86FastISel::FastLowerCall(CallLoweringInfo &CLI) {
+bool X86FastISel::fastLowerCall(CallLoweringInfo &CLI) {
   auto &OutVals = CLI.OutVals;
   auto &OutFlags = CLI.OutFlags;
   auto &OutRegs = CLI.OutRegs;
@@ -2699,6 +2669,9 @@ bool X86FastISel::FastLowerCall(CallLoweringInfo &CLI) {
                    TM.Options.GuaranteedTailCallOpt))
     return false;
 
+  SmallVector<MVT, 16> OutVTs;
+  SmallVector<unsigned, 16> ArgRegs;
+
   // If this is a constant i1/i8/i16 argument, promote to i32 to avoid an extra
   // instruction. This is safe because it is common to all FastISel supported
   // calling conventions on x86.
@@ -2716,46 +2689,44 @@ bool X86FastISel::FastLowerCall(CallLoweringInfo &CLI) {
 
     // Passing bools around ends up doing a trunc to i1 and passing it.
     // Codegen this as an argument + "and 1".
-    if (auto *TI = dyn_cast<TruncInst>(Val)) {
-      if (TI->getType()->isIntegerTy(1) && CLI.CS &&
-          (TI->getParent() == CLI.CS->getInstruction()->getParent()) &&
-          TI->hasOneUse()) {
-        Val = cast<TruncInst>(Val)->getOperand(0);
-        unsigned ResultReg = getRegForValue(Val);
-
-        if (!ResultReg)
-          return false;
-
-        MVT ArgVT;
-        if (!isTypeLegal(Val->getType(), ArgVT))
-          return false;
+    MVT VT;
+    auto *TI = dyn_cast<TruncInst>(Val);
+    unsigned ResultReg;
+    if (TI && TI->getType()->isIntegerTy(1) && CLI.CS &&
+        (TI->getParent() == CLI.CS->getInstruction()->getParent()) &&
+        TI->hasOneUse()) {
+      Value *PrevVal = TI->getOperand(0);
+      ResultReg = getRegForValue(PrevVal);
+
+      if (!ResultReg)
+        return false;
 
-        ResultReg =
-          FastEmit_ri(ArgVT, ArgVT, ISD::AND, ResultReg, Val->hasOneUse(), 1);
+      if (!isTypeLegal(PrevVal->getType(), VT))
+        return false;
 
-        if (!ResultReg)
-          return false;
-        UpdateValueMap(Val, ResultReg);
-      }
+      ResultReg =
+        fastEmit_ri(VT, VT, ISD::AND, ResultReg, hasTrivialKill(PrevVal), 1);
+    } else {
+      if (!isTypeLegal(Val->getType(), VT))
+        return false;
+      ResultReg = getRegForValue(Val);
     }
+
+    if (!ResultReg)
+      return false;
+
+    ArgRegs.push_back(ResultReg);
+    OutVTs.push_back(VT);
   }
 
   // Analyze operands of the call, assigning locations to each operand.
   SmallVector<CCValAssign, 16> ArgLocs;
-  CCState CCInfo(CC, IsVarArg, *FuncInfo.MF, TM, ArgLocs,
-                 CLI.RetTy->getContext());
+  CCState CCInfo(CC, IsVarArg, *FuncInfo.MF, ArgLocs, CLI.RetTy->getContext());
 
   // Allocate shadow area for Win64
   if (IsWin64)
     CCInfo.AllocateStack(32, 8);
 
-  SmallVector<MVT, 16> OutVTs;
-  for (auto *Val : OutVals) {
-    MVT VT;
-    if (!isTypeLegal(Val->getType(), VT))
-      return false;
-    OutVTs.push_back(VT);
-  }
   CCInfo.AnalyzeCallOperands(OutVTs, OutFlags, CC_X86);
 
   // Get a count of how many bytes are to be pushed on the stack.
@@ -2767,8 +2738,8 @@ bool X86FastISel::FastLowerCall(CallLoweringInfo &CLI) {
     .addImm(NumBytes);
 
   // Walk the register/memloc assignments, inserting copies/loads.
-  const X86RegisterInfo *RegInfo =
-      static_cast<const X86RegisterInfo *>(TM.getRegisterInfo());
+  const X86RegisterInfo *RegInfo = static_cast<const X86RegisterInfo *>(
+      TM.getSubtargetImpl()->getRegisterInfo());
   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
     CCValAssign const &VA = ArgLocs[i];
     const Value *ArgVal = OutVals[VA.getValNo()];
@@ -2777,9 +2748,7 @@ bool X86FastISel::FastLowerCall(CallLoweringInfo &CLI) {
     if (ArgVT == MVT::x86mmx)
       return false;
 
-    unsigned ArgReg = getRegForValue(ArgVal);
-    if (!ArgReg)
-      return false;
+    unsigned ArgReg = ArgRegs[VA.getValNo()];
 
     // Promote the value if needed.
     switch (VA.getLocInfo()) {
@@ -2819,7 +2788,7 @@ bool X86FastISel::FastLowerCall(CallLoweringInfo &CLI) {
       break;
     }
     case CCValAssign::BCvt: {
-      ArgReg = FastEmit_r(ArgVT, VA.getLocVT(), ISD::BITCAST, ArgReg,
+      ArgReg = fastEmit_r(ArgVT, VA.getLocVT(), ISD::BITCAST, ArgReg,
                           /*TODO: Kill=*/false);
       assert(ArgReg && "Failed to emit a bitcast!");
       ArgVT = VA.getLocVT();
@@ -2846,6 +2815,11 @@ bool X86FastISel::FastLowerCall(CallLoweringInfo &CLI) {
       OutRegs.push_back(VA.getLocReg());
     } else {
       assert(VA.isMemLoc());
+
+      // Don't emit stores for undef values.
+      if (isa<UndefValue>(ArgVal))
+        continue;
+
       unsigned LocMemOffset = VA.getLocMemOffset();
       X86AddressMode AM;
       AM.Base.Reg = RegInfo->getStackRegister();
@@ -2982,7 +2956,7 @@ bool X86FastISel::FastLowerCall(CallLoweringInfo &CLI) {
 
   // Now handle call return values.
   SmallVector<CCValAssign, 16> RVLocs;
-  CCState CCRetInfo(CC, IsVarArg, *FuncInfo.MF, TM, RVLocs,
+  CCState CCRetInfo(CC, IsVarArg, *FuncInfo.MF, RVLocs,
                     CLI.RetTy->getContext());
   CCRetInfo.AnalyzeCallResult(Ins, RetCC_X86);
 
@@ -2999,39 +2973,33 @@ bool X86FastISel::FastLowerCall(CallLoweringInfo &CLI) {
       report_fatal_error("SSE register return with SSE disabled");
     }
 
-    // If this is a call to a function that returns an fp value on the floating
-    // point stack, we must guarantee the value is popped from the stack, so
-    // a COPY is not good enough - the copy instruction may be eliminated if the
-    // return value is not used. We use the FpPOP_RETVAL instruction instead.
-    if (VA.getLocReg() == X86::ST0 || VA.getLocReg() == X86::ST1) {
-      // If we prefer to use the value in xmm registers, copy it out as f80 and
-      // use a truncate to move it from fp stack reg to xmm reg.
-      if (isScalarFPTypeInSSEReg(VA.getValVT())) {
-        CopyVT = MVT::f80;
-        CopyReg = createResultReg(&X86::RFP80RegClass);
-      }
-      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
-              TII.get(X86::FpPOP_RETVAL), CopyReg);
-
-      // Round the f80 to the right size, which also moves it to the appropriate
-      // xmm register. This is accomplished by storing the f80 value in memory
-      // and then loading it back.
-      if (CopyVT != VA.getValVT()) {
-        EVT ResVT = VA.getValVT();
-        unsigned Opc = ResVT == MVT::f32 ? X86::ST_Fp80m32 : X86::ST_Fp80m64;
-        unsigned MemSize = ResVT.getSizeInBits()/8;
-        int FI = MFI.CreateStackObject(MemSize, MemSize, false);
-        addFrameReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
-                                  TII.get(Opc)), FI)
-          .addReg(CopyReg);
-        Opc = ResVT == MVT::f32 ? X86::MOVSSrm : X86::MOVSDrm;
-        addFrameReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
-                                  TII.get(Opc), ResultReg + i), FI);
-      }
-    } else {
-      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
-              TII.get(TargetOpcode::COPY), CopyReg).addReg(VA.getLocReg());
-      InRegs.push_back(VA.getLocReg());
+    // If we prefer to use the value in xmm registers, copy it out as f80 and
+    // use a truncate to move it from fp stack reg to xmm reg.
+    if ((VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1) &&
+        isScalarFPTypeInSSEReg(VA.getValVT())) {
+      CopyVT = MVT::f80;
+      CopyReg = createResultReg(&X86::RFP80RegClass);
+    }
+
+    // Copy out the result.
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+            TII.get(TargetOpcode::COPY), CopyReg).addReg(VA.getLocReg());
+    InRegs.push_back(VA.getLocReg());
+
+    // Round the f80 to the right size, which also moves it to the appropriate
+    // xmm register. This is accomplished by storing the f80 value in memory
+    // and then loading it back.
+    if (CopyVT != VA.getValVT()) {
+      EVT ResVT = VA.getValVT();
+      unsigned Opc = ResVT == MVT::f32 ? X86::ST_Fp80m32 : X86::ST_Fp80m64;
+      unsigned MemSize = ResVT.getSizeInBits()/8;
+      int FI = MFI.CreateStackObject(MemSize, MemSize, false);
+      addFrameReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+                                TII.get(Opc)), FI)
+        .addReg(CopyReg);
+      Opc = ResVT == MVT::f32 ? X86::MOVSSrm : X86::MOVSDrm;
+      addFrameReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                                TII.get(Opc), ResultReg + i), FI);
     }
   }
@@ -3043,7 +3011,7 @@ bool X86FastISel::FastLowerCall(CallLoweringInfo &CLI) {
 }
 
 bool
-X86FastISel::TargetSelectInstruction(const Instruction *I) {
+X86FastISel::fastSelectInstruction(const Instruction *I) {
   switch (I->getOpcode()) {
   default: break;
   case Instruction::Load:
@@ -3086,7 +3054,7 @@ X86FastISel::TargetSelectInstruction(const Instruction *I) {
       return X86SelectTrunc(I);
     unsigned Reg = getRegForValue(I->getOperand(0));
     if (Reg == 0) return false;
-    UpdateValueMap(I, Reg);
+    updateValueMap(I, Reg);
    return true;
   }
   }
@@ -3094,13 +3062,69 @@ X86FastISel::TargetSelectInstruction(const Instruction *I) {
   return false;
 }
 
-unsigned X86FastISel::TargetMaterializeConstant(const Constant *C) {
-  MVT VT;
-  if (!isTypeLegal(C->getType(), VT))
+unsigned X86FastISel::X86MaterializeInt(const ConstantInt *CI, MVT VT) {
+  if (VT > MVT::i64)
     return 0;
 
+  uint64_t Imm = CI->getZExtValue();
+  if (Imm == 0) {
+    unsigned SrcReg = fastEmitInst_(X86::MOV32r0, &X86::GR32RegClass);
+    switch (VT.SimpleTy) {
+    default: llvm_unreachable("Unexpected value type");
+    case MVT::i1:
+    case MVT::i8:
+      return fastEmitInst_extractsubreg(MVT::i8, SrcReg, /*Kill=*/true,
+                                        X86::sub_8bit);
+    case MVT::i16:
+      return fastEmitInst_extractsubreg(MVT::i16, SrcReg, /*Kill=*/true,
+                                        X86::sub_16bit);
+    case MVT::i32:
+      return SrcReg;
+    case MVT::i64: {
+      unsigned ResultReg = createResultReg(&X86::GR64RegClass);
+      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+              TII.get(TargetOpcode::SUBREG_TO_REG), ResultReg)
+        .addImm(0).addReg(SrcReg).addImm(X86::sub_32bit);
+      return ResultReg;
+    }
+    }
+  }
+
+  unsigned Opc = 0;
+  switch (VT.SimpleTy) {
+  default: llvm_unreachable("Unexpected value type");
+  case MVT::i1: VT = MVT::i8; // fall-through
+  case MVT::i8: Opc = X86::MOV8ri; break;
+  case MVT::i16: Opc = X86::MOV16ri; break;
+  case MVT::i32: Opc = X86::MOV32ri; break;
+  case MVT::i64: {
+    if (isUInt<32>(Imm))
+      Opc = X86::MOV32ri;
+    else if (isInt<32>(Imm))
+      Opc = X86::MOV64ri32;
+    else
+      Opc = X86::MOV64ri;
+    break;
+  }
+  }
+  if (VT == MVT::i64 && Opc == X86::MOV32ri) {
+    unsigned SrcReg = fastEmitInst_i(Opc, &X86::GR32RegClass, Imm);
+    unsigned ResultReg = createResultReg(&X86::GR64RegClass);
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+            TII.get(TargetOpcode::SUBREG_TO_REG), ResultReg)
+      .addImm(0).addReg(SrcReg).addImm(X86::sub_32bit);
+    return ResultReg;
+  }
+  return fastEmitInst_i(Opc, TLI.getRegClassFor(VT), Imm);
+}
+
+unsigned X86FastISel::X86MaterializeFP(const ConstantFP *CFP, MVT VT) {
+  if (CFP->isNullValue())
+    return fastMaterializeFloatZero(CFP);
+
   // Can't handle alternate code models yet.
-  if (TM.getCodeModel() != CodeModel::Small)
+  CodeModel::Model CM = TM.getCodeModel();
+  if (CM != CodeModel::Small && CM != CodeModel::Large)
     return 0;
 
   // Get opcode and regclass of the output for the given load instruction.
@@ -3108,23 +3132,6 @@ unsigned X86FastISel::TargetMaterializeConstant(const Constant *C) {
   const TargetRegisterClass *RC = nullptr;
   switch (VT.SimpleTy) {
   default: return 0;
-  case MVT::i8:
-    Opc = X86::MOV8rm;
-    RC = &X86::GR8RegClass;
-    break;
-  case MVT::i16:
-    Opc = X86::MOV16rm;
-    RC = &X86::GR16RegClass;
-    break;
-  case MVT::i32:
-    Opc = X86::MOV32rm;
-    RC = &X86::GR32RegClass;
-    break;
-  case MVT::i64:
-    // Must be in x86-64 mode.
-    Opc = X86::MOV64rm;
-    RC = &X86::GR64RegClass;
-    break;
   case MVT::f32:
     if (X86ScalarSSEf32) {
       Opc = Subtarget->hasAVX() ? X86::VMOVSSrm : X86::MOVSSrm;
@@ -3148,39 +3155,11 @@ unsigned X86FastISel::TargetMaterializeConstant(const Constant *C) {
     return 0;
   }
 
-  // Materialize addresses with LEA/MOV instructions.
-  if (isa<GlobalValue>(C)) {
-    X86AddressMode AM;
-    if (X86SelectAddress(C, AM)) {
-      // If the expression is just a basereg, then we're done, otherwise we need
-      // to emit an LEA.
-      if (AM.BaseType == X86AddressMode::RegBase &&
-          AM.IndexReg == 0 && AM.Disp == 0 && AM.GV == nullptr)
-        return AM.Base.Reg;
-
-      unsigned ResultReg = createResultReg(RC);
-      if (TM.getRelocationModel() == Reloc::Static &&
-          TLI.getPointerTy() == MVT::i64) {
-        // The displacement code be more than 32 bits away so we need to use
-        // an instruction with a 64 bit immediate
-        Opc = X86::MOV64ri;
-        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
-                TII.get(Opc), ResultReg).addGlobalAddress(cast<GlobalValue>(C));
-      } else {
-        Opc = TLI.getPointerTy() == MVT::i32 ? X86::LEA32r : X86::LEA64r;
-        addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
-                               TII.get(Opc), ResultReg), AM);
-      }
-      return ResultReg;
-    }
-    return 0;
-  }
-
   // MachineConstantPool wants an explicit alignment.
-  unsigned Align = DL.getPrefTypeAlignment(C->getType());
+  unsigned Align = DL.getPrefTypeAlignment(CFP->getType());
   if (Align == 0) {
-    // Alignment of vector types. FIXME!
-    Align = DL.getTypeAllocSize(C->getType());
+    // Alignment of vector types. FIXME!
+    Align = DL.getTypeAllocSize(CFP->getType());
   }
 
   // x86-32 PIC requires a PIC base register for constant pools.
@@ -3198,23 +3177,88 @@ unsigned X86FastISel::TargetMaterializeConstant(const Constant *C) {
   }
 
   // Create the load from the constant pool.
-  unsigned MCPOffset = MCP.getConstantPoolIndex(C, Align);
+  unsigned CPI = MCP.getConstantPoolIndex(CFP, Align);
   unsigned ResultReg = createResultReg(RC);
+
+  if (CM == CodeModel::Large) {
+    unsigned AddrReg = createResultReg(&X86::GR64RegClass);
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOV64ri),
+            AddrReg)
+      .addConstantPoolIndex(CPI, 0, OpFlag);
+    MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+                                      TII.get(Opc), ResultReg);
+    addDirectMem(MIB, AddrReg);
+    MachineMemOperand *MMO = FuncInfo.MF->getMachineMemOperand(
+        MachinePointerInfo::getConstantPool(), MachineMemOperand::MOLoad,
+        TM.getSubtargetImpl()->getDataLayout()->getPointerSize(), Align);
+    MIB->addMemOperand(*FuncInfo.MF, MMO);
+    return ResultReg;
+  }
+
   addConstantPoolReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                                    TII.get(Opc), ResultReg),
-                           MCPOffset, PICBase, OpFlag);
-
+                           CPI, PICBase, OpFlag);
   return ResultReg;
 }
 
-unsigned X86FastISel::TargetMaterializeAlloca(const AllocaInst *C) {
+unsigned X86FastISel::X86MaterializeGV(const GlobalValue *GV, MVT VT) {
+  // Can't handle alternate code models yet.
+  if (TM.getCodeModel() != CodeModel::Small)
+    return 0;
+
+  // Materialize addresses with LEA/MOV instructions.
+  X86AddressMode AM;
+  if (X86SelectAddress(GV, AM)) {
+    // If the expression is just a basereg, then we're done, otherwise we need
+    // to emit an LEA.
+    if (AM.BaseType == X86AddressMode::RegBase &&
+        AM.IndexReg == 0 && AM.Disp == 0 && AM.GV == nullptr)
+      return AM.Base.Reg;
+
+    unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
+    if (TM.getRelocationModel() == Reloc::Static &&
+        TLI.getPointerTy() == MVT::i64) {
+      // The displacement code could be more than 32 bits away so we need to use
+      // an instruction with a 64 bit immediate
+      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOV64ri),
+              ResultReg)
+        .addGlobalAddress(GV);
+    } else {
+      unsigned Opc = TLI.getPointerTy() == MVT::i32 ? X86::LEA32r : X86::LEA64r;
+      addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+                             TII.get(Opc), ResultReg), AM);
+    }
+    return ResultReg;
+  }
+  return 0;
+}
+
+unsigned X86FastISel::fastMaterializeConstant(const Constant *C) {
+  EVT CEVT = TLI.getValueType(C->getType(), true);
+
+  // Only handle simple types.
+  if (!CEVT.isSimple())
+    return 0;
+  MVT VT = CEVT.getSimpleVT();
+
+  if (const auto *CI = dyn_cast<ConstantInt>(C))
+    return X86MaterializeInt(CI, VT);
+  else if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
+    return X86MaterializeFP(CFP, VT);
+  else if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
+    return X86MaterializeGV(GV, VT);
+
+  return 0;
+}
+
+unsigned X86FastISel::fastMaterializeAlloca(const AllocaInst *C) {
   // Fail on dynamic allocas. At this point, getRegForValue has already
   // checked its CSE maps, so if we're here trying to handle a dynamic
   // alloca, we're not going to succeed. X86SelectAddress has a
   // check for dynamic allocas, because it's called directly from
-  // various places, but TargetMaterializeAlloca also needs a check
+  // various places, but targetMaterializeAlloca also needs a check
   // in order to avoid recursion between getRegForValue,
-  // X86SelectAddrss, and TargetMaterializeAlloca.
+  // X86SelectAddrss, and targetMaterializeAlloca.
   if (!FuncInfo.StaticAllocaMap.count(C))
     return 0;
   assert(C->isStaticAlloca() && "dynamic alloca in the static alloca map?");
@@ -3222,7 +3266,7 @@ unsigned X86FastISel::TargetMaterializeAlloca(const AllocaInst *C) {
   X86AddressMode AM;
   if (!X86SelectAddress(C, AM))
     return 0;
-  unsigned Opc = Subtarget->is64Bit() ? X86::LEA64r : X86::LEA32r;
+  unsigned Opc = TLI.getPointerTy() == MVT::i32 ? X86::LEA32r : X86::LEA64r;
   const TargetRegisterClass* RC = TLI.getRegClassFor(TLI.getPointerTy());
   unsigned ResultReg = createResultReg(RC);
   addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
@@ -3230,7 +3274,7 @@ unsigned X86FastISel::TargetMaterializeAlloca(const AllocaInst *C) {
   return ResultReg;
 }
 
-unsigned X86FastISel::TargetMaterializeFloatZero(const ConstantFP *CF) {
+unsigned X86FastISel::fastMaterializeFloatZero(const ConstantFP *CF) {
   MVT VT;
   if (!isTypeLegal(CF->getType(), VT))
     return 0;
@@ -3276,7 +3320,7 @@ bool X86FastISel::tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
   if (!X86SelectAddress(Ptr, AM))
     return false;
 
-  const X86InstrInfo &XII = (const X86InstrInfo&)TII;
+  const X86InstrInfo &XII = (const X86InstrInfo &)TII;
 
   unsigned Size = DL.getTypeAllocSize(LI->getType());
   unsigned Alignment = LI->getAlignment();
@@ -3288,7 +3332,8 @@ bool X86FastISel::tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
   AM.getFullAddress(AddrOps);
 
   MachineInstr *Result =
-    XII.foldMemoryOperandImpl(*FuncInfo.MF, MI, OpNo, AddrOps, Size, Alignment);
+    XII.foldMemoryOperandImpl(*FuncInfo.MF, MI, OpNo, AddrOps,
                              Size, Alignment, /*AllowCommute=*/true);
   if (!Result)
     return false;