Diffstat (limited to 'llvm/lib/Target/X86/X86FastISel.cpp')
-rw-r--r--  llvm/lib/Target/X86/X86FastISel.cpp | 273
1 file changed, 133 insertions(+), 140 deletions(-)
diff --git a/llvm/lib/Target/X86/X86FastISel.cpp b/llvm/lib/Target/X86/X86FastISel.cpp
index 1dbf406835646..b305940139c06 100644
--- a/llvm/lib/Target/X86/X86FastISel.cpp
+++ b/llvm/lib/Target/X86/X86FastISel.cpp
@@ -26,7 +26,6 @@
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
@@ -498,7 +497,7 @@ bool X86FastISel::X86FastEmitStore(EVT VT, unsigned ValReg, bool ValIsKill,
default: return false;
case MVT::i1: {
// Mask out all but lowest bit.
- unsigned AndResult = createResultReg(&X86::GR8RegClass);
+ Register AndResult = createResultReg(&X86::GR8RegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(X86::AND8ri), AndResult)
.addReg(ValReg, getKillRegState(ValIsKill)).addImm(1);
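The hunk above keeps the i1 store path masking out everything but the low bit: an i1 is carried in an 8-bit register whose upper seven bits are undefined. A minimal C++ sketch of the invariant the AND8ri establishes (helper name is illustrative, not from the patch):

    #include <cstdint>

    // An i1 in a GR8 may have garbage above bit 0; canonicalize before use.
    inline uint8_t canonicalizeI1(uint8_t gr8) {
      return gr8 & 1;  // what BuildMI(..., AND8ri).addImm(1) emits
    }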
@@ -691,7 +690,7 @@ bool X86FastISel::X86FastEmitStore(EVT VT, const Value *Val,
}
}
- unsigned ValReg = getRegForValue(Val);
+ Register ValReg = getRegForValue(Val);
if (ValReg == 0)
return false;
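Most of this patch is the mechanical unsigned-to-Register migration. The comparisons against 0 keep compiling because Register converts implicitly to unsigned; a simplified sketch of the wrapper (condensed from llvm/CodeGen/Register.h, details elided):

    // Simplified: llvm::Register wraps the raw vreg/physreg number.
    class Register {
      unsigned Reg = 0;
    public:
      constexpr Register(unsigned Val = 0) : Reg(Val) {}
      constexpr operator unsigned() const { return Reg; } // keeps `== 0` legal
      bool isValid() const { return Reg != 0; }           // 0 means no register
    };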
@@ -761,9 +760,9 @@ bool X86FastISel::handleConstantAddresses(const Value *V, X86AddressMode &AM) {
// Ok, we need to do a load from a stub. If we've already loaded from
// this stub, reuse the loaded pointer, otherwise emit the load now.
- DenseMap<const Value *, unsigned>::iterator I = LocalValueMap.find(V);
- unsigned LoadReg;
- if (I != LocalValueMap.end() && I->second != 0) {
+ DenseMap<const Value *, Register>::iterator I = LocalValueMap.find(V);
+ Register LoadReg;
+ if (I != LocalValueMap.end() && I->second) {
LoadReg = I->second;
} else {
// Issue load from stub.
@@ -1128,10 +1127,8 @@ bool X86FastISel::X86SelectStore(const Instruction *I) {
if (!isTypeLegal(Val->getType(), VT, /*AllowI1=*/true))
return false;
- unsigned Alignment = S->getAlignment();
- unsigned ABIAlignment = DL.getABITypeAlignment(Val->getType());
- if (Alignment == 0) // Ensure that codegen never sees alignment 0
- Alignment = ABIAlignment;
+ Align Alignment = S->getAlign();
+ Align ABIAlignment = DL.getABITypeAlign(Val->getType());
bool Aligned = Alignment >= ABIAlignment;
X86AddressMode AM;
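The alignment hunks lean on the fact that llvm::Align can never hold zero; "unspecified" is modeled by MaybeAlign instead, which is why the old `if (Alignment == 0)` fallback disappears rather than moving. A small sketch of the distinction (assuming the usual llvm/Support/Alignment.h API):

    #include "llvm/Support/Alignment.h"
    using namespace llvm;

    MaybeAlign Parsed = MaybeAlign(0);     // "no alignment given" -> empty
    Align Resolved = Parsed.valueOrOne();  // always a real power of two
    bool Ok = Resolved >= Align(4);        // Align supports ordering directly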
@@ -1196,7 +1193,7 @@ bool X86FastISel::X86SelectRet(const Instruction *I) {
CCInfo.AnalyzeReturn(Outs, RetCC_X86);
const Value *RV = Ret->getOperand(0);
- unsigned Reg = getRegForValue(RV);
+ Register Reg = getRegForValue(RV);
if (Reg == 0)
return false;
@@ -1264,7 +1261,7 @@ bool X86FastISel::X86SelectRet(const Instruction *I) {
// We saved the argument into a virtual register in the entry block,
// so now we copy the value out and into %rax/%eax.
if (F.hasStructRetAttr() && CC != CallingConv::Swift) {
- unsigned Reg = X86MFInfo->getSRetReturnReg();
+ Register Reg = X86MFInfo->getSRetReturnReg();
assert(Reg &&
"SRetReturnReg should have been set in LowerFormalArguments()!");
unsigned RetReg = Subtarget->isTarget64BitLP64() ? X86::RAX : X86::EAX;
@@ -1322,14 +1319,9 @@ bool X86FastISel::X86SelectLoad(const Instruction *I) {
if (!X86SelectAddress(Ptr, AM))
return false;
- unsigned Alignment = LI->getAlignment();
- unsigned ABIAlignment = DL.getABITypeAlignment(LI->getType());
- if (Alignment == 0) // Ensure that codegen never sees alignment 0
- Alignment = ABIAlignment;
-
unsigned ResultReg = 0;
if (!X86FastEmitLoad(VT, AM, createMachineMemOperandFor(LI), ResultReg,
- Alignment))
+ LI->getAlign().value()))
return false;
updateValueMap(I, ResultReg);
@@ -1392,7 +1384,7 @@ static unsigned X86ChooseCmpImmediateOpcode(EVT VT, const ConstantInt *RHSC) {
bool X86FastISel::X86FastEmitCompare(const Value *Op0, const Value *Op1, EVT VT,
const DebugLoc &CurDbgLoc) {
- unsigned Op0Reg = getRegForValue(Op0);
+ Register Op0Reg = getRegForValue(Op0);
if (Op0Reg == 0) return false;
// Handle 'null' like i32/i64 0.
@@ -1414,7 +1406,7 @@ bool X86FastISel::X86FastEmitCompare(const Value *Op0, const Value *Op1, EVT VT,
unsigned CompareOpc = X86ChooseCmpOpcode(VT, Subtarget);
if (CompareOpc == 0) return false;
- unsigned Op1Reg = getRegForValue(Op1);
+ Register Op1Reg = getRegForValue(Op1);
if (Op1Reg == 0) return false;
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, CurDbgLoc, TII.get(CompareOpc))
.addReg(Op0Reg)
@@ -1487,8 +1479,8 @@ bool X86FastISel::X86SelectCmp(const Instruction *I) {
if (!X86FastEmitCompare(LHS, RHS, VT, I->getDebugLoc()))
return false;
- unsigned FlagReg1 = createResultReg(&X86::GR8RegClass);
- unsigned FlagReg2 = createResultReg(&X86::GR8RegClass);
+ Register FlagReg1 = createResultReg(&X86::GR8RegClass);
+ Register FlagReg2 = createResultReg(&X86::GR8RegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::SETCCr),
FlagReg1).addImm(SETFOpc[0]);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::SETCCr),
@@ -1522,7 +1514,7 @@ bool X86FastISel::X86SelectZExt(const Instruction *I) {
if (!TLI.isTypeLegal(DstVT))
return false;
- unsigned ResultReg = getRegForValue(I->getOperand(0));
+ Register ResultReg = getRegForValue(I->getOperand(0));
if (ResultReg == 0)
return false;
@@ -1548,7 +1540,7 @@ bool X86FastISel::X86SelectZExt(const Instruction *I) {
default: llvm_unreachable("Unexpected zext to i64 source type");
}
- unsigned Result32 = createResultReg(&X86::GR32RegClass);
+ Register Result32 = createResultReg(&X86::GR32RegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(MovInst), Result32)
.addReg(ResultReg);
@@ -1559,7 +1551,7 @@ bool X86FastISel::X86SelectZExt(const Instruction *I) {
} else if (DstVT == MVT::i16) {
// i8->i16 doesn't exist in the autogenerated isel table. Need to zero
// extend to 32-bits and then extract down to 16-bits.
- unsigned Result32 = createResultReg(&X86::GR32RegClass);
+ Register Result32 = createResultReg(&X86::GR32RegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOVZX32rr8),
Result32).addReg(ResultReg);
@@ -1581,7 +1573,7 @@ bool X86FastISel::X86SelectSExt(const Instruction *I) {
if (!TLI.isTypeLegal(DstVT))
return false;
- unsigned ResultReg = getRegForValue(I->getOperand(0));
+ Register ResultReg = getRegForValue(I->getOperand(0));
if (ResultReg == 0)
return false;
@@ -1589,7 +1581,7 @@ bool X86FastISel::X86SelectSExt(const Instruction *I) {
MVT SrcVT = TLI.getSimpleValueType(DL, I->getOperand(0)->getType());
if (SrcVT == MVT::i1) {
// Set the high bits to zero.
- unsigned ZExtReg = fastEmitZExtFromI1(MVT::i8, ResultReg,
+ Register ZExtReg = fastEmitZExtFromI1(MVT::i8, ResultReg,
/*TODO: Kill=*/false);
if (ZExtReg == 0)
return false;
@@ -1605,7 +1597,7 @@ bool X86FastISel::X86SelectSExt(const Instruction *I) {
if (DstVT == MVT::i16) {
// i8->i16 doesn't exist in the autogenerated isel table. Need to sign
// extend to 32-bits and then extract down to 16-bits.
- unsigned Result32 = createResultReg(&X86::GR32RegClass);
+ Register Result32 = createResultReg(&X86::GR32RegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOVSX32rr8),
Result32).addReg(ResultReg);
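Both the zext and sext hunks use the same workaround: there is no i8-to-i16 extend pattern in the autogenerated isel table, so the value is widened to 32 bits and the low 16 bits are extracted, which on x86 is only a subregister rename. The C++-level semantics (names illustrative):

    #include <cstdint>

    int16_t sext8to16(int8_t v) {
      int32_t wide = v;                  // MOVSX32rr8
      return static_cast<int16_t>(wide); // COPY of sub_16bit, no instruction
    }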
@@ -1720,7 +1712,7 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) {
case MVT::i64: TestOpc = X86::TEST64ri32; break;
}
if (TestOpc) {
- unsigned OpReg = getRegForValue(TI->getOperand(0));
+ Register OpReg = getRegForValue(TI->getOperand(0));
if (OpReg == 0) return false;
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TestOpc))
@@ -1742,7 +1734,7 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) {
} else if (foldX86XALUIntrinsic(CC, BI, BI->getCondition())) {
// Fake request the condition, otherwise the intrinsic might be completely
// optimized away.
- unsigned TmpReg = getRegForValue(BI->getCondition());
+ Register TmpReg = getRegForValue(BI->getCondition());
if (TmpReg == 0)
return false;
@@ -1755,7 +1747,7 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) {
// Otherwise do a clumsy setcc and re-test it.
// Note that i1 essentially gets ANY_EXTEND'ed to i8 where it isn't used
// in an explicit cast, so make sure to handle that correctly.
- unsigned OpReg = getRegForValue(BI->getCondition());
+ Register OpReg = getRegForValue(BI->getCondition());
if (OpReg == 0) return false;
// In case OpReg is a K register, COPY to a GPR
@@ -1824,10 +1816,10 @@ bool X86FastISel::X86SelectShift(const Instruction *I) {
if (!isTypeLegal(I->getType(), VT))
return false;
- unsigned Op0Reg = getRegForValue(I->getOperand(0));
+ Register Op0Reg = getRegForValue(I->getOperand(0));
if (Op0Reg == 0) return false;
- unsigned Op1Reg = getRegForValue(I->getOperand(1));
+ Register Op1Reg = getRegForValue(I->getOperand(1));
if (Op1Reg == 0) return false;
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY),
CReg).addReg(Op1Reg);
@@ -1839,7 +1831,7 @@ bool X86FastISel::X86SelectShift(const Instruction *I) {
TII.get(TargetOpcode::KILL), X86::CL)
.addReg(CReg, RegState::Kill);
- unsigned ResultReg = createResultReg(RC);
+ Register ResultReg = createResultReg(RC);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(OpReg), ResultReg)
.addReg(Op0Reg);
updateValueMap(I, ResultReg);
@@ -1933,10 +1925,10 @@ bool X86FastISel::X86SelectDivRem(const Instruction *I) {
const DivRemEntry &TypeEntry = OpTable[TypeIndex];
const DivRemEntry::DivRemResult &OpEntry = TypeEntry.ResultTable[OpIndex];
- unsigned Op0Reg = getRegForValue(I->getOperand(0));
+ Register Op0Reg = getRegForValue(I->getOperand(0));
if (Op0Reg == 0)
return false;
- unsigned Op1Reg = getRegForValue(I->getOperand(1));
+ Register Op1Reg = getRegForValue(I->getOperand(1));
if (Op1Reg == 0)
return false;
@@ -1949,7 +1941,7 @@ bool X86FastISel::X86SelectDivRem(const Instruction *I) {
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(OpEntry.OpSignExtend));
else {
- unsigned Zero32 = createResultReg(&X86::GR32RegClass);
+ Register Zero32 = createResultReg(&X86::GR32RegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(X86::MOV32r0), Zero32);
@@ -1986,8 +1978,8 @@ bool X86FastISel::X86SelectDivRem(const Instruction *I) {
if ((I->getOpcode() == Instruction::SRem ||
I->getOpcode() == Instruction::URem) &&
OpEntry.DivRemResultReg == X86::AH && Subtarget->is64Bit()) {
- unsigned SourceSuperReg = createResultReg(&X86::GR16RegClass);
- unsigned ResultSuperReg = createResultReg(&X86::GR16RegClass);
+ Register SourceSuperReg = createResultReg(&X86::GR16RegClass);
+ Register ResultSuperReg = createResultReg(&X86::GR16RegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(Copy), SourceSuperReg).addReg(X86::AX);
@@ -2066,15 +2058,15 @@ bool X86FastISel::X86FastEmitCMoveSelect(MVT RetVT, const Instruction *I) {
return false;
if (SETFOpc) {
- unsigned FlagReg1 = createResultReg(&X86::GR8RegClass);
- unsigned FlagReg2 = createResultReg(&X86::GR8RegClass);
+ Register FlagReg1 = createResultReg(&X86::GR8RegClass);
+ Register FlagReg2 = createResultReg(&X86::GR8RegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::SETCCr),
FlagReg1).addImm(SETFOpc[0]);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::SETCCr),
FlagReg2).addImm(SETFOpc[1]);
auto const &II = TII.get(SETFOpc[2]);
if (II.getNumDefs()) {
- unsigned TmpReg = createResultReg(&X86::GR8RegClass);
+ Register TmpReg = createResultReg(&X86::GR8RegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, TmpReg)
.addReg(FlagReg2).addReg(FlagReg1);
} else {
@@ -2086,7 +2078,7 @@ bool X86FastISel::X86FastEmitCMoveSelect(MVT RetVT, const Instruction *I) {
} else if (foldX86XALUIntrinsic(CC, I, Cond)) {
// Fake request the condition, otherwise the intrinsic might be completely
// optimized away.
- unsigned TmpReg = getRegForValue(Cond);
+ Register TmpReg = getRegForValue(Cond);
if (TmpReg == 0)
return false;
@@ -2099,7 +2091,7 @@ bool X86FastISel::X86FastEmitCMoveSelect(MVT RetVT, const Instruction *I) {
// accurate. If we read more than the lsb, we may see non-zero values
// whereas lsb is zero. Therefore, we have to truncate Op0Reg to i1 for
// the select. This is achieved by performing TEST against 1.
- unsigned CondReg = getRegForValue(Cond);
+ Register CondReg = getRegForValue(Cond);
if (CondReg == 0)
return false;
bool CondIsKill = hasTrivialKill(Cond);
@@ -2122,10 +2114,10 @@ bool X86FastISel::X86FastEmitCMoveSelect(MVT RetVT, const Instruction *I) {
const Value *LHS = I->getOperand(1);
const Value *RHS = I->getOperand(2);
- unsigned RHSReg = getRegForValue(RHS);
+ Register RHSReg = getRegForValue(RHS);
bool RHSIsKill = hasTrivialKill(RHS);
- unsigned LHSReg = getRegForValue(LHS);
+ Register LHSReg = getRegForValue(LHS);
bool LHSIsKill = hasTrivialKill(LHS);
if (!LHSReg || !RHSReg)
@@ -2133,7 +2125,7 @@ bool X86FastISel::X86FastEmitCMoveSelect(MVT RetVT, const Instruction *I) {
const TargetRegisterInfo &TRI = *Subtarget->getRegisterInfo();
unsigned Opc = X86::getCMovOpcode(TRI.getRegSizeInBits(*RC)/8);
- unsigned ResultReg = fastEmitInst_rri(Opc, RC, RHSReg, RHSIsKill,
+ Register ResultReg = fastEmitInst_rri(Opc, RC, RHSReg, RHSIsKill,
LHSReg, LHSIsKill, CC);
updateValueMap(I, ResultReg);
return true;
@@ -2182,19 +2174,19 @@ bool X86FastISel::X86FastEmitSSESelect(MVT RetVT, const Instruction *I) {
const Value *LHS = I->getOperand(1);
const Value *RHS = I->getOperand(2);
- unsigned LHSReg = getRegForValue(LHS);
+ Register LHSReg = getRegForValue(LHS);
bool LHSIsKill = hasTrivialKill(LHS);
- unsigned RHSReg = getRegForValue(RHS);
+ Register RHSReg = getRegForValue(RHS);
bool RHSIsKill = hasTrivialKill(RHS);
- unsigned CmpLHSReg = getRegForValue(CmpLHS);
+ Register CmpLHSReg = getRegForValue(CmpLHS);
bool CmpLHSIsKill = hasTrivialKill(CmpLHS);
- unsigned CmpRHSReg = getRegForValue(CmpRHS);
+ Register CmpRHSReg = getRegForValue(CmpRHS);
bool CmpRHSIsKill = hasTrivialKill(CmpRHS);
- if (!LHSReg || !RHSReg || !CmpLHS || !CmpRHS)
+ if (!LHSReg || !RHSReg || !CmpLHSReg || !CmpRHSReg)
return false;
const TargetRegisterClass *RC = TLI.getRegClassFor(RetVT);
@@ -2207,12 +2199,12 @@ bool X86FastISel::X86FastEmitSSESelect(MVT RetVT, const Instruction *I) {
unsigned CmpOpcode =
(RetVT == MVT::f32) ? X86::VCMPSSZrr : X86::VCMPSDZrr;
- unsigned CmpReg = fastEmitInst_rri(CmpOpcode, VK1, CmpLHSReg, CmpLHSIsKill,
+ Register CmpReg = fastEmitInst_rri(CmpOpcode, VK1, CmpLHSReg, CmpLHSIsKill,
CmpRHSReg, CmpRHSIsKill, CC);
// Need an IMPLICIT_DEF for the input that is used to generate the upper
// bits of the result register since its not based on any of the inputs.
- unsigned ImplicitDefReg = createResultReg(VR128X);
+ Register ImplicitDefReg = createResultReg(VR128X);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::IMPLICIT_DEF), ImplicitDefReg);
@@ -2241,9 +2233,9 @@ bool X86FastISel::X86FastEmitSSESelect(MVT RetVT, const Instruction *I) {
unsigned BlendOpcode =
(RetVT == MVT::f32) ? X86::VBLENDVPSrr : X86::VBLENDVPDrr;
- unsigned CmpReg = fastEmitInst_rri(CmpOpcode, RC, CmpLHSReg, CmpLHSIsKill,
+ Register CmpReg = fastEmitInst_rri(CmpOpcode, RC, CmpLHSReg, CmpLHSIsKill,
CmpRHSReg, CmpRHSIsKill, CC);
- unsigned VBlendReg = fastEmitInst_rrr(BlendOpcode, VR128, RHSReg, RHSIsKill,
+ Register VBlendReg = fastEmitInst_rrr(BlendOpcode, VR128, RHSReg, RHSIsKill,
LHSReg, LHSIsKill, CmpReg, true);
ResultReg = createResultReg(RC);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
@@ -2263,13 +2255,13 @@ bool X86FastISel::X86FastEmitSSESelect(MVT RetVT, const Instruction *I) {
}
const TargetRegisterClass *VR128 = &X86::VR128RegClass;
- unsigned CmpReg = fastEmitInst_rri(Opc[0], RC, CmpLHSReg, CmpLHSIsKill,
+ Register CmpReg = fastEmitInst_rri(Opc[0], RC, CmpLHSReg, CmpLHSIsKill,
CmpRHSReg, CmpRHSIsKill, CC);
- unsigned AndReg = fastEmitInst_rr(Opc[1], VR128, CmpReg, /*IsKill=*/false,
+ Register AndReg = fastEmitInst_rr(Opc[1], VR128, CmpReg, /*IsKill=*/false,
LHSReg, LHSIsKill);
- unsigned AndNReg = fastEmitInst_rr(Opc[2], VR128, CmpReg, /*IsKill=*/true,
+ Register AndNReg = fastEmitInst_rr(Opc[2], VR128, CmpReg, /*IsKill=*/true,
RHSReg, RHSIsKill);
- unsigned OrReg = fastEmitInst_rr(Opc[3], VR128, AndNReg, /*IsKill=*/true,
+ Register OrReg = fastEmitInst_rr(Opc[3], VR128, AndNReg, /*IsKill=*/true,
AndReg, /*IsKill=*/true);
ResultReg = createResultReg(RC);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
@@ -2317,7 +2309,7 @@ bool X86FastISel::X86FastEmitPseudoSelect(MVT RetVT, const Instruction *I) {
if (!X86FastEmitCompare(CmpLHS, CmpRHS, CmpVT, CI->getDebugLoc()))
return false;
} else {
- unsigned CondReg = getRegForValue(Cond);
+ Register CondReg = getRegForValue(Cond);
if (CondReg == 0)
return false;
bool CondIsKill = hasTrivialKill(Cond);
@@ -2340,10 +2332,10 @@ bool X86FastISel::X86FastEmitPseudoSelect(MVT RetVT, const Instruction *I) {
const Value *LHS = I->getOperand(1);
const Value *RHS = I->getOperand(2);
- unsigned LHSReg = getRegForValue(LHS);
+ Register LHSReg = getRegForValue(LHS);
bool LHSIsKill = hasTrivialKill(LHS);
- unsigned RHSReg = getRegForValue(RHS);
+ Register RHSReg = getRegForValue(RHS);
bool RHSIsKill = hasTrivialKill(RHS);
if (!LHSReg || !RHSReg)
@@ -2351,7 +2343,7 @@ bool X86FastISel::X86FastEmitPseudoSelect(MVT RetVT, const Instruction *I) {
const TargetRegisterClass *RC = TLI.getRegClassFor(RetVT);
- unsigned ResultReg =
+ Register ResultReg =
fastEmitInst_rri(Opc, RC, RHSReg, RHSIsKill, LHSReg, LHSIsKill, CC);
updateValueMap(I, ResultReg);
return true;
@@ -2373,12 +2365,12 @@ bool X86FastISel::X86SelectSelect(const Instruction *I) {
}
// No need for a select anymore - this is an unconditional move.
if (Opnd) {
- unsigned OpReg = getRegForValue(Opnd);
+ Register OpReg = getRegForValue(Opnd);
if (OpReg == 0)
return false;
bool OpIsKill = hasTrivialKill(Opnd);
const TargetRegisterClass *RC = TLI.getRegClassFor(RetVT);
- unsigned ResultReg = createResultReg(RC);
+ Register ResultReg = createResultReg(RC);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), ResultReg)
.addReg(OpReg, getKillRegState(OpIsKill));
@@ -2419,7 +2411,7 @@ bool X86FastISel::X86SelectIntToFP(const Instruction *I, bool IsSigned) {
return false;
// Select integer to float/double conversion.
- unsigned OpReg = getRegForValue(I->getOperand(0));
+ Register OpReg = getRegForValue(I->getOperand(0));
if (OpReg == 0)
return false;
@@ -2448,10 +2440,10 @@ bool X86FastISel::X86SelectIntToFP(const Instruction *I, bool IsSigned) {
MVT DstVT = TLI.getValueType(DL, I->getType()).getSimpleVT();
const TargetRegisterClass *RC = TLI.getRegClassFor(DstVT);
- unsigned ImplicitDefReg = createResultReg(RC);
+ Register ImplicitDefReg = createResultReg(RC);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::IMPLICIT_DEF), ImplicitDefReg);
- unsigned ResultReg =
+ Register ResultReg =
fastEmitInst_rr(Opcode, RC, ImplicitDefReg, true, OpReg, false);
updateValueMap(I, ResultReg);
return true;
@@ -2474,7 +2466,7 @@ bool X86FastISel::X86SelectFPExtOrFPTrunc(const Instruction *I,
"Instruction must be an FPExt or FPTrunc!");
bool HasAVX = Subtarget->hasAVX();
- unsigned OpReg = getRegForValue(I->getOperand(0));
+ Register OpReg = getRegForValue(I->getOperand(0));
if (OpReg == 0)
return false;
@@ -2486,7 +2478,7 @@ bool X86FastISel::X86SelectFPExtOrFPTrunc(const Instruction *I,
}
- unsigned ResultReg = createResultReg(RC);
+ Register ResultReg = createResultReg(RC);
MachineInstrBuilder MIB;
MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpc),
ResultReg);
@@ -2537,7 +2529,7 @@ bool X86FastISel::X86SelectTrunc(const Instruction *I) {
if (!TLI.isTypeLegal(SrcVT))
return false;
- unsigned InputReg = getRegForValue(I->getOperand(0));
+ Register InputReg = getRegForValue(I->getOperand(0));
if (!InputReg)
// Unhandled operand. Halt "fast" selection and bail.
return false;
@@ -2549,7 +2541,7 @@ bool X86FastISel::X86SelectTrunc(const Instruction *I) {
}
// Issue an extract_subreg.
- unsigned ResultReg = fastEmitInst_extractsubreg(MVT::i8,
+ Register ResultReg = fastEmitInst_extractsubreg(MVT::i8,
InputReg, false,
X86::sub_8bit);
if (!ResultReg)
@@ -2608,7 +2600,7 @@ bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
return false;
const Value *Op = II->getArgOperand(0);
- unsigned InputReg = getRegForValue(Op);
+ Register InputReg = getRegForValue(Op);
if (InputReg == 0)
return false;
@@ -2632,12 +2624,15 @@ bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
// used to provide rounding control: use MXCSR.RC, encoded as 0b100.
// It's consistent with the other FP instructions, which are usually
// controlled by MXCSR.
- InputReg = fastEmitInst_ri(X86::VCVTPS2PHrr, RC, InputReg, false, 4);
+ unsigned Opc = Subtarget->hasVLX() ? X86::VCVTPS2PHZ128rr
+ : X86::VCVTPS2PHrr;
+ InputReg = fastEmitInst_ri(Opc, RC, InputReg, false, 4);
// Move the lower 32-bits of ResultReg to another register of class GR32.
+ Opc = Subtarget->hasAVX512() ? X86::VMOVPDI2DIZrr
+ : X86::VMOVPDI2DIrr;
ResultReg = createResultReg(&X86::GR32RegClass);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
- TII.get(X86::VMOVPDI2DIrr), ResultReg)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
.addReg(InputReg, RegState::Kill);
// The result value is in the lower 16-bits of ResultReg.
@@ -2645,19 +2640,21 @@ bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
ResultReg = fastEmitInst_extractsubreg(MVT::i16, ResultReg, true, RegIdx);
} else {
assert(Op->getType()->isIntegerTy(16) && "Expected a 16-bit integer!");
- // Explicitly sign-extend the input to 32-bit.
- InputReg = fastEmit_r(MVT::i16, MVT::i32, ISD::SIGN_EXTEND, InputReg,
+ // Explicitly zero-extend the input to 32-bit.
+ InputReg = fastEmit_r(MVT::i16, MVT::i32, ISD::ZERO_EXTEND, InputReg,
/*Kill=*/false);
// The following SCALAR_TO_VECTOR will be expanded into a VMOVDI2PDIrr.
InputReg = fastEmit_r(MVT::i32, MVT::v4i32, ISD::SCALAR_TO_VECTOR,
InputReg, /*Kill=*/true);
- InputReg = fastEmitInst_r(X86::VCVTPH2PSrr, RC, InputReg, /*Kill=*/true);
+ unsigned Opc = Subtarget->hasVLX() ? X86::VCVTPH2PSZ128rr
+ : X86::VCVTPH2PSrr;
+ InputReg = fastEmitInst_r(Opc, RC, InputReg, /*Kill=*/true);
// The result value is in the lower 32-bits of ResultReg.
// Emit an explicit copy from register class VR128 to register class FR32.
- ResultReg = createResultReg(&X86::FR32RegClass);
+ ResultReg = createResultReg(TLI.getRegClassFor(MVT::f32));
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), ResultReg)
.addReg(InputReg, RegState::Kill);
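These two hunks lower the fp16 conversion intrinsics: under AVX-512VL the Z128 forms are selected so the EVEX encoding stays legal, and the 16-bit input is now zero- rather than sign-extended, since a half is an opaque bit pattern, not a signed integer. Roughly the same operations via the F16C intrinsics (an illustrative analogy, assuming <immintrin.h> and -mf16c, not code from the patch):

    #include <immintrin.h>

    unsigned short toHalf(float f) {
      return _cvtss_sh(f, 4);  // imm 4: round per MXCSR.RC, as the comment says
    }
    float fromHalf(unsigned short h) {
      return _cvtsh_ss(h);     // bits move in unmodified, hence the zext
    }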
@@ -2700,7 +2697,7 @@ bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
// Always make a copy of the frame register to a vreg first, so that we
// never directly reference the frame register (the TwoAddressInstruction-
// Pass doesn't like that).
- unsigned SrcReg = createResultReg(RC);
+ Register SrcReg = createResultReg(RC);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), SrcReg).addReg(FrameReg);
@@ -2830,7 +2827,7 @@ bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
}
const Value *SrcVal = II->getArgOperand(0);
- unsigned SrcReg = getRegForValue(SrcVal);
+ Register SrcReg = getRegForValue(SrcVal);
if (SrcReg == 0)
return false;
@@ -2843,7 +2840,7 @@ bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
TII.get(TargetOpcode::IMPLICIT_DEF), ImplicitDefReg);
}
- unsigned ResultReg = createResultReg(RC);
+ Register ResultReg = createResultReg(RC);
MachineInstrBuilder MIB;
MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc),
ResultReg);
@@ -2903,7 +2900,7 @@ bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
BaseOpc = X86ISD::UMUL; CondCode = X86::COND_O; break;
}
- unsigned LHSReg = getRegForValue(LHS);
+ Register LHSReg = getRegForValue(LHS);
if (LHSReg == 0)
return false;
bool LHSIsKill = hasTrivialKill(LHS);
@@ -2974,7 +2971,7 @@ bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
return false;
// Assign to a GPR since the overflow return value is lowered to a SETcc.
- unsigned ResultReg2 = createResultReg(&X86::GR8RegClass);
+ Register ResultReg2 = createResultReg(&X86::GR8RegClass);
assert((ResultReg+1) == ResultReg2 && "Nonconsecutive result registers.");
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::SETCCr),
ResultReg2).addImm(CondCode);
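This is the tail of the *.with.overflow lowering: the arithmetic result lands in ResultReg and the overflow flag is materialized by SETcc into the consecutive ResultReg2. In portable C++ the contract being implemented looks like this (a sketch; __builtin_*_overflow is the Clang/GCC analogue, not what FastISel calls):

    #include <cstdint>

    bool umulWithOverflow(uint32_t a, uint32_t b, uint32_t &out) {
      return __builtin_umul_overflow(a, b, &out);  // SETO on the MUL's flags
    }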
@@ -3041,11 +3038,11 @@ bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
Op = IE->getOperand(0);
}
- unsigned Reg = getRegForValue(Op);
+ Register Reg = getRegForValue(Op);
if (Reg == 0)
return false;
- unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
+ Register ResultReg = createResultReg(TLI.getRegClassFor(VT));
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
.addReg(Reg);
@@ -3139,11 +3136,11 @@ bool X86FastISel::fastLowerArguments() {
case MVT::f32: LLVM_FALLTHROUGH;
case MVT::f64: SrcReg = XMMArgRegs[FPRIdx++]; break;
}
- unsigned DstReg = FuncInfo.MF->addLiveIn(SrcReg, RC);
+ Register DstReg = FuncInfo.MF->addLiveIn(SrcReg, RC);
// FIXME: Unfortunately it's necessary to emit a copy from the livein copy.
// Without this, EmitLiveInCopies may eliminate the livein if its only
// use is a bitcast (which isn't turned into an instruction).
- unsigned ResultReg = createResultReg(RC);
+ Register ResultReg = createResultReg(RC);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), ResultReg)
.addReg(DstReg, getKillRegState(true));
@@ -3154,7 +3151,7 @@ bool X86FastISel::fastLowerArguments() {
static unsigned computeBytesPoppedByCalleeForSRet(const X86Subtarget *Subtarget,
CallingConv::ID CC,
- ImmutableCallSite *CS) {
+ const CallBase *CB) {
if (Subtarget->is64Bit())
return 0;
if (Subtarget->getTargetTriple().isOSMSVCRT())
@@ -3163,9 +3160,9 @@ static unsigned computeBytesPoppedByCalleeForSRet(const X86Subtarget *Subtarget,
CC == CallingConv::HiPE || CC == CallingConv::Tail)
return 0;
- if (CS)
- if (CS->arg_empty() || !CS->paramHasAttr(0, Attribute::StructRet) ||
- CS->paramHasAttr(0, Attribute::InReg) || Subtarget->isTargetMCU())
+ if (CB)
+ if (CB->arg_empty() || !CB->paramHasAttr(0, Attribute::StructRet) ||
+ CB->paramHasAttr(0, Attribute::InReg) || Subtarget->isTargetMCU())
return 0;
return 4;
@@ -3186,14 +3183,12 @@ bool X86FastISel::fastLowerCall(CallLoweringInfo &CLI) {
bool Is64Bit = Subtarget->is64Bit();
bool IsWin64 = Subtarget->isCallingConvWin64(CC);
- const CallInst *CI =
- CLI.CS ? dyn_cast<CallInst>(CLI.CS->getInstruction()) : nullptr;
+ const CallInst *CI = dyn_cast_or_null<CallInst>(CLI.CB);
const Function *CalledFn = CI ? CI->getCalledFunction() : nullptr;
// Call / invoke instructions with NoCfCheck attribute require special
// handling.
- const auto *II =
- CLI.CS ? dyn_cast<InvokeInst>(CLI.CS->getInstruction()) : nullptr;
+ const auto *II = dyn_cast_or_null<InvokeInst>(CLI.CB);
if ((CI && CI->doesNoCfCheck()) || (II && II->doesNoCfCheck()))
return false;
@@ -3202,8 +3197,8 @@ bool X86FastISel::fastLowerCall(CallLoweringInfo &CLI) {
(CalledFn && CalledFn->hasFnAttribute("no_caller_saved_registers")))
return false;
- // Functions using retpoline for indirect calls need to use SDISel.
- if (Subtarget->useRetpolineIndirectCalls())
+ // Functions using thunks for indirect calls need to use SDISel.
+ if (Subtarget->useIndirectThunkCalls())
return false;
// Handle only C, fastcc, and webkit_js calling conventions for now.
@@ -3239,11 +3234,11 @@ bool X86FastISel::fastLowerCall(CallLoweringInfo &CLI) {
return false;
// Don't know about inalloca yet.
- if (CLI.CS && CLI.CS->hasInAllocaArgument())
+ if (CLI.CB && CLI.CB->hasInAllocaArgument())
return false;
for (auto Flag : CLI.OutFlags)
- if (Flag.isSwiftError())
+ if (Flag.isSwiftError() || Flag.isPreallocated())
return false;
SmallVector<MVT, 16> OutVTs;
@@ -3269,9 +3264,8 @@ bool X86FastISel::fastLowerCall(CallLoweringInfo &CLI) {
MVT VT;
auto *TI = dyn_cast<TruncInst>(Val);
unsigned ResultReg;
- if (TI && TI->getType()->isIntegerTy(1) && CLI.CS &&
- (TI->getParent() == CLI.CS->getInstruction()->getParent()) &&
- TI->hasOneUse()) {
+ if (TI && TI->getType()->isIntegerTy(1) && CLI.CB &&
+ (TI->getParent() == CLI.CB->getParent()) && TI->hasOneUse()) {
Value *PrevVal = TI->getOperand(0);
ResultReg = getRegForValue(PrevVal);
@@ -3284,7 +3278,8 @@ bool X86FastISel::fastLowerCall(CallLoweringInfo &CLI) {
ResultReg =
fastEmit_ri(VT, VT, ISD::AND, ResultReg, hasTrivialKill(PrevVal), 1);
} else {
- if (!isTypeLegal(Val->getType(), VT))
+ if (!isTypeLegal(Val->getType(), VT) ||
+ (VT.isVector() && VT.getVectorElementType() == MVT::i1))
return false;
ResultReg = getRegForValue(Val);
}
@@ -3302,7 +3297,7 @@ bool X86FastISel::fastLowerCall(CallLoweringInfo &CLI) {
// Allocate shadow area for Win64
if (IsWin64)
- CCInfo.AllocateStack(32, 8);
+ CCInfo.AllocateStack(32, Align(8));
CCInfo.AnalyzeCallOperands(OutVTs, OutFlags, CC_X86);
@@ -3406,7 +3401,7 @@ bool X86FastISel::fastLowerCall(CallLoweringInfo &CLI) {
TII.get(TargetOpcode::COPY), VA.getLocReg()).addReg(ArgReg);
OutRegs.push_back(VA.getLocReg());
} else {
- assert(VA.isMemLoc());
+ assert(VA.isMemLoc() && "Unknown value location!");
// Don't emit stores for undef values.
if (isa<UndefValue>(ArgVal))
@@ -3417,7 +3412,7 @@ bool X86FastISel::fastLowerCall(CallLoweringInfo &CLI) {
AM.Base.Reg = RegInfo->getStackRegister();
AM.Disp = LocMemOffset;
ISD::ArgFlagsTy Flags = OutFlags[VA.getValNo()];
- unsigned Alignment = DL.getABITypeAlignment(ArgVal->getType());
+ Align Alignment = DL.getABITypeAlign(ArgVal->getType());
MachineMemOperand *MMO = FuncInfo.MF->getMachineMemOperand(
MachinePointerInfo::getStack(*FuncInfo.MF, LocMemOffset),
MachineMemOperand::MOStore, ArgVT.getStoreSize(), Alignment);
@@ -3537,7 +3532,7 @@ bool X86FastISel::fastLowerCall(CallLoweringInfo &CLI) {
X86::isCalleePop(CC, Subtarget->is64Bit(), IsVarArg,
TM.Options.GuaranteedTailCallOpt)
? NumBytes // Callee pops everything.
- : computeBytesPoppedByCalleeForSRet(Subtarget, CC, CLI.CS);
+ : computeBytesPoppedByCalleeForSRet(Subtarget, CC, CLI.CB);
unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackUp))
.addImm(NumBytes).addImm(NumBytesForCalleeToPop);
@@ -3549,7 +3544,7 @@ bool X86FastISel::fastLowerCall(CallLoweringInfo &CLI) {
CCRetInfo.AnalyzeCallResult(Ins, RetCC_X86);
// Copy all of the result registers out of their specified physreg.
- unsigned ResultReg = FuncInfo.CreateRegs(CLI.RetTy);
+ Register ResultReg = FuncInfo.CreateRegs(CLI.RetTy);
for (unsigned i = 0; i != RVLocs.size(); ++i) {
CCValAssign &VA = RVLocs[i];
EVT CopyVT = VA.getValVT();
@@ -3582,7 +3577,7 @@ bool X86FastISel::fastLowerCall(CallLoweringInfo &CLI) {
EVT ResVT = VA.getValVT();
unsigned Opc = ResVT == MVT::f32 ? X86::ST_Fp80m32 : X86::ST_Fp80m64;
unsigned MemSize = ResVT.getSizeInBits()/8;
- int FI = MFI.CreateStackObject(MemSize, MemSize, false);
+ int FI = MFI.CreateStackObject(MemSize, Align(MemSize), false);
addFrameReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(Opc)), FI)
.addReg(CopyReg);
@@ -3647,7 +3642,7 @@ X86FastISel::fastSelectInstruction(const Instruction *I) {
return X86SelectZExt(I);
if (DstVT.bitsLT(SrcVT))
return X86SelectTrunc(I);
- unsigned Reg = getRegForValue(I->getOperand(0));
+ Register Reg = getRegForValue(I->getOperand(0));
if (Reg == 0) return false;
updateValueMap(I, Reg);
return true;
@@ -3668,13 +3663,18 @@ X86FastISel::fastSelectInstruction(const Instruction *I) {
DstVT.getVectorElementType() == MVT::i1)
return false;
- unsigned Reg = getRegForValue(I->getOperand(0));
- if (Reg == 0)
+ Register Reg = getRegForValue(I->getOperand(0));
+ if (!Reg)
return false;
- // No instruction is needed for conversion. Reuse the register used by
- // the fist operand.
- updateValueMap(I, Reg);
+ // Emit a reg-reg copy so we don't propagate cached known bits information
+ // with the wrong VT if we fall out of fast isel after selecting this.
+ const TargetRegisterClass *DstClass = TLI.getRegClassFor(DstVT);
+ Register ResultReg = createResultReg(DstClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(TargetOpcode::COPY), ResultReg).addReg(Reg);
+
+ updateValueMap(I, ResultReg);
return true;
}
}
@@ -3688,7 +3688,7 @@ unsigned X86FastISel::X86MaterializeInt(const ConstantInt *CI, MVT VT) {
uint64_t Imm = CI->getZExtValue();
if (Imm == 0) {
- unsigned SrcReg = fastEmitInst_(X86::MOV32r0, &X86::GR32RegClass);
+ Register SrcReg = fastEmitInst_(X86::MOV32r0, &X86::GR32RegClass);
switch (VT.SimpleTy) {
default: llvm_unreachable("Unexpected value type");
case MVT::i1:
@@ -3701,7 +3701,7 @@ unsigned X86FastISel::X86MaterializeInt(const ConstantInt *CI, MVT VT) {
case MVT::i32:
return SrcReg;
case MVT::i64: {
- unsigned ResultReg = createResultReg(&X86::GR64RegClass);
+ Register ResultReg = createResultReg(&X86::GR64RegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::SUBREG_TO_REG), ResultReg)
.addImm(0).addReg(SrcReg).addImm(X86::sub_32bit);
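Materializing a 64-bit zero reuses MOV32r0 plus SUBREG_TO_REG because on x86-64 a write to a 32-bit register implicitly zeroes the upper half, so no 64-bit instruction is needed. The equivalent source-level reasoning (illustrative):

    #include <cstdint>

    uint64_t zero64() {
      uint32_t lo = 0;                   // MOV32r0 (lowered to xorl)
      return static_cast<uint64_t>(lo);  // SUBREG_TO_REG: free zero-extend
    }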
@@ -3769,11 +3769,7 @@ unsigned X86FastISel::X86MaterializeFP(const ConstantFP *CFP, MVT VT) {
}
// MachineConstantPool wants an explicit alignment.
- unsigned Align = DL.getPrefTypeAlignment(CFP->getType());
- if (Align == 0) {
- // Alignment of vector types. FIXME!
- Align = DL.getTypeAllocSize(CFP->getType());
- }
+ Align Alignment = DL.getPrefTypeAlign(CFP->getType());
// x86-32 PIC requires a PIC base register for constant pools.
unsigned PICBase = 0;
@@ -3786,11 +3782,12 @@ unsigned X86FastISel::X86MaterializeFP(const ConstantFP *CFP, MVT VT) {
PICBase = X86::RIP;
// Create the load from the constant pool.
- unsigned CPI = MCP.getConstantPoolIndex(CFP, Align);
- unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT.SimpleTy));
+ unsigned CPI = MCP.getConstantPoolIndex(CFP, Alignment);
+ Register ResultReg = createResultReg(TLI.getRegClassFor(VT.SimpleTy));
- if (CM == CodeModel::Large) {
- unsigned AddrReg = createResultReg(&X86::GR64RegClass);
+ // Large code model only applies to 64-bit mode.
+ if (Subtarget->is64Bit() && CM == CodeModel::Large) {
+ Register AddrReg = createResultReg(&X86::GR64RegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOV64ri),
AddrReg)
.addConstantPoolIndex(CPI, 0, OpFlag);
@@ -3799,7 +3796,7 @@ unsigned X86FastISel::X86MaterializeFP(const ConstantFP *CFP, MVT VT) {
addDirectMem(MIB, AddrReg);
MachineMemOperand *MMO = FuncInfo.MF->getMachineMemOperand(
MachinePointerInfo::getConstantPool(*FuncInfo.MF),
- MachineMemOperand::MOLoad, DL.getPointerSize(), Align);
+ MachineMemOperand::MOLoad, DL.getPointerSize(), Alignment);
MIB->addMemOperand(*FuncInfo.MF, MMO);
return ResultReg;
}
@@ -3824,7 +3821,7 @@ unsigned X86FastISel::X86MaterializeGV(const GlobalValue *GV, MVT VT) {
AM.IndexReg == 0 && AM.Disp == 0 && AM.GV == nullptr)
return AM.Base.Reg;
- unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
+ Register ResultReg = createResultReg(TLI.getRegClassFor(VT));
if (TM.getRelocationModel() == Reloc::Static &&
TLI.getPointerTy(DL) == MVT::i64) {
// The displacement code could be more than 32 bits away so we need to use
@@ -3883,7 +3880,7 @@ unsigned X86FastISel::fastMaterializeAlloca(const AllocaInst *C) {
? (Subtarget->isTarget64BitILP32() ? X86::LEA64_32r : X86::LEA32r)
: X86::LEA64r;
const TargetRegisterClass *RC = TLI.getRegClassFor(TLI.getPointerTy(DL));
- unsigned ResultReg = createResultReg(RC);
+ Register ResultReg = createResultReg(RC);
addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(Opc), ResultReg), AM);
return ResultReg;
@@ -3916,7 +3913,7 @@ unsigned X86FastISel::fastMaterializeFloatZero(const ConstantFP *CF) {
return 0;
}
- unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
+ Register ResultReg = createResultReg(TLI.getRegClassFor(VT));
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg);
return ResultReg;
}
@@ -3932,16 +3929,12 @@ bool X86FastISel::tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
const X86InstrInfo &XII = (const X86InstrInfo &)TII;
unsigned Size = DL.getTypeAllocSize(LI->getType());
- unsigned Alignment = LI->getAlignment();
-
- if (Alignment == 0) // Ensure that codegen never sees alignment 0
- Alignment = DL.getABITypeAlignment(LI->getType());
SmallVector<MachineOperand, 8> AddrOps;
AM.getFullAddress(AddrOps);
MachineInstr *Result = XII.foldMemoryOperandImpl(
- *FuncInfo.MF, *MI, OpNo, AddrOps, FuncInfo.InsertPt, Size, Alignment,
+ *FuncInfo.MF, *MI, OpNo, AddrOps, FuncInfo.InsertPt, Size, LI->getAlign(),
/*AllowCommute=*/true);
if (!Result)
return false;
@@ -3958,7 +3951,7 @@ bool X86FastISel::tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
if (!MO.isReg() || MO.isDef() || MO.getReg() != AM.IndexReg)
continue;
// Found the index reg, now try to rewrite it.
- unsigned IndexReg = constrainOperandRegClass(Result->getDesc(),
+ Register IndexReg = constrainOperandRegClass(Result->getDesc(),
MO.getReg(), OperandNo);
if (IndexReg == MO.getReg())
continue;
@@ -3980,7 +3973,7 @@ unsigned X86FastISel::fastEmitInst_rrrr(unsigned MachineInstOpcode,
unsigned Op3, bool Op3IsKill) {
const MCInstrDesc &II = TII.get(MachineInstOpcode);
- unsigned ResultReg = createResultReg(RC);
+ Register ResultReg = createResultReg(RC);
Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
Op2 = constrainOperandRegClass(II, Op2, II.getNumDefs() + 2);