diff options
Diffstat (limited to 'lib/Target/ARM/ARMInstructionSelector.cpp')
-rw-r--r-- | lib/Target/ARM/ARMInstructionSelector.cpp | 268 |
1 file changed, 210 insertions, 58 deletions
diff --git a/lib/Target/ARM/ARMInstructionSelector.cpp b/lib/Target/ARM/ARMInstructionSelector.cpp index 293e734c97cd..4485a474a6df 100644 --- a/lib/Target/ARM/ARMInstructionSelector.cpp +++ b/lib/Target/ARM/ARMInstructionSelector.cpp @@ -1,9 +1,8 @@ //===- ARMInstructionSelector.cpp ----------------------------*- C++ -*-==// // -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// \file @@ -76,6 +75,11 @@ private: const ARMRegisterBankInfo &RBI; const ARMSubtarget &STI; + // FIXME: This is necessary because DAGISel uses "Subtarget->" and GlobalISel + // uses "STI." in the code generated by TableGen. If we want to reuse some of + // the custom C++ predicates written for DAGISel, we need to have both around. + const ARMSubtarget *Subtarget = &STI; + // Store the opcodes that we might need, so we don't have to check what kind // of subtarget (ARM vs Thumb) we have all the time. 
struct OpcodeCache { @@ -98,6 +102,27 @@ private: unsigned STORE8; unsigned LOAD8; + unsigned ADDrr; + unsigned ADDri; + + // Used for G_ICMP + unsigned CMPrr; + unsigned MOVi; + unsigned MOVCCi; + + // Used for G_SELECT + unsigned MOVCCr; + + unsigned TSTri; + unsigned Bcc; + + // Used for G_GLOBAL_VALUE + unsigned MOVi32imm; + unsigned ConstPoolLoad; + unsigned MOV_ga_pcrel; + unsigned LDRLIT_ga_pcrel; + unsigned LDRLIT_ga_abs; + OpcodeCache(const ARMSubtarget &STI); } const Opcodes; @@ -112,6 +137,9 @@ private: unsigned selectLoadStoreOpCode(unsigned Opc, unsigned RegBank, unsigned Size) const; + void renderVFPF32Imm(MachineInstrBuilder &New, const MachineInstr &Old) const; + void renderVFPF64Imm(MachineInstrBuilder &New, const MachineInstr &Old) const; + #define GET_GLOBALISEL_PREDICATES_DECL #include "ARMGenGlobalISel.inc" #undef GET_GLOBALISEL_PREDICATES_DECL @@ -204,7 +232,7 @@ static bool selectMergeValues(MachineInstrBuilder &MIB, MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI) { - assert(TII.getSubtarget().hasVFP2() && "Can't select merge without VFP"); + assert(TII.getSubtarget().hasVFP2Base() && "Can't select merge without VFP"); // We only support G_MERGE_VALUES as a way to stick together two scalar GPRs // into one DPR. @@ -235,7 +263,8 @@ static bool selectUnmergeValues(MachineInstrBuilder &MIB, MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI) { - assert(TII.getSubtarget().hasVFP2() && "Can't select unmerge without VFP"); + assert(TII.getSubtarget().hasVFP2Base() && + "Can't select unmerge without VFP"); // We only support G_UNMERGE_VALUES as a way to break up one DPR into two // GPRs. 
@@ -285,6 +314,24 @@ ARMInstructionSelector::OpcodeCache::OpcodeCache(const ARMSubtarget &STI) { STORE_OPCODE(STORE8, STRBi12); STORE_OPCODE(LOAD8, LDRBi12); + + STORE_OPCODE(ADDrr, ADDrr); + STORE_OPCODE(ADDri, ADDri); + + STORE_OPCODE(CMPrr, CMPrr); + STORE_OPCODE(MOVi, MOVi); + STORE_OPCODE(MOVCCi, MOVCCi); + + STORE_OPCODE(MOVCCr, MOVCCr); + + STORE_OPCODE(TSTri, TSTri); + STORE_OPCODE(Bcc, Bcc); + + STORE_OPCODE(MOVi32imm, MOVi32imm); + ConstPoolLoad = isThumb ? ARM::t2LDRpci : ARM::LDRi12; + STORE_OPCODE(MOV_ga_pcrel, MOV_ga_pcrel); + LDRLIT_ga_pcrel = isThumb ? ARM::tLDRLIT_ga_pcrel : ARM::LDRLIT_ga_pcrel; + LDRLIT_ga_abs = isThumb ? ARM::tLDRLIT_ga_abs : ARM::LDRLIT_ga_abs; #undef MAP_OPCODE } @@ -408,10 +455,11 @@ getComparePreds(CmpInst::Predicate Pred) { } struct ARMInstructionSelector::CmpConstants { - CmpConstants(unsigned CmpOpcode, unsigned FlagsOpcode, unsigned OpRegBank, - unsigned OpSize) + CmpConstants(unsigned CmpOpcode, unsigned FlagsOpcode, unsigned SelectOpcode, + unsigned OpRegBank, unsigned OpSize) : ComparisonOpcode(CmpOpcode), ReadFlagsOpcode(FlagsOpcode), - OperandRegBankID(OpRegBank), OperandSize(OpSize) {} + SelectResultOpcode(SelectOpcode), OperandRegBankID(OpRegBank), + OperandSize(OpSize) {} // The opcode used for performing the comparison. const unsigned ComparisonOpcode; @@ -420,6 +468,9 @@ struct ARMInstructionSelector::CmpConstants { // ARM::INSTRUCTION_LIST_END if we don't need to read the flags. const unsigned ReadFlagsOpcode; + // The opcode used for materializing the result of the comparison. + const unsigned SelectResultOpcode; + // The assumed register bank ID for the operands. 
const unsigned OperandRegBankID; @@ -439,7 +490,7 @@ struct ARMInstructionSelector::InsertInfo { void ARMInstructionSelector::putConstant(InsertInfo I, unsigned DestReg, unsigned Constant) const { - (void)BuildMI(I.MBB, I.InsertBefore, I.DbgLoc, TII.get(ARM::MOVi)) + (void)BuildMI(I.MBB, I.InsertBefore, I.DbgLoc, TII.get(Opcodes.MOVi)) .addDef(DestReg) .addImm(Constant) .add(predOps(ARMCC::AL)) @@ -542,7 +593,8 @@ bool ARMInstructionSelector::insertComparison(CmpConstants Helper, InsertInfo I, } // Select either 1 or the previous result based on the value of the flags. - auto Mov1I = BuildMI(I.MBB, I.InsertBefore, I.DbgLoc, TII.get(ARM::MOVCCi)) + auto Mov1I = BuildMI(I.MBB, I.InsertBefore, I.DbgLoc, + TII.get(Helper.SelectResultOpcode)) .addDef(ResReg) .addUse(PrevRes) .addImm(1) @@ -569,7 +621,7 @@ bool ARMInstructionSelector::selectGlobal(MachineInstrBuilder &MIB, auto &MBB = *MIB->getParent(); auto &MF = *MBB.getParent(); - bool UseMovt = STI.useMovt(MF); + bool UseMovt = STI.useMovt(); unsigned Size = TM.getPointerSize(0); unsigned Alignment = 4; @@ -577,7 +629,9 @@ bool ARMInstructionSelector::selectGlobal(MachineInstrBuilder &MIB, auto addOpsForConstantPoolLoad = [&MF, Alignment, Size](MachineInstrBuilder &MIB, const GlobalValue *GV, bool IsSBREL) { - assert(MIB->getOpcode() == ARM::LDRi12 && "Unsupported instruction"); + assert((MIB->getOpcode() == ARM::LDRi12 || + MIB->getOpcode() == ARM::t2LDRpci) && + "Unsupported instruction"); auto ConstPool = MF.getConstantPool(); auto CPIndex = // For SB relative entries we need a target-specific constant pool. 
@@ -587,21 +641,38 @@ bool ARMInstructionSelector::selectGlobal(MachineInstrBuilder &MIB, ARMConstantPoolConstant::Create(GV, ARMCP::SBREL), Alignment) : ConstPool->getConstantPoolIndex(GV, Alignment); MIB.addConstantPoolIndex(CPIndex, /*Offset*/ 0, /*TargetFlags*/ 0) - .addMemOperand( - MF.getMachineMemOperand(MachinePointerInfo::getConstantPool(MF), - MachineMemOperand::MOLoad, Size, Alignment)) - .addImm(0) - .add(predOps(ARMCC::AL)); + .addMemOperand(MF.getMachineMemOperand( + MachinePointerInfo::getConstantPool(MF), MachineMemOperand::MOLoad, + Size, Alignment)); + if (MIB->getOpcode() == ARM::LDRi12) + MIB.addImm(0); + MIB.add(predOps(ARMCC::AL)); + }; + + auto addGOTMemOperand = [this, &MF, Alignment](MachineInstrBuilder &MIB) { + MIB.addMemOperand(MF.getMachineMemOperand( + MachinePointerInfo::getGOT(MF), MachineMemOperand::MOLoad, + TM.getProgramPointerSize(), Alignment)); }; if (TM.isPositionIndependent()) { bool Indirect = STI.isGVIndirectSymbol(GV); + + // For ARM mode, we have different pseudoinstructions for direct accesses + // and indirect accesses, and the ones for indirect accesses include the + // load from GOT. For Thumb mode, we use the same pseudoinstruction for both + // direct and indirect accesses, and we need to manually generate the load + // from GOT. + bool UseOpcodeThatLoads = Indirect && !STI.isThumb(); + // FIXME: Taking advantage of MOVT for ELF is pretty involved, so we don't // support it yet. See PR28229. unsigned Opc = UseMovt && !STI.isTargetELF() - ? (Indirect ? ARM::MOV_ga_pcrel_ldr : ARM::MOV_ga_pcrel) - : (Indirect ? ARM::LDRLIT_ga_pcrel_ldr : ARM::LDRLIT_ga_pcrel); + ? (UseOpcodeThatLoads ? (unsigned)ARM::MOV_ga_pcrel_ldr + : Opcodes.MOV_ga_pcrel) + : (UseOpcodeThatLoads ? 
(unsigned)ARM::LDRLIT_ga_pcrel_ldr + : Opcodes.LDRLIT_ga_pcrel); MIB->setDesc(TII.get(Opc)); int TargetFlags = ARMII::MO_NO_FLAG; @@ -611,17 +682,35 @@ bool ARMInstructionSelector::selectGlobal(MachineInstrBuilder &MIB, TargetFlags |= ARMII::MO_GOT; MIB->getOperand(1).setTargetFlags(TargetFlags); - if (Indirect) - MIB.addMemOperand(MF.getMachineMemOperand( - MachinePointerInfo::getGOT(MF), MachineMemOperand::MOLoad, - TM.getProgramPointerSize(), Alignment)); + if (Indirect) { + if (!UseOpcodeThatLoads) { + auto ResultReg = MIB->getOperand(0).getReg(); + auto AddressReg = MRI.createVirtualRegister(&ARM::GPRRegClass); + + MIB->getOperand(0).setReg(AddressReg); + + auto InsertBefore = std::next(MIB->getIterator()); + auto MIBLoad = BuildMI(MBB, InsertBefore, MIB->getDebugLoc(), + TII.get(Opcodes.LOAD32)) + .addDef(ResultReg) + .addReg(AddressReg) + .addImm(0) + .add(predOps(ARMCC::AL)); + addGOTMemOperand(MIBLoad); + + if (!constrainSelectedInstRegOperands(*MIBLoad, TII, TRI, RBI)) + return false; + } else { + addGOTMemOperand(MIB); + } + } return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI); } bool isReadOnly = STI.getTargetLowering()->isReadOnly(GV); if (STI.isROPI() && isReadOnly) { - unsigned Opc = UseMovt ? ARM::MOV_ga_pcrel : ARM::LDRLIT_ga_pcrel; + unsigned Opc = UseMovt ? Opcodes.MOV_ga_pcrel : Opcodes.LDRLIT_ga_pcrel; MIB->setDesc(TII.get(Opc)); return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI); } @@ -630,19 +719,19 @@ bool ARMInstructionSelector::selectGlobal(MachineInstrBuilder &MIB, MachineInstrBuilder OffsetMIB; if (UseMovt) { OffsetMIB = BuildMI(MBB, *MIB, MIB->getDebugLoc(), - TII.get(ARM::MOVi32imm), Offset); + TII.get(Opcodes.MOVi32imm), Offset); OffsetMIB.addGlobalAddress(GV, /*Offset*/ 0, ARMII::MO_SBREL); } else { // Load the offset from the constant pool. 
- OffsetMIB = - BuildMI(MBB, *MIB, MIB->getDebugLoc(), TII.get(ARM::LDRi12), Offset); + OffsetMIB = BuildMI(MBB, *MIB, MIB->getDebugLoc(), + TII.get(Opcodes.ConstPoolLoad), Offset); addOpsForConstantPoolLoad(OffsetMIB, GV, /*IsSBREL*/ true); } if (!constrainSelectedInstRegOperands(*OffsetMIB, TII, TRI, RBI)) return false; // Add the offset to the SB register. - MIB->setDesc(TII.get(ARM::ADDrr)); + MIB->setDesc(TII.get(Opcodes.ADDrr)); MIB->RemoveOperand(1); MIB.addReg(ARM::R9) // FIXME: don't hardcode R9 .addReg(Offset) @@ -654,18 +743,18 @@ bool ARMInstructionSelector::selectGlobal(MachineInstrBuilder &MIB, if (STI.isTargetELF()) { if (UseMovt) { - MIB->setDesc(TII.get(ARM::MOVi32imm)); + MIB->setDesc(TII.get(Opcodes.MOVi32imm)); } else { // Load the global's address from the constant pool. - MIB->setDesc(TII.get(ARM::LDRi12)); + MIB->setDesc(TII.get(Opcodes.ConstPoolLoad)); MIB->RemoveOperand(1); addOpsForConstantPoolLoad(MIB, GV, /*IsSBREL*/ false); } } else if (STI.isTargetMachO()) { if (UseMovt) - MIB->setDesc(TII.get(ARM::MOVi32imm)); + MIB->setDesc(TII.get(Opcodes.MOVi32imm)); else - MIB->setDesc(TII.get(ARM::LDRLIT_ga_abs)); + MIB->setDesc(TII.get(Opcodes.LDRLIT_ga_abs)); } else { LLVM_DEBUG(dbgs() << "Object format not supported yet\n"); return false; @@ -680,13 +769,13 @@ bool ARMInstructionSelector::selectSelect(MachineInstrBuilder &MIB, auto InsertBefore = std::next(MIB->getIterator()); auto &DbgLoc = MIB->getDebugLoc(); - // Compare the condition to 0. + // Compare the condition to 1. 
auto CondReg = MIB->getOperand(1).getReg(); assert(validReg(MRI, CondReg, 1, ARM::GPRRegBankID) && "Unsupported types for select operation"); - auto CmpI = BuildMI(MBB, InsertBefore, DbgLoc, TII.get(ARM::CMPri)) + auto CmpI = BuildMI(MBB, InsertBefore, DbgLoc, TII.get(Opcodes.TSTri)) .addUse(CondReg) - .addImm(0) + .addImm(1) .add(predOps(ARMCC::AL)); if (!constrainSelectedInstRegOperands(*CmpI, TII, TRI, RBI)) return false; @@ -699,7 +788,7 @@ bool ARMInstructionSelector::selectSelect(MachineInstrBuilder &MIB, assert(validOpRegPair(MRI, ResReg, TrueReg, 32, ARM::GPRRegBankID) && validOpRegPair(MRI, TrueReg, FalseReg, 32, ARM::GPRRegBankID) && "Unsupported types for select operation"); - auto Mov1I = BuildMI(MBB, InsertBefore, DbgLoc, TII.get(ARM::MOVCCr)) + auto Mov1I = BuildMI(MBB, InsertBefore, DbgLoc, TII.get(Opcodes.MOVCCr)) .addDef(ResReg) .addUse(TrueReg) .addUse(FalseReg) @@ -713,12 +802,37 @@ bool ARMInstructionSelector::selectSelect(MachineInstrBuilder &MIB, bool ARMInstructionSelector::selectShift(unsigned ShiftOpc, MachineInstrBuilder &MIB) const { + assert(!STI.isThumb() && "Unsupported subtarget"); MIB->setDesc(TII.get(ARM::MOVsr)); MIB.addImm(ShiftOpc); MIB.add(predOps(ARMCC::AL)).add(condCodeOp()); return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI); } +void ARMInstructionSelector::renderVFPF32Imm( + MachineInstrBuilder &NewInstBuilder, const MachineInstr &OldInst) const { + assert(OldInst.getOpcode() == TargetOpcode::G_FCONSTANT && + "Expected G_FCONSTANT"); + + APFloat FPImmValue = OldInst.getOperand(1).getFPImm()->getValueAPF(); + int FPImmEncoding = ARM_AM::getFP32Imm(FPImmValue); + assert(FPImmEncoding != -1 && "Invalid immediate value"); + + NewInstBuilder.addImm(FPImmEncoding); +} + +void ARMInstructionSelector::renderVFPF64Imm( + MachineInstrBuilder &NewInstBuilder, const MachineInstr &OldInst) const { + assert(OldInst.getOpcode() == TargetOpcode::G_FCONSTANT && + "Expected G_FCONSTANT"); + + APFloat FPImmValue = 
OldInst.getOperand(1).getFPImm()->getValueAPF(); + int FPImmEncoding = ARM_AM::getFP64Imm(FPImmValue); + assert(FPImmEncoding != -1 && "Invalid immediate value"); + + NewInstBuilder.addImm(FPImmEncoding); +} + bool ARMInstructionSelector::select(MachineInstr &I, CodeGenCoverage &CoverageInfo) const { assert(I.getParent() && "Instruction should be in a basic block!"); @@ -748,12 +862,8 @@ bool ARMInstructionSelector::select(MachineInstr &I, isSExt = true; LLVM_FALLTHROUGH; case G_ZEXT: { - LLT DstTy = MRI.getType(I.getOperand(0).getReg()); - // FIXME: Smaller destination sizes coming soon! - if (DstTy.getSizeInBits() != 32) { - LLVM_DEBUG(dbgs() << "Unsupported destination size for extension"); - return false; - } + assert(MRI.getType(I.getOperand(0).getReg()).getSizeInBits() <= 32 && + "Unsupported destination size for extension"); LLT SrcTy = MRI.getType(I.getOperand(1).getReg()); unsigned SrcSize = SrcTy.getSizeInBits(); @@ -869,10 +979,32 @@ bool ARMInstructionSelector::select(MachineInstr &I, } } + assert(!STI.isThumb() && "Unsupported subtarget"); I.setDesc(TII.get(ARM::MOVi)); MIB.add(predOps(ARMCC::AL)).add(condCodeOp()); break; } + case G_FCONSTANT: { + // Load from constant pool + unsigned Size = MRI.getType(I.getOperand(0).getReg()).getSizeInBits() / 8; + unsigned Alignment = Size; + + assert((Size == 4 || Size == 8) && "Unsupported FP constant type"); + auto LoadOpcode = Size == 4 ? 
ARM::VLDRS : ARM::VLDRD; + + auto ConstPool = MF.getConstantPool(); + auto CPIndex = + ConstPool->getConstantPoolIndex(I.getOperand(1).getFPImm(), Alignment); + MIB->setDesc(TII.get(LoadOpcode)); + MIB->RemoveOperand(1); + MIB.addConstantPoolIndex(CPIndex, /*Offset*/ 0, /*TargetFlags*/ 0) + .addMemOperand( + MF.getMachineMemOperand(MachinePointerInfo::getConstantPool(MF), + MachineMemOperand::MOLoad, Size, Alignment)) + .addImm(0) + .add(predOps(ARMCC::AL)); + break; + } case G_INTTOPTR: case G_PTRTOINT: { auto SrcReg = I.getOperand(1).getReg(); @@ -900,17 +1032,17 @@ bool ARMInstructionSelector::select(MachineInstr &I, case G_SELECT: return selectSelect(MIB, MRI); case G_ICMP: { - CmpConstants Helper(ARM::CMPrr, ARM::INSTRUCTION_LIST_END, - ARM::GPRRegBankID, 32); + CmpConstants Helper(Opcodes.CMPrr, ARM::INSTRUCTION_LIST_END, + Opcodes.MOVCCi, ARM::GPRRegBankID, 32); return selectCmp(Helper, MIB, MRI); } case G_FCMP: { - assert(STI.hasVFP2() && "Can't select fcmp without VFP"); + assert(STI.hasVFP2Base() && "Can't select fcmp without VFP"); unsigned OpReg = I.getOperand(2).getReg(); unsigned Size = MRI.getType(OpReg).getSizeInBits(); - if (Size == 64 && STI.isFPOnlySP()) { + if (Size == 64 && !STI.hasFP64()) { LLVM_DEBUG(dbgs() << "Subtarget only supports single precision"); return false; } @@ -920,7 +1052,7 @@ bool ARMInstructionSelector::select(MachineInstr &I, } CmpConstants Helper(Size == 32 ? ARM::VCMPS : ARM::VCMPD, ARM::FMSTAT, - ARM::FPRRegBankID, Size); + Opcodes.MOVCCi, ARM::FPRRegBankID, Size); return selectCmp(Helper, MIB, MRI); } case G_LSHR: @@ -931,13 +1063,13 @@ bool ARMInstructionSelector::select(MachineInstr &I, return selectShift(ARM_AM::ShiftOpc::lsl, MIB); } case G_GEP: - I.setDesc(TII.get(ARM::ADDrr)); + I.setDesc(TII.get(Opcodes.ADDrr)); MIB.add(predOps(ARMCC::AL)).add(condCodeOp()); break; case G_FRAME_INDEX: // Add 0 to the given frame index and hope it will eventually be folded into // the user(s). 
- I.setDesc(TII.get(ARM::ADDri)); + I.setDesc(TII.get(Opcodes.ADDri)); MIB.addImm(0).add(predOps(ARMCC::AL)).add(condCodeOp()); break; case G_GLOBAL_VALUE: @@ -956,13 +1088,31 @@ bool ARMInstructionSelector::select(MachineInstr &I, LLT ValTy = MRI.getType(Reg); const auto ValSize = ValTy.getSizeInBits(); - assert((ValSize != 64 || STI.hasVFP2()) && + assert((ValSize != 64 || STI.hasVFP2Base()) && "Don't know how to load/store 64-bit value without VFP"); const auto NewOpc = selectLoadStoreOpCode(I.getOpcode(), RegBank, ValSize); if (NewOpc == G_LOAD || NewOpc == G_STORE) return false; + if (ValSize == 1 && NewOpc == Opcodes.STORE8) { + // Before storing a 1-bit value, make sure to clear out any unneeded bits. + unsigned OriginalValue = I.getOperand(0).getReg(); + + unsigned ValueToStore = MRI.createVirtualRegister(&ARM::GPRRegClass); + I.getOperand(0).setReg(ValueToStore); + + auto InsertBefore = I.getIterator(); + auto AndI = BuildMI(MBB, InsertBefore, I.getDebugLoc(), TII.get(Opcodes.AND)) + .addDef(ValueToStore) + .addUse(OriginalValue) + .addImm(1) + .add(predOps(ARMCC::AL)) + .add(condCodeOp()); + if (!constrainSelectedInstRegOperands(*AndI, TII, TRI, RBI)) + return false; + } + I.setDesc(TII.get(NewOpc)); if (NewOpc == ARM::LDRH || NewOpc == ARM::STRH) @@ -988,17 +1138,19 @@ bool ARMInstructionSelector::select(MachineInstr &I, } // Set the flags. - auto Test = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(ARM::TSTri)) - .addReg(I.getOperand(0).getReg()) - .addImm(1) - .add(predOps(ARMCC::AL)); + auto Test = + BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcodes.TSTri)) + .addReg(I.getOperand(0).getReg()) + .addImm(1) + .add(predOps(ARMCC::AL)); if (!constrainSelectedInstRegOperands(*Test, TII, TRI, RBI)) return false; // Branch conditionally. 
- auto Branch = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(ARM::Bcc)) - .add(I.getOperand(1)) - .add(predOps(ARMCC::NE, ARM::CPSR)); + auto Branch = + BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcodes.Bcc)) + .add(I.getOperand(1)) + .add(predOps(ARMCC::NE, ARM::CPSR)); if (!constrainSelectedInstRegOperands(*Branch, TII, TRI, RBI)) return false; I.eraseFromParent(); |