| field | value | date |
|---|---|---|
| author | Dimitry Andric <dim@FreeBSD.org> | 2012-05-03 16:50:55 +0000 |
| committer | Dimitry Andric <dim@FreeBSD.org> | 2012-05-03 16:50:55 +0000 |
| commit | b61ab53cb789e568acbb2952fbead20ab853a696 (patch) | |
| tree | 8575c732129e272992ac5d7b4c2519238fff4735 /lib | |
| parent | 63faed5b8e4f2755f127fcb8aa440480c0649327 (diff) | |
| download | src-b61ab53cb789e568acbb2952fbead20ab853a696.tar.gz, src-b61ab53cb789e568acbb2952fbead20ab853a696.zip | |
Diffstat (limited to 'lib')
122 files changed, 3307 insertions, 8479 deletions
diff --git a/lib/Analysis/ConstantFolding.cpp b/lib/Analysis/ConstantFolding.cpp index 7a0a4e1e8246..783c32e6669d 100644 --- a/lib/Analysis/ConstantFolding.cpp +++ b/lib/Analysis/ConstantFolding.cpp @@ -681,6 +681,7 @@ static Constant *SymbolicallyEvaluateGEP(ArrayRef<Constant *> Ops, // This makes it easy to determine if the getelementptr is "inbounds". // Also, this helps GlobalOpt do SROA on GlobalVariables. Type *Ty = Ptr->getType(); + assert(Ty->isPointerTy() && "Forming regular GEP of non-pointer type"); SmallVector<Constant*, 32> NewIdxs; do { if (SequentialType *ATy = dyn_cast<SequentialType>(Ty)) { @@ -711,10 +712,17 @@ static Constant *SymbolicallyEvaluateGEP(ArrayRef<Constant *> Ops, } Ty = ATy->getElementType(); } else if (StructType *STy = dyn_cast<StructType>(Ty)) { - // Determine which field of the struct the offset points into. The - // getZExtValue is at least as safe as the StructLayout API because we - // know the offset is within the struct at this point. + // If we end up with an offset that isn't valid for this struct type, we + // can't re-form this GEP in a regular form, so bail out. The pointer + // operand likely went through casts that are necessary to make the GEP + // sensible. const StructLayout &SL = *TD->getStructLayout(STy); + if (Offset.uge(SL.getSizeInBytes())) + break; + + // Determine which field of the struct the offset points into. The + // getZExtValue is fine as we've already ensured that the offset is + // within the range representable by the StructLayout API. unsigned ElIdx = SL.getElementContainingOffset(Offset.getZExtValue()); NewIdxs.push_back(ConstantInt::get(Type::getInt32Ty(Ty->getContext()), ElIdx)); diff --git a/lib/Analysis/ScalarEvolution.cpp b/lib/Analysis/ScalarEvolution.cpp index 1d55642079a0..205227ca0b7f 100644 --- a/lib/Analysis/ScalarEvolution.cpp +++ b/lib/Analysis/ScalarEvolution.cpp @@ -3187,7 +3187,7 @@ const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) { // Add the total offset from all the GEP indices to the base. return getAddExpr(BaseS, TotalOffset, - isInBounds ? SCEV::FlagNUW : SCEV::FlagAnyWrap); + isInBounds ? SCEV::FlagNSW : SCEV::FlagAnyWrap); } /// GetMinTrailingZeros - Determine the minimum number of zero bits that S is diff --git a/lib/Analysis/ValueTracking.cpp b/lib/Analysis/ValueTracking.cpp index a430f6281ef0..1418e01d7c81 100644 --- a/lib/Analysis/ValueTracking.cpp +++ b/lib/Analysis/ValueTracking.cpp @@ -564,7 +564,7 @@ void llvm::ComputeMaskedBits(Value *V, APInt &KnownZero, APInt &KnownOne, Depth+1); // If it's known zero, our sign bit is also zero. if (LHSKnownZero.isNegative()) - KnownZero |= LHSKnownZero; + KnownZero.setBit(BitWidth - 1); } break; diff --git a/lib/CodeGen/AsmPrinter/DwarfAccelTable.cpp b/lib/CodeGen/AsmPrinter/DwarfAccelTable.cpp index 660684d1bea5..454a923c13e0 100644 --- a/lib/CodeGen/AsmPrinter/DwarfAccelTable.cpp +++ b/lib/CodeGen/AsmPrinter/DwarfAccelTable.cpp @@ -36,35 +36,20 @@ const char *DwarfAccelTable::Atom::AtomTypeString(enum AtomType AT) { llvm_unreachable("invalid AtomType!"); } -// The general case would need to have a less hard coded size for the -// length of the HeaderData, however, if we're constructing based on a -// single Atom then we know it will always be: 4 + 4 + 2 + 2. -DwarfAccelTable::DwarfAccelTable(DwarfAccelTable::Atom atom) : - Header(12), - HeaderData(atom) { -} - // The length of the header data is always going to be 4 + 4 + 4*NumAtoms. 
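The header-data length named in the comment just above decomposes as a 4-byte `die_offset_base`, a 4-byte atom count, and a 2-byte type plus 2-byte form per atom — which is also where the `Header(8 + (atomList.size() * 4))` expression in the constructor below comes from, and why the deleted single-atom case was hard-coded to 4 + 4 + 2 + 2 = 12. A minimal sketch of that arithmetic, using a hypothetical `Atom` stand-in rather than the real LLVM type:

```cpp
#include <cstdint>
#include <vector>

struct Atom { uint16_t Type, Form; }; // hypothetical stand-in; serializes to 4 bytes

// die_offset_base (4) + atom_count (4) + one (type, form) pair per atom (4 each)
uint32_t headerDataLength(const std::vector<Atom> &Atoms) {
  return 4 + 4 + 4 * static_cast<uint32_t>(Atoms.size());
}
```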
-DwarfAccelTable::DwarfAccelTable(std::vector<DwarfAccelTable::Atom> &atomList) : +DwarfAccelTable::DwarfAccelTable(ArrayRef<DwarfAccelTable::Atom> atomList) : Header(8 + (atomList.size() * 4)), - HeaderData(atomList) { -} + HeaderData(atomList), + Entries(Allocator) { } -DwarfAccelTable::~DwarfAccelTable() { - for (size_t i = 0, e = Data.size(); i < e; ++i) - delete Data[i]; - for (StringMap<DataArray>::iterator - EI = Entries.begin(), EE = Entries.end(); EI != EE; ++EI) - for (DataArray::iterator DI = EI->second.begin(), - DE = EI->second.end(); DI != DE; ++DI) - delete (*DI); -} +DwarfAccelTable::~DwarfAccelTable() { } void DwarfAccelTable::AddName(StringRef Name, DIE* die, char Flags) { + assert(Data.empty() && "Already finalized!"); // If the string is in the list already then add this die to the list // otherwise add a new one. DataArray &DIEs = Entries[Name]; - DIEs.push_back(new HashDataContents(die, Flags)); + DIEs.push_back(new (Allocator) HashDataContents(die, Flags)); } void DwarfAccelTable::ComputeBucketCount(void) { @@ -85,31 +70,23 @@ void DwarfAccelTable::ComputeBucketCount(void) { Header.hashes_count = num; } -namespace { - // DIESorter - comparison predicate that sorts DIEs by their offset. - struct DIESorter { - bool operator()(const struct DwarfAccelTable::HashDataContents *A, - const struct DwarfAccelTable::HashDataContents *B) const { - return A->Die->getOffset() < B->Die->getOffset(); - } - }; +// compareDIEs - comparison predicate that sorts DIEs by their offset. +static bool compareDIEs(const DwarfAccelTable::HashDataContents *A, + const DwarfAccelTable::HashDataContents *B) { + return A->Die->getOffset() < B->Die->getOffset(); } void DwarfAccelTable::FinalizeTable(AsmPrinter *Asm, const char *Prefix) { // Create the individual hash data outputs. for (StringMap<DataArray>::iterator EI = Entries.begin(), EE = Entries.end(); EI != EE; ++EI) { - struct HashData *Entry = new HashData((*EI).getKeyData()); // Unique the entries. 
- std::stable_sort(EI->second.begin(), EI->second.end(), DIESorter()); + std::stable_sort(EI->second.begin(), EI->second.end(), compareDIEs); EI->second.erase(std::unique(EI->second.begin(), EI->second.end()), EI->second.end()); - for (DataArray::const_iterator DI = EI->second.begin(), - DE = EI->second.end(); - DI != DE; ++DI) - Entry->addData((*DI)); + HashData *Entry = new (Allocator) HashData(EI->getKey(), EI->second); Data.push_back(Entry); } @@ -216,7 +193,7 @@ void DwarfAccelTable::EmitData(AsmPrinter *Asm, DwarfDebug *D) { D->getStringPool()); Asm->OutStreamer.AddComment("Num DIEs"); Asm->EmitInt32((*HI)->Data.size()); - for (std::vector<struct HashDataContents*>::const_iterator + for (ArrayRef<HashDataContents*>::const_iterator DI = (*HI)->Data.begin(), DE = (*HI)->Data.end(); DI != DE; ++DI) { // Emit the DIE offset diff --git a/lib/CodeGen/AsmPrinter/DwarfAccelTable.h b/lib/CodeGen/AsmPrinter/DwarfAccelTable.h index 2278d4c784f4..963b8cdf3424 100644 --- a/lib/CodeGen/AsmPrinter/DwarfAccelTable.h +++ b/lib/CodeGen/AsmPrinter/DwarfAccelTable.h @@ -15,6 +15,7 @@ #define CODEGEN_ASMPRINTER_DWARFACCELTABLE_H__ #include "llvm/ADT/StringMap.h" +#include "llvm/ADT/ArrayRef.h" #include "llvm/MC/MCSymbol.h" #include "llvm/Support/Dwarf.h" #include "llvm/Support/DataTypes.h" @@ -164,22 +165,12 @@ public: private: struct TableHeaderData { - uint32_t die_offset_base; - std::vector<Atom> Atoms; + SmallVector<Atom, 1> Atoms; + + TableHeaderData(ArrayRef<Atom> AtomList, uint32_t offset = 0) + : die_offset_base(offset), Atoms(AtomList.begin(), AtomList.end()) { } - TableHeaderData(std::vector<DwarfAccelTable::Atom> &AtomList, - uint32_t offset = 0) : - die_offset_base(offset) { - for (size_t i = 0, e = AtomList.size(); i != e; ++i) - Atoms.push_back(AtomList[i]); - } - - TableHeaderData(DwarfAccelTable::Atom Atom, uint32_t offset = 0) - : die_offset_base(offset) { - Atoms.push_back(Atom); - } - #ifndef NDEBUG void print (raw_ostream &O) { O << "die_offset_base: " << die_offset_base << "\n"; @@ -221,11 +212,11 @@ private: StringRef Str; uint32_t HashValue; MCSymbol *Sym; - std::vector<struct HashDataContents*> Data; // offsets - HashData(StringRef S) : Str(S) { + ArrayRef<HashDataContents*> Data; // offsets + HashData(StringRef S, ArrayRef<HashDataContents*> Data) + : Str(S), Data(Data) { HashValue = DwarfAccelTable::HashDJB(S); } - void addData(struct HashDataContents *Datum) { Data.push_back(Datum); } #ifndef NDEBUG void print(raw_ostream &O) { O << "Name: " << Str << "\n"; @@ -255,15 +246,18 @@ private: void EmitHashes(AsmPrinter *); void EmitOffsets(AsmPrinter *, MCSymbol *); void EmitData(AsmPrinter *, DwarfDebug *D); - + + // Allocator for HashData and HashDataContents. 
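The `BumpPtrAllocator` being introduced here is why the explicit destructor loops above could be deleted: `HashData` and `HashDataContents` objects are placement-new'ed into an arena and reclaimed all at once when the table is destroyed. A minimal sketch of that allocation pattern — simplified, not the actual DwarfAccelTable code, and valid only because the payload needs no destructor:

```cpp
#include "llvm/Support/Allocator.h"

struct Payload {
  int Value;
  explicit Payload(int V) : Value(V) {}
};

void demo() {
  llvm::BumpPtrAllocator Arena;
  // Carve objects out of the arena with placement new; there is no
  // matching delete for individual objects.
  Payload *P = new (Arena.Allocate<Payload>()) Payload(42);
  (void)P;
  // Arena's destructor (or Arena.Reset()) frees everything in one shot.
  // Note that destructors are NOT run, so this only suits trivially
  // destructible objects like the entries used here.
}
```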
+ BumpPtrAllocator Allocator; + // Output Variables TableHeader Header; TableHeaderData HeaderData; std::vector<HashData*> Data; // String Data - typedef std::vector<struct HashDataContents*> DataArray; - typedef StringMap<DataArray> StringEntries; + typedef std::vector<HashDataContents*> DataArray; + typedef StringMap<DataArray, BumpPtrAllocator&> StringEntries; StringEntries Entries; // Buckets/Hashes/Offsets @@ -274,8 +268,7 @@ private: // Public Implementation public: - DwarfAccelTable(DwarfAccelTable::Atom); - DwarfAccelTable(std::vector<DwarfAccelTable::Atom> &); + DwarfAccelTable(ArrayRef<DwarfAccelTable::Atom>); ~DwarfAccelTable(); void AddName(StringRef, DIE*, char = 0); void FinalizeTable(AsmPrinter *, const char *); diff --git a/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp b/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp index 69dc454ae1d7..cc5b6424d73c 100644 --- a/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp +++ b/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp @@ -1032,9 +1032,10 @@ DIE *CompileUnit::getOrCreateSubprogramDIE(DISubprogram SP) { // Add function template parameters. addTemplateParams(*SPDie, SP.getTemplateParams()); - // Unfortunately this code needs to stay here to work around - // a bug in older gdbs that requires the linkage name to resolve - // multiple template functions. + // Unfortunately this code needs to stay here instead of below the + // AT_specification code in order to work around a bug in older + // gdbs that requires the linkage name to resolve multiple template + // functions. StringRef LinkageName = SP.getLinkageName(); if (!LinkageName.empty()) addString(SPDie, dwarf::DW_AT_MIPS_linkage_name, diff --git a/lib/CodeGen/DFAPacketizer.cpp b/lib/CodeGen/DFAPacketizer.cpp index bfbe7790998f..5ff641c7c844 100644 --- a/lib/CodeGen/DFAPacketizer.cpp +++ b/lib/CodeGen/DFAPacketizer.cpp @@ -23,10 +23,10 @@ // //===----------------------------------------------------------------------===// -#include "llvm/CodeGen/ScheduleDAGInstrs.h" #include "llvm/CodeGen/DFAPacketizer.h" #include "llvm/CodeGen/MachineInstr.h" #include "llvm/CodeGen/MachineInstrBundle.h" +#include "llvm/CodeGen/ScheduleDAGInstrs.h" #include "llvm/Target/TargetInstrInfo.h" #include "llvm/MC/MCInstrItineraries.h" using namespace llvm; @@ -100,17 +100,17 @@ void DFAPacketizer::reserveResources(llvm::MachineInstr *MI) { reserveResources(&MID); } -namespace llvm { +namespace { // DefaultVLIWScheduler - This class extends ScheduleDAGInstrs and overrides // Schedule method to build the dependence graph. class DefaultVLIWScheduler : public ScheduleDAGInstrs { public: DefaultVLIWScheduler(MachineFunction &MF, MachineLoopInfo &MLI, - MachineDominatorTree &MDT, bool IsPostRA); + MachineDominatorTree &MDT, bool IsPostRA); // Schedule - Actual scheduling work. void schedule(); }; -} +} // end anonymous namespace DefaultVLIWScheduler::DefaultVLIWScheduler( MachineFunction &MF, MachineLoopInfo &MLI, MachineDominatorTree &MDT, @@ -129,25 +129,49 @@ VLIWPacketizerList::VLIWPacketizerList( bool IsPostRA) : TM(MF.getTarget()), MF(MF) { TII = TM.getInstrInfo(); ResourceTracker = TII->CreateTargetScheduleState(&TM, 0); - VLIWScheduler = new DefaultVLIWScheduler(MF, MLI, MDT, IsPostRA); + SchedulerImpl = new DefaultVLIWScheduler(MF, MLI, MDT, IsPostRA); } // VLIWPacketizerList Dtor VLIWPacketizerList::~VLIWPacketizerList() { - if (VLIWScheduler) - delete VLIWScheduler; + delete SchedulerImpl; + delete ResourceTracker; +} + +// ignorePseudoInstruction - ignore pseudo instructions. 
+bool VLIWPacketizerList::ignorePseudoInstruction(MachineInstr *MI, + MachineBasicBlock *MBB) { + if (MI->isDebugValue()) + return true; + + if (TII->isSchedulingBoundary(MI, MBB, MF)) + return true; + + return false; +} + +// isSoloInstruction - return true if instruction I must end previous +// packet. +bool VLIWPacketizerList::isSoloInstruction(MachineInstr *I) { + if (I->isInlineAsm()) + return true; + + return false; +} - if (ResourceTracker) - delete ResourceTracker; +// addToPacket - Add I to the current packet and reserve resource. +void VLIWPacketizerList::addToPacket(MachineInstr *MI) { + CurrentPacketMIs.push_back(MI); + ResourceTracker->reserveResources(MI); } // endPacket - End the current packet, bundle packet instructions and reset // DFA state. void VLIWPacketizerList::endPacket(MachineBasicBlock *MBB, - MachineInstr *MI) { + MachineInstr *I) { if (CurrentPacketMIs.size() > 1) { MachineInstr *MIFirst = CurrentPacketMIs.front(); - finalizeBundle(*MBB, MIFirst, MI); + finalizeBundle(*MBB, MIFirst, I); } CurrentPacketMIs.clear(); ResourceTracker->clearResources(); @@ -157,36 +181,31 @@ void VLIWPacketizerList::endPacket(MachineBasicBlock *MBB, void VLIWPacketizerList::PacketizeMIs(MachineBasicBlock *MBB, MachineBasicBlock::iterator BeginItr, MachineBasicBlock::iterator EndItr) { - assert(VLIWScheduler && "VLIW Scheduler is not initialized!"); - VLIWScheduler->enterRegion(MBB, BeginItr, EndItr, MBB->size()); - VLIWScheduler->schedule(); - VLIWScheduler->exitRegion(); - - // Generate MI -> SU map. - //std::map <MachineInstr*, SUnit*> MIToSUnit; - MIToSUnit.clear(); - for (unsigned i = 0, e = VLIWScheduler->SUnits.size(); i != e; ++i) { - SUnit *SU = &VLIWScheduler->SUnits[i]; - MIToSUnit[SU->getInstr()] = SU; - } + assert(MBB->end() == EndItr && "Bad EndIndex"); + + SchedulerImpl->enterRegion(MBB, BeginItr, EndItr, MBB->size()); + + // Build the DAG without reordering instructions. + SchedulerImpl->schedule(); + + // Remember scheduling units. + SUnits = SchedulerImpl->SUnits; // The main packetizer loop. for (; BeginItr != EndItr; ++BeginItr) { MachineInstr *MI = BeginItr; - this->initPacketizerState(); + // Ignore pseudo instructions. + if (ignorePseudoInstruction(MI, MBB)) + continue; // End the current packet if needed. - if (this->isSoloInstruction(MI)) { + if (isSoloInstruction(MI)) { endPacket(MBB, MI); continue; } - // Ignore pseudo instructions. - if (this->ignorePseudoInstruction(MI, MBB)) - continue; - - SUnit *SUI = MIToSUnit[MI]; + SUnit *SUI = SchedulerImpl->getSUnit(MI); assert(SUI && "Missing SUnit Info!"); // Ask DFA if machine resource is available for MI. @@ -196,13 +215,13 @@ void VLIWPacketizerList::PacketizeMIs(MachineBasicBlock *MBB, for (std::vector<MachineInstr*>::iterator VI = CurrentPacketMIs.begin(), VE = CurrentPacketMIs.end(); VI != VE; ++VI) { MachineInstr *MJ = *VI; - SUnit *SUJ = MIToSUnit[MJ]; + SUnit *SUJ = SchedulerImpl->getSUnit(MJ); assert(SUJ && "Missing SUnit Info!"); // Is it legal to packetize SUI and SUJ together. - if (!this->isLegalToPacketizeTogether(SUI, SUJ)) { + if (!isLegalToPacketizeTogether(SUI, SUJ)) { // Allow packetization if dependency can be pruned. - if (!this->isLegalToPruneDependencies(SUI, SUJ)) { + if (!isLegalToPruneDependencies(SUI, SUJ)) { // End the packet if dependency cannot be pruned. endPacket(MBB, MI); break; @@ -215,9 +234,11 @@ void VLIWPacketizerList::PacketizeMIs(MachineBasicBlock *MBB, } // Add MI to the current packet. 
- BeginItr = this->addToPacket(MI); + addToPacket(MI); } // For all instructions in BB. // End any packet left behind. endPacket(MBB, EndItr); + + SchedulerImpl->exitRegion(); } diff --git a/lib/CodeGen/LiveIntervalAnalysis.cpp b/lib/CodeGen/LiveIntervalAnalysis.cpp index 3ade66097cbd..934cc124c77e 100644 --- a/lib/CodeGen/LiveIntervalAnalysis.cpp +++ b/lib/CodeGen/LiveIntervalAnalysis.cpp @@ -1068,9 +1068,9 @@ public: #ifndef NDEBUG LIValidator validator; - std::for_each(Entering.begin(), Entering.end(), validator); - std::for_each(Internal.begin(), Internal.end(), validator); - std::for_each(Exiting.begin(), Exiting.end(), validator); + validator = std::for_each(Entering.begin(), Entering.end(), validator); + validator = std::for_each(Internal.begin(), Internal.end(), validator); + validator = std::for_each(Exiting.begin(), Exiting.end(), validator); assert(validator.rangesOk() && "moveAllOperandsFrom broke liveness."); #endif @@ -1115,9 +1115,9 @@ public: #ifndef NDEBUG LIValidator validator; - std::for_each(Entering.begin(), Entering.end(), validator); - std::for_each(Internal.begin(), Internal.end(), validator); - std::for_each(Exiting.begin(), Exiting.end(), validator); + validator = std::for_each(Entering.begin(), Entering.end(), validator); + validator = std::for_each(Internal.begin(), Internal.end(), validator); + validator = std::for_each(Exiting.begin(), Exiting.end(), validator); assert(validator.rangesOk() && "moveAllOperandsInto broke liveness."); #endif } diff --git a/lib/CodeGen/MachineBasicBlock.cpp b/lib/CodeGen/MachineBasicBlock.cpp index 6c8a1072697c..1abb8f240cc3 100644 --- a/lib/CodeGen/MachineBasicBlock.cpp +++ b/lib/CodeGen/MachineBasicBlock.cpp @@ -392,22 +392,44 @@ void MachineBasicBlock::updateTerminator() { TII->InsertBranch(*this, TBB, 0, Cond, dl); } } else { + // Walk through the successors and find the successor which is not + // a landing pad and is not the conditional branch destination (in TBB) + // as the fallthrough successor. + MachineBasicBlock *FallthroughBB = 0; + for (succ_iterator SI = succ_begin(), SE = succ_end(); SI != SE; ++SI) { + if ((*SI)->isLandingPad() || *SI == TBB) + continue; + assert(!FallthroughBB && "Found more than one fallthrough successor."); + FallthroughBB = *SI; + } + if (!FallthroughBB && canFallThrough()) { + // We fallthrough to the same basic block as the conditional jump + // targets. Remove the conditional jump, leaving unconditional + // fallthrough. + // FIXME: This does not seem like a reasonable pattern to support, but it + // has been seen in the wild coming out of degenerate ARM test cases. + TII->RemoveBranch(*this); + + // Finally update the unconditional successor to be reached via a branch + // if it would not be reached by fallthrough. + if (!isLayoutSuccessor(TBB)) + TII->InsertBranch(*this, TBB, 0, Cond, dl); + return; + } + // The block has a fallthrough conditional branch. - MachineBasicBlock *MBBA = *succ_begin(); - MachineBasicBlock *MBBB = *llvm::next(succ_begin()); - if (MBBA == TBB) std::swap(MBBB, MBBA); if (isLayoutSuccessor(TBB)) { if (TII->ReverseBranchCondition(Cond)) { // We can't reverse the condition, add an unconditional branch. 
Cond.clear(); - TII->InsertBranch(*this, MBBA, 0, Cond, dl); + TII->InsertBranch(*this, FallthroughBB, 0, Cond, dl); return; } TII->RemoveBranch(*this); - TII->InsertBranch(*this, MBBA, 0, Cond, dl); - } else if (!isLayoutSuccessor(MBBA)) { + TII->InsertBranch(*this, FallthroughBB, 0, Cond, dl); + } else if (!isLayoutSuccessor(FallthroughBB)) { TII->RemoveBranch(*this); - TII->InsertBranch(*this, TBB, MBBA, Cond, dl); + TII->InsertBranch(*this, TBB, FallthroughBB, Cond, dl); } } } diff --git a/lib/CodeGen/MachineBlockPlacement.cpp b/lib/CodeGen/MachineBlockPlacement.cpp index 22d7212007fc..5ba68517b7a1 100644 --- a/lib/CodeGen/MachineBlockPlacement.cpp +++ b/lib/CodeGen/MachineBlockPlacement.cpp @@ -102,13 +102,13 @@ public: } /// \brief Iterator over blocks within the chain. - typedef SmallVectorImpl<MachineBasicBlock *>::const_iterator iterator; + typedef SmallVectorImpl<MachineBasicBlock *>::iterator iterator; /// \brief Beginning of blocks within the chain. - iterator begin() const { return Blocks.begin(); } + iterator begin() { return Blocks.begin(); } /// \brief End of blocks within the chain. - iterator end() const { return Blocks.end(); } + iterator end() { return Blocks.end(); } /// \brief Merge a block chain into this one. /// @@ -211,12 +211,15 @@ class MachineBlockPlacement : public MachineFunctionPass { void buildChain(MachineBasicBlock *BB, BlockChain &Chain, SmallVectorImpl<MachineBasicBlock *> &BlockWorkList, const BlockFilterSet *BlockFilter = 0); - MachineBasicBlock *findBestLoopTop(MachineFunction &F, - MachineLoop &L, + MachineBasicBlock *findBestLoopTop(MachineLoop &L, const BlockFilterSet &LoopBlockSet); + MachineBasicBlock *findBestLoopExit(MachineFunction &F, + MachineLoop &L, + const BlockFilterSet &LoopBlockSet); void buildLoopChains(MachineFunction &F, MachineLoop &L); + void rotateLoop(BlockChain &LoopChain, MachineBasicBlock *ExitingBB, + const BlockFilterSet &LoopBlockSet); void buildCFGChains(MachineFunction &F); - void AlignLoops(MachineFunction &F); public: static char ID; // Pass identification, replacement for typeid @@ -540,13 +543,74 @@ void MachineBlockPlacement::buildChain( /// \brief Find the best loop top block for layout. /// +/// Look for a block which is strictly better than the loop header for laying +/// out at the top of the loop. This looks for one and only one pattern: +/// a latch block with no conditional exit. This block will cause a conditional +/// jump around it or will be the bottom of the loop if we lay it out in place, +/// but if it it doesn't end up at the bottom of the loop for any reason, +/// rotation alone won't fix it. Because such a block will always result in an +/// unconditional jump (for the backedge) rotating it in front of the loop +/// header is always profitable. +MachineBasicBlock * +MachineBlockPlacement::findBestLoopTop(MachineLoop &L, + const BlockFilterSet &LoopBlockSet) { + // Check that the header hasn't been fused with a preheader block due to + // crazy branches. If it has, we need to start with the header at the top to + // prevent pulling the preheader into the loop body. 
+ BlockChain &HeaderChain = *BlockToChain[L.getHeader()]; + if (!LoopBlockSet.count(*HeaderChain.begin())) + return L.getHeader(); + + DEBUG(dbgs() << "Finding best loop top for: " + << getBlockName(L.getHeader()) << "\n"); + + BlockFrequency BestPredFreq; + MachineBasicBlock *BestPred = 0; + for (MachineBasicBlock::pred_iterator PI = L.getHeader()->pred_begin(), + PE = L.getHeader()->pred_end(); + PI != PE; ++PI) { + MachineBasicBlock *Pred = *PI; + if (!LoopBlockSet.count(Pred)) + continue; + DEBUG(dbgs() << " header pred: " << getBlockName(Pred) << ", " + << Pred->succ_size() << " successors, " + << MBFI->getBlockFreq(Pred) << " freq\n"); + if (Pred->succ_size() > 1) + continue; + + BlockFrequency PredFreq = MBFI->getBlockFreq(Pred); + if (!BestPred || PredFreq > BestPredFreq || + (!(PredFreq < BestPredFreq) && + Pred->isLayoutSuccessor(L.getHeader()))) { + BestPred = Pred; + BestPredFreq = PredFreq; + } + } + + // If no direct predecessor is fine, just use the loop header. + if (!BestPred) + return L.getHeader(); + + // Walk backwards through any straight line of predecessors. + while (BestPred->pred_size() == 1 && + (*BestPred->pred_begin())->succ_size() == 1 && + *BestPred->pred_begin() != L.getHeader()) + BestPred = *BestPred->pred_begin(); + + DEBUG(dbgs() << " final top: " << getBlockName(BestPred) << "\n"); + return BestPred; +} + + +/// \brief Find the best loop exiting block for layout. +/// /// This routine implements the logic to analyze the loop looking for the best /// block to layout at the top of the loop. Typically this is done to maximize /// fallthrough opportunities. MachineBasicBlock * -MachineBlockPlacement::findBestLoopTop(MachineFunction &F, - MachineLoop &L, - const BlockFilterSet &LoopBlockSet) { +MachineBlockPlacement::findBestLoopExit(MachineFunction &F, + MachineLoop &L, + const BlockFilterSet &LoopBlockSet) { // We don't want to layout the loop linearly in all cases. If the loop header // is just a normal basic block in the loop, we want to look for what block // within the loop is the best one to layout at the top. However, if the loop @@ -557,11 +621,11 @@ MachineBlockPlacement::findBestLoopTop(MachineFunction &F, // header and only rotate if safe. BlockChain &HeaderChain = *BlockToChain[L.getHeader()]; if (!LoopBlockSet.count(*HeaderChain.begin())) - return L.getHeader(); + return 0; BlockFrequency BestExitEdgeFreq; + unsigned BestExitLoopDepth = 0; MachineBasicBlock *ExitingBB = 0; - MachineBasicBlock *LoopingBB = 0; // If there are exits to outer loops, loop rotation can severely limit // fallthrough opportunites unless it selects such an exit. Keep a set of // blocks where rotating to exit with that block will reach an outer loop. @@ -584,15 +648,10 @@ MachineBlockPlacement::findBestLoopTop(MachineFunction &F, // successor isn't found. MachineBasicBlock *OldExitingBB = ExitingBB; BlockFrequency OldBestExitEdgeFreq = BestExitEdgeFreq; - // We also compute and store the best looping successor for use in layout. - MachineBasicBlock *BestLoopSucc = 0; + bool HasLoopingSucc = false; // FIXME: Due to the performance of the probability and weight routines in - // the MBPI analysis, we use the internal weights. This is only valid - // because it is purely a ranking function, we don't care about anything - // but the relative values. - uint32_t BestLoopSuccWeight = 0; - // FIXME: We also manually compute the probabilities to avoid quadratic - // behavior. 
+ // the MBPI analysis, we use the internal weights and manually compute the + // probabilities to avoid quadratic behavior. uint32_t WeightScale = 0; uint32_t SumWeight = MBPI->getSumForBlock(*I, WeightScale); for (MachineBasicBlock::succ_iterator SI = (*I)->succ_begin(), @@ -604,10 +663,8 @@ MachineBlockPlacement::findBestLoopTop(MachineFunction &F, continue; BlockChain &SuccChain = *BlockToChain[*SI]; // Don't split chains, either this chain or the successor's chain. - if (&Chain == &SuccChain || *SI != *SuccChain.begin()) { - DEBUG(dbgs() << " " << (LoopBlockSet.count(*SI) ? "looping: " - : "exiting: ") - << getBlockName(*I) << " -> " + if (&Chain == &SuccChain) { + DEBUG(dbgs() << " exiting: " << getBlockName(*I) << " -> " << getBlockName(*SI) << " (chain conflict)\n"); continue; } @@ -616,60 +673,103 @@ MachineBlockPlacement::findBestLoopTop(MachineFunction &F, if (LoopBlockSet.count(*SI)) { DEBUG(dbgs() << " looping: " << getBlockName(*I) << " -> " << getBlockName(*SI) << " (" << SuccWeight << ")\n"); - if (BestLoopSucc && BestLoopSuccWeight >= SuccWeight) - continue; - - BestLoopSucc = *SI; - BestLoopSuccWeight = SuccWeight; + HasLoopingSucc = true; continue; } + unsigned SuccLoopDepth = 0; + if (MachineLoop *ExitLoop = MLI->getLoopFor(*SI)) { + SuccLoopDepth = ExitLoop->getLoopDepth(); + if (ExitLoop->contains(&L)) + BlocksExitingToOuterLoop.insert(*I); + } + BranchProbability SuccProb(SuccWeight / WeightScale, SumWeight); BlockFrequency ExitEdgeFreq = MBFI->getBlockFreq(*I) * SuccProb; DEBUG(dbgs() << " exiting: " << getBlockName(*I) << " -> " - << getBlockName(*SI) << " (" << ExitEdgeFreq << ")\n"); + << getBlockName(*SI) << " [L:" << SuccLoopDepth + << "] (" << ExitEdgeFreq << ")\n"); // Note that we slightly bias this toward an existing layout successor to // retain incoming order in the absence of better information. // FIXME: Should we bias this more strongly? It's pretty weak. - if (!ExitingBB || ExitEdgeFreq > BestExitEdgeFreq || + if (!ExitingBB || BestExitLoopDepth < SuccLoopDepth || + ExitEdgeFreq > BestExitEdgeFreq || ((*I)->isLayoutSuccessor(*SI) && !(ExitEdgeFreq < BestExitEdgeFreq))) { BestExitEdgeFreq = ExitEdgeFreq; ExitingBB = *I; } - - if (MachineLoop *ExitLoop = MLI->getLoopFor(*SI)) - if (ExitLoop->contains(&L)) - BlocksExitingToOuterLoop.insert(*I); } // Restore the old exiting state, no viable looping successor was found. - if (!BestLoopSucc) { + if (!HasLoopingSucc) { ExitingBB = OldExitingBB; BestExitEdgeFreq = OldBestExitEdgeFreq; continue; } - - // If this was best exiting block thus far, also record the looping block. - if (ExitingBB == *I) - LoopingBB = BestLoopSucc; } - // Without a candidate exitting block or with only a single block in the + // Without a candidate exiting block or with only a single block in the // loop, just use the loop header to layout the loop. if (!ExitingBB || L.getNumBlocks() == 1) - return L.getHeader(); + return 0; // Also, if we have exit blocks which lead to outer loops but didn't select // one of them as the exiting block we are rotating toward, disable loop // rotation altogether. 
if (!BlocksExitingToOuterLoop.empty() && !BlocksExitingToOuterLoop.count(ExitingBB)) - return L.getHeader(); + return 0; - assert(LoopingBB && "All successors of a loop block are exit blocks!"); DEBUG(dbgs() << " Best exiting block: " << getBlockName(ExitingBB) << "\n"); - DEBUG(dbgs() << " Best top block: " << getBlockName(LoopingBB) << "\n"); - return LoopingBB; + return ExitingBB; +} + +/// \brief Attempt to rotate an exiting block to the bottom of the loop. +/// +/// Once we have built a chain, try to rotate it to line up the hot exit block +/// with fallthrough out of the loop if doing so doesn't introduce unnecessary +/// branches. For example, if the loop has fallthrough into its header and out +/// of its bottom already, don't rotate it. +void MachineBlockPlacement::rotateLoop(BlockChain &LoopChain, + MachineBasicBlock *ExitingBB, + const BlockFilterSet &LoopBlockSet) { + if (!ExitingBB) + return; + + MachineBasicBlock *Top = *LoopChain.begin(); + bool ViableTopFallthrough = false; + for (MachineBasicBlock::pred_iterator PI = Top->pred_begin(), + PE = Top->pred_end(); + PI != PE; ++PI) { + BlockChain *PredChain = BlockToChain[*PI]; + if (!LoopBlockSet.count(*PI) && + (!PredChain || *PI == *llvm::prior(PredChain->end()))) { + ViableTopFallthrough = true; + break; + } + } + + // If the header has viable fallthrough, check whether the current loop + // bottom is a viable exiting block. If so, bail out as rotating will + // introduce an unnecessary branch. + if (ViableTopFallthrough) { + MachineBasicBlock *Bottom = *llvm::prior(LoopChain.end()); + for (MachineBasicBlock::succ_iterator SI = Bottom->succ_begin(), + SE = Bottom->succ_end(); + SI != SE; ++SI) { + BlockChain *SuccChain = BlockToChain[*SI]; + if (!LoopBlockSet.count(*SI) && + (!SuccChain || *SI == *SuccChain->begin())) + return; + } + } + + BlockChain::iterator ExitIt = std::find(LoopChain.begin(), LoopChain.end(), + ExitingBB); + if (ExitIt == LoopChain.end()) + return; + + std::rotate(LoopChain.begin(), llvm::next(ExitIt), LoopChain.end()); } /// \brief Forms basic block chains from the natural loop structures. @@ -688,8 +788,20 @@ void MachineBlockPlacement::buildLoopChains(MachineFunction &F, SmallVector<MachineBasicBlock *, 16> BlockWorkList; BlockFilterSet LoopBlockSet(L.block_begin(), L.block_end()); - MachineBasicBlock *LayoutTop = findBestLoopTop(F, L, LoopBlockSet); - BlockChain &LoopChain = *BlockToChain[LayoutTop]; + // First check to see if there is an obviously preferable top block for the + // loop. This will default to the header, but may end up as one of the + // predecessors to the header if there is one which will result in strictly + // fewer branches in the loop body. + MachineBasicBlock *LoopTop = findBestLoopTop(L, LoopBlockSet); + + // If we selected just the header for the loop top, look for a potentially + // profitable exit block in the event that rotating the loop can eliminate + // branches by placing an exit edge at the bottom. 
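The `std::rotate` call in `rotateLoop` above is what actually moves the chosen exiting block to the bottom of the chain while keeping every other block in its original relative order. A standalone sketch of that effect, with plain ints standing in for `MachineBasicBlock*`:

```cpp
#include <algorithm>
#include <iostream>
#include <iterator>
#include <vector>

int main() {
  std::vector<int> Chain = {1, 2, 3, 4, 5}; // pretend 3 is the exiting block
  auto ExitIt = std::find(Chain.begin(), Chain.end(), 3);
  // rotate(first, middle, last) makes `middle` the new first element, so
  // everything after the exiting block wraps around to the front and the
  // exiting block itself lands at the bottom of the chain.
  std::rotate(Chain.begin(), std::next(ExitIt), Chain.end());
  for (int B : Chain)
    std::cout << B << ' '; // prints: 4 5 1 2 3
}
```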
+ MachineBasicBlock *ExitingBB = 0; + if (LoopTop == L.getHeader()) + ExitingBB = findBestLoopExit(F, L, LoopBlockSet); + + BlockChain &LoopChain = *BlockToChain[LoopTop]; // FIXME: This is a really lame way of walking the chains in the loop: we // walk the blocks, and use a set to prevent visiting a particular chain @@ -721,7 +833,8 @@ void MachineBlockPlacement::buildLoopChains(MachineFunction &F, BlockWorkList.push_back(*Chain.begin()); } - buildChain(LayoutTop, LoopChain, BlockWorkList, &LoopBlockSet); + buildChain(LoopTop, LoopChain, BlockWorkList, &LoopBlockSet); + rotateLoop(LoopChain, ExitingBB, LoopBlockSet); DEBUG({ // Crash at the end so we get all of the debugging output first. @@ -733,7 +846,8 @@ void MachineBlockPlacement::buildLoopChains(MachineFunction &F, << " Chain header: " << getBlockName(*LoopChain.begin()) << "\n"; } for (BlockChain::iterator BCI = LoopChain.begin(), BCE = LoopChain.end(); - BCI != BCE; ++BCI) + BCI != BCE; ++BCI) { + dbgs() << " ... " << getBlockName(*BCI) << "\n"; if (!LoopBlockSet.erase(*BCI)) { // We don't mark the loop as bad here because there are real situations // where this can occur. For example, with an unanalyzable fallthrough @@ -743,6 +857,7 @@ void MachineBlockPlacement::buildLoopChains(MachineFunction &F, << " Chain header: " << getBlockName(*LoopChain.begin()) << "\n" << " Bad block: " << getBlockName(*BCI) << "\n"; } + } if (!LoopBlockSet.empty()) { BadLoop = true; @@ -882,28 +997,33 @@ void MachineBlockPlacement::buildCFGChains(MachineFunction &F) { MachineBasicBlock *TBB = 0, *FBB = 0; // For AnalyzeBranch. if (!TII->AnalyzeBranch(F.back(), TBB, FBB, Cond)) F.back().updateTerminator(); -} -/// \brief Recursive helper to align a loop and any nested loops. -static void AlignLoop(MachineFunction &F, MachineLoop *L, unsigned Align) { - // Recurse through nested loops. - for (MachineLoop::iterator I = L->begin(), E = L->end(); I != E; ++I) - AlignLoop(F, *I, Align); - - L->getTopBlock()->setAlignment(Align); -} - -/// \brief Align loop headers to target preferred alignments. -void MachineBlockPlacement::AlignLoops(MachineFunction &F) { + // Walk through the backedges of the function now that we have fully laid out + // the basic blocks and align the destination of each backedge. We don't rely + // on the loop info here so that we can align backedges in unnatural CFGs and + // backedges that were introduced purely because of the loop rotations done + // during this layout pass. + // FIXME: This isn't quite right, we shouldn't align backedges that result + // from blocks being sunken below the exit block for the function. if (F.getFunction()->hasFnAttr(Attribute::OptimizeForSize)) return; - unsigned Align = TLI->getPrefLoopAlignment(); if (!Align) return; // Don't care about loop alignment. - for (MachineLoopInfo::iterator I = MLI->begin(), E = MLI->end(); I != E; ++I) - AlignLoop(F, *I, Align); + SmallPtrSet<MachineBasicBlock *, 16> PreviousBlocks; + for (BlockChain::iterator BI = FunctionChain.begin(), + BE = FunctionChain.end(); + BI != BE; ++BI) { + PreviousBlocks.insert(*BI); + // Set alignment on the destination of all the back edges in the new + // ordering. 
+ for (MachineBasicBlock::succ_iterator SI = (*BI)->succ_begin(), + SE = (*BI)->succ_end(); + SI != SE; ++SI) + if (PreviousBlocks.count(*SI)) + (*SI)->setAlignment(Align); + } } bool MachineBlockPlacement::runOnMachineFunction(MachineFunction &F) { @@ -919,7 +1039,6 @@ bool MachineBlockPlacement::runOnMachineFunction(MachineFunction &F) { assert(BlockToChain.empty()); buildCFGChains(F); - AlignLoops(F); BlockToChain.clear(); ChainAllocator.DestroyAll(); diff --git a/lib/CodeGen/Passes.cpp b/lib/CodeGen/Passes.cpp index 53d1fcf7377a..490547bbb873 100644 --- a/lib/CodeGen/Passes.cpp +++ b/lib/CodeGen/Passes.cpp @@ -37,8 +37,9 @@ static cl::opt<bool> DisableTailDuplicate("disable-tail-duplicate", cl::Hidden, cl::desc("Disable tail duplication")); static cl::opt<bool> DisableEarlyTailDup("disable-early-taildup", cl::Hidden, cl::desc("Disable pre-register allocation tail duplication")); -static cl::opt<bool> EnableBlockPlacement("enable-block-placement", - cl::Hidden, cl::desc("Enable probability-driven block placement")); +static cl::opt<bool> DisableBlockPlacement("disable-block-placement", + cl::Hidden, cl::desc("Disable the probability-driven block placement, and " + "re-enable the old code placement pass")); static cl::opt<bool> EnableBlockPlacementStats("enable-block-placement-stats", cl::Hidden, cl::desc("Collect probability-driven block placement stats")); static cl::opt<bool> DisableCodePlace("disable-code-place", cl::Hidden, @@ -206,7 +207,7 @@ TargetPassConfig::~TargetPassConfig() { // Out of line constructor provides default values for pass options and // registers all common codegen passes. TargetPassConfig::TargetPassConfig(TargetMachine *tm, PassManagerBase &pm) - : ImmutablePass(ID), TM(tm), PM(pm), Impl(0), Initialized(false), + : ImmutablePass(ID), TM(tm), PM(&pm), Impl(0), Initialized(false), DisableVerify(false), EnableTailMerge(true) { @@ -233,7 +234,7 @@ TargetPassConfig *LLVMTargetMachine::createPassConfig(PassManagerBase &PM) { } TargetPassConfig::TargetPassConfig() - : ImmutablePass(ID), PM(*(PassManagerBase*)0) { + : ImmutablePass(ID), PM(0) { llvm_unreachable("TargetPassConfig should not be constructed on-the-fly"); } @@ -268,16 +269,16 @@ AnalysisID TargetPassConfig::addPass(char &ID) { Pass *P = Pass::createPass(FinalID); if (!P) llvm_unreachable("Pass ID not registered"); - PM.add(P); + PM->add(P); return FinalID; } void TargetPassConfig::printAndVerify(const char *Banner) const { if (TM->shouldPrintMachineCode()) - PM.add(createMachineFunctionPrinterPass(dbgs(), Banner)); + PM->add(createMachineFunctionPrinterPass(dbgs(), Banner)); if (VerifyMachineCode) - PM.add(createMachineVerifierPass(Banner)); + PM->add(createMachineVerifierPass(Banner)); } /// Add common target configurable passes that perform LLVM IR to IR transforms @@ -287,46 +288,46 @@ void TargetPassConfig::addIRPasses() { // Add TypeBasedAliasAnalysis before BasicAliasAnalysis so that // BasicAliasAnalysis wins if they disagree. This is intended to help // support "obvious" type-punning idioms. - PM.add(createTypeBasedAliasAnalysisPass()); - PM.add(createBasicAliasAnalysisPass()); + PM->add(createTypeBasedAliasAnalysisPass()); + PM->add(createBasicAliasAnalysisPass()); // Before running any passes, run the verifier to determine if the input // coming from the front-end and/or optimizer is valid. if (!DisableVerify) - PM.add(createVerifierPass()); + PM->add(createVerifierPass()); // Run loop strength reduction before anything else. 
if (getOptLevel() != CodeGenOpt::None && !DisableLSR) { - PM.add(createLoopStrengthReducePass(getTargetLowering())); + PM->add(createLoopStrengthReducePass(getTargetLowering())); if (PrintLSR) - PM.add(createPrintFunctionPass("\n\n*** Code after LSR ***\n", &dbgs())); + PM->add(createPrintFunctionPass("\n\n*** Code after LSR ***\n", &dbgs())); } - PM.add(createGCLoweringPass()); + PM->add(createGCLoweringPass()); // Make sure that no unreachable blocks are instruction selected. - PM.add(createUnreachableBlockEliminationPass()); + PM->add(createUnreachableBlockEliminationPass()); } /// Add common passes that perform LLVM IR to IR transforms in preparation for /// instruction selection. void TargetPassConfig::addISelPrepare() { if (getOptLevel() != CodeGenOpt::None && !DisableCGP) - PM.add(createCodeGenPreparePass(getTargetLowering())); + PM->add(createCodeGenPreparePass(getTargetLowering())); - PM.add(createStackProtectorPass(getTargetLowering())); + PM->add(createStackProtectorPass(getTargetLowering())); addPreISel(); if (PrintISelInput) - PM.add(createPrintFunctionPass("\n\n" - "*** Final LLVM Code input to ISel ***\n", - &dbgs())); + PM->add(createPrintFunctionPass("\n\n" + "*** Final LLVM Code input to ISel ***\n", + &dbgs())); // All passes which modify the LLVM IR are now complete; run the verifier // to ensure that the IR is valid. if (!DisableVerify) - PM.add(createVerifierPass()); + PM->add(createVerifierPass()); } /// Add the complete set of target-independent postISel code generator passes. @@ -404,7 +405,7 @@ void TargetPassConfig::addMachinePasses() { // GC addPass(GCMachineCodeAnalysisID); if (PrintGCInfo) - PM.add(createGCInfoPrinter(dbgs())); + PM->add(createGCInfoPrinter(dbgs())); // Basic block placement. if (getOptLevel() != CodeGenOpt::None) @@ -521,7 +522,7 @@ void TargetPassConfig::addFastRegAlloc(FunctionPass *RegAllocPass) { addPass(PHIEliminationID); addPass(TwoAddressInstructionPassID); - PM.add(RegAllocPass); + PM->add(RegAllocPass); printAndVerify("After Register Allocation"); } @@ -563,7 +564,7 @@ void TargetPassConfig::addOptimizedRegAlloc(FunctionPass *RegAllocPass) { printAndVerify("After Machine Scheduling"); // Add the selected register allocation pass. - PM.add(RegAllocPass); + PM->add(RegAllocPass); printAndVerify("After Register Allocation"); // FinalizeRegAlloc is convenient until MachineInstrBundles is more mature, @@ -610,10 +611,10 @@ void TargetPassConfig::addMachineLateOptimization() { /// Add standard basic block placement passes. void TargetPassConfig::addBlockPlacement() { AnalysisID ID = &NoPassID; - if (EnableBlockPlacement) { - // MachineBlockPlacement is an experimental pass which is disabled by - // default currently. Eventually it should subsume CodePlacementOpt, so - // when enabled, the other is disabled. + if (!DisableBlockPlacement) { + // MachineBlockPlacement is a new pass which subsumes the functionality of + // CodPlacementOpt. The old code placement pass can be restored by + // disabling block placement, but eventually it will be removed. 
ID = addPass(MachineBlockPlacementID); } else { ID = addPass(CodePlacementOptID); diff --git a/lib/CodeGen/ScheduleDAGInstrs.cpp b/lib/CodeGen/ScheduleDAGInstrs.cpp index 6be1ab7f5b08..d46eb896e549 100644 --- a/lib/CodeGen/ScheduleDAGInstrs.cpp +++ b/lib/CodeGen/ScheduleDAGInstrs.cpp @@ -39,8 +39,8 @@ ScheduleDAGInstrs::ScheduleDAGInstrs(MachineFunction &mf, LiveIntervals *lis) : ScheduleDAG(mf), MLI(mli), MDT(mdt), MFI(mf.getFrameInfo()), InstrItins(mf.getTarget().getInstrItineraryData()), LIS(lis), - IsPostRA(IsPostRAFlag), UnitLatencies(false), LoopRegs(MLI, MDT), - FirstDbgValue(0) { + IsPostRA(IsPostRAFlag), UnitLatencies(false), CanHandleTerminators(false), + LoopRegs(MLI, MDT), FirstDbgValue(0) { assert((IsPostRA || LIS) && "PreRA scheduling requires LiveIntervals"); DbgValues.clear(); assert(!(IsPostRA && MRI.getNumVirtRegs()) && @@ -554,7 +554,7 @@ void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA) { continue; } - assert(!MI->isTerminator() && !MI->isLabel() && + assert((!MI->isTerminator() || CanHandleTerminators) && !MI->isLabel() && "Cannot schedule terminators or labels!"); SUnit *SU = MISUnitMap[MI]; diff --git a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp index d1b998f8d840..0914c6627660 100644 --- a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp +++ b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp @@ -1080,6 +1080,7 @@ void DAGCombiner::Run(CombineLevel AtLevel) { // If the root changed (e.g. it was a dead load, update the root). DAG.setRoot(Dummy.getValue()); + DAG.RemoveDeadNodes(); } SDValue DAGCombiner::visit(SDNode *N) { diff --git a/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp b/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp index 3ae8345bd198..9fe4480d113a 100644 --- a/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp +++ b/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp @@ -417,7 +417,8 @@ SDValue VectorLegalizer::ExpandVSELECT(SDValue Op) { Op1 = DAG.getNode(ISD::AND, DL, VT, Op1, Mask); Op2 = DAG.getNode(ISD::AND, DL, VT, Op2, NotMask); - return DAG.getNode(ISD::OR, DL, VT, Op1, Op2); + SDValue Val = DAG.getNode(ISD::OR, DL, VT, Op1, Op2); + return DAG.getNode(ISD::BITCAST, DL, Op.getValueType(), Val); } SDValue VectorLegalizer::ExpandUINT_TO_FLOAT(SDValue Op) { diff --git a/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp b/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp index 69dd813b24e0..8ec1ae8620f7 100644 --- a/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp +++ b/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp @@ -138,9 +138,11 @@ static void AddGlue(SDNode *N, SDValue Glue, bool AddGlue, SelectionDAG *DAG) { // Don't add glue from a node to itself. if (GlueDestNode == N) return; - // Don't add glue to something which already has glue. - if (N->getValueType(N->getNumValues() - 1) == MVT::Glue) return; - + // Don't add glue to something that already has it, either as a use or value. 
+ if (N->getOperand(N->getNumOperands()-1).getValueType() == MVT::Glue || + N->getValueType(N->getNumValues() - 1) == MVT::Glue) { + return; + } for (unsigned I = 0, E = N->getNumValues(); I != E; ++I) VTs.push_back(N->getValueType(I)); diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp index 94cb95804f69..f1e879be9567 100644 --- a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp +++ b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp @@ -5050,7 +5050,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) { } case Intrinsic::gcroot: if (GFI) { - const Value *Alloca = I.getArgOperand(0); + const Value *Alloca = I.getArgOperand(0)->stripPointerCasts(); const Constant *TypeMap = cast<Constant>(I.getArgOperand(1)); FrameIndexSDNode *FI = cast<FrameIndexSDNode>(getValue(Alloca).getNode()); diff --git a/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/lib/CodeGen/SelectionDAG/TargetLowering.cpp index 09a2b1f3d7a5..e341e15e41ad 100644 --- a/lib/CodeGen/SelectionDAG/TargetLowering.cpp +++ b/lib/CodeGen/SelectionDAG/TargetLowering.cpp @@ -1367,8 +1367,9 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op, // bits on that side are also known to be set on the other side, turn this // into an AND, as we know the bits will be cleared. // e.g. (X | C1) ^ C2 --> (X | C1) & ~C2 iff (C1&C2) == C2 - if ((NewMask & (KnownZero|KnownOne)) == NewMask) { // all known - if ((KnownOne & KnownOne2) == KnownOne) { + // NB: it is okay if more bits are known than are requested + if ((NewMask & (KnownZero|KnownOne)) == NewMask) { // all known on one side + if (KnownOne == KnownOne2) { // set bits are the same on both sides EVT VT = Op.getValueType(); SDValue ANDC = TLO.DAG.getConstant(~KnownOne & NewMask, VT); return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::AND, dl, VT, diff --git a/lib/CodeGen/SlotIndexes.cpp b/lib/CodeGen/SlotIndexes.cpp index c5bd3a3cae63..26cf25944162 100644 --- a/lib/CodeGen/SlotIndexes.cpp +++ b/lib/CodeGen/SlotIndexes.cpp @@ -34,7 +34,8 @@ void SlotIndexes::releaseMemory() { mi2iMap.clear(); MBBRanges.clear(); idx2MBBMap.clear(); - clearList(); + indexList.clear(); + ileAllocator.Reset(); } bool SlotIndexes::runOnMachineFunction(MachineFunction &fn) { @@ -45,17 +46,15 @@ bool SlotIndexes::runOnMachineFunction(MachineFunction &fn) { // iterator in lock-step (though skipping it over indexes which have // null pointers in the instruction field). // At each iteration assert that the instruction pointed to in the index - // is the same one pointed to by the MI iterator. This + // is the same one pointed to by the MI iterator. This // FIXME: This can be simplified. The mi2iMap_, Idx2MBBMap, etc. should // only need to be set up once after the first numbering is computed. mf = &fn; - initList(); // Check that the list contains only the sentinal. - assert(indexListHead->getNext() == 0 && - "Index list non-empty at initial numbering?"); + assert(indexList.empty() && "Index list non-empty at initial numbering?"); assert(idx2MBBMap.empty() && "Index -> MBB mapping non-empty at initial numbering?"); assert(MBBRanges.empty() && @@ -68,7 +67,7 @@ bool SlotIndexes::runOnMachineFunction(MachineFunction &fn) { MBBRanges.resize(mf->getNumBlockIDs()); idx2MBBMap.reserve(mf->size()); - push_back(createEntry(0, index)); + indexList.push_back(createEntry(0, index)); // Iterate over the function. 
for (MachineFunction::iterator mbbItr = mf->begin(), mbbEnd = mf->end(); @@ -76,7 +75,7 @@ bool SlotIndexes::runOnMachineFunction(MachineFunction &fn) { MachineBasicBlock *mbb = &*mbbItr; // Insert an index for the MBB start. - SlotIndex blockStartIndex(back(), SlotIndex::Slot_Block); + SlotIndex blockStartIndex(&indexList.back(), SlotIndex::Slot_Block); for (MachineBasicBlock::iterator miItr = mbb->begin(), miEnd = mbb->end(); miItr != miEnd; ++miItr) { @@ -85,20 +84,20 @@ bool SlotIndexes::runOnMachineFunction(MachineFunction &fn) { continue; // Insert a store index for the instr. - push_back(createEntry(mi, index += SlotIndex::InstrDist)); + indexList.push_back(createEntry(mi, index += SlotIndex::InstrDist)); // Save this base index in the maps. - mi2iMap.insert(std::make_pair(mi, SlotIndex(back(), + mi2iMap.insert(std::make_pair(mi, SlotIndex(&indexList.back(), SlotIndex::Slot_Block))); - + ++functionSize; } // We insert one blank instructions between basic blocks. - push_back(createEntry(0, index += SlotIndex::InstrDist)); + indexList.push_back(createEntry(0, index += SlotIndex::InstrDist)); MBBRanges[mbb->getNumber()].first = blockStartIndex; - MBBRanges[mbb->getNumber()].second = SlotIndex(back(), + MBBRanges[mbb->getNumber()].second = SlotIndex(&indexList.back(), SlotIndex::Slot_Block); idx2MBBMap.push_back(IdxMBBPair(blockStartIndex, mbb)); } @@ -119,38 +118,37 @@ void SlotIndexes::renumberIndexes() { unsigned index = 0; - for (IndexListEntry *curEntry = front(); curEntry != getTail(); - curEntry = curEntry->getNext()) { - curEntry->setIndex(index); + for (IndexList::iterator I = indexList.begin(), E = indexList.end(); + I != E; ++I) { + I->setIndex(index); index += SlotIndex::InstrDist; } } -// Renumber indexes locally after curEntry was inserted, but failed to get a new +// Renumber indexes locally after curItr was inserted, but failed to get a new // index. -void SlotIndexes::renumberIndexes(IndexListEntry *curEntry) { +void SlotIndexes::renumberIndexes(IndexList::iterator curItr) { // Number indexes with half the default spacing so we can catch up quickly. const unsigned Space = SlotIndex::InstrDist/2; assert((Space & 3) == 0 && "InstrDist must be a multiple of 2*NUM"); - IndexListEntry *start = curEntry->getPrev(); - unsigned index = start->getIndex(); - IndexListEntry *tail = getTail(); + IndexList::iterator startItr = prior(curItr); + unsigned index = startItr->getIndex(); do { - curEntry->setIndex(index += Space); - curEntry = curEntry->getNext(); + curItr->setIndex(index += Space); + ++curItr; // If the next index is bigger, we have caught up. - } while (curEntry != tail && curEntry->getIndex() <= index); + } while (curItr != indexList.end() && curItr->getIndex() <= index); - DEBUG(dbgs() << "\n*** Renumbered SlotIndexes " << start->getIndex() << '-' + DEBUG(dbgs() << "\n*** Renumbered SlotIndexes " << startItr->getIndex() << '-' << index << " ***\n"); ++NumLocalRenum; } void SlotIndexes::dump() const { - for (const IndexListEntry *itr = front(); itr != getTail(); - itr = itr->getNext()) { + for (IndexList::const_iterator itr = indexList.begin(); + itr != indexList.end(); ++itr) { dbgs() << itr->getIndex() << " "; if (itr->getInstr() != 0) { @@ -168,7 +166,7 @@ void SlotIndexes::dump() const { // Print a SlotIndex to a raw_ostream. 
void SlotIndex::print(raw_ostream &os) const { if (isValid()) - os << entry().getIndex() << "Berd"[getSlot()]; + os << listEntry()->getIndex() << "Berd"[getSlot()]; else os << "invalid"; } diff --git a/lib/ExecutionEngine/RuntimeDyld/CMakeLists.txt b/lib/ExecutionEngine/RuntimeDyld/CMakeLists.txt index 002e63cd3b6b..cbf7cf14d491 100644 --- a/lib/ExecutionEngine/RuntimeDyld/CMakeLists.txt +++ b/lib/ExecutionEngine/RuntimeDyld/CMakeLists.txt @@ -1,5 +1,6 @@ add_llvm_library(LLVMRuntimeDyld + GDBRegistrar.cpp RuntimeDyld.cpp - RuntimeDyldMachO.cpp RuntimeDyldELF.cpp + RuntimeDyldMachO.cpp ) diff --git a/lib/ExecutionEngine/RuntimeDyld/GDBRegistrar.cpp b/lib/ExecutionEngine/RuntimeDyld/GDBRegistrar.cpp new file mode 100644 index 000000000000..8b5010142241 --- /dev/null +++ b/lib/ExecutionEngine/RuntimeDyld/GDBRegistrar.cpp @@ -0,0 +1,214 @@ +//===-- GDBRegistrar.cpp - Registers objects with GDB ---------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + +#include "JITRegistrar.h" +#include "llvm/ADT/DenseMap.h" +#include "llvm/Support/MutexGuard.h" +#include "llvm/Support/Mutex.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/Compiler.h" + +using namespace llvm; + +// This must be kept in sync with gdb/gdb/jit.h . +extern "C" { + + typedef enum { + JIT_NOACTION = 0, + JIT_REGISTER_FN, + JIT_UNREGISTER_FN + } jit_actions_t; + + struct jit_code_entry { + struct jit_code_entry *next_entry; + struct jit_code_entry *prev_entry; + const char *symfile_addr; + uint64_t symfile_size; + }; + + struct jit_descriptor { + uint32_t version; + // This should be jit_actions_t, but we want to be specific about the + // bit-width. + uint32_t action_flag; + struct jit_code_entry *relevant_entry; + struct jit_code_entry *first_entry; + }; + + // We put information about the JITed function in this global, which the + // debugger reads. Make sure to specify the version statically, because the + // debugger checks the version before we can set it during runtime. + static struct jit_descriptor __jit_debug_descriptor = { 1, 0, 0, 0 }; + + // Debuggers puts a breakpoint in this function. + LLVM_ATTRIBUTE_NOINLINE void __jit_debug_register_code() { } + +} + +namespace { + +// Buffer for an in-memory object file in executable memory +typedef llvm::DenseMap< const char*, + std::pair<std::size_t, jit_code_entry*> > + RegisteredObjectBufferMap; + +/// Global access point for the JIT debugging interface designed for use with a +/// singleton toolbox. Handles thread-safe registration and deregistration of +/// object files that are in executable memory managed by the client of this +/// class. +class GDBJITRegistrar : public JITRegistrar { + /// A map of in-memory object files that have been registered with the + /// JIT interface. + RegisteredObjectBufferMap ObjectBufferMap; + +public: + /// Instantiates the JIT service. + GDBJITRegistrar() : ObjectBufferMap() {} + + /// Unregisters each object that was previously registered and releases all + /// internal resources. + virtual ~GDBJITRegistrar(); + + /// Creates an entry in the JIT registry for the buffer @p Object, + /// which must contain an object file in executable memory with any + /// debug information for the debugger. 
+ void registerObject(const MemoryBuffer &Object); + + /// Removes the internal registration of @p Object, and + /// frees associated resources. + /// Returns true if @p Object was found in ObjectBufferMap. + bool deregisterObject(const MemoryBuffer &Object); + +private: + /// Deregister the debug info for the given object file from the debugger + /// and delete any temporary copies. This private method does not remove + /// the function from Map so that it can be called while iterating over Map. + void deregisterObjectInternal(RegisteredObjectBufferMap::iterator I); +}; + +/// Lock used to serialize all jit registration events, since they +/// modify global variables. +llvm::sys::Mutex JITDebugLock; + +/// Acquire the lock and do the registration. +void NotifyDebugger(jit_code_entry* JITCodeEntry) { + llvm::MutexGuard locked(JITDebugLock); + __jit_debug_descriptor.action_flag = JIT_REGISTER_FN; + + // Insert this entry at the head of the list. + JITCodeEntry->prev_entry = NULL; + jit_code_entry* NextEntry = __jit_debug_descriptor.first_entry; + JITCodeEntry->next_entry = NextEntry; + if (NextEntry != NULL) { + NextEntry->prev_entry = JITCodeEntry; + } + __jit_debug_descriptor.first_entry = JITCodeEntry; + __jit_debug_descriptor.relevant_entry = JITCodeEntry; + __jit_debug_register_code(); +} + +GDBJITRegistrar::~GDBJITRegistrar() { + // Free all registered object files. + for (RegisteredObjectBufferMap::iterator I = ObjectBufferMap.begin(), E = ObjectBufferMap.end(); + I != E; ++I) { + // Call the private method that doesn't update the map so our iterator + // doesn't break. + deregisterObjectInternal(I); + } + ObjectBufferMap.clear(); +} + +void GDBJITRegistrar::registerObject(const MemoryBuffer &Object) { + + const char *Buffer = Object.getBufferStart(); + size_t Size = Object.getBufferSize(); + + assert(Buffer && "Attempt to register a null object with a debugger."); + assert(ObjectBufferMap.find(Buffer) == ObjectBufferMap.end() && + "Second attempt to perform debug registration."); + jit_code_entry* JITCodeEntry = new jit_code_entry(); + + if (JITCodeEntry == 0) { + llvm::report_fatal_error( + "Allocation failed when registering a JIT entry!\n"); + } + else { + JITCodeEntry->symfile_addr = Buffer; + JITCodeEntry->symfile_size = Size; + + ObjectBufferMap[Buffer] = std::make_pair(Size, JITCodeEntry); + NotifyDebugger(JITCodeEntry); + } +} + +bool GDBJITRegistrar::deregisterObject(const MemoryBuffer& Object) { + const char *Buffer = Object.getBufferStart(); + RegisteredObjectBufferMap::iterator I = ObjectBufferMap.find(Buffer); + + if (I != ObjectBufferMap.end()) { + deregisterObjectInternal(I); + ObjectBufferMap.erase(I); + return true; + } + return false; +} + +void GDBJITRegistrar::deregisterObjectInternal( + RegisteredObjectBufferMap::iterator I) { + + jit_code_entry*& JITCodeEntry = I->second.second; + + // Acquire the lock and do the unregistration. + { + llvm::MutexGuard locked(JITDebugLock); + __jit_debug_descriptor.action_flag = JIT_UNREGISTER_FN; + + // Remove the jit_code_entry from the linked list. + jit_code_entry* PrevEntry = JITCodeEntry->prev_entry; + jit_code_entry* NextEntry = JITCodeEntry->next_entry; + + if (NextEntry) { + NextEntry->prev_entry = PrevEntry; + } + if (PrevEntry) { + PrevEntry->next_entry = NextEntry; + } + else { + assert(__jit_debug_descriptor.first_entry == JITCodeEntry); + __jit_debug_descriptor.first_entry = NextEntry; + } + + // Tell the debugger which entry we removed, and unregister the code. 
+ __jit_debug_descriptor.relevant_entry = JITCodeEntry; + __jit_debug_register_code(); + } + + delete JITCodeEntry; + JITCodeEntry = NULL; +} + +} // end namespace + +namespace llvm { + +JITRegistrar& JITRegistrar::getGDBRegistrar() { + static GDBJITRegistrar* sRegistrar = NULL; + if (sRegistrar == NULL) { + // The mutex is here so that it won't slow down access once the registrar + // is instantiated + llvm::MutexGuard locked(JITDebugLock); + // Check again to be sure another thread didn't create this while we waited + if (sRegistrar == NULL) { + sRegistrar = new GDBJITRegistrar; + } + } + return *sRegistrar; +} + +} // namespace llvm diff --git a/lib/ExecutionEngine/RuntimeDyld/JITRegistrar.h b/lib/ExecutionEngine/RuntimeDyld/JITRegistrar.h new file mode 100644 index 000000000000..f964bc61829b --- /dev/null +++ b/lib/ExecutionEngine/RuntimeDyld/JITRegistrar.h @@ -0,0 +1,43 @@ +//===-- JITRegistrar.h - Registers objects with a debugger ----------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_EXECUTION_ENGINE_JIT_REGISTRAR_H +#define LLVM_EXECUTION_ENGINE_JIT_REGISTRAR_H + +#include "llvm/Support/MemoryBuffer.h" + +namespace llvm { + +/// Global access point for the JIT debugging interface. +class JITRegistrar { +public: + /// Instantiates the JIT service. + JITRegistrar() {} + + /// Unregisters each object that was previously registered and releases all + /// internal resources. + virtual ~JITRegistrar() {} + + /// Creates an entry in the JIT registry for the buffer @p Object, + /// which must contain an object file in executable memory with any + /// debug information for the debugger. + virtual void registerObject(const MemoryBuffer &Object) = 0; + + /// Removes the internal registration of @p Object, and + /// frees associated resources. + /// Returns true if @p Object was previously registered. + virtual bool deregisterObject(const MemoryBuffer &Object) = 0; + + /// Returns a reference to a GDB JIT registrar singleton + static JITRegistrar& getGDBRegistrar(); +}; + +} // end namespace llvm + +#endif // LLVM_EXECUTION_ENGINE_JIT_REGISTRAR_H diff --git a/lib/ExecutionEngine/RuntimeDyld/ObjectImage.h b/lib/ExecutionEngine/RuntimeDyld/ObjectImage.h new file mode 100644 index 000000000000..8206eadbd252 --- /dev/null +++ b/lib/ExecutionEngine/RuntimeDyld/ObjectImage.h @@ -0,0 +1,59 @@ +//===---- ObjectImage.h - Format independent executuable object image -----===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file declares a file format independent ObjectImage class. 
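[Editor's note] One caveat worth flagging in getGDBRegistrar above: the unsynchronized first read of sRegistrar is the classic double-checked locking pattern, which is formally a data race under the C++11 memory model even though the second check is taken under JITDebugLock. Under C++11 the same intent is a one-liner, since function-local statics are initialized thread-safely; a sketch for comparison, not a drop-in change for this pre-C++11 tree:

JITRegistrar &getGDBRegistrarCxx11() {
  // C++11 guarantees thread-safe initialization of local statics, so the
  // explicit mutex and the double check both become unnecessary.
  static GDBJITRegistrar Registrar;
  return Registrar;
}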
+// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_RUNTIMEDYLD_OBJECT_IMAGE_H +#define LLVM_RUNTIMEDYLD_OBJECT_IMAGE_H + +#include "llvm/Object/ObjectFile.h" + +namespace llvm { + +class ObjectImage { + ObjectImage(); // = delete + ObjectImage(const ObjectImage &other); // = delete +protected: + object::ObjectFile *ObjFile; + +public: + ObjectImage(object::ObjectFile *Obj) { ObjFile = Obj; } + virtual ~ObjectImage() {} + + virtual object::symbol_iterator begin_symbols() const + { return ObjFile->begin_symbols(); } + virtual object::symbol_iterator end_symbols() const + { return ObjFile->end_symbols(); } + + virtual object::section_iterator begin_sections() const + { return ObjFile->begin_sections(); } + virtual object::section_iterator end_sections() const + { return ObjFile->end_sections(); } + + virtual /* Triple::ArchType */ unsigned getArch() const + { return ObjFile->getArch(); } + + // Subclasses can override these methods to update the image with loaded + // addresses for sections and common symbols + virtual void updateSectionAddress(const object::SectionRef &Sec, + uint64_t Addr) {} + virtual void updateSymbolAddress(const object::SymbolRef &Sym, uint64_t Addr) + {} + + // Subclasses can override this method to provide JIT debugging support + virtual void registerWithDebugger() {} + virtual void deregisterWithDebugger() {} +}; + +} // end namespace llvm + +#endif // LLVM_RUNTIMEDYLD_OBJECT_IMAGE_H + diff --git a/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp b/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp index 63cec1aca3b1..1b1840ae0661 100644 --- a/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp +++ b/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp @@ -59,11 +59,17 @@ void RuntimeDyldImpl::mapSectionAddress(void *LocalAddress, llvm_unreachable("Attempting to remap address of unknown section!"); } +// Subclasses can implement this method to create specialized image instances +// The caller owns the the pointer that is returned. +ObjectImage *RuntimeDyldImpl::createObjectImage(const MemoryBuffer *InputBuffer) { + ObjectFile *ObjFile = ObjectFile::createObjectFile(const_cast<MemoryBuffer*> + (InputBuffer)); + ObjectImage *Obj = new ObjectImage(ObjFile); + return Obj; +} + bool RuntimeDyldImpl::loadObject(const MemoryBuffer *InputBuffer) { - // FIXME: ObjectFile don't modify MemoryBuffer. - // It should use const MemoryBuffer as parameter. 
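[Editor's note] ObjectImage is deliberately a thin veneer: every hook has a do-nothing default, so format-specific subclasses only override what they need. A hedged sketch of a custom image that merely logs section placement; the class name is illustrative, not part of the patch:

#include "ObjectImage.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

class LoggingObjectImage : public ObjectImage {
public:
  LoggingObjectImage(object::ObjectFile *Obj) : ObjectImage(Obj) {}

  virtual void updateSectionAddress(const object::SectionRef &Sec,
                                    uint64_t Addr) {
    StringRef Name;
    Sec.getName(Name);            // returns an error_code, ignored here
    errs() << "loaded " << Name << " at 0x";
    errs().write_hex(Addr) << "\n";
  }
};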
- OwningPtr<ObjectFile> obj(ObjectFile::createObjectFile( - const_cast<MemoryBuffer*>(InputBuffer))); + OwningPtr<ObjectImage> obj(createObjectImage(InputBuffer)); if (!obj) report_fatal_error("Unable to create object image from memory buffer!"); @@ -110,7 +116,8 @@ bool RuntimeDyldImpl::loadObject(const MemoryBuffer *InputBuffer) { (uintptr_t)FileOffset; uintptr_t SectOffset = (uintptr_t)(SymPtr - (const uint8_t*)sData.begin()); unsigned SectionID = - findOrEmitSection(*si, + findOrEmitSection(*obj, + *si, SymType == object::SymbolRef::ST_Function, LocalSections); bool isGlobal = flags & SymbolRef::SF_Global; @@ -128,7 +135,7 @@ bool RuntimeDyldImpl::loadObject(const MemoryBuffer *InputBuffer) { // Allocate common symbols if (CommonSize != 0) - emitCommonSymbols(CommonSymbols, CommonSize, LocalSymbols); + emitCommonSymbols(*obj, CommonSymbols, CommonSize, LocalSymbols); // Parse and proccess relocations DEBUG(dbgs() << "Parse relocations:\n"); @@ -145,7 +152,7 @@ bool RuntimeDyldImpl::loadObject(const MemoryBuffer *InputBuffer) { // If it's first relocation in this section, find its SectionID if (isFirstRelocation) { - SectionID = findOrEmitSection(*si, true, LocalSections); + SectionID = findOrEmitSection(*obj, *si, true, LocalSections); DEBUG(dbgs() << "\tSectionID: " << SectionID << "\n"); isFirstRelocation = false; } @@ -164,10 +171,14 @@ bool RuntimeDyldImpl::loadObject(const MemoryBuffer *InputBuffer) { processRelocationRef(RI, *obj, LocalSections, LocalSymbols, Stubs); } } + + handleObjectLoaded(obj.take()); + return false; } -unsigned RuntimeDyldImpl::emitCommonSymbols(const CommonSymbolMap &Map, +unsigned RuntimeDyldImpl::emitCommonSymbols(ObjectImage &Obj, + const CommonSymbolMap &Map, uint64_t TotalSize, LocalSymbolMap &LocalSymbols) { // Allocate memory for the section @@ -191,6 +202,7 @@ unsigned RuntimeDyldImpl::emitCommonSymbols(const CommonSymbolMap &Map, uint64_t Size = it->second; StringRef Name; it->first.getName(Name); + Obj.updateSymbolAddress(it->first, (uint64_t)Addr); LocalSymbols[Name.data()] = SymbolLoc(SectionID, Offset); Offset += Size; Addr += Size; @@ -199,7 +211,8 @@ unsigned RuntimeDyldImpl::emitCommonSymbols(const CommonSymbolMap &Map, return SectionID; } -unsigned RuntimeDyldImpl::emitSection(const SectionRef &Section, +unsigned RuntimeDyldImpl::emitSection(ObjectImage &Obj, + const SectionRef &Section, bool IsCode) { unsigned StubBufSize = 0, @@ -257,6 +270,7 @@ unsigned RuntimeDyldImpl::emitSection(const SectionRef &Section, << " StubBufSize: " << StubBufSize << " Allocate: " << Allocate << "\n"); + Obj.updateSectionAddress(Section, (uint64_t)Addr); } else { // Even if we didn't load the section, we need to record an entry for it @@ -277,7 +291,8 @@ unsigned RuntimeDyldImpl::emitSection(const SectionRef &Section, return SectionID; } -unsigned RuntimeDyldImpl::findOrEmitSection(const SectionRef &Section, +unsigned RuntimeDyldImpl::findOrEmitSection(ObjectImage &Obj, + const SectionRef &Section, bool IsCode, ObjSectionToIDMap &LocalSections) { @@ -286,7 +301,7 @@ unsigned RuntimeDyldImpl::findOrEmitSection(const SectionRef &Section, if (i != LocalSections.end()) SectionID = i->second; else { - SectionID = emitSection(Section, IsCode); + SectionID = emitSection(Obj, Section, IsCode); LocalSections[Section] = SectionID; } return SectionID; diff --git a/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp b/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp index 57fefee5dedc..db6da8c8ef25 100644 --- a/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp +++ 
b/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp @@ -20,11 +20,176 @@ #include "llvm/Object/ObjectFile.h" #include "llvm/Support/ELF.h" #include "llvm/ADT/Triple.h" +#include "llvm/Object/ELF.h" +#include "JITRegistrar.h" using namespace llvm; using namespace llvm::object; +namespace { + +template<support::endianness target_endianness, bool is64Bits> +class DyldELFObject : public ELFObjectFile<target_endianness, is64Bits> { + LLVM_ELF_IMPORT_TYPES(target_endianness, is64Bits) + + typedef Elf_Shdr_Impl<target_endianness, is64Bits> Elf_Shdr; + typedef Elf_Sym_Impl<target_endianness, is64Bits> Elf_Sym; + typedef Elf_Rel_Impl<target_endianness, is64Bits, false> Elf_Rel; + typedef Elf_Rel_Impl<target_endianness, is64Bits, true> Elf_Rela; + + typedef typename ELFObjectFile<target_endianness, is64Bits>:: + Elf_Ehdr Elf_Ehdr; + + typedef typename ELFDataTypeTypedefHelper< + target_endianness, is64Bits>::value_type addr_type; + +protected: + // This duplicates the 'Data' member in the 'Binary' base class + // but it is necessary to workaround a bug in gcc 4.2 + MemoryBuffer *InputData; + +public: + DyldELFObject(MemoryBuffer *Object, error_code &ec); + + void updateSectionAddress(const SectionRef &Sec, uint64_t Addr); + void updateSymbolAddress(const SymbolRef &Sym, uint64_t Addr); + + const MemoryBuffer& getBuffer() const { return *InputData; } + + // Methods for type inquiry through isa, cast, and dyn_cast + static inline bool classof(const Binary *v) { + return (isa<ELFObjectFile<target_endianness, is64Bits> >(v) + && classof(cast<ELFObjectFile<target_endianness, is64Bits> >(v))); + } + static inline bool classof( + const ELFObjectFile<target_endianness, is64Bits> *v) { + return v->isDyldType(); + } + static inline bool classof(const DyldELFObject *v) { + return true; + } +}; + +template<support::endianness target_endianness, bool is64Bits> +class ELFObjectImage : public ObjectImage { + protected: + DyldELFObject<target_endianness, is64Bits> *DyldObj; + bool Registered; + + public: + ELFObjectImage(DyldELFObject<target_endianness, is64Bits> *Obj) + : ObjectImage(Obj), + DyldObj(Obj), + Registered(false) {} + + virtual ~ELFObjectImage() { + if (Registered) + deregisterWithDebugger(); + } + + // Subclasses can override these methods to update the image with loaded + // addresses for sections and common symbols + virtual void updateSectionAddress(const SectionRef &Sec, uint64_t Addr) + { + DyldObj->updateSectionAddress(Sec, Addr); + } + + virtual void updateSymbolAddress(const SymbolRef &Sym, uint64_t Addr) + { + DyldObj->updateSymbolAddress(Sym, Addr); + } + + virtual void registerWithDebugger() + { + JITRegistrar::getGDBRegistrar().registerObject(DyldObj->getBuffer()); + Registered = true; + } + virtual void deregisterWithDebugger() + { + JITRegistrar::getGDBRegistrar().deregisterObject(DyldObj->getBuffer()); + } +}; + +template<support::endianness target_endianness, bool is64Bits> +DyldELFObject<target_endianness, is64Bits>::DyldELFObject(MemoryBuffer *Object, + error_code &ec) + : ELFObjectFile<target_endianness, is64Bits>(Object, ec), + InputData(Object) { + this->isDyldELFObject = true; +} + +template<support::endianness target_endianness, bool is64Bits> +void DyldELFObject<target_endianness, is64Bits>::updateSectionAddress( + const SectionRef &Sec, + uint64_t Addr) { + DataRefImpl ShdrRef = Sec.getRawDataRefImpl(); + Elf_Shdr *shdr = const_cast<Elf_Shdr*>( + reinterpret_cast<const Elf_Shdr *>(ShdrRef.p)); + + // This assumes the address passed in matches the target address bitness + // 
The template-based type cast handles everything else. + shdr->sh_addr = static_cast<addr_type>(Addr); +} + +template<support::endianness target_endianness, bool is64Bits> +void DyldELFObject<target_endianness, is64Bits>::updateSymbolAddress( + const SymbolRef &SymRef, + uint64_t Addr) { + + Elf_Sym *sym = const_cast<Elf_Sym*>( + ELFObjectFile<target_endianness, is64Bits>:: + getSymbol(SymRef.getRawDataRefImpl())); + + // This assumes the address passed in matches the target address bitness + // The template-based type cast handles everything else. + sym->st_value = static_cast<addr_type>(Addr); +} + +} // namespace + + namespace llvm { +ObjectImage *RuntimeDyldELF::createObjectImage( + const MemoryBuffer *ConstInputBuffer) { + MemoryBuffer *InputBuffer = const_cast<MemoryBuffer*>(ConstInputBuffer); + std::pair<unsigned char, unsigned char> Ident = getElfArchType(InputBuffer); + error_code ec; + + if (Ident.first == ELF::ELFCLASS32 && Ident.second == ELF::ELFDATA2LSB) { + DyldELFObject<support::little, false> *Obj = + new DyldELFObject<support::little, false>(InputBuffer, ec); + return new ELFObjectImage<support::little, false>(Obj); + } + else if (Ident.first == ELF::ELFCLASS32 && Ident.second == ELF::ELFDATA2MSB) { + DyldELFObject<support::big, false> *Obj = + new DyldELFObject<support::big, false>(InputBuffer, ec); + return new ELFObjectImage<support::big, false>(Obj); + } + else if (Ident.first == ELF::ELFCLASS64 && Ident.second == ELF::ELFDATA2MSB) { + DyldELFObject<support::big, true> *Obj = + new DyldELFObject<support::big, true>(InputBuffer, ec); + return new ELFObjectImage<support::big, true>(Obj); + } + else if (Ident.first == ELF::ELFCLASS64 && Ident.second == ELF::ELFDATA2LSB) { + DyldELFObject<support::little, true> *Obj = + new DyldELFObject<support::little, true>(InputBuffer, ec); + return new ELFObjectImage<support::little, true>(Obj); + } + else + llvm_unreachable("Unexpected ELF format"); +} + +void RuntimeDyldELF::handleObjectLoaded(ObjectImage *Obj) +{ + Obj->registerWithDebugger(); + // Save the loaded object. It will deregister itself when deleted + LoadedObject = Obj; +} + +RuntimeDyldELF::~RuntimeDyldELF() { + if (LoadedObject) + delete LoadedObject; +} void RuntimeDyldELF::resolveX86_64Relocation(uint8_t *LocalAddress, uint64_t FinalAddress, @@ -167,7 +332,7 @@ void RuntimeDyldELF::resolveRelocation(uint8_t *LocalAddress, } void RuntimeDyldELF::processRelocationRef(const ObjRelocationInfo &Rel, - const ObjectFile &Obj, + ObjectImage &Obj, ObjSectionToIDMap &ObjSectionToID, LocalSymbolMap &Symbols, StubMap &Stubs) { @@ -206,7 +371,7 @@ void RuntimeDyldELF::processRelocationRef(const ObjRelocationInfo &Rel, if (si == Obj.end_sections()) llvm_unreachable("Symbol section not found, bad object file format!"); DEBUG(dbgs() << "\t\tThis is section symbol\n"); - Value.SectionID = findOrEmitSection((*si), true, ObjSectionToID); + Value.SectionID = findOrEmitSection(Obj, (*si), true, ObjSectionToID); Value.Addend = Addend; break; } @@ -236,7 +401,7 @@ void RuntimeDyldELF::processRelocationRef(const ObjRelocationInfo &Rel, // Look up for existing stub. 
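[Editor's note] createObjectImage above keys the four DyldELFObject instantiations off the two ELF identification bytes. The helper it calls, getElfArchType, was previously a file-local function in ELFObjectFile.cpp (the patch deletes that copy further down, presumably because the newly included llvm/Object/ELF.h now provides it); its logic reads just the e_ident fields:

#include "llvm/Support/ELF.h"
#include "llvm/Support/MemoryBuffer.h"
#include <utility>

// EI_CLASS selects 32- vs 64-bit layouts and EI_DATA little- vs big-endian;
// together they pick one of the four template instantiations.
static std::pair<unsigned char, unsigned char>
elfArchType(const llvm::MemoryBuffer *Object) {
  if (Object->getBufferSize() < llvm::ELF::EI_NIDENT)
    return std::make_pair((unsigned char)llvm::ELF::ELFCLASSNONE,
                          (unsigned char)llvm::ELF::ELFDATANONE);
  return std::make_pair(
      (unsigned char)Object->getBufferStart()[llvm::ELF::EI_CLASS],
      (unsigned char)Object->getBufferStart()[llvm::ELF::EI_DATA]);
}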
StubMap::const_iterator i = Stubs.find(Value); if (i != Stubs.end()) { - resolveRelocation(Target, Section.LoadAddress, (uint64_t)Section.Address + + resolveRelocation(Target, (uint64_t)Target, (uint64_t)Section.Address + i->second, RelType, 0); DEBUG(dbgs() << " Stub function found\n"); } else { @@ -247,7 +412,7 @@ void RuntimeDyldELF::processRelocationRef(const ObjRelocationInfo &Rel, Section.StubOffset); AddRelocation(Value, Rel.SectionID, StubTargetAddr - Section.Address, ELF::R_ARM_ABS32); - resolveRelocation(Target, Section.LoadAddress, (uint64_t)Section.Address + + resolveRelocation(Target, (uint64_t)Target, (uint64_t)Section.Address + Section.StubOffset, RelType, 0); Section.StubOffset += getMaxStubSize(); } diff --git a/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.h b/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.h index 36566da57a58..e7f6fab16fcb 100644 --- a/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.h +++ b/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.h @@ -22,6 +22,8 @@ using namespace llvm; namespace llvm { class RuntimeDyldELF : public RuntimeDyldImpl { protected: + ObjectImage *LoadedObject; + void resolveX86_64Relocation(uint8_t *LocalAddress, uint64_t FinalAddress, uint64_t Value, @@ -47,12 +49,18 @@ protected: int64_t Addend); virtual void processRelocationRef(const ObjRelocationInfo &Rel, - const ObjectFile &Obj, + ObjectImage &Obj, ObjSectionToIDMap &ObjSectionToID, LocalSymbolMap &Symbols, StubMap &Stubs); + virtual ObjectImage *createObjectImage(const MemoryBuffer *InputBuffer); + virtual void handleObjectLoaded(ObjectImage *Obj); + public: - RuntimeDyldELF(RTDyldMemoryManager *mm) : RuntimeDyldImpl(mm) {} + RuntimeDyldELF(RTDyldMemoryManager *mm) + : RuntimeDyldImpl(mm), LoadedObject(0) {} + + virtual ~RuntimeDyldELF(); bool isCompatibleFormat(const MemoryBuffer *InputBuffer) const; }; diff --git a/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldImpl.h b/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldImpl.h index bf678af6ece7..2dea13f15cea 100644 --- a/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldImpl.h +++ b/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldImpl.h @@ -29,6 +29,7 @@ #include "llvm/ADT/Triple.h" #include <map> #include "llvm/Support/Format.h" +#include "ObjectImage.h" using namespace llvm; using namespace llvm::object; @@ -154,7 +155,8 @@ protected: /// \brief Emits a section containing common symbols. /// \return SectionID. - unsigned emitCommonSymbols(const CommonSymbolMap &Map, + unsigned emitCommonSymbols(ObjectImage &Obj, + const CommonSymbolMap &Map, uint64_t TotalSize, LocalSymbolMap &Symbols); @@ -162,14 +164,18 @@ protected: /// \param IsCode if it's true then allocateCodeSection() will be /// used for emmits, else allocateDataSection() will be used. /// \return SectionID. - unsigned emitSection(const SectionRef &Section, bool IsCode); + unsigned emitSection(ObjectImage &Obj, + const SectionRef &Section, + bool IsCode); /// \brief Find Section in LocalSections. If the secton is not found - emit /// it and store in LocalSections. /// \param IsCode if it's true then allocateCodeSection() will be /// used for emmits, else allocateDataSection() will be used. /// \return SectionID. 
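[Editor's note] The ownership story here is worth spelling out: RuntimeDyldELF keeps the image alive in its new LoadedObject member because the GDB interface needs the object bytes to stay mapped, and ELFObjectImage deregisters itself on destruction via its Registered flag. The same guarantee can be expressed as a small RAII wrapper; a hedged sketch, with a class that is illustrative rather than part of the patch:

#include "JITRegistrar.h"
#include "llvm/Support/MemoryBuffer.h"

// Registers on construction, deregisters on destruction, mirroring the
// Registered-flag check in ELFObjectImage's destructor.
class ScopedGDBRegistration {
  const llvm::MemoryBuffer &Buf;
public:
  explicit ScopedGDBRegistration(const llvm::MemoryBuffer &B) : Buf(B) {
    llvm::JITRegistrar::getGDBRegistrar().registerObject(Buf);
  }
  ~ScopedGDBRegistration() {
    llvm::JITRegistrar::getGDBRegistrar().deregisterObject(Buf);
  }
private:
  // Non-copyable: a copy would deregister the same buffer twice.
  ScopedGDBRegistration(const ScopedGDBRegistration &);
  void operator=(const ScopedGDBRegistration &);
};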
- unsigned findOrEmitSection(const SectionRef &Section, bool IsCode, + unsigned findOrEmitSection(ObjectImage &Obj, + const SectionRef &Section, + bool IsCode, ObjSectionToIDMap &LocalSections); /// \brief If Value.SymbolName is NULL then store relocation to the @@ -200,11 +206,18 @@ protected: /// \brief Parses the object file relocation and store it to Relocations /// or SymbolRelocations. Its depend from object file type. virtual void processRelocationRef(const ObjRelocationInfo &Rel, - const ObjectFile &Obj, + ObjectImage &Obj, ObjSectionToIDMap &ObjSectionToID, LocalSymbolMap &Symbols, StubMap &Stubs) = 0; void resolveSymbols(); + virtual ObjectImage *createObjectImage(const MemoryBuffer *InputBuffer); + virtual void handleObjectLoaded(ObjectImage *Obj) + { + // Subclasses may choose to retain this image if they have a use for it + delete Obj; + } + public: RuntimeDyldImpl(RTDyldMemoryManager *mm) : MemMgr(mm), HasError(false) {} diff --git a/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.cpp b/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.cpp index 1318b4454255..b7f515d6ce65 100644 --- a/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.cpp +++ b/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.cpp @@ -205,7 +205,7 @@ resolveARMRelocation(uint8_t *LocalAddress, } void RuntimeDyldMachO::processRelocationRef(const ObjRelocationInfo &Rel, - const ObjectFile &Obj, + ObjectImage &Obj, ObjSectionToIDMap &ObjSectionToID, LocalSymbolMap &Symbols, StubMap &Stubs) { @@ -246,7 +246,7 @@ void RuntimeDyldMachO::processRelocationRef(const ObjRelocationInfo &Rel, break; } assert(si != se && "No section containing relocation!"); - Value.SectionID = findOrEmitSection(*si, true, ObjSectionToID); + Value.SectionID = findOrEmitSection(Obj, *si, true, ObjSectionToID); Value.Addend = *(const intptr_t *)Target; if (Value.Addend) { // The MachO addend is offset from the current section, we need set it diff --git a/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.h b/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.h index 898b85190e71..418d130f6352 100644 --- a/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.h +++ b/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.h @@ -49,7 +49,7 @@ protected: int64_t Addend); virtual void processRelocationRef(const ObjRelocationInfo &Rel, - const ObjectFile &Obj, + ObjectImage &Obj, ObjSectionToIDMap &ObjSectionToID, LocalSymbolMap &Symbols, StubMap &Stubs); @@ -59,7 +59,7 @@ public: uint64_t Value, uint32_t Type, int64_t Addend); - + RuntimeDyldMachO(RTDyldMemoryManager *mm) : RuntimeDyldImpl(mm) {} bool isCompatibleFormat(const MemoryBuffer *InputBuffer) const; diff --git a/lib/MC/MCParser/AsmParser.cpp b/lib/MC/MCParser/AsmParser.cpp index 2d61cac62585..8aef43cb0b4c 100644 --- a/lib/MC/MCParser/AsmParser.cpp +++ b/lib/MC/MCParser/AsmParser.cpp @@ -1527,11 +1527,11 @@ bool AsmParser::HandleMacroEntry(StringRef Name, SMLoc NameLoc, } Lex(); } - // If there weren't any arguments, erase the token vector so everything - // else knows that. Leaving around the vestigal empty token list confuses - // things. - if (MacroArguments.size() == 1 && MacroArguments.back().empty()) - MacroArguments.clear(); + // If the last argument didn't end up with any tokens, it's not a real + // argument and we should remove it from the list. This happens with either + // a tailing comma or an empty argument list. + if (MacroArguments.back().empty()) + MacroArguments.pop_back(); // Macro instantiation is lexical, unfortunately. 
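[Editor's note] Stepping back, the RuntimeDyldImpl changes amount to the template-method pattern: loadObject drives the format-independent work, while the two new virtuals let a backend substitute its own image type and claim ownership afterwards. A condensed sketch of the control flow, with an illustrative stand-in class name and the emission steps elided:

#include "ObjectImage.h"
#include "llvm/Object/ObjectFile.h"
#include "llvm/Support/MemoryBuffer.h"

struct RuntimeDyldSketch {          // stand-in for RuntimeDyldImpl
  bool loadObject(const llvm::MemoryBuffer *Buf) {
    llvm::ObjectImage *Obj = createObjectImage(Buf);
    // ... emit sections and symbols, process relocations through *Obj ...
    handleObjectLoaded(Obj);        // backend decides whether the image lives
    return false;
  }
  // Default: wrap a plain ObjectFile. RuntimeDyldELF overrides this to
  // return an ELFObjectImage instead.
  virtual llvm::ObjectImage *createObjectImage(const llvm::MemoryBuffer *Buf) {
    return new llvm::ObjectImage(llvm::object::ObjectFile::createObjectFile(
        const_cast<llvm::MemoryBuffer *>(Buf)));
  }
  // Default: discard. RuntimeDyldELF overrides this to stash the image in
  // LoadedObject so the debugger registration stays valid.
  virtual void handleObjectLoaded(llvm::ObjectImage *Obj) { delete Obj; }
  virtual ~RuntimeDyldSketch() {}
};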
We construct a new buffer // to hold the macro body with substitutions. diff --git a/lib/Object/ELFObjectFile.cpp b/lib/Object/ELFObjectFile.cpp index ab5f8108af13..663b84ec8b1f 100644 --- a/lib/Object/ELFObjectFile.cpp +++ b/lib/Object/ELFObjectFile.cpp @@ -17,16 +17,6 @@ namespace llvm { using namespace object; -namespace { - std::pair<unsigned char, unsigned char> - getElfArchType(MemoryBuffer *Object) { - if (Object->getBufferSize() < ELF::EI_NIDENT) - return std::make_pair((uint8_t)ELF::ELFCLASSNONE,(uint8_t)ELF::ELFDATANONE); - return std::make_pair( (uint8_t)Object->getBufferStart()[ELF::EI_CLASS] - , (uint8_t)Object->getBufferStart()[ELF::EI_DATA]); - } -} - // Creates an in-memory object-file by default: createELFObjectFile(Buffer) ObjectFile *ObjectFile::createELFObjectFile(MemoryBuffer *Object) { std::pair<unsigned char, unsigned char> Ident = getElfArchType(Object); diff --git a/lib/Support/CMakeLists.txt b/lib/Support/CMakeLists.txt index 9b3b6c801dd0..9103327dffad 100644 --- a/lib/Support/CMakeLists.txt +++ b/lib/Support/CMakeLists.txt @@ -32,7 +32,7 @@ add_llvm_library(LLVMSupport IntrusiveRefCntPtr.cpp IsInf.cpp IsNAN.cpp - JSONParser.cpp + Locale.cpp LockFileManager.cpp ManagedStatic.cpp MemoryBuffer.cpp diff --git a/lib/Support/JSONParser.cpp b/lib/Support/JSONParser.cpp deleted file mode 100644 index 5dfcf297a7ea..000000000000 --- a/lib/Support/JSONParser.cpp +++ /dev/null @@ -1,302 +0,0 @@ -//===--- JSONParser.cpp - Simple JSON parser ------------------------------===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This file implements a JSON parser. -// -//===----------------------------------------------------------------------===// - -#include "llvm/Support/JSONParser.h" - -#include "llvm/ADT/Twine.h" -#include "llvm/Support/Casting.h" -#include "llvm/Support/MemoryBuffer.h" - -using namespace llvm; - -JSONParser::JSONParser(StringRef Input, SourceMgr *SM) - : SM(SM), Failed(false) { - InputBuffer = MemoryBuffer::getMemBuffer(Input, "JSON"); - SM->AddNewSourceBuffer(InputBuffer, SMLoc()); - End = InputBuffer->getBuffer().end(); - Position = InputBuffer->getBuffer().begin(); -} - -JSONValue *JSONParser::parseRoot() { - if (Position != InputBuffer->getBuffer().begin()) - report_fatal_error("Cannot reuse JSONParser."); - if (isWhitespace()) - nextNonWhitespace(); - if (errorIfAtEndOfFile("'[' or '{' at start of JSON text")) - return 0; - switch (*Position) { - case '[': - return new (ValueAllocator.Allocate<JSONArray>(1)) JSONArray(this); - case '{': - return new (ValueAllocator.Allocate<JSONObject>(1)) JSONObject(this); - default: - setExpectedError("'[' or '{' at start of JSON text", *Position); - return 0; - } -} - -bool JSONParser::validate() { - JSONValue *Root = parseRoot(); - if (Root == NULL) { - return false; - } - return skip(*Root); -} - -bool JSONParser::skip(const JSONAtom &Atom) { - switch(Atom.getKind()) { - case JSONAtom::JK_Array: - case JSONAtom::JK_Object: - return skipContainer(*cast<JSONContainer>(&Atom)); - case JSONAtom::JK_String: - return true; - case JSONAtom::JK_KeyValuePair: - return skip(*cast<JSONKeyValuePair>(&Atom)->Value); - } - llvm_unreachable("Impossible enum value."); -} - -// Sets the current error to: -// "expected <Expected>, but found <Found>". 
-void JSONParser::setExpectedError(StringRef Expected, StringRef Found) { - SM->PrintMessage(SMLoc::getFromPointer(Position), SourceMgr::DK_Error, - "expected " + Expected + ", but found " + Found + ".", ArrayRef<SMRange>()); - Failed = true; -} - -// Sets the current error to: -// "expected <Expected>, but found <Found>". -void JSONParser::setExpectedError(StringRef Expected, char Found) { - setExpectedError(Expected, ("'" + StringRef(&Found, 1) + "'").str()); -} - -// If there is no character available, returns true and sets the current error -// to: "expected <Expected>, but found EOF.". -bool JSONParser::errorIfAtEndOfFile(StringRef Expected) { - if (Position == End) { - setExpectedError(Expected, "EOF"); - return true; - } - return false; -} - -// Sets the current error if the current character is not C to: -// "expected 'C', but got <current character>". -bool JSONParser::errorIfNotAt(char C, StringRef Message) { - if (*Position != C) { - std::string Expected = - ("'" + StringRef(&C, 1) + "' " + Message).str(); - if (Position == End) - setExpectedError(Expected, "EOF"); - else - setExpectedError(Expected, *Position); - return true; - } - return false; -} - -// Forbidding inlining improves performance by roughly 20%. -// FIXME: Remove once llvm optimizes this to the faster version without hints. -LLVM_ATTRIBUTE_NOINLINE static bool -wasEscaped(StringRef::iterator First, StringRef::iterator Position); - -// Returns whether a character at 'Position' was escaped with a leading '\'. -// 'First' specifies the position of the first character in the string. -static bool wasEscaped(StringRef::iterator First, - StringRef::iterator Position) { - assert(Position - 1 >= First); - StringRef::iterator I = Position - 1; - // We calulate the number of consecutive '\'s before the current position - // by iterating backwards through our string. - while (I >= First && *I == '\\') --I; - // (Position - 1 - I) now contains the number of '\'s before the current - // position. If it is odd, the character at 'Positon' was escaped. - return (Position - 1 - I) % 2 == 1; -} - -// Parses a JSONString, assuming that the current position is on a quote. -JSONString *JSONParser::parseString() { - assert(Position != End); - assert(!isWhitespace()); - if (errorIfNotAt('"', "at start of string")) - return 0; - StringRef::iterator First = Position + 1; - - // Benchmarking shows that this loop is the hot path of the application with - // about 2/3rd of the runtime cycles. Since escaped quotes are not the common - // case, and multiple escaped backslashes before escaped quotes are very rare, - // we pessimize this case to achieve a smaller inner loop in the common case. - // We're doing that by having a quick inner loop that just scans for the next - // quote. Once we find the quote we check the last character to see whether - // the quote might have been escaped. If the last character is not a '\', we - // know the quote was not escaped and have thus found the end of the string. - // If the immediately preceding character was a '\', we have to scan backwards - // to see whether the previous character was actually an escaped backslash, or - // an escape character for the quote. If we find that the current quote was - // escaped, we continue parsing for the next quote and repeat. - // This optimization brings around 30% performance improvements. - do { - // Step over the current quote. - ++Position; - // Find the next quote. 
- while (Position != End && *Position != '"') - ++Position; - if (errorIfAtEndOfFile("'\"' at end of string")) - return 0; - // Repeat until the previous character was not a '\' or was an escaped - // backslash. - } while (*(Position - 1) == '\\' && wasEscaped(First, Position)); - - return new (ValueAllocator.Allocate<JSONString>()) - JSONString(StringRef(First, Position - First)); -} - - -// Advances the position to the next non-whitespace position. -void JSONParser::nextNonWhitespace() { - do { - ++Position; - } while (isWhitespace()); -} - -// Checks if there is a whitespace character at the current position. -bool JSONParser::isWhitespace() { - return *Position == ' ' || *Position == '\t' || - *Position == '\n' || *Position == '\r'; -} - -bool JSONParser::failed() const { - return Failed; -} - -// Parses a JSONValue, assuming that the current position is at the first -// character of the value. -JSONValue *JSONParser::parseValue() { - assert(Position != End); - assert(!isWhitespace()); - switch (*Position) { - case '[': - return new (ValueAllocator.Allocate<JSONArray>(1)) JSONArray(this); - case '{': - return new (ValueAllocator.Allocate<JSONObject>(1)) JSONObject(this); - case '"': - return parseString(); - default: - setExpectedError("'[', '{' or '\"' at start of value", *Position); - return 0; - } -} - -// Parses a JSONKeyValuePair, assuming that the current position is at the first -// character of the key, value pair. -JSONKeyValuePair *JSONParser::parseKeyValuePair() { - assert(Position != End); - assert(!isWhitespace()); - - JSONString *Key = parseString(); - if (Key == 0) - return 0; - - nextNonWhitespace(); - if (errorIfNotAt(':', "between key and value")) - return 0; - - nextNonWhitespace(); - const JSONValue *Value = parseValue(); - if (Value == 0) - return 0; - - return new (ValueAllocator.Allocate<JSONKeyValuePair>(1)) - JSONKeyValuePair(Key, Value); -} - -/// \brief Parses the first element of a JSON array or object, or closes the -/// array. -/// -/// The method assumes that the current position is before the first character -/// of the element, with possible white space in between. When successful, it -/// returns the new position after parsing the element. Otherwise, if there is -/// no next value, it returns a default constructed StringRef::iterator. -StringRef::iterator JSONParser::parseFirstElement(JSONAtom::Kind ContainerKind, - char StartChar, char EndChar, - const JSONAtom *&Element) { - assert(*Position == StartChar); - Element = 0; - nextNonWhitespace(); - if (errorIfAtEndOfFile("value or end of container at start of container")) - return StringRef::iterator(); - - if (*Position == EndChar) - return StringRef::iterator(); - - Element = parseElement(ContainerKind); - if (Element == 0) - return StringRef::iterator(); - - return Position; -} - -/// \brief Parses the next element of a JSON array or object, or closes the -/// array. -/// -/// The method assumes that the current position is before the ',' which -/// separates the next element from the current element. When successful, it -/// returns the new position after parsing the element. Otherwise, if there is -/// no next value, it returns a default constructed StringRef::iterator. 
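[Editor's note] Although JSONParser.cpp is being deleted, its quote scanner records a trick worth keeping: whether a quote is escaped depends only on the parity of the run of backslashes immediately before it. The deleted wasEscaped check, restated as a standalone function:

// Returns true when the character at Pos is preceded by an odd number of
// consecutive backslashes, i.e. the quote at Pos is itself escaped.
static bool precededByEscapingBackslash(const char *First, const char *Pos) {
  const char *I = Pos - 1;
  while (I >= First && *I == '\\')
    --I;
  // (Pos - 1 - I) is the length of the backslash run; odd means escaped.
  return (Pos - 1 - I) % 2 == 1;
}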
-StringRef::iterator JSONParser::parseNextElement(JSONAtom::Kind ContainerKind, - char EndChar, - const JSONAtom *&Element) { - Element = 0; - nextNonWhitespace(); - if (errorIfAtEndOfFile("',' or end of container for next element")) - return 0; - - if (*Position == ',') { - nextNonWhitespace(); - if (errorIfAtEndOfFile("element in container")) - return StringRef::iterator(); - - Element = parseElement(ContainerKind); - if (Element == 0) - return StringRef::iterator(); - - return Position; - } else if (*Position == EndChar) { - return StringRef::iterator(); - } else { - setExpectedError("',' or end of container for next element", *Position); - return StringRef::iterator(); - } -} - -const JSONAtom *JSONParser::parseElement(JSONAtom::Kind ContainerKind) { - switch (ContainerKind) { - case JSONAtom::JK_Array: - return parseValue(); - case JSONAtom::JK_Object: - return parseKeyValuePair(); - default: - llvm_unreachable("Impossible code path"); - } -} - -bool JSONParser::skipContainer(const JSONContainer &Container) { - for (JSONContainer::AtomIterator I = Container.atom_current(), - E = Container.atom_end(); - I != E; ++I) { - assert(*I != 0); - if (!skip(**I)) - return false; - } - return !failed(); -} diff --git a/lib/Support/Locale.cpp b/lib/Support/Locale.cpp new file mode 100644 index 000000000000..17b9b6c47d60 --- /dev/null +++ b/lib/Support/Locale.cpp @@ -0,0 +1,10 @@ +#include "llvm/Support/Locale.h" +#include "llvm/Config/config.h" + +#ifdef __APPLE__ +#include "LocaleXlocale.inc" +#elif LLVM_ON_WIN32 +#include "LocaleWindows.inc" +#else +#include "LocaleGeneric.inc" +#endif diff --git a/lib/Support/LocaleGeneric.inc b/lib/Support/LocaleGeneric.inc new file mode 100644 index 000000000000..278deee3e4dd --- /dev/null +++ b/lib/Support/LocaleGeneric.inc @@ -0,0 +1,17 @@ +#include <cwctype> + +namespace llvm { +namespace sys { +namespace locale { + +int columnWidth(StringRef s) { + return s.size(); +} + +bool isPrint(int c) { + return iswprint(c); +} + +} +} +} diff --git a/lib/Support/LocaleWindows.inc b/lib/Support/LocaleWindows.inc new file mode 100644 index 000000000000..6827ac15a1ac --- /dev/null +++ b/lib/Support/LocaleWindows.inc @@ -0,0 +1,15 @@ +namespace llvm { +namespace sys { +namespace locale { + +int columnWidth(StringRef s) { + return s.size(); +} + +bool isPrint(int c) { + return ' ' <= c && c <= '~'; +} + +} +} +}
\ No newline at end of file diff --git a/lib/Support/LocaleXlocale.inc b/lib/Support/LocaleXlocale.inc new file mode 100644 index 000000000000..f595e7c582ca --- /dev/null +++ b/lib/Support/LocaleXlocale.inc @@ -0,0 +1,61 @@ +#include "llvm/ADT/SmallVector.h" +#include "llvm/ADT/SmallString.h" +#include "llvm/Support/ManagedStatic.h" +#include <cassert> +#include <xlocale.h> + + +namespace { + struct locale_holder { + locale_holder() + : l(newlocale(LC_CTYPE_MASK,"en_US.UTF-8",LC_GLOBAL_LOCALE)) + { + assert(NULL!=l); + } + ~locale_holder() { + freelocale(l); + } + + int mbswidth(llvm::SmallString<16> s) const { + // this implementation assumes no '\0' in s + assert(s.size()==strlen(s.c_str())); + + size_t size = mbstowcs_l(NULL,s.c_str(),0,l); + assert(size!=(size_t)-1); + if (size==0) + return 0; + llvm::SmallVector<wchar_t,200> ws(size); + size = mbstowcs_l(&ws[0],s.c_str(),ws.size(),l); + assert(ws.size()==size); + return wcswidth_l(&ws[0],ws.size(),l); + } + + int isprint(int c) const { + return iswprint_l(c,l); + } + + private: + + locale_t l; + }; + + llvm::ManagedStatic<locale_holder> l; +} + +namespace llvm { +namespace sys { +namespace locale { + +int columnWidth(StringRef s) { + int width = l->mbswidth(s); + assert(width>=0); + return width; +} + +bool isPrint(int c) { + return l->isprint(c); +} + +} +} +} diff --git a/lib/Support/SmallPtrSet.cpp b/lib/Support/SmallPtrSet.cpp index 68d9c29411f0..3b53e9ff49fe 100644 --- a/lib/Support/SmallPtrSet.cpp +++ b/lib/Support/SmallPtrSet.cpp @@ -13,6 +13,7 @@ //===----------------------------------------------------------------------===// #include "llvm/ADT/SmallPtrSet.h" +#include "llvm/ADT/DenseMapInfo.h" #include "llvm/Support/MathExtras.h" #include <algorithm> #include <cstdlib> @@ -102,7 +103,7 @@ bool SmallPtrSetImpl::erase_imp(const void * Ptr) { } const void * const *SmallPtrSetImpl::FindBucketFor(const void *Ptr) const { - unsigned Bucket = Hash(Ptr); + unsigned Bucket = DenseMapInfo<void *>::getHashValue(Ptr) & (CurArraySize-1); unsigned ArraySize = CurArraySize; unsigned ProbeAmt = 1; const void *const *Array = CurArray; diff --git a/lib/Support/SourceMgr.cpp b/lib/Support/SourceMgr.cpp index bbe36b260b9d..15278c598e52 100644 --- a/lib/Support/SourceMgr.cpp +++ b/lib/Support/SourceMgr.cpp @@ -193,7 +193,8 @@ SMDiagnostic SourceMgr::GetMessage(SMLoc Loc, SourceMgr::DiagKind Kind, } void SourceMgr::PrintMessage(SMLoc Loc, SourceMgr::DiagKind Kind, - const Twine &Msg, ArrayRef<SMRange> Ranges) const { + const Twine &Msg, ArrayRef<SMRange> Ranges, + bool ShowColors) const { SMDiagnostic Diagnostic = GetMessage(Loc, Kind, Msg, Ranges); // Report the message with the diagnostic handler if present. @@ -208,7 +209,7 @@ void SourceMgr::PrintMessage(SMLoc Loc, SourceMgr::DiagKind Kind, assert(CurBuf != -1 && "Invalid or unspecified location!"); PrintIncludeStack(getBufferInfo(CurBuf).IncludeLoc, OS); - Diagnostic.print(0, OS); + Diagnostic.print(0, OS, ShowColors); } //===----------------------------------------------------------------------===// @@ -225,7 +226,14 @@ SMDiagnostic::SMDiagnostic(const SourceMgr &sm, SMLoc L, const std::string &FN, } -void SMDiagnostic::print(const char *ProgName, raw_ostream &S) const { +void SMDiagnostic::print(const char *ProgName, raw_ostream &S, + bool ShowColors) const { + // Display colors only if OS goes to a tty. 
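[Editor's note] The SmallPtrSet hunk above swaps a private Hash() helper for the shared DenseMapInfo pointer hash; because CurArraySize is always a power of two, reducing the hash to a bucket index is a mask rather than a modulo. The new FindBucketFor computation, in isolation:

#include "llvm/ADT/DenseMapInfo.h"

// ArraySize must be a power of two, as SmallPtrSet guarantees; the mask
// then keeps the low bits of the pointer hash as the bucket index.
static unsigned bucketFor(const void *Ptr, unsigned ArraySize) {
  unsigned Hash = llvm::DenseMapInfo<void *>::getHashValue(Ptr);
  return Hash & (ArraySize - 1);
}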
+ ShowColors &= S.is_displayed(); + + if (ShowColors) + S.changeColor(raw_ostream::SAVEDCOLOR, true); + if (ProgName && ProgName[0]) S << ProgName << ": "; @@ -244,13 +252,33 @@ void SMDiagnostic::print(const char *ProgName, raw_ostream &S) const { } switch (Kind) { - case SourceMgr::DK_Error: S << "error: "; break; - case SourceMgr::DK_Warning: S << "warning: "; break; - case SourceMgr::DK_Note: S << "note: "; break; + case SourceMgr::DK_Error: + if (ShowColors) + S.changeColor(raw_ostream::RED, true); + S << "error: "; + break; + case SourceMgr::DK_Warning: + if (ShowColors) + S.changeColor(raw_ostream::MAGENTA, true); + S << "warning: "; + break; + case SourceMgr::DK_Note: + if (ShowColors) + S.changeColor(raw_ostream::BLACK, true); + S << "note: "; + break; } - + + if (ShowColors) { + S.resetColor(); + S.changeColor(raw_ostream::SAVEDCOLOR, true); + } + S << Message << '\n'; + if (ShowColors) + S.resetColor(); + if (LineNo == -1 || ColumnNo == -1) return; @@ -292,6 +320,9 @@ void SMDiagnostic::print(const char *ProgName, raw_ostream &S) const { } S << '\n'; + if (ShowColors) + S.changeColor(raw_ostream::GREEN, true); + // Print out the caret line, matching tabs in the source line. for (unsigned i = 0, e = CaretLine.size(), OutCol = 0; i != e; ++i) { if (i >= LineContents.size() || LineContents[i] != '\t') { @@ -306,6 +337,9 @@ void SMDiagnostic::print(const char *ProgName, raw_ostream &S) const { ++OutCol; } while (OutCol & 7); } + + if (ShowColors) + S.resetColor(); S << '\n'; } diff --git a/lib/Support/Unix/Process.inc b/lib/Support/Unix/Process.inc index 2d7fd384e8bb..f640462a4517 100644 --- a/lib/Support/Unix/Process.inc +++ b/lib/Support/Unix/Process.inc @@ -290,6 +290,10 @@ const char *Process::OutputBold(bool bg) { return "\033[1m"; } +const char *Process::OutputReverse() { + return "\033[7m"; +} + const char *Process::ResetColor() { return "\033[0m"; } diff --git a/lib/Support/Windows/Process.inc b/lib/Support/Windows/Process.inc index 913b0734ddc9..9a388b4efc6d 100644 --- a/lib/Support/Windows/Process.inc +++ b/lib/Support/Windows/Process.inc @@ -215,6 +215,38 @@ const char *Process::OutputColor(char code, bool bold, bool bg) { return 0; } +static WORD GetConsoleTextAttribute(HANDLE hConsoleOutput) { + CONSOLE_SCREEN_BUFFER_INFO info; + GetConsoleScreenBufferInfo(GetStdHandle(STD_OUTPUT_HANDLE), &info); + return info.wAttributes; +} + +const char *Process::OutputReverse() { + const WORD attributes + = GetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE)); + + const WORD foreground_mask = FOREGROUND_BLUE | FOREGROUND_GREEN | + FOREGROUND_RED | FOREGROUND_INTENSITY; + const WORD background_mask = BACKGROUND_BLUE | BACKGROUND_GREEN | + BACKGROUND_RED | BACKGROUND_INTENSITY; + const WORD color_mask = foreground_mask | background_mask; + + WORD new_attributes = + ((attributes & FOREGROUND_BLUE )?BACKGROUND_BLUE :0) | + ((attributes & FOREGROUND_GREEN )?BACKGROUND_GREEN :0) | + ((attributes & FOREGROUND_RED )?BACKGROUND_RED :0) | + ((attributes & FOREGROUND_INTENSITY)?BACKGROUND_INTENSITY:0) | + ((attributes & BACKGROUND_BLUE )?FOREGROUND_BLUE :0) | + ((attributes & BACKGROUND_GREEN )?FOREGROUND_GREEN :0) | + ((attributes & BACKGROUND_RED )?FOREGROUND_RED :0) | + ((attributes & BACKGROUND_INTENSITY)?FOREGROUND_INTENSITY:0) | + 0; + new_attributes = (attributes & ~color_mask) | (new_attributes & color_mask); + + SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), new_attributes); + return 0; +} + const char *Process::ResetColor() { 
SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), defaultColors()); return 0; diff --git a/lib/Support/YAMLParser.cpp b/lib/Support/YAMLParser.cpp index 330519f3019d..d38b51b0b28a 100644 --- a/lib/Support/YAMLParser.cpp +++ b/lib/Support/YAMLParser.cpp @@ -1732,7 +1732,7 @@ StringRef ScalarNode::unescapeDoubleQuoted( StringRef UnquotedValue if (UnquotedValue.size() < 3) // TODO: Report error. break; - unsigned int UnicodeScalarValue; + unsigned int UnicodeScalarValue = 0; UnquotedValue.substr(1, 2).getAsInteger(16, UnicodeScalarValue); encodeUTF8(UnicodeScalarValue, Storage); UnquotedValue = UnquotedValue.substr(2); @@ -1742,7 +1742,7 @@ StringRef ScalarNode::unescapeDoubleQuoted( StringRef UnquotedValue if (UnquotedValue.size() < 5) // TODO: Report error. break; - unsigned int UnicodeScalarValue; + unsigned int UnicodeScalarValue = 0; UnquotedValue.substr(1, 4).getAsInteger(16, UnicodeScalarValue); encodeUTF8(UnicodeScalarValue, Storage); UnquotedValue = UnquotedValue.substr(4); @@ -1752,7 +1752,7 @@ StringRef ScalarNode::unescapeDoubleQuoted( StringRef UnquotedValue if (UnquotedValue.size() < 9) // TODO: Report error. break; - unsigned int UnicodeScalarValue; + unsigned int UnicodeScalarValue = 0; UnquotedValue.substr(1, 8).getAsInteger(16, UnicodeScalarValue); encodeUTF8(UnicodeScalarValue, Storage); UnquotedValue = UnquotedValue.substr(8); diff --git a/lib/Support/raw_ostream.cpp b/lib/Support/raw_ostream.cpp index 72d3986f41dd..86cdca157278 100644 --- a/lib/Support/raw_ostream.cpp +++ b/lib/Support/raw_ostream.cpp @@ -633,6 +633,19 @@ raw_ostream &raw_fd_ostream::resetColor() { return *this; } +raw_ostream &raw_fd_ostream::reverseColor() { + if (sys::Process::ColorNeedsFlush()) + flush(); + const char *colorcode = sys::Process::OutputReverse(); + if (colorcode) { + size_t len = strlen(colorcode); + write(colorcode, len); + // don't account colors towards output characters + pos -= len; + } + return *this; +} + bool raw_fd_ostream::is_displayed() const { return sys::Process::FileDescriptorIsDisplayed(FD); } diff --git a/lib/TableGen/Error.cpp b/lib/TableGen/Error.cpp index 5071ee77ac43..1463b68144a8 100644 --- a/lib/TableGen/Error.cpp +++ b/lib/TableGen/Error.cpp @@ -20,6 +20,22 @@ namespace llvm { SourceMgr SrcMgr; +void PrintWarning(SMLoc WarningLoc, const Twine &Msg) { + SrcMgr.PrintMessage(WarningLoc, SourceMgr::DK_Warning, Msg); +} + +void PrintWarning(const char *Loc, const Twine &Msg) { + SrcMgr.PrintMessage(SMLoc::getFromPointer(Loc), SourceMgr::DK_Warning, Msg); +} + +void PrintWarning(const Twine &Msg) { + errs() << "warning:" << Msg << "\n"; +} + +void PrintWarning(const TGError &Warning) { + PrintWarning(Warning.getLoc(), Warning.getMessage()); +} + void PrintError(SMLoc ErrorLoc, const Twine &Msg) { SrcMgr.PrintMessage(ErrorLoc, SourceMgr::DK_Error, Msg); } diff --git a/lib/Target/ARM/ARMCallingConv.td b/lib/Target/ARM/ARMCallingConv.td index d33364bb2871..b9a25126ba67 100644 --- a/lib/Target/ARM/ARMCallingConv.td +++ b/lib/Target/ARM/ARMCallingConv.td @@ -9,10 +9,6 @@ // This describes the calling conventions for ARM architecture. //===----------------------------------------------------------------------===// -/// CCIfSubtarget - Match if the current subtarget has a feature F. 
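[Editor's note] For downstream users, the new ShowColors plumbing in SMDiagnostic::print boils down to the existing raw_ostream color API plus an is_displayed() guard so that output piped to a file stays free of escape codes. A hedged usage sketch of the same pattern:

#include "llvm/Support/raw_ostream.h"

void printColoredError(llvm::raw_ostream &OS, const char *Msg) {
  bool ShowColors = OS.is_displayed();   // only color a real terminal
  if (ShowColors)
    OS.changeColor(llvm::raw_ostream::RED, /*Bold=*/true);
  OS << "error: ";
  if (ShowColors)
    OS.resetColor();
  OS << Msg << '\n';
}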
-class CCIfSubtarget<string F, CCAction A>: - CCIf<!strconcat("State.getTarget().getSubtarget<ARMSubtarget>().", F), A>; - /// CCIfAlign - Match of the original alignment of the arg class CCIfAlign<string Align, CCAction A>: CCIf<!strconcat("ArgFlags.getOrigAlign() == ", Align), A>; diff --git a/lib/Target/ARM/ARMInstrFormats.td b/lib/Target/ARM/ARMInstrFormats.td index 1d38bcf9e843..f04926aaceba 100644 --- a/lib/Target/ARM/ARMInstrFormats.td +++ b/lib/Target/ARM/ARMInstrFormats.td @@ -532,6 +532,7 @@ class AIswp<bit b, dag oops, dag iops, string opc, list<dag> pattern> let Inst{11-4} = 0b00001001; let Inst{3-0} = Rt2; + let Unpredictable{11-8} = 0b1111; let DecoderMethod = "DecodeSwap"; } diff --git a/lib/Target/ARM/ARMInstrInfo.td b/lib/Target/ARM/ARMInstrInfo.td index 3caaa2366123..1eb561d69016 100644 --- a/lib/Target/ARM/ARMInstrInfo.td +++ b/lib/Target/ARM/ARMInstrInfo.td @@ -219,8 +219,11 @@ def UseFPVMLx : Predicate<"Subtarget->useFPVMLx()">; // Prefer fused MAC for fp mul + add over fp VMLA / VMLS if they are available. // But only select them if more precision in FP computation is allowed. -def UseFusedMAC : Predicate<"!TM.Options.NoExcessFPPrecision">; -def DontUseFusedMAC : Predicate<"!Subtarget->hasVFP4()">; +// Do not use them for Darwin platforms. +def UseFusedMAC : Predicate<"!TM.Options.NoExcessFPPrecision && " + "!Subtarget->isTargetDarwin()">; +def DontUseFusedMAC : Predicate<"!Subtarget->hasVFP4() || " + "Subtarget->isTargetDarwin()">; //===----------------------------------------------------------------------===// // ARM Flag Definitions. @@ -905,6 +908,11 @@ def p_imm : Operand<i32> { let DecoderMethod = "DecodeCoprocessor"; } +def pf_imm : Operand<i32> { + let PrintMethod = "printPImmediate"; + let ParserMatchClass = CoprocNumAsmOperand; +} + def CoprocRegAsmOperand : AsmOperandClass { let Name = "CoprocReg"; let ParserMethod = "parseCoprocRegOperand"; @@ -1184,6 +1192,8 @@ multiclass AI1_cmp_irs<bits<4> opcod, string opc, let Inst{19-16} = Rn; let Inst{15-12} = 0b0000; let Inst{11-0} = imm; + + let Unpredictable{15-12} = 0b1111; } def rr : AI1<opcod, (outs), (ins GPR:$Rn, GPR:$Rm), DPFrm, iir, opc, "\t$Rn, $Rm", @@ -1197,6 +1207,8 @@ multiclass AI1_cmp_irs<bits<4> opcod, string opc, let Inst{15-12} = 0b0000; let Inst{11-4} = 0b00000000; let Inst{3-0} = Rm; + + let Unpredictable{15-12} = 0b1111; } def rsi : AI1<opcod, (outs), (ins GPR:$Rn, so_reg_imm:$shift), DPSoRegImmFrm, iis, @@ -1211,11 +1223,13 @@ multiclass AI1_cmp_irs<bits<4> opcod, string opc, let Inst{11-5} = shift{11-5}; let Inst{4} = 0; let Inst{3-0} = shift{3-0}; + + let Unpredictable{15-12} = 0b1111; } def rsr : AI1<opcod, (outs), - (ins GPR:$Rn, so_reg_reg:$shift), DPSoRegRegFrm, iis, + (ins GPRnopc:$Rn, so_reg_reg:$shift), DPSoRegRegFrm, iis, opc, "\t$Rn, $shift", - [(opnode GPR:$Rn, so_reg_reg:$shift)]> { + [(opnode GPRnopc:$Rn, so_reg_reg:$shift)]> { bits<4> Rn; bits<12> shift; let Inst{25} = 0; @@ -1227,6 +1241,8 @@ multiclass AI1_cmp_irs<bits<4> opcod, string opc, let Inst{6-5} = shift{6-5}; let Inst{4} = 1; let Inst{3-0} = shift{3-0}; + + let Unpredictable{15-12} = 0b1111; } } @@ -4103,7 +4119,7 @@ def ISB : AInoP<(outs), (ins memb_opt:$opt), MiscFrm, NoItinerary, let Inst{3-0} = opt; } -// Pseudo isntruction that combines movs + predicated rsbmi +// Pseudo instruction that combines movs + predicated rsbmi // to implement integer ABS let usesCustomInserter = 1, Defs = [CPSR] in { def ABS : ARMPseudoInst< @@ -4264,9 +4280,9 @@ def CLREX : AXI<(outs), (ins), MiscFrm, NoItinerary, "clrex", []>, // 
SWP/SWPB are deprecated in V6/V7. let mayLoad = 1, mayStore = 1 in { -def SWP : AIswp<0, (outs GPR:$Rt), (ins GPR:$Rt2, addr_offset_none:$addr), +def SWP : AIswp<0, (outs GPRnopc:$Rt), (ins GPRnopc:$Rt2, addr_offset_none:$addr), "swp", []>; -def SWPB: AIswp<1, (outs GPR:$Rt), (ins GPR:$Rt2, addr_offset_none:$addr), +def SWPB: AIswp<1, (outs GPRnopc:$Rt), (ins GPRnopc:$Rt2, addr_offset_none:$addr), "swpb", []>; } @@ -4295,7 +4311,7 @@ def CDP : ABI<0b1110, (outs), (ins p_imm:$cop, imm0_15:$opc1, let Inst{23-20} = opc1; } -def CDP2 : ABXI<0b1110, (outs), (ins p_imm:$cop, imm0_15:$opc1, +def CDP2 : ABXI<0b1110, (outs), (ins pf_imm:$cop, imm0_15:$opc1, c_imm:$CRd, c_imm:$CRn, c_imm:$CRm, imm0_7:$opc2), NoItinerary, "cdp2\t$cop, $opc1, $CRd, $CRn, $CRm, $opc2", [(int_arm_cdp2 imm:$cop, imm:$opc1, imm:$CRd, imm:$CRn, @@ -4574,7 +4590,7 @@ def : ARMV5TPat<(int_arm_mrc2 imm:$cop, imm:$opc1, imm:$CRn, class MovRRCopro<string opc, bit direction, list<dag> pattern = []> : ABI<0b1100, (outs), (ins p_imm:$cop, imm0_15:$opc1, - GPR:$Rt, GPR:$Rt2, c_imm:$CRm), + GPRnopc:$Rt, GPRnopc:$Rt2, c_imm:$CRm), NoItinerary, opc, "\t$cop, $opc1, $Rt, $Rt2, $CRm", pattern> { let Inst{23-21} = 0b010; let Inst{20} = direction; @@ -4593,13 +4609,13 @@ class MovRRCopro<string opc, bit direction, list<dag> pattern = []> } def MCRR : MovRRCopro<"mcrr", 0 /* from ARM core register to coprocessor */, - [(int_arm_mcrr imm:$cop, imm:$opc1, GPR:$Rt, GPR:$Rt2, + [(int_arm_mcrr imm:$cop, imm:$opc1, GPRnopc:$Rt, GPRnopc:$Rt2, imm:$CRm)]>; def MRRC : MovRRCopro<"mrrc", 1 /* from coprocessor to ARM core register */>; class MovRRCopro2<string opc, bit direction, list<dag> pattern = []> : ABXI<0b1100, (outs), (ins p_imm:$cop, imm0_15:$opc1, - GPR:$Rt, GPR:$Rt2, c_imm:$CRm), NoItinerary, + GPRnopc:$Rt, GPRnopc:$Rt2, c_imm:$CRm), NoItinerary, !strconcat(opc, "\t$cop, $opc1, $Rt, $Rt2, $CRm"), pattern> { let Inst{31-28} = 0b1111; let Inst{23-21} = 0b010; @@ -4616,10 +4632,12 @@ class MovRRCopro2<string opc, bit direction, list<dag> pattern = []> let Inst{11-8} = cop; let Inst{7-4} = opc1; let Inst{3-0} = CRm; + + let DecoderMethod = "DecodeMRRC2"; } def MCRR2 : MovRRCopro2<"mcrr2", 0 /* from ARM core register to coprocessor */, - [(int_arm_mcrr2 imm:$cop, imm:$opc1, GPR:$Rt, GPR:$Rt2, + [(int_arm_mcrr2 imm:$cop, imm:$opc1, GPRnopc:$Rt, GPRnopc:$Rt2, imm:$CRm)]>; def MRRC2 : MovRRCopro2<"mrrc2", 1 /* from coprocessor to ARM core register */>; @@ -4628,22 +4646,32 @@ def MRRC2 : MovRRCopro2<"mrrc2", 1 /* from coprocessor to ARM core register */>; // // Move to ARM core register from Special Register -def MRS : ABI<0b0001, (outs GPR:$Rd), (ins), NoItinerary, +def MRS : ABI<0b0001, (outs GPRnopc:$Rd), (ins), NoItinerary, "mrs", "\t$Rd, apsr", []> { bits<4> Rd; let Inst{23-16} = 0b00001111; + let Unpredictable{19-17} = 0b111; + let Inst{15-12} = Rd; - let Inst{7-4} = 0b0000; + + let Inst{11-0} = 0b000000000000; + let Unpredictable{11-0} = 0b110100001111; } -def : InstAlias<"mrs${p} $Rd, cpsr", (MRS GPR:$Rd, pred:$p)>, Requires<[IsARM]>; +def : InstAlias<"mrs${p} $Rd, cpsr", (MRS GPRnopc:$Rd, pred:$p)>, Requires<[IsARM]>; -def MRSsys : ABI<0b0001, (outs GPR:$Rd), (ins), NoItinerary, +// The MRSsys instruction is the MRS instruction from the ARM ARM, +// section B9.3.9, with the R bit set to 1. 
+def MRSsys : ABI<0b0001, (outs GPRnopc:$Rd), (ins), NoItinerary, "mrs", "\t$Rd, spsr", []> { bits<4> Rd; let Inst{23-16} = 0b01001111; + let Unpredictable{19-16} = 0b1111; + let Inst{15-12} = Rd; - let Inst{7-4} = 0b0000; + + let Inst{11-0} = 0b000000000000; + let Unpredictable{11-0} = 0b110100001111; } // Move from ARM core register to Special Register diff --git a/lib/Target/ARM/ARMInstrNEON.td b/lib/Target/ARM/ARMInstrNEON.td index c7219a60f6c3..fd8ac0b328eb 100644 --- a/lib/Target/ARM/ARMInstrNEON.td +++ b/lib/Target/ARM/ARMInstrNEON.td @@ -5634,6 +5634,7 @@ multiclass Lengthen_HalfSingle<string DestLanes, string DestTy, string SrcTy, // extload, zextload and sextload for a lengthening load followed by another // lengthening load, to quadruple the initial length. +// // Lengthen_Double<"4", "i32", "i8", "8", "i16", "4", "i32", qsub_0> = // Pat<(v4i32 (extloadvi8 addrmode5:$addr)) // (EXTRACT_SUBREG (VMOVLuv4i32 @@ -5644,28 +5645,63 @@ multiclass Lengthen_HalfSingle<string DestLanes, string DestTy, string SrcTy, // qsub_0)>; multiclass Lengthen_Double<string DestLanes, string DestTy, string SrcTy, string Insn1Lanes, string Insn1Ty, string Insn2Lanes, - string Insn2Ty, SubRegIndex RegType> { + string Insn2Ty> { + def _Any : Pat<(!cast<ValueType>("v" # DestLanes # DestTy) + (!cast<PatFrag>("extloadv" # SrcTy) addrmode5:$addr)), + (!cast<Instruction>("VMOVLuv" # Insn2Lanes # Insn2Ty) + (EXTRACT_SUBREG (!cast<Instruction>("VMOVLuv" # Insn1Lanes # Insn1Ty) + (INSERT_SUBREG (f64 (IMPLICIT_DEF)), (VLDRS addrmode5:$addr), + ssub_0)), dsub_0))>; + def _Z : Pat<(!cast<ValueType>("v" # DestLanes # DestTy) + (!cast<PatFrag>("zextloadv" # SrcTy) addrmode5:$addr)), + (!cast<Instruction>("VMOVLuv" # Insn2Lanes # Insn2Ty) + (EXTRACT_SUBREG (!cast<Instruction>("VMOVLuv" # Insn1Lanes # Insn1Ty) + (INSERT_SUBREG (f64 (IMPLICIT_DEF)), (VLDRS addrmode5:$addr), + ssub_0)), dsub_0))>; + def _S : Pat<(!cast<ValueType>("v" # DestLanes # DestTy) + (!cast<PatFrag>("sextloadv" # SrcTy) addrmode5:$addr)), + (!cast<Instruction>("VMOVLsv" # Insn2Lanes # Insn2Ty) + (EXTRACT_SUBREG (!cast<Instruction>("VMOVLsv" # Insn1Lanes # Insn1Ty) + (INSERT_SUBREG (f64 (IMPLICIT_DEF)), (VLDRS addrmode5:$addr), + ssub_0)), dsub_0))>; +} + +// extload, zextload and sextload for a lengthening load followed by another +// lengthening load, to quadruple the initial length, but which ends up only +// requiring half the available lanes (a 64-bit outcome instead of a 128-bit). 
+// +// Lengthen_HalfDouble<"2", "i32", "i8", "8", "i16", "4", "i32"> = +// Pat<(v4i32 (extloadvi8 addrmode5:$addr)) +// (EXTRACT_SUBREG (VMOVLuv4i32 +// (EXTRACT_SUBREG (VMOVLuv8i16 (INSERT_SUBREG (f64 (IMPLICIT_DEF)), +// (VLDRS addrmode5:$addr), +// ssub_0)), +// dsub_0)), +// dsub_0)>; +multiclass Lengthen_HalfDouble<string DestLanes, string DestTy, string SrcTy, + string Insn1Lanes, string Insn1Ty, string Insn2Lanes, + string Insn2Ty> { def _Any : Pat<(!cast<ValueType>("v" # DestLanes # DestTy) (!cast<PatFrag>("extloadv" # SrcTy) addrmode5:$addr)), (EXTRACT_SUBREG (!cast<Instruction>("VMOVLuv" # Insn2Lanes # Insn2Ty) (EXTRACT_SUBREG (!cast<Instruction>("VMOVLuv" # Insn1Lanes # Insn1Ty) (INSERT_SUBREG (f64 (IMPLICIT_DEF)), (VLDRS addrmode5:$addr), ssub_0)), dsub_0)), - RegType)>; + dsub_0)>; def _Z : Pat<(!cast<ValueType>("v" # DestLanes # DestTy) (!cast<PatFrag>("zextloadv" # SrcTy) addrmode5:$addr)), (EXTRACT_SUBREG (!cast<Instruction>("VMOVLuv" # Insn2Lanes # Insn2Ty) (EXTRACT_SUBREG (!cast<Instruction>("VMOVLuv" # Insn1Lanes # Insn1Ty) (INSERT_SUBREG (f64 (IMPLICIT_DEF)), (VLDRS addrmode5:$addr), ssub_0)), dsub_0)), - RegType)>; + dsub_0)>; def _S : Pat<(!cast<ValueType>("v" # DestLanes # DestTy) (!cast<PatFrag>("sextloadv" # SrcTy) addrmode5:$addr)), (EXTRACT_SUBREG (!cast<Instruction>("VMOVLsv" # Insn2Lanes # Insn2Ty) (EXTRACT_SUBREG (!cast<Instruction>("VMOVLsv" # Insn1Lanes # Insn1Ty) (INSERT_SUBREG (f64 (IMPLICIT_DEF)), (VLDRS addrmode5:$addr), ssub_0)), dsub_0)), - RegType)>; + dsub_0)>; } defm : Lengthen_Single<"8", "i16", "i8">; // v8i8 -> v8i16 @@ -5676,12 +5712,12 @@ defm : Lengthen_HalfSingle<"4", "i16", "i8", "8", "i16">; // v4i8 -> v4i16 defm : Lengthen_HalfSingle<"2", "i16", "i8", "8", "i16">; // v2i8 -> v2i16 defm : Lengthen_HalfSingle<"2", "i32", "i16", "4", "i32">; // v2i16 -> v2i32 -// Double lengthening - v4i8 -> v4i16 -> v4i32 -defm : Lengthen_Double<"4", "i32", "i8", "8", "i16", "4", "i32", qsub_0>; +// Double lengthening - v4i8 -> v4i16 -> v4i32 +defm : Lengthen_Double<"4", "i32", "i8", "8", "i16", "4", "i32">; // v2i8 -> v2i16 -> v2i32 -defm : Lengthen_Double<"2", "i32", "i8", "8", "i16", "4", "i32", dsub_0>; +defm : Lengthen_HalfDouble<"2", "i32", "i8", "8", "i16", "4", "i32">; // v2i16 -> v2i32 -> v2i64 -defm : Lengthen_Double<"2", "i64", "i16", "4", "i32", "2", "i64", qsub_0>; +defm : Lengthen_Double<"2", "i64", "i16", "4", "i32", "2", "i64">; // Triple lengthening - v2i8 -> v2i16 -> v2i32 -> v2i64 def : Pat<(v2i64 (extloadvi8 addrmode5:$addr)), @@ -5951,7 +5987,7 @@ def : NEONInstAlias<"vshl${p}.u32 $Vdn, $Vm", def : NEONInstAlias<"vshl${p}.u64 $Vdn, $Vm", (VSHLuv2i64 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>; -// VSHL (immediate) two-operand aliases. +// VSHR (immediate) two-operand aliases. def : NEONInstAlias<"vshr${p}.s8 $Vdn, $imm", (VSHRsv8i8 DPR:$Vdn, DPR:$Vdn, shr_imm8:$imm, pred:$p)>; def : NEONInstAlias<"vshr${p}.s16 $Vdn, $imm", @@ -5988,6 +6024,41 @@ def : NEONInstAlias<"vshr${p}.u32 $Vdn, $imm", def : NEONInstAlias<"vshr${p}.u64 $Vdn, $imm", (VSHRuv2i64 QPR:$Vdn, QPR:$Vdn, shr_imm64:$imm, pred:$p)>; +// VRSHL two-operand aliases. 
+def : NEONInstAlias<"vrshl${p}.s8 $Vdn, $Vm", + (VRSHLsv8i8 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>; +def : NEONInstAlias<"vrshl${p}.s16 $Vdn, $Vm", + (VRSHLsv4i16 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>; +def : NEONInstAlias<"vrshl${p}.s32 $Vdn, $Vm", + (VRSHLsv2i32 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>; +def : NEONInstAlias<"vrshl${p}.s64 $Vdn, $Vm", + (VRSHLsv1i64 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>; +def : NEONInstAlias<"vrshl${p}.u8 $Vdn, $Vm", + (VRSHLuv8i8 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>; +def : NEONInstAlias<"vrshl${p}.u16 $Vdn, $Vm", + (VRSHLuv4i16 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>; +def : NEONInstAlias<"vrshl${p}.u32 $Vdn, $Vm", + (VRSHLuv2i32 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>; +def : NEONInstAlias<"vrshl${p}.u64 $Vdn, $Vm", + (VRSHLuv1i64 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>; + +def : NEONInstAlias<"vrshl${p}.s8 $Vdn, $Vm", + (VRSHLsv16i8 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>; +def : NEONInstAlias<"vrshl${p}.s16 $Vdn, $Vm", + (VRSHLsv8i16 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>; +def : NEONInstAlias<"vrshl${p}.s32 $Vdn, $Vm", + (VRSHLsv4i32 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>; +def : NEONInstAlias<"vrshl${p}.s64 $Vdn, $Vm", + (VRSHLsv2i64 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>; +def : NEONInstAlias<"vrshl${p}.u8 $Vdn, $Vm", + (VRSHLuv16i8 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>; +def : NEONInstAlias<"vrshl${p}.u16 $Vdn, $Vm", + (VRSHLuv8i16 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>; +def : NEONInstAlias<"vrshl${p}.u32 $Vdn, $Vm", + (VRSHLuv4i32 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>; +def : NEONInstAlias<"vrshl${p}.u64 $Vdn, $Vm", + (VRSHLuv2i64 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>; + // VLD1 single-lane pseudo-instructions. These need special handling for // the lane index that an InstAlias can't handle, so we use these instead. def VLD1LNdAsm_8 : NEONDataTypeAsmPseudoInst<"vld1${p}", ".8", "$list, $addr", @@ -6951,6 +7022,100 @@ def : NEONInstAlias<"vsli${p}.32 $Vdm, $imm", def : NEONInstAlias<"vsli${p}.64 $Vdm, $imm", (VSLIv2i64 QPR:$Vdm, QPR:$Vdm, shr_imm64:$imm, pred:$p)>; +// Two-operand variants for VHSUB. + // Signed. +def : NEONInstAlias<"vhsub${p}.s8 $Vdn, $Vm", + (VHSUBsv8i8 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>; +def : NEONInstAlias<"vhsub${p}.s16 $Vdn, $Vm", + (VHSUBsv4i16 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>; +def : NEONInstAlias<"vhsub${p}.s32 $Vdn, $Vm", + (VHSUBsv2i32 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>; + +def : NEONInstAlias<"vhsub${p}.s8 $Vdn, $Vm", + (VHSUBsv16i8 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>; +def : NEONInstAlias<"vhsub${p}.s16 $Vdn, $Vm", + (VHSUBsv8i16 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>; +def : NEONInstAlias<"vhsub${p}.s32 $Vdn, $Vm", + (VHSUBsv4i32 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>; + + // Unsigned. +def : NEONInstAlias<"vhsub${p}.u8 $Vdn, $Vm", + (VHSUBuv8i8 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>; +def : NEONInstAlias<"vhsub${p}.u16 $Vdn, $Vm", + (VHSUBuv4i16 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>; +def : NEONInstAlias<"vhsub${p}.u32 $Vdn, $Vm", + (VHSUBuv2i32 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>; + +def : NEONInstAlias<"vhsub${p}.u8 $Vdn, $Vm", + (VHSUBuv16i8 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>; +def : NEONInstAlias<"vhsub${p}.u16 $Vdn, $Vm", + (VHSUBuv8i16 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>; +def : NEONInstAlias<"vhsub${p}.u32 $Vdn, $Vm", + (VHSUBuv4i32 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>; + + +// Two-operand variants for VHADD. + // Signed. 
+def : NEONInstAlias<"vhadd${p}.s8 $Vdn, $Vm", + (VHADDsv8i8 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>; +def : NEONInstAlias<"vhadd${p}.s16 $Vdn, $Vm", + (VHADDsv4i16 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>; +def : NEONInstAlias<"vhadd${p}.s32 $Vdn, $Vm", + (VHADDsv2i32 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>; + +def : NEONInstAlias<"vhadd${p}.s8 $Vdn, $Vm", + (VHADDsv16i8 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>; +def : NEONInstAlias<"vhadd${p}.s16 $Vdn, $Vm", + (VHADDsv8i16 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>; +def : NEONInstAlias<"vhadd${p}.s32 $Vdn, $Vm", + (VHADDsv4i32 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>; + + // Unsigned. +def : NEONInstAlias<"vhadd${p}.u8 $Vdn, $Vm", + (VHADDuv8i8 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>; +def : NEONInstAlias<"vhadd${p}.u16 $Vdn, $Vm", + (VHADDuv4i16 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>; +def : NEONInstAlias<"vhadd${p}.u32 $Vdn, $Vm", + (VHADDuv2i32 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>; + +def : NEONInstAlias<"vhadd${p}.u8 $Vdn, $Vm", + (VHADDuv16i8 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>; +def : NEONInstAlias<"vhadd${p}.u16 $Vdn, $Vm", + (VHADDuv8i16 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>; +def : NEONInstAlias<"vhadd${p}.u32 $Vdn, $Vm", + (VHADDuv4i32 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>; + +// Two-operand variants for VRHADD. + // Signed. +def : NEONInstAlias<"vrhadd${p}.s8 $Vdn, $Rm", + (VRHADDsv8i8 DPR:$Vdn, DPR:$Vdn, DPR:$Rm, pred:$p)>; +def : NEONInstAlias<"vrhadd${p}.s16 $Vdn, $Rm", + (VRHADDsv4i16 DPR:$Vdn, DPR:$Vdn, DPR:$Rm, pred:$p)>; +def : NEONInstAlias<"vrhadd${p}.s32 $Vdn, $Rm", + (VRHADDsv2i32 DPR:$Vdn, DPR:$Vdn, DPR:$Rm, pred:$p)>; + +def : NEONInstAlias<"vrhadd${p}.s8 $Vdn, $Rm", + (VRHADDsv16i8 QPR:$Vdn, QPR:$Vdn, QPR:$Rm, pred:$p)>; +def : NEONInstAlias<"vrhadd${p}.s16 $Vdn, $Rm", + (VRHADDsv8i16 QPR:$Vdn, QPR:$Vdn, QPR:$Rm, pred:$p)>; +def : NEONInstAlias<"vrhadd${p}.s32 $Vdn, $Rm", + (VRHADDsv4i32 QPR:$Vdn, QPR:$Vdn, QPR:$Rm, pred:$p)>; + + // Unsigned. +def : NEONInstAlias<"vrhadd${p}.u8 $Vdn, $Rm", + (VRHADDuv8i8 DPR:$Vdn, DPR:$Vdn, DPR:$Rm, pred:$p)>; +def : NEONInstAlias<"vrhadd${p}.u16 $Vdn, $Rm", + (VRHADDuv4i16 DPR:$Vdn, DPR:$Vdn, DPR:$Rm, pred:$p)>; +def : NEONInstAlias<"vrhadd${p}.u32 $Vdn, $Rm", + (VRHADDuv2i32 DPR:$Vdn, DPR:$Vdn, DPR:$Rm, pred:$p)>; + +def : NEONInstAlias<"vrhadd${p}.u8 $Vdn, $Rm", + (VRHADDuv16i8 QPR:$Vdn, QPR:$Vdn, QPR:$Rm, pred:$p)>; +def : NEONInstAlias<"vrhadd${p}.u16 $Vdn, $Rm", + (VRHADDuv8i16 QPR:$Vdn, QPR:$Vdn, QPR:$Rm, pred:$p)>; +def : NEONInstAlias<"vrhadd${p}.u32 $Vdn, $Rm", + (VRHADDuv4i32 QPR:$Vdn, QPR:$Vdn, QPR:$Rm, pred:$p)>; + // VSWP allows, but does not require, a type suffix. 
defm : NEONDTAnyInstAlias<"vswp${p}", "$Vd, $Vm", (VSWPd DPR:$Vd, DPR:$Vm, pred:$p)>; diff --git a/lib/Target/ARM/ARMTargetMachine.cpp b/lib/Target/ARM/ARMTargetMachine.cpp index 047efc23a4ea..9aa83089202d 100644 --- a/lib/Target/ARM/ARMTargetMachine.cpp +++ b/lib/Target/ARM/ARMTargetMachine.cpp @@ -136,22 +136,22 @@ TargetPassConfig *ARMBaseTargetMachine::createPassConfig(PassManagerBase &PM) { bool ARMPassConfig::addPreISel() { if (TM->getOptLevel() != CodeGenOpt::None && EnableGlobalMerge) - PM.add(createGlobalMergePass(TM->getTargetLowering())); + PM->add(createGlobalMergePass(TM->getTargetLowering())); return false; } bool ARMPassConfig::addInstSelector() { - PM.add(createARMISelDag(getARMTargetMachine(), getOptLevel())); + PM->add(createARMISelDag(getARMTargetMachine(), getOptLevel())); return false; } bool ARMPassConfig::addPreRegAlloc() { // FIXME: temporarily disabling load / store optimization pass for Thumb1. if (getOptLevel() != CodeGenOpt::None && !getARMSubtarget().isThumb1Only()) - PM.add(createARMLoadStoreOptimizationPass(true)); + PM->add(createARMLoadStoreOptimizationPass(true)); if (getOptLevel() != CodeGenOpt::None && getARMSubtarget().isCortexA9()) - PM.add(createMLxExpansionPass()); + PM->add(createMLxExpansionPass()); return true; } @@ -159,23 +159,23 @@ bool ARMPassConfig::addPreSched2() { // FIXME: temporarily disabling load / store optimization pass for Thumb1. if (getOptLevel() != CodeGenOpt::None) { if (!getARMSubtarget().isThumb1Only()) { - PM.add(createARMLoadStoreOptimizationPass()); + PM->add(createARMLoadStoreOptimizationPass()); printAndVerify("After ARM load / store optimizer"); } if (getARMSubtarget().hasNEON()) - PM.add(createExecutionDependencyFixPass(&ARM::DPRRegClass)); + PM->add(createExecutionDependencyFixPass(&ARM::DPRRegClass)); } // Expand some pseudo instructions into multiple instructions to allow // proper scheduling. - PM.add(createARMExpandPseudoPass()); + PM->add(createARMExpandPseudoPass()); if (getOptLevel() != CodeGenOpt::None) { if (!getARMSubtarget().isThumb1Only()) addPass(IfConverterID); } if (getARMSubtarget().isThumb2()) - PM.add(createThumb2ITBlockPass()); + PM->add(createThumb2ITBlockPass()); return true; } @@ -183,13 +183,13 @@ bool ARMPassConfig::addPreSched2() { bool ARMPassConfig::addPreEmitPass() { if (getARMSubtarget().isThumb2()) { if (!getARMSubtarget().prefers32BitThumb()) - PM.add(createThumb2SizeReductionPass()); + PM->add(createThumb2SizeReductionPass()); // Constant island pass work on unbundled instructions. 
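       // (Unbundling first lets ARMConstantIslandPass see individual
       // instructions and so compute accurate sizes and offsets.)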
addPass(UnpackMachineBundlesID); } - PM.add(createARMConstantIslandPass()); + PM->add(createARMConstantIslandPass()); return true; } diff --git a/lib/Target/ARM/AsmParser/ARMAsmParser.cpp b/lib/Target/ARM/AsmParser/ARMAsmParser.cpp index e55a7dad45db..2c53e3f8f8cd 100644 --- a/lib/Target/ARM/AsmParser/ARMAsmParser.cpp +++ b/lib/Target/ARM/AsmParser/ARMAsmParser.cpp @@ -82,8 +82,14 @@ class ARMAsmParser : public MCTargetAsmParser { MCAsmParser &getParser() const { return Parser; } MCAsmLexer &getLexer() const { return Parser.getLexer(); } - void Warning(SMLoc L, const Twine &Msg) { Parser.Warning(L, Msg); } - bool Error(SMLoc L, const Twine &Msg) { return Parser.Error(L, Msg); } + bool Warning(SMLoc L, const Twine &Msg, + ArrayRef<SMRange> Ranges = ArrayRef<SMRange>()) { + return Parser.Warning(L, Msg, Ranges); + } + bool Error(SMLoc L, const Twine &Msg, + ArrayRef<SMRange> Ranges = ArrayRef<SMRange>()) { + return Parser.Error(L, Msg, Ranges); + } int tryParseRegister(); bool tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &); @@ -478,6 +484,8 @@ public: /// getEndLoc - Get the location of the last token of this operand. SMLoc getEndLoc() const { return EndLoc; } + SMRange getLocRange() const { return SMRange(StartLoc, EndLoc); } + ARMCC::CondCodes getCondCode() const { assert(Kind == k_CondCode && "Invalid access!"); return CC.Val; @@ -4518,22 +4526,26 @@ bool ARMAsmParser::parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands, case AsmToken::Dollar: case AsmToken::Hash: { // #42 -> immediate. - // TODO: ":lower16:" and ":upper16:" modifiers after # before immediate S = Parser.getTok().getLoc(); Parser.Lex(); - bool isNegative = Parser.getTok().is(AsmToken::Minus); - const MCExpr *ImmVal; - if (getParser().ParseExpression(ImmVal)) - return true; - const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal); - if (CE) { - int32_t Val = CE->getValue(); - if (isNegative && Val == 0) - ImmVal = MCConstantExpr::Create(INT32_MIN, getContext()); + + if (Parser.getTok().isNot(AsmToken::Colon)) { + bool isNegative = Parser.getTok().is(AsmToken::Minus); + const MCExpr *ImmVal; + if (getParser().ParseExpression(ImmVal)) + return true; + const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal); + if (CE) { + int32_t Val = CE->getValue(); + if (isNegative && Val == 0) + ImmVal = MCConstantExpr::Create(INT32_MIN, getContext()); + } + E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1); + Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E)); + return false; } - E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1); - Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E)); - return false; + // w/ a ':' after the '#', it's just like a plain ':'. + // FALLTHROUGH } case AsmToken::Colon: { // ":lower16:" and ":upper16:" expression prefixes @@ -7321,7 +7333,8 @@ MatchAndEmitInstruction(SMLoc IDLoc, return Error(ErrorLoc, "invalid operand for instruction"); } case Match_MnemonicFail: - return Error(IDLoc, "invalid instruction"); + return Error(IDLoc, "invalid instruction", + ((ARMOperand*)Operands[0])->getLocRange()); case Match_ConversionFail: // The converter function will have already emited a diagnostic. 
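     // so just return failure without issuing a second message.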
return true; diff --git a/lib/Target/ARM/Disassembler/ARMDisassembler.cpp b/lib/Target/ARM/Disassembler/ARMDisassembler.cpp index 2f504b756b1b..912935db17ac 100644 --- a/lib/Target/ARM/Disassembler/ARMDisassembler.cpp +++ b/lib/Target/ARM/Disassembler/ARMDisassembler.cpp @@ -326,6 +326,8 @@ static DecodeStatus DecodeT2ShifterImmOperand(MCInst &Inst, unsigned Val, static DecodeStatus DecodeLDR(MCInst &Inst, unsigned Val, uint64_t Address, const void *Decoder); +static DecodeStatus DecodeMRRC2(llvm::MCInst &Inst, unsigned Val, + uint64_t Address, const void *Decoder); #include "ARMGenDisassemblerTables.inc" #include "ARMGenInstrInfo.inc" #include "ARMGenEDInfo.inc" @@ -2690,7 +2692,6 @@ static DecodeStatus DecodeVLD2DupInstruction(MCInst &Inst, unsigned Insn, unsigned Rm = fieldFromInstruction32(Insn, 0, 4); unsigned align = fieldFromInstruction32(Insn, 4, 1); unsigned size = 1 << fieldFromInstruction32(Insn, 6, 2); - unsigned pred = fieldFromInstruction32(Insn, 22, 4); align *= 2*size; switch (Inst.getOpcode()) { @@ -2721,16 +2722,11 @@ static DecodeStatus DecodeVLD2DupInstruction(MCInst &Inst, unsigned Insn, return MCDisassembler::Fail; Inst.addOperand(MCOperand::CreateImm(align)); - if (Rm == 0xD) - Inst.addOperand(MCOperand::CreateReg(0)); - else if (Rm != 0xF) { + if (Rm != 0xD && Rm != 0xF) { if (!Check(S, DecodeGPRRegisterClass(Inst, Rm, Address, Decoder))) return MCDisassembler::Fail; } - if (!Check(S, DecodePredicateOperand(Inst, pred, Address, Decoder))) - return MCDisassembler::Fail; - return S; } @@ -4314,6 +4310,10 @@ static DecodeStatus DecodeSwap(MCInst &Inst, unsigned Insn, return DecodeCPSInstruction(Inst, Insn, Address, Decoder); DecodeStatus S = MCDisassembler::Success; + + if (Rt == Rn || Rn == Rt2) + S = MCDisassembler::SoftFail; + if (!Check(S, DecodeGPRnopcRegisterClass(Inst, Rt, Address, Decoder))) return MCDisassembler::Fail; if (!Check(S, DecodeGPRnopcRegisterClass(Inst, Rt2, Address, Decoder))) @@ -4409,3 +4409,31 @@ static DecodeStatus DecodeLDR(MCInst &Inst, unsigned Val, return S; } +static DecodeStatus DecodeMRRC2(llvm::MCInst &Inst, unsigned Val, + uint64_t Address, const void *Decoder) { + + DecodeStatus S = MCDisassembler::Success; + + unsigned CRm = fieldFromInstruction32(Val, 0, 4); + unsigned opc1 = fieldFromInstruction32(Val, 4, 4); + unsigned cop = fieldFromInstruction32(Val, 8, 4); + unsigned Rt = fieldFromInstruction32(Val, 12, 4); + unsigned Rt2 = fieldFromInstruction32(Val, 16, 4); + + if ((cop & ~0x1) == 0xa) + return MCDisassembler::Fail; + + if (Rt == Rt2) + S = MCDisassembler::SoftFail; + + Inst.addOperand(MCOperand::CreateImm(cop)); + Inst.addOperand(MCOperand::CreateImm(opc1)); + if (!Check(S, DecodeGPRnopcRegisterClass(Inst, Rt, Address, Decoder))) + return MCDisassembler::Fail; + if (!Check(S, DecodeGPRnopcRegisterClass(Inst, Rt2, Address, Decoder))) + return MCDisassembler::Fail; + Inst.addOperand(MCOperand::CreateImm(CRm)); + + return S; +} + diff --git a/lib/Target/ARM/InstPrinter/ARMInstPrinter.cpp b/lib/Target/ARM/InstPrinter/ARMInstPrinter.cpp index b3eeafe08314..cbd81c11a45b 100644 --- a/lib/Target/ARM/InstPrinter/ARMInstPrinter.cpp +++ b/lib/Target/ARM/InstPrinter/ARMInstPrinter.cpp @@ -209,12 +209,12 @@ void ARMInstPrinter::printOperand(const MCInst *MI, unsigned OpNo, } else { assert(Op.isExpr() && "unknown operand kind in printOperand"); // If a symbolic branch target was added as a constant expression then print - // that address in hex. + // that address in hex. And only print 32 unsigned bits for the address. 
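+    // (For example, a sign-extended target such as 0xffffffff8000f000 on a
+    // 64-bit host is printed as 0x8000f000.)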
const MCConstantExpr *BranchTarget = dyn_cast<MCConstantExpr>(Op.getExpr()); int64_t Address; if (BranchTarget && BranchTarget->EvaluateAsAbsolute(Address)) { O << "0x"; - O.write_hex(Address); + O.write_hex((uint32_t)Address); } else { // Otherwise, just print the expression. diff --git a/lib/Target/CellSPU/SPUCallingConv.td b/lib/Target/CellSPU/SPUCallingConv.td index 9f9692bf67fe..9bc6be79860b 100644 --- a/lib/Target/CellSPU/SPUCallingConv.td +++ b/lib/Target/CellSPU/SPUCallingConv.td @@ -11,10 +11,6 @@ // //===----------------------------------------------------------------------===// -/// CCIfSubtarget - Match if the current subtarget has a feature F. -class CCIfSubtarget<string F, CCAction A> - : CCIf<!strconcat("State.getTarget().getSubtarget<PPCSubtarget>().", F), A>; - //===----------------------------------------------------------------------===// // Return Value Calling Convention //===----------------------------------------------------------------------===// diff --git a/lib/Target/CellSPU/SPUTargetMachine.cpp b/lib/Target/CellSPU/SPUTargetMachine.cpp index 21f6b25bf256..3b90261fe690 100644 --- a/lib/Target/CellSPU/SPUTargetMachine.cpp +++ b/lib/Target/CellSPU/SPUTargetMachine.cpp @@ -72,7 +72,7 @@ TargetPassConfig *SPUTargetMachine::createPassConfig(PassManagerBase &PM) { bool SPUPassConfig::addInstSelector() { // Install an instruction selector. - PM.add(createSPUISelDag(getSPUTargetMachine())); + PM->add(createSPUISelDag(getSPUTargetMachine())); return false; } @@ -85,9 +85,9 @@ bool SPUPassConfig::addPreEmitPass() { (BuilderFunc)(intptr_t)sys::DynamicLibrary::SearchForAddressOfSymbol( "createTCESchedulerPass"); if (schedulerCreator != NULL) - PM.add(schedulerCreator("cellspu")); + PM->add(schedulerCreator("cellspu")); //align instructions with nops/lnops for dual issue - PM.add(createSPUNopFillerPass(getSPUTargetMachine())); + PM->add(createSPUNopFillerPass(getSPUTargetMachine())); return true; } diff --git a/lib/Target/Hexagon/CMakeLists.txt b/lib/Target/Hexagon/CMakeLists.txt index 8a49cd8105a7..af9e8136bf16 100644 --- a/lib/Target/Hexagon/CMakeLists.txt +++ b/lib/Target/Hexagon/CMakeLists.txt @@ -11,15 +11,15 @@ add_public_tablegen_target(HexagonCommonTableGen) add_llvm_target(HexagonCodeGen HexagonAsmPrinter.cpp - HexagonCFGOptimizer.cpp HexagonCallingConvLower.cpp + HexagonCFGOptimizer.cpp HexagonExpandPredSpillCode.cpp HexagonFrameLowering.cpp HexagonHardwareLoops.cpp + HexagonMCInstLower.cpp + HexagonInstrInfo.cpp HexagonISelDAGToDAG.cpp HexagonISelLowering.cpp - HexagonInstrInfo.cpp - HexagonMCInstLower.cpp HexagonPeephole.cpp HexagonRegisterInfo.cpp HexagonRemoveSZExtArgs.cpp @@ -28,7 +28,6 @@ add_llvm_target(HexagonCodeGen HexagonSubtarget.cpp HexagonTargetMachine.cpp HexagonTargetObjectFile.cpp - HexagonVLIWPacketizer.cpp ) add_subdirectory(TargetInfo) diff --git a/lib/Target/Hexagon/Hexagon.h b/lib/Target/Hexagon/Hexagon.h index 43858b9624f1..080832333671 100644 --- a/lib/Target/Hexagon/Hexagon.h +++ b/lib/Target/Hexagon/Hexagon.h @@ -40,7 +40,6 @@ namespace llvm { FunctionPass *createHexagonHardwareLoops(); FunctionPass *createHexagonPeephole(); FunctionPass *createHexagonFixupHwLoops(); - FunctionPass *createHexagonPacketizer(); /* TODO: object output. 
MCCodeEmitter *createHexagonMCCodeEmitter(const Target &, diff --git a/lib/Target/Hexagon/HexagonAsmPrinter.cpp b/lib/Target/Hexagon/HexagonAsmPrinter.cpp index 2cc8b814a022..39bf45d2d773 100644 --- a/lib/Target/Hexagon/HexagonAsmPrinter.cpp +++ b/lib/Target/Hexagon/HexagonAsmPrinter.cpp @@ -13,11 +13,11 @@ // //===----------------------------------------------------------------------===// + #define DEBUG_TYPE "asm-printer" #include "Hexagon.h" #include "HexagonAsmPrinter.h" #include "HexagonMachineFunctionInfo.h" -#include "HexagonMCInst.h" #include "HexagonTargetMachine.h" #include "HexagonSubtarget.h" #include "InstPrinter/HexagonInstPrinter.h" @@ -54,7 +54,6 @@ #include "llvm/ADT/SmallString.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringExtras.h" -#include <map> using namespace llvm; @@ -78,7 +77,8 @@ void HexagonAsmPrinter::printOperand(const MachineInstr *MI, unsigned OpNo, const MachineOperand &MO = MI->getOperand(OpNo); switch (MO.getType()) { - default: llvm_unreachable("<unknown operand type>"); + default: + assert(0 && "<unknown operand type>"); case MachineOperand::MO_Register: O << HexagonInstPrinter::getRegisterName(MO.getReg()); return; @@ -196,45 +196,10 @@ void HexagonAsmPrinter::printPredicateOperand(const MachineInstr *MI, /// the current output stream. /// void HexagonAsmPrinter::EmitInstruction(const MachineInstr *MI) { - if (MI->isBundle()) { - std::vector<const MachineInstr*> BundleMIs; - - const MachineBasicBlock *MBB = MI->getParent(); - MachineBasicBlock::const_instr_iterator MII = MI; - ++MII; - unsigned int IgnoreCount = 0; - while (MII != MBB->end() && MII->isInsideBundle()) { - const MachineInstr *MInst = MII; - if (MInst->getOpcode() == TargetOpcode::DBG_VALUE || - MInst->getOpcode() == TargetOpcode::IMPLICIT_DEF) { - IgnoreCount++; - ++MII; - continue; - } - //BundleMIs.push_back(&*MII); - BundleMIs.push_back(MInst); - ++MII; - } - unsigned Size = BundleMIs.size(); - assert((Size+IgnoreCount) == MI->getBundleSize() && "Corrupt Bundle!"); - for (unsigned Index = 0; Index < Size; Index++) { - HexagonMCInst MCI; - MCI.setStartPacket(Index == 0); - MCI.setEndPacket(Index == (Size-1)); - - HexagonLowerToMC(BundleMIs[Index], MCI, *this); - OutStreamer.EmitInstruction(MCI); - } - } - else { - HexagonMCInst MCI; - if (MI->getOpcode() == Hexagon::ENDLOOP0) { - MCI.setStartPacket(true); - MCI.setEndPacket(true); - } - HexagonLowerToMC(MI, MCI, *this); - OutStreamer.EmitInstruction(MCI); - } + MCInst MCI; + + HexagonLowerToMC(MI, MCI, *this); + OutStreamer.EmitInstruction(MCI); return; } @@ -277,17 +242,17 @@ void HexagonAsmPrinter::printJumpTable(const MachineInstr *MI, int OpNo, raw_ostream &O) { const MachineOperand &MO = MI->getOperand(OpNo); assert( (MO.getType() == MachineOperand::MO_JumpTableIndex) && - "Expecting jump table index"); + "Expecting jump table index"); // Hexagon_TODO: Do we need name mangling? O << *GetJTISymbol(MO.getIndex()); } void HexagonAsmPrinter::printConstantPool(const MachineInstr *MI, int OpNo, - raw_ostream &O) { + raw_ostream &O) { const MachineOperand &MO = MI->getOperand(OpNo); assert( (MO.getType() == MachineOperand::MO_ConstantPoolIndex) && - "Expecting constant pool index"); + "Expecting constant pool index"); // Hexagon_TODO: Do we need name mangling? 
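   // (GetCPISymbol returns the standard per-function constant-pool label,
   // e.g. .LCPI0_0, so presumably no extra mangling is needed.)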
O << *GetCPISymbol(MO.getIndex()); diff --git a/lib/Target/Hexagon/HexagonISelLowering.cpp b/lib/Target/Hexagon/HexagonISelLowering.cpp index d6da0d0911b9..8c4350d1c504 100644 --- a/lib/Target/Hexagon/HexagonISelLowering.cpp +++ b/lib/Target/Hexagon/HexagonISelLowering.cpp @@ -32,9 +32,11 @@ #include "llvm/CodeGen/MachineRegisterInfo.h" #include "llvm/CodeGen/SelectionDAGISel.h" #include "llvm/CodeGen/ValueTypes.h" +#include "llvm/Support/CommandLine.h" #include "llvm/Support/Debug.h" #include "llvm/Support/ErrorHandling.h" -#include "llvm/Support/CommandLine.h" +#include "llvm/Support/raw_ostream.h" + using namespace llvm; const unsigned Hexagon_MAX_RET_SIZE = 64; diff --git a/lib/Target/Hexagon/HexagonInstrFormats.td b/lib/Target/Hexagon/HexagonInstrFormats.td index 48f0f01bb4cf..c9f16fb538aa 100644 --- a/lib/Target/Hexagon/HexagonInstrFormats.td +++ b/lib/Target/Hexagon/HexagonInstrFormats.td @@ -13,26 +13,13 @@ // *** Must match HexagonBaseInfo.h *** //===----------------------------------------------------------------------===// -class Type<bits<5> t> { - bits<5> Value = t; -} -def TypePSEUDO : Type<0>; -def TypeALU32 : Type<1>; -def TypeCR : Type<2>; -def TypeJR : Type<3>; -def TypeJ : Type<4>; -def TypeLD : Type<5>; -def TypeST : Type<6>; -def TypeSYSTEM : Type<7>; -def TypeXTYPE : Type<8>; -def TypeMARKER : Type<31>; //===----------------------------------------------------------------------===// // Intruction Class Declaration + //===----------------------------------------------------------------------===// class InstHexagon<dag outs, dag ins, string asmstr, list<dag> pattern, - string cstr, InstrItinClass itin, Type type> : Instruction { + string cstr, InstrItinClass itin> : Instruction { field bits<32> Inst; let Namespace = "Hexagon"; @@ -44,15 +31,11 @@ class InstHexagon<dag outs, dag ins, string asmstr, list<dag> pattern, let Constraints = cstr; let Itinerary = itin; - // *** Must match HexagonBaseInfo.h *** - Type HexagonType = type; - let TSFlags{4-0} = HexagonType.Value; - bits<1> isHexagonSolo = 0; - let TSFlags{5} = isHexagonSolo; + // *** The code below must match HexagonBaseInfo.h *** // Predicated instructions. bits<1> isPredicated = 0; - let TSFlags{6} = isPredicated; + let TSFlags{1} = isPredicated; // *** The code above must match HexagonBaseInfo.h *** } @@ -64,40 +47,28 @@ class InstHexagon<dag outs, dag ins, string asmstr, list<dag> pattern, // LD Instruction Class in V2/V3/V4. // Definition of the instruction class NOT CHANGED. class LDInst<dag outs, dag ins, string asmstr, list<dag> pattern> - : InstHexagon<outs, ins, asmstr, pattern, "", LD, TypeLD> { + : InstHexagon<outs, ins, asmstr, pattern, "", LD> { bits<5> rd; bits<5> rs; bits<13> imm13; - let mayLoad = 1; } // LD Instruction Class in V2/V3/V4. // Definition of the instruction class NOT CHANGED. class LDInstPost<dag outs, dag ins, string asmstr, list<dag> pattern, string cstr> - : InstHexagon<outs, ins, asmstr, pattern, cstr, LD, TypeLD> { + : InstHexagon<outs, ins, asmstr, pattern, cstr, LD> { bits<5> rd; bits<5> rs; bits<5> rt; bits<13> imm13; - let mayLoad = 1; } // ST Instruction Class in V2/V3 can take SLOT0 only. // ST Instruction Class in V4 can take SLOT0 & SLOT1. // Definition of the instruction class CHANGED from V2/V3 to V4. 
class STInst<dag outs, dag ins, string asmstr, list<dag> pattern> - : InstHexagon<outs, ins, asmstr, pattern, "", ST, TypeST> { - bits<5> rd; - bits<5> rs; - bits<13> imm13; - let mayStore = 1; -} - -// SYSTEM Instruction Class in V4 can take SLOT0 only -// In V2/V3 we used ST for this but in v4 ST can take SLOT0 or SLOT1. -class SYSInst<dag outs, dag ins, string asmstr, list<dag> pattern> - : InstHexagon<outs, ins, asmstr, pattern, "", SYS, TypeSYSTEM> { + : InstHexagon<outs, ins, asmstr, pattern, "", ST> { bits<5> rd; bits<5> rs; bits<13> imm13; @@ -108,18 +79,17 @@ class SYSInst<dag outs, dag ins, string asmstr, list<dag> pattern> // Definition of the instruction class CHANGED from V2/V3 to V4. class STInstPost<dag outs, dag ins, string asmstr, list<dag> pattern, string cstr> - : InstHexagon<outs, ins, asmstr, pattern, cstr, ST, TypeST> { + : InstHexagon<outs, ins, asmstr, pattern, cstr, ST> { bits<5> rd; bits<5> rs; bits<5> rt; bits<13> imm13; - let mayStore = 1; } // ALU32 Instruction Class in V2/V3/V4. // Definition of the instruction class NOT CHANGED. class ALU32Type<dag outs, dag ins, string asmstr, list<dag> pattern> - : InstHexagon<outs, ins, asmstr, pattern, "", ALU32, TypeALU32> { + : InstHexagon<outs, ins, asmstr, pattern, "", ALU32> { bits<5> rd; bits<5> rs; bits<5> rt; @@ -132,17 +102,7 @@ class ALU32Type<dag outs, dag ins, string asmstr, list<dag> pattern> // Definition of the instruction class NOT CHANGED. // Name of the Instruction Class changed from ALU64 to XTYPE from V2/V3 to V4. class ALU64Type<dag outs, dag ins, string asmstr, list<dag> pattern> - : InstHexagon<outs, ins, asmstr, pattern, "", ALU64, TypeXTYPE> { - bits<5> rd; - bits<5> rs; - bits<5> rt; - bits<16> imm16; - bits<16> imm16_2; -} - -class ALU64_acc<dag outs, dag ins, string asmstr, list<dag> pattern, - string cstr> - : InstHexagon<outs, ins, asmstr, pattern, cstr, ALU64, TypeXTYPE> { + : InstHexagon<outs, ins, asmstr, pattern, "", ALU64> { bits<5> rd; bits<5> rs; bits<5> rt; @@ -155,7 +115,7 @@ class ALU64_acc<dag outs, dag ins, string asmstr, list<dag> pattern, // Definition of the instruction class NOT CHANGED. // Name of the Instruction Class changed from M to XTYPE from V2/V3 to V4. class MInst<dag outs, dag ins, string asmstr, list<dag> pattern> - : InstHexagon<outs, ins, asmstr, pattern, "", M, TypeXTYPE> { + : InstHexagon<outs, ins, asmstr, pattern, "", M> { bits<5> rd; bits<5> rs; bits<5> rt; @@ -166,8 +126,8 @@ class MInst<dag outs, dag ins, string asmstr, list<dag> pattern> // Definition of the instruction class NOT CHANGED. // Name of the Instruction Class changed from M to XTYPE from V2/V3 to V4. class MInst_acc<dag outs, dag ins, string asmstr, list<dag> pattern, - string cstr> - : InstHexagon<outs, ins, asmstr, pattern, cstr, M, TypeXTYPE> { + string cstr> + : InstHexagon<outs, ins, asmstr, pattern, cstr, M> { bits<5> rd; bits<5> rs; bits<5> rt; @@ -178,7 +138,9 @@ class MInst_acc<dag outs, dag ins, string asmstr, list<dag> pattern, // Definition of the instruction class NOT CHANGED. // Name of the Instruction Class changed from S to XTYPE from V2/V3 to V4. 
class SInst<dag outs, dag ins, string asmstr, list<dag> pattern> - : InstHexagon<outs, ins, asmstr, pattern, "", S, TypeXTYPE> { +//: InstHexagon<outs, ins, asmstr, pattern, cstr, !if(V4T, XTYPE_V4, M)> { + : InstHexagon<outs, ins, asmstr, pattern, "", S> { +// : InstHexagon<outs, ins, asmstr, pattern, "", S> { bits<5> rd; bits<5> rs; bits<5> rt; @@ -189,8 +151,8 @@ class SInst<dag outs, dag ins, string asmstr, list<dag> pattern> // Definition of the instruction class NOT CHANGED. // Name of the Instruction Class changed from S to XTYPE from V2/V3 to V4. class SInst_acc<dag outs, dag ins, string asmstr, list<dag> pattern, - string cstr> - : InstHexagon<outs, ins, asmstr, pattern, cstr, S, TypeXTYPE> { + string cstr> + : InstHexagon<outs, ins, asmstr, pattern, cstr, S> { // : InstHexagon<outs, ins, asmstr, pattern, cstr, S> { // : InstHexagon<outs, ins, asmstr, pattern, cstr, !if(V4T, XTYPE_V4, S)> { bits<5> rd; @@ -201,14 +163,14 @@ class SInst_acc<dag outs, dag ins, string asmstr, list<dag> pattern, // J Instruction Class in V2/V3/V4. // Definition of the instruction class NOT CHANGED. class JType<dag outs, dag ins, string asmstr, list<dag> pattern> - : InstHexagon<outs, ins, asmstr, pattern, "", J, TypeJ> { + : InstHexagon<outs, ins, asmstr, pattern, "", J> { bits<16> imm16; } // JR Instruction Class in V2/V3/V4. // Definition of the instruction class NOT CHANGED. class JRType<dag outs, dag ins, string asmstr, list<dag> pattern> - : InstHexagon<outs, ins, asmstr, pattern, "", JR, TypeJR> { + : InstHexagon<outs, ins, asmstr, pattern, "", JR> { bits<5> rs; bits<5> pu; // Predicate register } @@ -216,22 +178,15 @@ class JRType<dag outs, dag ins, string asmstr, list<dag> pattern> // CR Instruction Class in V2/V3/V4. // Definition of the instruction class NOT CHANGED. class CRInst<dag outs, dag ins, string asmstr, list<dag> pattern> - : InstHexagon<outs, ins, asmstr, pattern, "", CR, TypeCR> { + : InstHexagon<outs, ins, asmstr, pattern, "", CR> { bits<5> rs; bits<10> imm10; } -class Marker<dag outs, dag ins, string asmstr, list<dag> pattern> - : InstHexagon<outs, ins, asmstr, pattern, "", MARKER, TypeMARKER> { - let isCodeGenOnly = 1; - let isPseudo = 1; -} class Pseudo<dag outs, dag ins, string asmstr, list<dag> pattern> - : InstHexagon<outs, ins, asmstr, pattern, "", PSEUDO, TypePSEUDO> { - let isCodeGenOnly = 1; - let isPseudo = 1; -} + : InstHexagon<outs, ins, asmstr, pattern, "", PSEUDO>; + //===----------------------------------------------------------------------===// // Intruction Classes Definitions - @@ -267,11 +222,6 @@ class ALU64_rr<dag outs, dag ins, string asmstr, list<dag> pattern> : ALU64Type<outs, ins, asmstr, pattern> { } -class ALU64_ri<dag outs, dag ins, string asmstr, list<dag> pattern> - : ALU64Type<outs, ins, asmstr, pattern> { - let rt{0-4} = 0; -} - // J Type Instructions. class JInst<dag outs, dag ins, string asmstr, list<dag> pattern> : JType<outs, ins, asmstr, pattern> { @@ -287,14 +237,12 @@ class JRInst<dag outs, dag ins, string asmstr, list<dag> pattern> class STInstPI<dag outs, dag ins, string asmstr, list<dag> pattern, string cstr> : STInstPost<outs, ins, asmstr, pattern, cstr> { let rt{0-4} = 0; - let mayStore = 1; } // Post increment LD Instruction. 
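// (e.g. "r0 = memw(r1++#4)": load a word, then advance the base register
// by 4.)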
class LDInstPI<dag outs, dag ins, string asmstr, list<dag> pattern, string cstr> : LDInstPost<outs, ins, asmstr, pattern, cstr> { let rt{0-4} = 0; - let mayLoad = 1; } //===----------------------------------------------------------------------===// diff --git a/lib/Target/Hexagon/HexagonInstrFormatsV4.td b/lib/Target/Hexagon/HexagonInstrFormatsV4.td index 49741a3d1b20..bd5e4493d7c2 100644 --- a/lib/Target/Hexagon/HexagonInstrFormatsV4.td +++ b/lib/Target/Hexagon/HexagonInstrFormatsV4.td @@ -11,25 +11,11 @@ // //===----------------------------------------------------------------------===// -//----------------------------------------------------------------------------// -// Hexagon Intruction Flags + -// -// *** Must match BaseInfo.h *** -//----------------------------------------------------------------------------// - -def TypeMEMOP : Type<9>; -def TypeNV : Type<10>; -def TypePREFIX : Type<30>; - -//----------------------------------------------------------------------------// -// Intruction Classes Definitions + -//----------------------------------------------------------------------------// - // // NV type instructions. // class NVInst_V4<dag outs, dag ins, string asmstr, list<dag> pattern> - : InstHexagon<outs, ins, asmstr, pattern, "", NV_V4, TypeNV> { + : InstHexagon<outs, ins, asmstr, pattern, "", NV_V4> { bits<5> rd; bits<5> rs; bits<13> imm13; @@ -38,7 +24,7 @@ class NVInst_V4<dag outs, dag ins, string asmstr, list<dag> pattern> // Definition of Post increment new value store. class NVInstPost_V4<dag outs, dag ins, string asmstr, list<dag> pattern, string cstr> - : InstHexagon<outs, ins, asmstr, pattern, cstr, NV_V4, TypeNV> { + : InstHexagon<outs, ins, asmstr, pattern, cstr, NV_V4> { bits<5> rd; bits<5> rs; bits<5> rt; @@ -53,15 +39,8 @@ class NVInstPI_V4<dag outs, dag ins, string asmstr, list<dag> pattern, } class MEMInst_V4<dag outs, dag ins, string asmstr, list<dag> pattern> - : InstHexagon<outs, ins, asmstr, pattern, "", MEM_V4, TypeMEMOP> { + : InstHexagon<outs, ins, asmstr, pattern, "", MEM_V4> { bits<5> rd; bits<5> rs; bits<6> imm6; } - -class Immext<dag outs, dag ins, string asmstr, list<dag> pattern> - : InstHexagon<outs, ins, asmstr, pattern, "", PREFIX, TypePREFIX> { - let isCodeGenOnly = 1; - - bits<26> imm26; -} diff --git a/lib/Target/Hexagon/HexagonInstrInfo.cpp b/lib/Target/Hexagon/HexagonInstrInfo.cpp index 8685ec192c7e..77b366372cf8 100644 --- a/lib/Target/Hexagon/HexagonInstrInfo.cpp +++ b/lib/Target/Hexagon/HexagonInstrInfo.cpp @@ -11,10 +11,10 @@ // //===----------------------------------------------------------------------===// +#include "Hexagon.h" #include "HexagonInstrInfo.h" #include "HexagonRegisterInfo.h" #include "HexagonSubtarget.h" -#include "Hexagon.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SmallVector.h" #include "llvm/CodeGen/DFAPacketizer.h" @@ -466,865 +466,7 @@ unsigned HexagonInstrInfo::createVR(MachineFunction* MF, MVT VT) const { return NewReg; } -bool HexagonInstrInfo::isExtendable(const MachineInstr *MI) const { - switch(MI->getOpcode()) { - // JMP_EQri - case Hexagon::JMP_EQriPt_nv_V4: - case Hexagon::JMP_EQriPnt_nv_V4: - case Hexagon::JMP_EQriNotPt_nv_V4: - case Hexagon::JMP_EQriNotPnt_nv_V4: - - // JMP_EQri - with -1 - case Hexagon::JMP_EQriPtneg_nv_V4: - case Hexagon::JMP_EQriPntneg_nv_V4: - case Hexagon::JMP_EQriNotPtneg_nv_V4: - case Hexagon::JMP_EQriNotPntneg_nv_V4: - - // JMP_EQrr - case Hexagon::JMP_EQrrPt_nv_V4: - case Hexagon::JMP_EQrrPnt_nv_V4: - case Hexagon::JMP_EQrrNotPt_nv_V4: - case 
Hexagon::JMP_EQrrNotPnt_nv_V4: - - // JMP_GTri - case Hexagon::JMP_GTriPt_nv_V4: - case Hexagon::JMP_GTriPnt_nv_V4: - case Hexagon::JMP_GTriNotPt_nv_V4: - case Hexagon::JMP_GTriNotPnt_nv_V4: - - // JMP_GTri - with -1 - case Hexagon::JMP_GTriPtneg_nv_V4: - case Hexagon::JMP_GTriPntneg_nv_V4: - case Hexagon::JMP_GTriNotPtneg_nv_V4: - case Hexagon::JMP_GTriNotPntneg_nv_V4: - - // JMP_GTrr - case Hexagon::JMP_GTrrPt_nv_V4: - case Hexagon::JMP_GTrrPnt_nv_V4: - case Hexagon::JMP_GTrrNotPt_nv_V4: - case Hexagon::JMP_GTrrNotPnt_nv_V4: - - // JMP_GTrrdn - case Hexagon::JMP_GTrrdnPt_nv_V4: - case Hexagon::JMP_GTrrdnPnt_nv_V4: - case Hexagon::JMP_GTrrdnNotPt_nv_V4: - case Hexagon::JMP_GTrrdnNotPnt_nv_V4: - - // JMP_GTUri - case Hexagon::JMP_GTUriPt_nv_V4: - case Hexagon::JMP_GTUriPnt_nv_V4: - case Hexagon::JMP_GTUriNotPt_nv_V4: - case Hexagon::JMP_GTUriNotPnt_nv_V4: - - // JMP_GTUrr - case Hexagon::JMP_GTUrrPt_nv_V4: - case Hexagon::JMP_GTUrrPnt_nv_V4: - case Hexagon::JMP_GTUrrNotPt_nv_V4: - case Hexagon::JMP_GTUrrNotPnt_nv_V4: - - // JMP_GTUrrdn - case Hexagon::JMP_GTUrrdnPt_nv_V4: - case Hexagon::JMP_GTUrrdnPnt_nv_V4: - case Hexagon::JMP_GTUrrdnNotPt_nv_V4: - case Hexagon::JMP_GTUrrdnNotPnt_nv_V4: - return true; - - // TFR_FI - case Hexagon::TFR_FI: - return true; - - - default: - return false; - } - return false; -} - -bool HexagonInstrInfo::isExtended(const MachineInstr *MI) const { - switch(MI->getOpcode()) { - // JMP_EQri - case Hexagon::JMP_EQriPt_ie_nv_V4: - case Hexagon::JMP_EQriPnt_ie_nv_V4: - case Hexagon::JMP_EQriNotPt_ie_nv_V4: - case Hexagon::JMP_EQriNotPnt_ie_nv_V4: - - // JMP_EQri - with -1 - case Hexagon::JMP_EQriPtneg_ie_nv_V4: - case Hexagon::JMP_EQriPntneg_ie_nv_V4: - case Hexagon::JMP_EQriNotPtneg_ie_nv_V4: - case Hexagon::JMP_EQriNotPntneg_ie_nv_V4: - - // JMP_EQrr - case Hexagon::JMP_EQrrPt_ie_nv_V4: - case Hexagon::JMP_EQrrPnt_ie_nv_V4: - case Hexagon::JMP_EQrrNotPt_ie_nv_V4: - case Hexagon::JMP_EQrrNotPnt_ie_nv_V4: - - // JMP_GTri - case Hexagon::JMP_GTriPt_ie_nv_V4: - case Hexagon::JMP_GTriPnt_ie_nv_V4: - case Hexagon::JMP_GTriNotPt_ie_nv_V4: - case Hexagon::JMP_GTriNotPnt_ie_nv_V4: - - // JMP_GTri - with -1 - case Hexagon::JMP_GTriPtneg_ie_nv_V4: - case Hexagon::JMP_GTriPntneg_ie_nv_V4: - case Hexagon::JMP_GTriNotPtneg_ie_nv_V4: - case Hexagon::JMP_GTriNotPntneg_ie_nv_V4: - - // JMP_GTrr - case Hexagon::JMP_GTrrPt_ie_nv_V4: - case Hexagon::JMP_GTrrPnt_ie_nv_V4: - case Hexagon::JMP_GTrrNotPt_ie_nv_V4: - case Hexagon::JMP_GTrrNotPnt_ie_nv_V4: - - // JMP_GTrrdn - case Hexagon::JMP_GTrrdnPt_ie_nv_V4: - case Hexagon::JMP_GTrrdnPnt_ie_nv_V4: - case Hexagon::JMP_GTrrdnNotPt_ie_nv_V4: - case Hexagon::JMP_GTrrdnNotPnt_ie_nv_V4: - - // JMP_GTUri - case Hexagon::JMP_GTUriPt_ie_nv_V4: - case Hexagon::JMP_GTUriPnt_ie_nv_V4: - case Hexagon::JMP_GTUriNotPt_ie_nv_V4: - case Hexagon::JMP_GTUriNotPnt_ie_nv_V4: - - // JMP_GTUrr - case Hexagon::JMP_GTUrrPt_ie_nv_V4: - case Hexagon::JMP_GTUrrPnt_ie_nv_V4: - case Hexagon::JMP_GTUrrNotPt_ie_nv_V4: - case Hexagon::JMP_GTUrrNotPnt_ie_nv_V4: - - // JMP_GTUrrdn - case Hexagon::JMP_GTUrrdnPt_ie_nv_V4: - case Hexagon::JMP_GTUrrdnPnt_ie_nv_V4: - case Hexagon::JMP_GTUrrdnNotPt_ie_nv_V4: - case Hexagon::JMP_GTUrrdnNotPnt_ie_nv_V4: - - // V4 absolute set addressing. 
- case Hexagon::LDrid_abs_setimm_V4: - case Hexagon::LDriw_abs_setimm_V4: - case Hexagon::LDrih_abs_setimm_V4: - case Hexagon::LDrib_abs_setimm_V4: - case Hexagon::LDriuh_abs_setimm_V4: - case Hexagon::LDriub_abs_setimm_V4: - - case Hexagon::STrid_abs_setimm_V4: - case Hexagon::STrib_abs_setimm_V4: - case Hexagon::STrih_abs_setimm_V4: - case Hexagon::STriw_abs_setimm_V4: - - // V4 global address load. - case Hexagon::LDrid_GP_cPt_V4 : - case Hexagon::LDrid_GP_cNotPt_V4 : - case Hexagon::LDrid_GP_cdnPt_V4 : - case Hexagon::LDrid_GP_cdnNotPt_V4 : - case Hexagon::LDrib_GP_cPt_V4 : - case Hexagon::LDrib_GP_cNotPt_V4 : - case Hexagon::LDrib_GP_cdnPt_V4 : - case Hexagon::LDrib_GP_cdnNotPt_V4 : - case Hexagon::LDriub_GP_cPt_V4 : - case Hexagon::LDriub_GP_cNotPt_V4 : - case Hexagon::LDriub_GP_cdnPt_V4 : - case Hexagon::LDriub_GP_cdnNotPt_V4 : - case Hexagon::LDrih_GP_cPt_V4 : - case Hexagon::LDrih_GP_cNotPt_V4 : - case Hexagon::LDrih_GP_cdnPt_V4 : - case Hexagon::LDrih_GP_cdnNotPt_V4 : - case Hexagon::LDriuh_GP_cPt_V4 : - case Hexagon::LDriuh_GP_cNotPt_V4 : - case Hexagon::LDriuh_GP_cdnPt_V4 : - case Hexagon::LDriuh_GP_cdnNotPt_V4 : - case Hexagon::LDriw_GP_cPt_V4 : - case Hexagon::LDriw_GP_cNotPt_V4 : - case Hexagon::LDriw_GP_cdnPt_V4 : - case Hexagon::LDriw_GP_cdnNotPt_V4 : - case Hexagon::LDd_GP_cPt_V4 : - case Hexagon::LDd_GP_cNotPt_V4 : - case Hexagon::LDd_GP_cdnPt_V4 : - case Hexagon::LDd_GP_cdnNotPt_V4 : - case Hexagon::LDb_GP_cPt_V4 : - case Hexagon::LDb_GP_cNotPt_V4 : - case Hexagon::LDb_GP_cdnPt_V4 : - case Hexagon::LDb_GP_cdnNotPt_V4 : - case Hexagon::LDub_GP_cPt_V4 : - case Hexagon::LDub_GP_cNotPt_V4 : - case Hexagon::LDub_GP_cdnPt_V4 : - case Hexagon::LDub_GP_cdnNotPt_V4 : - case Hexagon::LDh_GP_cPt_V4 : - case Hexagon::LDh_GP_cNotPt_V4 : - case Hexagon::LDh_GP_cdnPt_V4 : - case Hexagon::LDh_GP_cdnNotPt_V4 : - case Hexagon::LDuh_GP_cPt_V4 : - case Hexagon::LDuh_GP_cNotPt_V4 : - case Hexagon::LDuh_GP_cdnPt_V4 : - case Hexagon::LDuh_GP_cdnNotPt_V4 : - case Hexagon::LDw_GP_cPt_V4 : - case Hexagon::LDw_GP_cNotPt_V4 : - case Hexagon::LDw_GP_cdnPt_V4 : - case Hexagon::LDw_GP_cdnNotPt_V4 : - - // V4 global address store. - case Hexagon::STrid_GP_cPt_V4 : - case Hexagon::STrid_GP_cNotPt_V4 : - case Hexagon::STrid_GP_cdnPt_V4 : - case Hexagon::STrid_GP_cdnNotPt_V4 : - case Hexagon::STrib_GP_cPt_V4 : - case Hexagon::STrib_GP_cNotPt_V4 : - case Hexagon::STrib_GP_cdnPt_V4 : - case Hexagon::STrib_GP_cdnNotPt_V4 : - case Hexagon::STrih_GP_cPt_V4 : - case Hexagon::STrih_GP_cNotPt_V4 : - case Hexagon::STrih_GP_cdnPt_V4 : - case Hexagon::STrih_GP_cdnNotPt_V4 : - case Hexagon::STriw_GP_cPt_V4 : - case Hexagon::STriw_GP_cNotPt_V4 : - case Hexagon::STriw_GP_cdnPt_V4 : - case Hexagon::STriw_GP_cdnNotPt_V4 : - case Hexagon::STd_GP_cPt_V4 : - case Hexagon::STd_GP_cNotPt_V4 : - case Hexagon::STd_GP_cdnPt_V4 : - case Hexagon::STd_GP_cdnNotPt_V4 : - case Hexagon::STb_GP_cPt_V4 : - case Hexagon::STb_GP_cNotPt_V4 : - case Hexagon::STb_GP_cdnPt_V4 : - case Hexagon::STb_GP_cdnNotPt_V4 : - case Hexagon::STh_GP_cPt_V4 : - case Hexagon::STh_GP_cNotPt_V4 : - case Hexagon::STh_GP_cdnPt_V4 : - case Hexagon::STh_GP_cdnNotPt_V4 : - case Hexagon::STw_GP_cPt_V4 : - case Hexagon::STw_GP_cNotPt_V4 : - case Hexagon::STw_GP_cdnPt_V4 : - case Hexagon::STw_GP_cdnNotPt_V4 : - - // V4 predicated global address new value store. 
- case Hexagon::STrib_GP_cPt_nv_V4 : - case Hexagon::STrib_GP_cNotPt_nv_V4 : - case Hexagon::STrib_GP_cdnPt_nv_V4 : - case Hexagon::STrib_GP_cdnNotPt_nv_V4 : - case Hexagon::STrih_GP_cPt_nv_V4 : - case Hexagon::STrih_GP_cNotPt_nv_V4 : - case Hexagon::STrih_GP_cdnPt_nv_V4 : - case Hexagon::STrih_GP_cdnNotPt_nv_V4 : - case Hexagon::STriw_GP_cPt_nv_V4 : - case Hexagon::STriw_GP_cNotPt_nv_V4 : - case Hexagon::STriw_GP_cdnPt_nv_V4 : - case Hexagon::STriw_GP_cdnNotPt_nv_V4 : - case Hexagon::STb_GP_cPt_nv_V4 : - case Hexagon::STb_GP_cNotPt_nv_V4 : - case Hexagon::STb_GP_cdnPt_nv_V4 : - case Hexagon::STb_GP_cdnNotPt_nv_V4 : - case Hexagon::STh_GP_cPt_nv_V4 : - case Hexagon::STh_GP_cNotPt_nv_V4 : - case Hexagon::STh_GP_cdnPt_nv_V4 : - case Hexagon::STh_GP_cdnNotPt_nv_V4 : - case Hexagon::STw_GP_cPt_nv_V4 : - case Hexagon::STw_GP_cNotPt_nv_V4 : - case Hexagon::STw_GP_cdnPt_nv_V4 : - case Hexagon::STw_GP_cdnNotPt_nv_V4 : - - // TFR_FI - case Hexagon::TFR_FI_immext_V4: - return true; - - default: - return false; - } - return false; -} - -bool HexagonInstrInfo::isNewValueJump(const MachineInstr *MI) const { - switch (MI->getOpcode()) { - // JMP_EQri - case Hexagon::JMP_EQriPt_nv_V4: - case Hexagon::JMP_EQriPnt_nv_V4: - case Hexagon::JMP_EQriNotPt_nv_V4: - case Hexagon::JMP_EQriNotPnt_nv_V4: - case Hexagon::JMP_EQriPt_ie_nv_V4: - case Hexagon::JMP_EQriPnt_ie_nv_V4: - case Hexagon::JMP_EQriNotPt_ie_nv_V4: - case Hexagon::JMP_EQriNotPnt_ie_nv_V4: - - // JMP_EQri - with -1 - case Hexagon::JMP_EQriPtneg_nv_V4: - case Hexagon::JMP_EQriPntneg_nv_V4: - case Hexagon::JMP_EQriNotPtneg_nv_V4: - case Hexagon::JMP_EQriNotPntneg_nv_V4: - case Hexagon::JMP_EQriPtneg_ie_nv_V4: - case Hexagon::JMP_EQriPntneg_ie_nv_V4: - case Hexagon::JMP_EQriNotPtneg_ie_nv_V4: - case Hexagon::JMP_EQriNotPntneg_ie_nv_V4: - - // JMP_EQrr - case Hexagon::JMP_EQrrPt_nv_V4: - case Hexagon::JMP_EQrrPnt_nv_V4: - case Hexagon::JMP_EQrrNotPt_nv_V4: - case Hexagon::JMP_EQrrNotPnt_nv_V4: - case Hexagon::JMP_EQrrPt_ie_nv_V4: - case Hexagon::JMP_EQrrPnt_ie_nv_V4: - case Hexagon::JMP_EQrrNotPt_ie_nv_V4: - case Hexagon::JMP_EQrrNotPnt_ie_nv_V4: - - // JMP_GTri - case Hexagon::JMP_GTriPt_nv_V4: - case Hexagon::JMP_GTriPnt_nv_V4: - case Hexagon::JMP_GTriNotPt_nv_V4: - case Hexagon::JMP_GTriNotPnt_nv_V4: - case Hexagon::JMP_GTriPt_ie_nv_V4: - case Hexagon::JMP_GTriPnt_ie_nv_V4: - case Hexagon::JMP_GTriNotPt_ie_nv_V4: - case Hexagon::JMP_GTriNotPnt_ie_nv_V4: - - // JMP_GTri - with -1 - case Hexagon::JMP_GTriPtneg_nv_V4: - case Hexagon::JMP_GTriPntneg_nv_V4: - case Hexagon::JMP_GTriNotPtneg_nv_V4: - case Hexagon::JMP_GTriNotPntneg_nv_V4: - case Hexagon::JMP_GTriPtneg_ie_nv_V4: - case Hexagon::JMP_GTriPntneg_ie_nv_V4: - case Hexagon::JMP_GTriNotPtneg_ie_nv_V4: - case Hexagon::JMP_GTriNotPntneg_ie_nv_V4: - - // JMP_GTrr - case Hexagon::JMP_GTrrPt_nv_V4: - case Hexagon::JMP_GTrrPnt_nv_V4: - case Hexagon::JMP_GTrrNotPt_nv_V4: - case Hexagon::JMP_GTrrNotPnt_nv_V4: - case Hexagon::JMP_GTrrPt_ie_nv_V4: - case Hexagon::JMP_GTrrPnt_ie_nv_V4: - case Hexagon::JMP_GTrrNotPt_ie_nv_V4: - case Hexagon::JMP_GTrrNotPnt_ie_nv_V4: - - // JMP_GTrrdn - case Hexagon::JMP_GTrrdnPt_nv_V4: - case Hexagon::JMP_GTrrdnPnt_nv_V4: - case Hexagon::JMP_GTrrdnNotPt_nv_V4: - case Hexagon::JMP_GTrrdnNotPnt_nv_V4: - case Hexagon::JMP_GTrrdnPt_ie_nv_V4: - case Hexagon::JMP_GTrrdnPnt_ie_nv_V4: - case Hexagon::JMP_GTrrdnNotPt_ie_nv_V4: - case Hexagon::JMP_GTrrdnNotPnt_ie_nv_V4: - - // JMP_GTUri - case Hexagon::JMP_GTUriPt_nv_V4: - case Hexagon::JMP_GTUriPnt_nv_V4: - case 
Hexagon::JMP_GTUriNotPt_nv_V4: - case Hexagon::JMP_GTUriNotPnt_nv_V4: - case Hexagon::JMP_GTUriPt_ie_nv_V4: - case Hexagon::JMP_GTUriPnt_ie_nv_V4: - case Hexagon::JMP_GTUriNotPt_ie_nv_V4: - case Hexagon::JMP_GTUriNotPnt_ie_nv_V4: - - // JMP_GTUrr - case Hexagon::JMP_GTUrrPt_nv_V4: - case Hexagon::JMP_GTUrrPnt_nv_V4: - case Hexagon::JMP_GTUrrNotPt_nv_V4: - case Hexagon::JMP_GTUrrNotPnt_nv_V4: - case Hexagon::JMP_GTUrrPt_ie_nv_V4: - case Hexagon::JMP_GTUrrPnt_ie_nv_V4: - case Hexagon::JMP_GTUrrNotPt_ie_nv_V4: - case Hexagon::JMP_GTUrrNotPnt_ie_nv_V4: - - // JMP_GTUrrdn - case Hexagon::JMP_GTUrrdnPt_nv_V4: - case Hexagon::JMP_GTUrrdnPnt_nv_V4: - case Hexagon::JMP_GTUrrdnNotPt_nv_V4: - case Hexagon::JMP_GTUrrdnNotPnt_nv_V4: - case Hexagon::JMP_GTUrrdnPt_ie_nv_V4: - case Hexagon::JMP_GTUrrdnPnt_ie_nv_V4: - case Hexagon::JMP_GTUrrdnNotPt_ie_nv_V4: - case Hexagon::JMP_GTUrrdnNotPnt_ie_nv_V4: - return true; - - default: - return false; - } - return false; -} - -unsigned HexagonInstrInfo::getImmExtForm(const MachineInstr* MI) const { - switch(MI->getOpcode()) { - default: llvm_unreachable("Unknown type of instruction"); - - // JMP_EQri - case Hexagon::JMP_EQriPt_nv_V4: - return Hexagon::JMP_EQriPt_ie_nv_V4; - case Hexagon::JMP_EQriNotPt_nv_V4: - return Hexagon::JMP_EQriNotPt_ie_nv_V4; - case Hexagon::JMP_EQriPnt_nv_V4: - return Hexagon::JMP_EQriPnt_ie_nv_V4; - case Hexagon::JMP_EQriNotPnt_nv_V4: - return Hexagon::JMP_EQriNotPnt_ie_nv_V4; - - // JMP_EQri -- with -1 - case Hexagon::JMP_EQriPtneg_nv_V4: - return Hexagon::JMP_EQriPtneg_ie_nv_V4; - case Hexagon::JMP_EQriNotPtneg_nv_V4: - return Hexagon::JMP_EQriNotPtneg_ie_nv_V4; - case Hexagon::JMP_EQriPntneg_nv_V4: - return Hexagon::JMP_EQriPntneg_ie_nv_V4; - case Hexagon::JMP_EQriNotPntneg_nv_V4: - return Hexagon::JMP_EQriNotPntneg_ie_nv_V4; - - // JMP_EQrr - case Hexagon::JMP_EQrrPt_nv_V4: - return Hexagon::JMP_EQrrPt_ie_nv_V4; - case Hexagon::JMP_EQrrNotPt_nv_V4: - return Hexagon::JMP_EQrrNotPt_ie_nv_V4; - case Hexagon::JMP_EQrrPnt_nv_V4: - return Hexagon::JMP_EQrrPnt_ie_nv_V4; - case Hexagon::JMP_EQrrNotPnt_nv_V4: - return Hexagon::JMP_EQrrNotPnt_ie_nv_V4; - - // JMP_GTri - case Hexagon::JMP_GTriPt_nv_V4: - return Hexagon::JMP_GTriPt_ie_nv_V4; - case Hexagon::JMP_GTriNotPt_nv_V4: - return Hexagon::JMP_GTriNotPt_ie_nv_V4; - case Hexagon::JMP_GTriPnt_nv_V4: - return Hexagon::JMP_GTriPnt_ie_nv_V4; - case Hexagon::JMP_GTriNotPnt_nv_V4: - return Hexagon::JMP_GTriNotPnt_ie_nv_V4; - - // JMP_GTri -- with -1 - case Hexagon::JMP_GTriPtneg_nv_V4: - return Hexagon::JMP_GTriPtneg_ie_nv_V4; - case Hexagon::JMP_GTriNotPtneg_nv_V4: - return Hexagon::JMP_GTriNotPtneg_ie_nv_V4; - case Hexagon::JMP_GTriPntneg_nv_V4: - return Hexagon::JMP_GTriPntneg_ie_nv_V4; - case Hexagon::JMP_GTriNotPntneg_nv_V4: - return Hexagon::JMP_GTriNotPntneg_ie_nv_V4; - - // JMP_GTrr - case Hexagon::JMP_GTrrPt_nv_V4: - return Hexagon::JMP_GTrrPt_ie_nv_V4; - case Hexagon::JMP_GTrrNotPt_nv_V4: - return Hexagon::JMP_GTrrNotPt_ie_nv_V4; - case Hexagon::JMP_GTrrPnt_nv_V4: - return Hexagon::JMP_GTrrPnt_ie_nv_V4; - case Hexagon::JMP_GTrrNotPnt_nv_V4: - return Hexagon::JMP_GTrrNotPnt_ie_nv_V4; - - // JMP_GTrrdn - case Hexagon::JMP_GTrrdnPt_nv_V4: - return Hexagon::JMP_GTrrdnPt_ie_nv_V4; - case Hexagon::JMP_GTrrdnNotPt_nv_V4: - return Hexagon::JMP_GTrrdnNotPt_ie_nv_V4; - case Hexagon::JMP_GTrrdnPnt_nv_V4: - return Hexagon::JMP_GTrrdnPnt_ie_nv_V4; - case Hexagon::JMP_GTrrdnNotPnt_nv_V4: - return Hexagon::JMP_GTrrdnNotPnt_ie_nv_V4; - - // JMP_GTUri - case Hexagon::JMP_GTUriPt_nv_V4: - return 
Hexagon::JMP_GTUriPt_ie_nv_V4; - case Hexagon::JMP_GTUriNotPt_nv_V4: - return Hexagon::JMP_GTUriNotPt_ie_nv_V4; - case Hexagon::JMP_GTUriPnt_nv_V4: - return Hexagon::JMP_GTUriPnt_ie_nv_V4; - case Hexagon::JMP_GTUriNotPnt_nv_V4: - return Hexagon::JMP_GTUriNotPnt_ie_nv_V4; - - // JMP_GTUrr - case Hexagon::JMP_GTUrrPt_nv_V4: - return Hexagon::JMP_GTUrrPt_ie_nv_V4; - case Hexagon::JMP_GTUrrNotPt_nv_V4: - return Hexagon::JMP_GTUrrNotPt_ie_nv_V4; - case Hexagon::JMP_GTUrrPnt_nv_V4: - return Hexagon::JMP_GTUrrPnt_ie_nv_V4; - case Hexagon::JMP_GTUrrNotPnt_nv_V4: - return Hexagon::JMP_GTUrrNotPnt_ie_nv_V4; - - // JMP_GTUrrdn - case Hexagon::JMP_GTUrrdnPt_nv_V4: - return Hexagon::JMP_GTUrrdnPt_ie_nv_V4; - case Hexagon::JMP_GTUrrdnNotPt_nv_V4: - return Hexagon::JMP_GTUrrdnNotPt_ie_nv_V4; - case Hexagon::JMP_GTUrrdnPnt_nv_V4: - return Hexagon::JMP_GTUrrdnPnt_ie_nv_V4; - case Hexagon::JMP_GTUrrdnNotPnt_nv_V4: - return Hexagon::JMP_GTUrrdnNotPnt_ie_nv_V4; - case Hexagon::TFR_FI: - return Hexagon::TFR_FI_immext_V4; - - case Hexagon::MEMw_ADDSUBi_indexed_MEM_V4 : - case Hexagon::MEMw_ADDi_indexed_MEM_V4 : - case Hexagon::MEMw_SUBi_indexed_MEM_V4 : - case Hexagon::MEMw_ADDr_indexed_MEM_V4 : - case Hexagon::MEMw_SUBr_indexed_MEM_V4 : - case Hexagon::MEMw_ANDr_indexed_MEM_V4 : - case Hexagon::MEMw_ORr_indexed_MEM_V4 : - case Hexagon::MEMw_ADDSUBi_MEM_V4 : - case Hexagon::MEMw_ADDi_MEM_V4 : - case Hexagon::MEMw_SUBi_MEM_V4 : - case Hexagon::MEMw_ADDr_MEM_V4 : - case Hexagon::MEMw_SUBr_MEM_V4 : - case Hexagon::MEMw_ANDr_MEM_V4 : - case Hexagon::MEMw_ORr_MEM_V4 : - case Hexagon::MEMh_ADDSUBi_indexed_MEM_V4 : - case Hexagon::MEMh_ADDi_indexed_MEM_V4 : - case Hexagon::MEMh_SUBi_indexed_MEM_V4 : - case Hexagon::MEMh_ADDr_indexed_MEM_V4 : - case Hexagon::MEMh_SUBr_indexed_MEM_V4 : - case Hexagon::MEMh_ANDr_indexed_MEM_V4 : - case Hexagon::MEMh_ORr_indexed_MEM_V4 : - case Hexagon::MEMh_ADDSUBi_MEM_V4 : - case Hexagon::MEMh_ADDi_MEM_V4 : - case Hexagon::MEMh_SUBi_MEM_V4 : - case Hexagon::MEMh_ADDr_MEM_V4 : - case Hexagon::MEMh_SUBr_MEM_V4 : - case Hexagon::MEMh_ANDr_MEM_V4 : - case Hexagon::MEMh_ORr_MEM_V4 : - case Hexagon::MEMb_ADDSUBi_indexed_MEM_V4 : - case Hexagon::MEMb_ADDi_indexed_MEM_V4 : - case Hexagon::MEMb_SUBi_indexed_MEM_V4 : - case Hexagon::MEMb_ADDr_indexed_MEM_V4 : - case Hexagon::MEMb_SUBr_indexed_MEM_V4 : - case Hexagon::MEMb_ANDr_indexed_MEM_V4 : - case Hexagon::MEMb_ORr_indexed_MEM_V4 : - case Hexagon::MEMb_ADDSUBi_MEM_V4 : - case Hexagon::MEMb_ADDi_MEM_V4 : - case Hexagon::MEMb_SUBi_MEM_V4 : - case Hexagon::MEMb_ADDr_MEM_V4 : - case Hexagon::MEMb_SUBr_MEM_V4 : - case Hexagon::MEMb_ANDr_MEM_V4 : - case Hexagon::MEMb_ORr_MEM_V4 : - llvm_unreachable("Needs implementing"); - } -} - -unsigned HexagonInstrInfo::getNormalBranchForm(const MachineInstr* MI) const { - switch(MI->getOpcode()) { - default: llvm_unreachable("Unknown type of jump instruction"); - - // JMP_EQri - case Hexagon::JMP_EQriPt_ie_nv_V4: - return Hexagon::JMP_EQriPt_nv_V4; - case Hexagon::JMP_EQriNotPt_ie_nv_V4: - return Hexagon::JMP_EQriNotPt_nv_V4; - case Hexagon::JMP_EQriPnt_ie_nv_V4: - return Hexagon::JMP_EQriPnt_nv_V4; - case Hexagon::JMP_EQriNotPnt_ie_nv_V4: - return Hexagon::JMP_EQriNotPnt_nv_V4; - - // JMP_EQri -- with -1 - case Hexagon::JMP_EQriPtneg_ie_nv_V4: - return Hexagon::JMP_EQriPtneg_nv_V4; - case Hexagon::JMP_EQriNotPtneg_ie_nv_V4: - return Hexagon::JMP_EQriNotPtneg_nv_V4; - case Hexagon::JMP_EQriPntneg_ie_nv_V4: - return Hexagon::JMP_EQriPntneg_nv_V4; - case Hexagon::JMP_EQriNotPntneg_ie_nv_V4: - return 
Hexagon::JMP_EQriNotPntneg_nv_V4; - - // JMP_EQrr - case Hexagon::JMP_EQrrPt_ie_nv_V4: - return Hexagon::JMP_EQrrPt_nv_V4; - case Hexagon::JMP_EQrrNotPt_ie_nv_V4: - return Hexagon::JMP_EQrrNotPt_nv_V4; - case Hexagon::JMP_EQrrPnt_ie_nv_V4: - return Hexagon::JMP_EQrrPnt_nv_V4; - case Hexagon::JMP_EQrrNotPnt_ie_nv_V4: - return Hexagon::JMP_EQrrNotPnt_nv_V4; - - // JMP_GTri - case Hexagon::JMP_GTriPt_ie_nv_V4: - return Hexagon::JMP_GTriPt_nv_V4; - case Hexagon::JMP_GTriNotPt_ie_nv_V4: - return Hexagon::JMP_GTriNotPt_nv_V4; - case Hexagon::JMP_GTriPnt_ie_nv_V4: - return Hexagon::JMP_GTriPnt_nv_V4; - case Hexagon::JMP_GTriNotPnt_ie_nv_V4: - return Hexagon::JMP_GTriNotPnt_nv_V4; - - // JMP_GTri -- with -1 - case Hexagon::JMP_GTriPtneg_ie_nv_V4: - return Hexagon::JMP_GTriPtneg_nv_V4; - case Hexagon::JMP_GTriNotPtneg_ie_nv_V4: - return Hexagon::JMP_GTriNotPtneg_nv_V4; - case Hexagon::JMP_GTriPntneg_ie_nv_V4: - return Hexagon::JMP_GTriPntneg_nv_V4; - case Hexagon::JMP_GTriNotPntneg_ie_nv_V4: - return Hexagon::JMP_GTriNotPntneg_nv_V4; - - // JMP_GTrr - case Hexagon::JMP_GTrrPt_ie_nv_V4: - return Hexagon::JMP_GTrrPt_nv_V4; - case Hexagon::JMP_GTrrNotPt_ie_nv_V4: - return Hexagon::JMP_GTrrNotPt_nv_V4; - case Hexagon::JMP_GTrrPnt_ie_nv_V4: - return Hexagon::JMP_GTrrPnt_nv_V4; - case Hexagon::JMP_GTrrNotPnt_ie_nv_V4: - return Hexagon::JMP_GTrrNotPnt_nv_V4; - - // JMP_GTrrdn - case Hexagon::JMP_GTrrdnPt_ie_nv_V4: - return Hexagon::JMP_GTrrdnPt_nv_V4; - case Hexagon::JMP_GTrrdnNotPt_ie_nv_V4: - return Hexagon::JMP_GTrrdnNotPt_nv_V4; - case Hexagon::JMP_GTrrdnPnt_ie_nv_V4: - return Hexagon::JMP_GTrrdnPnt_nv_V4; - case Hexagon::JMP_GTrrdnNotPnt_ie_nv_V4: - return Hexagon::JMP_GTrrdnNotPnt_nv_V4; - - // JMP_GTUri - case Hexagon::JMP_GTUriPt_ie_nv_V4: - return Hexagon::JMP_GTUriPt_nv_V4; - case Hexagon::JMP_GTUriNotPt_ie_nv_V4: - return Hexagon::JMP_GTUriNotPt_nv_V4; - case Hexagon::JMP_GTUriPnt_ie_nv_V4: - return Hexagon::JMP_GTUriPnt_nv_V4; - case Hexagon::JMP_GTUriNotPnt_ie_nv_V4: - return Hexagon::JMP_GTUriNotPnt_nv_V4; - - // JMP_GTUrr - case Hexagon::JMP_GTUrrPt_ie_nv_V4: - return Hexagon::JMP_GTUrrPt_nv_V4; - case Hexagon::JMP_GTUrrNotPt_ie_nv_V4: - return Hexagon::JMP_GTUrrNotPt_nv_V4; - case Hexagon::JMP_GTUrrPnt_ie_nv_V4: - return Hexagon::JMP_GTUrrPnt_nv_V4; - case Hexagon::JMP_GTUrrNotPnt_ie_nv_V4: - return Hexagon::JMP_GTUrrNotPnt_nv_V4; - - // JMP_GTUrrdn - case Hexagon::JMP_GTUrrdnPt_ie_nv_V4: - return Hexagon::JMP_GTUrrdnPt_nv_V4; - case Hexagon::JMP_GTUrrdnNotPt_ie_nv_V4: - return Hexagon::JMP_GTUrrdnNotPt_nv_V4; - case Hexagon::JMP_GTUrrdnPnt_ie_nv_V4: - return Hexagon::JMP_GTUrrdnPnt_nv_V4; - case Hexagon::JMP_GTUrrdnNotPnt_ie_nv_V4: - return Hexagon::JMP_GTUrrdnNotPnt_nv_V4; - } -} - - -bool HexagonInstrInfo::isNewValueStore(const MachineInstr *MI) const { - switch (MI->getOpcode()) { - - // Store Byte - case Hexagon::STrib_nv_V4: - case Hexagon::STrib_indexed_nv_V4: - case Hexagon::STrib_indexed_shl_nv_V4: - case Hexagon::STrib_shl_nv_V4: - case Hexagon::STrib_GP_nv_V4: - case Hexagon::STb_GP_nv_V4: - case Hexagon::POST_STbri_nv_V4: - case Hexagon::STrib_cPt_nv_V4: - case Hexagon::STrib_cdnPt_nv_V4: - case Hexagon::STrib_cNotPt_nv_V4: - case Hexagon::STrib_cdnNotPt_nv_V4: - case Hexagon::STrib_indexed_cPt_nv_V4: - case Hexagon::STrib_indexed_cdnPt_nv_V4: - case Hexagon::STrib_indexed_cNotPt_nv_V4: - case Hexagon::STrib_indexed_cdnNotPt_nv_V4: - case Hexagon::STrib_indexed_shl_cPt_nv_V4: - case Hexagon::STrib_indexed_shl_cdnPt_nv_V4: - case Hexagon::STrib_indexed_shl_cNotPt_nv_V4: - case 
Hexagon::STrib_indexed_shl_cdnNotPt_nv_V4: - case Hexagon::POST_STbri_cPt_nv_V4: - case Hexagon::POST_STbri_cdnPt_nv_V4: - case Hexagon::POST_STbri_cNotPt_nv_V4: - case Hexagon::POST_STbri_cdnNotPt_nv_V4: - case Hexagon::STb_GP_cPt_nv_V4: - case Hexagon::STb_GP_cNotPt_nv_V4: - case Hexagon::STb_GP_cdnPt_nv_V4: - case Hexagon::STb_GP_cdnNotPt_nv_V4: - case Hexagon::STrib_GP_cPt_nv_V4: - case Hexagon::STrib_GP_cNotPt_nv_V4: - case Hexagon::STrib_GP_cdnPt_nv_V4: - case Hexagon::STrib_GP_cdnNotPt_nv_V4: - case Hexagon::STrib_abs_nv_V4: - case Hexagon::STrib_abs_cPt_nv_V4: - case Hexagon::STrib_abs_cdnPt_nv_V4: - case Hexagon::STrib_abs_cNotPt_nv_V4: - case Hexagon::STrib_abs_cdnNotPt_nv_V4: - case Hexagon::STrib_imm_abs_nv_V4: - case Hexagon::STrib_imm_abs_cPt_nv_V4: - case Hexagon::STrib_imm_abs_cdnPt_nv_V4: - case Hexagon::STrib_imm_abs_cNotPt_nv_V4: - case Hexagon::STrib_imm_abs_cdnNotPt_nv_V4: - - // Store Halfword - case Hexagon::STrih_nv_V4: - case Hexagon::STrih_indexed_nv_V4: - case Hexagon::STrih_indexed_shl_nv_V4: - case Hexagon::STrih_shl_nv_V4: - case Hexagon::STrih_GP_nv_V4: - case Hexagon::STh_GP_nv_V4: - case Hexagon::POST_SThri_nv_V4: - case Hexagon::STrih_cPt_nv_V4: - case Hexagon::STrih_cdnPt_nv_V4: - case Hexagon::STrih_cNotPt_nv_V4: - case Hexagon::STrih_cdnNotPt_nv_V4: - case Hexagon::STrih_indexed_cPt_nv_V4: - case Hexagon::STrih_indexed_cdnPt_nv_V4: - case Hexagon::STrih_indexed_cNotPt_nv_V4: - case Hexagon::STrih_indexed_cdnNotPt_nv_V4: - case Hexagon::STrih_indexed_shl_cPt_nv_V4: - case Hexagon::STrih_indexed_shl_cdnPt_nv_V4: - case Hexagon::STrih_indexed_shl_cNotPt_nv_V4: - case Hexagon::STrih_indexed_shl_cdnNotPt_nv_V4: - case Hexagon::POST_SThri_cPt_nv_V4: - case Hexagon::POST_SThri_cdnPt_nv_V4: - case Hexagon::POST_SThri_cNotPt_nv_V4: - case Hexagon::POST_SThri_cdnNotPt_nv_V4: - case Hexagon::STh_GP_cPt_nv_V4: - case Hexagon::STh_GP_cNotPt_nv_V4: - case Hexagon::STh_GP_cdnPt_nv_V4: - case Hexagon::STh_GP_cdnNotPt_nv_V4: - case Hexagon::STrih_GP_cPt_nv_V4: - case Hexagon::STrih_GP_cNotPt_nv_V4: - case Hexagon::STrih_GP_cdnPt_nv_V4: - case Hexagon::STrih_GP_cdnNotPt_nv_V4: - case Hexagon::STrih_abs_nv_V4: - case Hexagon::STrih_abs_cPt_nv_V4: - case Hexagon::STrih_abs_cdnPt_nv_V4: - case Hexagon::STrih_abs_cNotPt_nv_V4: - case Hexagon::STrih_abs_cdnNotPt_nv_V4: - case Hexagon::STrih_imm_abs_nv_V4: - case Hexagon::STrih_imm_abs_cPt_nv_V4: - case Hexagon::STrih_imm_abs_cdnPt_nv_V4: - case Hexagon::STrih_imm_abs_cNotPt_nv_V4: - case Hexagon::STrih_imm_abs_cdnNotPt_nv_V4: - - // Store Word - case Hexagon::STriw_nv_V4: - case Hexagon::STriw_indexed_nv_V4: - case Hexagon::STriw_indexed_shl_nv_V4: - case Hexagon::STriw_shl_nv_V4: - case Hexagon::STriw_GP_nv_V4: - case Hexagon::STw_GP_nv_V4: - case Hexagon::POST_STwri_nv_V4: - case Hexagon::STriw_cPt_nv_V4: - case Hexagon::STriw_cdnPt_nv_V4: - case Hexagon::STriw_cNotPt_nv_V4: - case Hexagon::STriw_cdnNotPt_nv_V4: - case Hexagon::STriw_indexed_cPt_nv_V4: - case Hexagon::STriw_indexed_cdnPt_nv_V4: - case Hexagon::STriw_indexed_cNotPt_nv_V4: - case Hexagon::STriw_indexed_cdnNotPt_nv_V4: - case Hexagon::STriw_indexed_shl_cPt_nv_V4: - case Hexagon::STriw_indexed_shl_cdnPt_nv_V4: - case Hexagon::STriw_indexed_shl_cNotPt_nv_V4: - case Hexagon::STriw_indexed_shl_cdnNotPt_nv_V4: - case Hexagon::POST_STwri_cPt_nv_V4: - case Hexagon::POST_STwri_cdnPt_nv_V4: - case Hexagon::POST_STwri_cNotPt_nv_V4: - case Hexagon::POST_STwri_cdnNotPt_nv_V4: - case Hexagon::STw_GP_cPt_nv_V4: - case Hexagon::STw_GP_cNotPt_nv_V4: - case 
Hexagon::STw_GP_cdnPt_nv_V4: - case Hexagon::STw_GP_cdnNotPt_nv_V4: - case Hexagon::STriw_GP_cPt_nv_V4: - case Hexagon::STriw_GP_cNotPt_nv_V4: - case Hexagon::STriw_GP_cdnPt_nv_V4: - case Hexagon::STriw_GP_cdnNotPt_nv_V4: - case Hexagon::STriw_abs_nv_V4: - case Hexagon::STriw_abs_cPt_nv_V4: - case Hexagon::STriw_abs_cdnPt_nv_V4: - case Hexagon::STriw_abs_cNotPt_nv_V4: - case Hexagon::STriw_abs_cdnNotPt_nv_V4: - case Hexagon::STriw_imm_abs_nv_V4: - case Hexagon::STriw_imm_abs_cPt_nv_V4: - case Hexagon::STriw_imm_abs_cdnPt_nv_V4: - case Hexagon::STriw_imm_abs_cNotPt_nv_V4: - case Hexagon::STriw_imm_abs_cdnNotPt_nv_V4: - return true; - - default: - return false; - } - return false; -} - -bool HexagonInstrInfo::isPostIncrement (const MachineInstr* MI) const { - switch (MI->getOpcode()) - { - // Load Byte - case Hexagon::POST_LDrib: - case Hexagon::POST_LDrib_cPt: - case Hexagon::POST_LDrib_cNotPt: - case Hexagon::POST_LDrib_cdnPt_V4: - case Hexagon::POST_LDrib_cdnNotPt_V4: - - // Load unsigned byte - case Hexagon::POST_LDriub: - case Hexagon::POST_LDriub_cPt: - case Hexagon::POST_LDriub_cNotPt: - case Hexagon::POST_LDriub_cdnPt_V4: - case Hexagon::POST_LDriub_cdnNotPt_V4: - - // Load halfword - case Hexagon::POST_LDrih: - case Hexagon::POST_LDrih_cPt: - case Hexagon::POST_LDrih_cNotPt: - case Hexagon::POST_LDrih_cdnPt_V4: - case Hexagon::POST_LDrih_cdnNotPt_V4: - - // Load unsigned halfword - case Hexagon::POST_LDriuh: - case Hexagon::POST_LDriuh_cPt: - case Hexagon::POST_LDriuh_cNotPt: - case Hexagon::POST_LDriuh_cdnPt_V4: - case Hexagon::POST_LDriuh_cdnNotPt_V4: - - // Load word - case Hexagon::POST_LDriw: - case Hexagon::POST_LDriw_cPt: - case Hexagon::POST_LDriw_cNotPt: - case Hexagon::POST_LDriw_cdnPt_V4: - case Hexagon::POST_LDriw_cdnNotPt_V4: - - // Load double word - case Hexagon::POST_LDrid: - case Hexagon::POST_LDrid_cPt: - case Hexagon::POST_LDrid_cNotPt: - case Hexagon::POST_LDrid_cdnPt_V4: - case Hexagon::POST_LDrid_cdnNotPt_V4: - - // Store byte - case Hexagon::POST_STbri: - case Hexagon::POST_STbri_cPt: - case Hexagon::POST_STbri_cNotPt: - case Hexagon::POST_STbri_cdnPt_V4: - case Hexagon::POST_STbri_cdnNotPt_V4: - - // Store halfword - case Hexagon::POST_SThri: - case Hexagon::POST_SThri_cPt: - case Hexagon::POST_SThri_cNotPt: - case Hexagon::POST_SThri_cdnPt_V4: - case Hexagon::POST_SThri_cdnNotPt_V4: - - // Store word - case Hexagon::POST_STwri: - case Hexagon::POST_STwri_cPt: - case Hexagon::POST_STwri_cNotPt: - case Hexagon::POST_STwri_cdnPt_V4: - case Hexagon::POST_STwri_cdnNotPt_V4: - - // Store double word - case Hexagon::POST_STdri: - case Hexagon::POST_STdri_cPt: - case Hexagon::POST_STdri_cNotPt: - case Hexagon::POST_STdri_cdnPt_V4: - case Hexagon::POST_STdri_cdnNotPt_V4: - return true; - - default: - return false; - } -} - -bool HexagonInstrInfo::isSaveCalleeSavedRegsCall(const MachineInstr *MI) const { - return MI->getOpcode() == Hexagon::SAVE_REGISTERS_CALL_V4; -} bool HexagonInstrInfo::isPredicable(MachineInstr *MI) const { bool isPred = MI->getDesc().isPredicable(); @@ -2445,24 +1587,6 @@ isSpillPredRegOp(const MachineInstr *MI) const { return false; } -bool HexagonInstrInfo:: -isConditionalTransfer (const MachineInstr *MI) const { - switch (MI->getOpcode()) { - case Hexagon::TFR_cPt: - case Hexagon::TFR_cNotPt: - case Hexagon::TFRI_cPt: - case Hexagon::TFRI_cNotPt: - case Hexagon::TFR_cdnPt: - case Hexagon::TFR_cdnNotPt: - case Hexagon::TFRI_cdnPt: - case Hexagon::TFRI_cdnNotPt: - return true; - - default: - return false; - } - return false; -} bool 
HexagonInstrInfo::isConditionalALU32 (const MachineInstr* MI) const {
   const HexagonRegisterInfo& QRI = getRegisterInfo();
@@ -2502,6 +1626,7 @@ bool HexagonInstrInfo::isConditionalALU32 (const MachineInstr* MI) const {
   }
 }

+
 bool HexagonInstrInfo::
 isConditionalLoad (const MachineInstr* MI) const {
   const HexagonRegisterInfo& QRI = getRegisterInfo();
@@ -2575,136 +1700,6 @@ isConditionalLoad (const MachineInstr* MI) const {
   }
 }
-// Returns true if an instruction is a conditional store.
-//
-// Note: It doesn't include conditional new-value stores as they can't be
-// converted to .new predicate.
-//
-//          p.new NV store [ if(p0.new)memw(R0+#0)=R2.new ]
-//                ^           ^
-//               /  \      (not OK. it will cause new-value store to be
-//              /    X       conditional on p0.new while R2 producer is
-//             /      \      on p0)
-//            /        \.
-//     p.new store                    p.old NV store
-// [if(p0.new)memw(R0+#0)=R2]  [if(p0)memw(R0+#0)=R2.new]
-//              ^                 ^
-//               \                /
-//                \              /
-//                 \            /
-//                   p.old store
-//             [if (p0)memw(R0+#0)=R2]
-//
-// The above diagram shows the steps involved in the conversion of a predicated
-// store instruction to its .new predicated new-value form.
-//
-// The following set of instructions further explains the scenario where
-// conditional new-value store becomes invalid when promoted to .new predicate
-// form.
-//
-// { 1) if (p0) r0 = add(r1, r2)
-//   2) p0 = cmp.eq(r3, #0) }
-//
-// 3) if (p0) memb(r1+#0) = r0 --> this instruction can't be grouped with
-// the first two instructions because in instr 1, r0 is conditional on old value
-// of p0 but its use in instr 3 is conditional on p0 modified by instr 2 which
-// is not valid for new-value stores.
-bool HexagonInstrInfo::
-isConditionalStore (const MachineInstr* MI) const {
-  const HexagonRegisterInfo& QRI = getRegisterInfo();
-  switch (MI->getOpcode())
-  {
-    case Hexagon::STrib_imm_cPt_V4 :
-    case Hexagon::STrib_imm_cNotPt_V4 :
-    case Hexagon::STrib_indexed_shl_cPt_V4 :
-    case Hexagon::STrib_indexed_shl_cNotPt_V4 :
-    case Hexagon::STrib_cPt :
-    case Hexagon::STrib_cNotPt :
-    case Hexagon::POST_STbri_cPt :
-    case Hexagon::POST_STbri_cNotPt :
-    case Hexagon::STrid_indexed_cPt :
-    case Hexagon::STrid_indexed_cNotPt :
-    case Hexagon::STrid_indexed_shl_cPt_V4 :
-    case Hexagon::POST_STdri_cPt :
-    case Hexagon::POST_STdri_cNotPt :
-    case Hexagon::STrih_cPt :
-    case Hexagon::STrih_cNotPt :
-    case Hexagon::STrih_indexed_cPt :
-    case Hexagon::STrih_indexed_cNotPt :
-    case Hexagon::STrih_imm_cPt_V4 :
-    case Hexagon::STrih_imm_cNotPt_V4 :
-    case Hexagon::STrih_indexed_shl_cPt_V4 :
-    case Hexagon::STrih_indexed_shl_cNotPt_V4 :
-    case Hexagon::POST_SThri_cPt :
-    case Hexagon::POST_SThri_cNotPt :
-    case Hexagon::STriw_cPt :
-    case Hexagon::STriw_cNotPt :
-    case Hexagon::STriw_indexed_cPt :
-    case Hexagon::STriw_indexed_cNotPt :
-    case Hexagon::STriw_imm_cPt_V4 :
-    case Hexagon::STriw_imm_cNotPt_V4 :
-    case Hexagon::STriw_indexed_shl_cPt_V4 :
-    case Hexagon::STriw_indexed_shl_cNotPt_V4 :
-    case Hexagon::POST_STwri_cPt :
-    case Hexagon::POST_STwri_cNotPt :
-      return QRI.Subtarget.hasV4TOps();
-
-    // V4 global address store before promoting to dot new.
-    case Hexagon::STrid_GP_cPt_V4 :
-    case Hexagon::STrid_GP_cNotPt_V4 :
-    case Hexagon::STrib_GP_cPt_V4 :
-    case Hexagon::STrib_GP_cNotPt_V4 :
-    case Hexagon::STrih_GP_cPt_V4 :
-    case Hexagon::STrih_GP_cNotPt_V4 :
-    case Hexagon::STriw_GP_cPt_V4 :
-    case Hexagon::STriw_GP_cNotPt_V4 :
-    case Hexagon::STd_GP_cPt_V4 :
-    case Hexagon::STd_GP_cNotPt_V4 :
-    case Hexagon::STb_GP_cPt_V4 :
-    case Hexagon::STb_GP_cNotPt_V4 :
-    case Hexagon::STh_GP_cPt_V4 :
-    case Hexagon::STh_GP_cNotPt_V4 :
-    case Hexagon::STw_GP_cPt_V4 :
-    case Hexagon::STw_GP_cNotPt_V4 :
-      return QRI.Subtarget.hasV4TOps();
-
-    // Predicated new value stores (i.e. if (p0) memw(..)=r0.new) are excluded
-    // from the "Conditional Store" list. Because a predicated new value store
-    // would NOT be promoted to a double dot new store. See diagram below:
-    // This function returns yes for those stores that are predicated but not
-    // yet promoted to predicate dot new instructions.
-    //
-    //                          +---------------------+
-    //                   /-----| if (p0) memw(..)=r0 |---------\
-    //                  ||     +---------------------+        ||
-    //         promote  ||       /\       /\                  ||  promote
-    //                  ||      /||\     /||\                 ||
-    //                 \||/    demote     ||                 \||/
-    //                  \/       ||       ||                   \/
-    //      +-------------------------+   ||   +-------------------------+
-    //      | if (p0.new) memw(..)=r0 |   ||   | if (p0) memw(..)=r0.new |
-    //      +-------------------------+   ||   +-------------------------+
-    //                       ||           ||          ||
-    //                       ||         demote       \||/
-    //                     promote        ||          \/   NOT possible
-    //                       ||           ||          /\
-    //                      \||/          ||         /||\
-    //                       \/           ||          ||
-    //                      +-----------------------------+
-    //                      | if (p0.new) memw(..)=r0.new |
-    //                      +-----------------------------+
-    //                           Double Dot New Store
-    //
-
-    default:
-      return false;
-
-  }
-  return false;
-}
-
-
-
 DFAPacketizer *HexagonInstrInfo::
 CreateTargetScheduleState(const TargetMachine *TM,
                           const ScheduleDAG *DAG) const {
diff --git a/lib/Target/Hexagon/HexagonInstrInfo.h b/lib/Target/Hexagon/HexagonInstrInfo.h
index 6a45871b67e2..730687036c81 100644
--- a/lib/Target/Hexagon/HexagonInstrInfo.h
+++ b/lib/Target/Hexagon/HexagonInstrInfo.h
@@ -160,20 +160,10 @@ public:
   bool isS8_Immediate(const int value) const;
   bool isS6_Immediate(const int value) const;
 
-  bool isSaveCalleeSavedRegsCall(const MachineInstr* MI) const;
-  bool isConditionalTransfer(const MachineInstr* MI) const;
   bool isConditionalALU32 (const MachineInstr* MI) const;
   bool isConditionalLoad (const MachineInstr* MI) const;
-  bool isConditionalStore(const MachineInstr* MI) const;
   bool isDeallocRet(const MachineInstr *MI) const;
   unsigned getInvertedPredicatedOpcode(const int Opc) const;
-  bool isExtendable(const MachineInstr* MI) const;
-  bool isExtended(const MachineInstr* MI) const;
-  bool isPostIncrement(const MachineInstr* MI) const;
-  bool isNewValueStore(const MachineInstr* MI) const;
-  bool isNewValueJump(const MachineInstr* MI) const;
-  unsigned getImmExtForm(const MachineInstr* MI) const;
-  unsigned getNormalBranchForm(const MachineInstr* MI) const;
 
 private:
   int getMatchingCondBranchOpcode(int Opc, bool sense) const;
diff --git a/lib/Target/Hexagon/HexagonInstrInfo.td b/lib/Target/Hexagon/HexagonInstrInfo.td
index fd5adef0f63f..b563ac3c613d 100644
--- a/lib/Target/Hexagon/HexagonInstrInfo.td
+++ b/lib/Target/Hexagon/HexagonInstrInfo.td
@@ -3046,7 +3046,3 @@ include "HexagonInstrInfoV3.td"
 //===----------------------------------------------------------------------===//
 
 include "HexagonInstrInfoV4.td"
-
-//===----------------------------------------------------------------------===//
-// V4 Instructions -
-//===----------------------------------------------------------------------===//
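The comment deleted in the hunk above explains why a conditional new-value store must not be promoted to a .new predicate: the producer of the stored value runs under the old value of p0, while the promoted store would be guarded by p0.new. A minimal C++ sketch of that legality rule follows, for illustration only; it is not LLVM code, and every type and function name in it is hypothetical.

    // Which version of the guarding predicate an instruction in a packet
    // observes: the value from before the packet, or the value produced
    // inside the packet (".new").
    enum class PredVersion { Old, New };

    struct ToyInstr {
      bool GuardedByP0;     // predicated on p0 at all?
      PredVersion PredVer;  // old p0 or p0.new
    };

    // A new-value store forwards its data operand from a producer in the
    // same packet, so both must agree on which version of p0 they are
    // conditional on; promoting only the store to p0.new breaks that.
    bool canPromoteNVStoreToDotNew(const ToyInstr &Producer,
                                   const ToyInstr &NVStore) {
      if (!Producer.GuardedByP0 || !NVStore.GuardedByP0)
        return true;  // nothing to disagree about
      return Producer.PredVer == NVStore.PredVer;
    }

With the example from the removed comment, the producer "if (p0) r0 = add(r1, r2)" has version Old while the promoted store would have version New, so the check rejects the grouping, as the comment requires.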
diff --git a/lib/Target/Hexagon/HexagonInstrInfoV3.td b/lib/Target/Hexagon/HexagonInstrInfoV3.td
index 2bd6770efd7d..a73897ee3451 100644
--- a/lib/Target/Hexagon/HexagonInstrInfoV3.td
+++ b/lib/Target/Hexagon/HexagonInstrInfoV3.td
@@ -41,11 +41,10 @@ let isCall = 1, neverHasSideEffects = 1,
 }
 
-// Jump to address from register
 // if(p?.new) jumpr:t r?
 let isReturn = 1, isTerminator = 1, isBarrier = 1,
     Defs = [PC], Uses = [R31] in {
-  def JMPR_cdnPt_V3: JRInst<(outs), (ins PredRegs:$src1, IntRegs:$src2),
+  def JMPR_cPnewt: JRInst<(outs), (ins PredRegs:$src1, IntRegs:$src2),
                             "if ($src1.new) jumpr:t $src2",
                             []>, Requires<[HasV3T]>;
 }
@@ -53,7 +52,7 @@ let isReturn = 1, isTerminator = 1, isBarrier = 1,
 // if (!p?.new) jumpr:t r?
 let isReturn = 1, isTerminator = 1, isBarrier = 1,
     Defs = [PC], Uses = [R31] in {
-  def JMPR_cdnNotPt_V3: JRInst<(outs), (ins PredRegs:$src1, IntRegs:$src2),
+  def JMPR_cNotPnewt: JRInst<(outs), (ins PredRegs:$src1, IntRegs:$src2),
                             "if (!$src1.new) jumpr:t $src2",
                             []>, Requires<[HasV3T]>;
 }
@@ -62,7 +61,7 @@ let isReturn = 1, isTerminator = 1, isBarrier = 1,
 // if(p?.new) jumpr:nt r?
 let isReturn = 1, isTerminator = 1, isBarrier = 1,
     Defs = [PC], Uses = [R31] in {
-  def JMPR_cdnPnt: JRInst<(outs), (ins PredRegs:$src1, IntRegs:$src2),
+  def JMPR_cPnewNt: JRInst<(outs), (ins PredRegs:$src1, IntRegs:$src2),
                             "if ($src1.new) jumpr:nt $src2",
                             []>, Requires<[HasV3T]>;
 }
@@ -70,7 +69,7 @@ let isReturn = 1, isTerminator = 1, isBarrier = 1,
 // if (!p?.new) jumpr:nt r?
 let isReturn = 1, isTerminator = 1, isBarrier = 1,
     Defs = [PC], Uses = [R31] in {
-  def JMPR_cdnNotPnt: JRInst<(outs), (ins PredRegs:$src1, IntRegs:$src2),
+  def JMPR_cNotPnewNt: JRInst<(outs), (ins PredRegs:$src1, IntRegs:$src2),
                             "if (!$src1.new) jumpr:nt $src2",
                             []>, Requires<[HasV3T]>;
 }
@@ -87,22 +86,20 @@ let AddedComplexity = 200 in
 def MAXw_dd : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1,
                                                     DoubleRegs:$src2),
             "$dst = max($src2, $src1)",
-            [(set (i64 DoubleRegs:$dst),
-                  (i64 (select (i1 (setlt (i64 DoubleRegs:$src2),
-                                          (i64 DoubleRegs:$src1))),
-                               (i64 DoubleRegs:$src1),
-                               (i64 DoubleRegs:$src2))))]>,
+            [(set DoubleRegs:$dst, (select (i1 (setlt DoubleRegs:$src2,
+                                                      DoubleRegs:$src1)),
+                                           DoubleRegs:$src1,
+                                           DoubleRegs:$src2))]>,
 Requires<[HasV3T]>;
 
 let AddedComplexity = 200 in
 def MINw_dd : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1,
                                                     DoubleRegs:$src2),
             "$dst = min($src2, $src1)",
-            [(set (i64 DoubleRegs:$dst),
-                  (i64 (select (i1 (setgt (i64 DoubleRegs:$src2),
-                                          (i64 DoubleRegs:$src1))),
-                               (i64 DoubleRegs:$src1),
-                               (i64 DoubleRegs:$src2))))]>,
+            [(set DoubleRegs:$dst, (select (i1 (setgt DoubleRegs:$src2,
+                                                      DoubleRegs:$src1)),
+                                           DoubleRegs:$src1,
+                                           DoubleRegs:$src2))]>,
 Requires<[HasV3T]>;
 
 //===----------------------------------------------------------------------===//
@@ -112,25 +109,25 @@ Requires<[HasV3T]>;
 
-//def : Pat <(brcond (i1 (seteq (i32 IntRegs:$src1), 0)), bb:$offset),
-//      (JMP_RegEzt (i32 IntRegs:$src1), bb:$offset)>, Requires<[HasV3T]>;
+//def : Pat <(brcond (i1 (seteq IntRegs:$src1, 0)), bb:$offset),
+//      (JMP_RegEzt IntRegs:$src1, bb:$offset)>, Requires<[HasV3T]>;
 
-//def : Pat <(brcond (i1 (setne (i32 IntRegs:$src1), 0)), bb:$offset),
-//      (JMP_RegNzt (i32 IntRegs:$src1), bb:$offset)>, Requires<[HasV3T]>;
+//def : Pat <(brcond (i1 (setne IntRegs:$src1, 0)), bb:$offset),
+//      (JMP_RegNzt IntRegs:$src1, bb:$offset)>, Requires<[HasV3T]>;
 
-//def : Pat <(brcond (i1 (setle (i32 IntRegs:$src1), 0)), bb:$offset),
-//      (JMP_RegLezt (i32 IntRegs:$src1), bb:$offset)>, Requires<[HasV3T]>;
+//def : 
Pat <(brcond (i1 (setle IntRegs:$src1, 0)), bb:$offset), +// (JMP_RegLezt IntRegs:$src1, bb:$offset)>, Requires<[HasV3T]>; -//def : Pat <(brcond (i1 (setge (i32 IntRegs:$src1), 0)), bb:$offset), -// (JMP_RegGezt (i32 IntRegs:$src1), bb:$offset)>, Requires<[HasV3T]>; +//def : Pat <(brcond (i1 (setge IntRegs:$src1, 0)), bb:$offset), +// (JMP_RegGezt IntRegs:$src1, bb:$offset)>, Requires<[HasV3T]>; -//def : Pat <(brcond (i1 (setgt (i32 IntRegs:$src1), -1)), bb:$offset), -// (JMP_RegGezt (i32 IntRegs:$src1), bb:$offset)>, Requires<[HasV3T]>; +//def : Pat <(brcond (i1 (setgt IntRegs:$src1, -1)), bb:$offset), +// (JMP_RegGezt IntRegs:$src1, bb:$offset)>, Requires<[HasV3T]>; // Map call instruction -def : Pat<(call (i32 IntRegs:$dst)), - (CALLRv3 (i32 IntRegs:$dst))>, Requires<[HasV3T]>; +def : Pat<(call IntRegs:$dst), + (CALLRv3 IntRegs:$dst)>, Requires<[HasV3T]>; def : Pat<(call tglobaladdr:$dst), (CALLv3 tglobaladdr:$dst)>, Requires<[HasV3T]>; def : Pat<(call texternalsym:$dst), diff --git a/lib/Target/Hexagon/HexagonInstrInfoV4.td b/lib/Target/Hexagon/HexagonInstrInfoV4.td index f507e4f37c18..9e60cf26d08f 100644 --- a/lib/Target/Hexagon/HexagonInstrInfoV4.td +++ b/lib/Target/Hexagon/HexagonInstrInfoV4.td @@ -11,11 +11,6 @@ // //===----------------------------------------------------------------------===// -def IMMEXT : Immext<(outs), (ins), - "##immext //should never emit this", - []>, - Requires<[HasV4T]>; - // Hexagon V4 Architecture spec defines 8 instruction classes: // LD ST ALU32 XTYPE J JR MEMOP NV CR SYSTEM(system is not implemented in the // compiler) @@ -255,151 +250,23 @@ def ZXTH_cdnNotPt_V4 : ALU32_rr<(outs IntRegs:$dst), []>, Requires<[HasV4T]>; -// Generate frame index addresses. -let neverHasSideEffects = 1, isReMaterializable = 1 in -def TFR_FI_immext_V4 : ALU32_ri<(outs IntRegs:$dst), - (ins IntRegs:$src1, s32Imm:$offset), - "$dst = add($src1, ##$offset)", - []>, - Requires<[HasV4T]>; - //===----------------------------------------------------------------------===// // ALU32 - //===----------------------------------------------------------------------===// -//===----------------------------------------------------------------------===// -// ALU32/PERM + -//===----------------------------------------------------------------------===// - -// Combine -// Rdd=combine(Rs, #s8) -let neverHasSideEffects = 1 in -def COMBINE_ri_V4 : ALU32_ri<(outs DoubleRegs:$dst), - (ins IntRegs:$src1, s8Imm:$src2), - "$dst = combine($src1, #$src2)", - []>, - Requires<[HasV4T]>; -// Rdd=combine(#s8, Rs) -let neverHasSideEffects = 1 in -def COMBINE_ir_V4 : ALU32_ir<(outs DoubleRegs:$dst), - (ins s8Imm:$src1, IntRegs:$src2), - "$dst = combine(#$src1, $src2)", - []>, - Requires<[HasV4T]>; -//===----------------------------------------------------------------------===// -// ALU32/PERM + -//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===// // LD + //===----------------------------------------------------------------------===// -// -// These absolute set addressing mode instructions accept immediate as -// an operand. We have duplicated these patterns to take global address. 
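The absolute-set load forms whose removal continues directly below this note write two destinations at once: in "$dst1 = memw($dst2=#$addr)" the instruction both loads from the absolute address and leaves that address in $dst2 for later reuse. A toy C++ model of those semantics, with a fake flat memory and hypothetical names, not the real ISA definition:

    #include <cstdint>
    #include <cstring>

    static uint8_t MEM[(1 << 16) + 8];  // toy flat memory

    // "Rd = memw(Re=#U6)": load a word from absolute address #U6 and also
    // set Re to that address (the "address set" side effect).
    uint32_t loadWordAbsSet(uint32_t u6, uint32_t &re) {
      re = u6;  // Re = #U6
      uint32_t rd;
      std::memcpy(&rd, &MEM[u6 & 0xFFFFu], sizeof rd);  // Rd = memw(#U6)
      return rd;
    }

The memd/memb/memh/memub/memuh variants have the same shape, and the ##global forms differ only in that the address comes from a relocated global address instead of a u6 immediate.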
- -let neverHasSideEffects = 1 in -def LDrid_abs_setimm_V4 : LDInst<(outs DoubleRegs:$dst1, IntRegs:$dst2), - (ins u6Imm:$addr), - "$dst1 = memd($dst2=#$addr)", - []>, - Requires<[HasV4T]>; - -// Rd=memb(Re=#U6) -let neverHasSideEffects = 1 in -def LDrib_abs_setimm_V4 : LDInst<(outs IntRegs:$dst1, IntRegs:$dst2), - (ins u6Imm:$addr), - "$dst1 = memb($dst2=#$addr)", - []>, - Requires<[HasV4T]>; - -// Rd=memh(Re=#U6) -let neverHasSideEffects = 1 in -def LDrih_abs_setimm_V4 : LDInst<(outs IntRegs:$dst1, IntRegs:$dst2), - (ins u6Imm:$addr), - "$dst1 = memh($dst2=#$addr)", - []>, - Requires<[HasV4T]>; - -// Rd=memub(Re=#U6) -let neverHasSideEffects = 1 in -def LDriub_abs_setimm_V4 : LDInst<(outs IntRegs:$dst1, IntRegs:$dst2), - (ins u6Imm:$addr), - "$dst1 = memub($dst2=#$addr)", - []>, - Requires<[HasV4T]>; - -// Rd=memuh(Re=#U6) -let neverHasSideEffects = 1 in -def LDriuh_abs_setimm_V4 : LDInst<(outs IntRegs:$dst1, IntRegs:$dst2), - (ins u6Imm:$addr), - "$dst1 = memuh($dst2=#$addr)", - []>, - Requires<[HasV4T]>; - -// Rd=memw(Re=#U6) -let neverHasSideEffects = 1 in -def LDriw_abs_setimm_V4 : LDInst<(outs IntRegs:$dst1, IntRegs:$dst2), - (ins u6Imm:$addr), - "$dst1 = memw($dst2=#$addr)", - []>, - Requires<[HasV4T]>; - -// Following patterns are defined for absolute set addressing mode -// instruction which take global address as operand. -let mayLoad = 1, neverHasSideEffects = 1 in -def LDrid_abs_set_V4 : LDInst<(outs DoubleRegs:$dst1, IntRegs:$dst2), - (ins globaladdress:$addr), - "$dst1 = memd($dst2=##$addr)", - []>, - Requires<[HasV4T]>; - -// Rd=memb(Re=#U6) -let mayLoad = 1, neverHasSideEffects = 1 in -def LDrib_abs_set_V4 : LDInst<(outs IntRegs:$dst1, IntRegs:$dst2), - (ins globaladdress:$addr), - "$dst1 = memb($dst2=##$addr)", - []>, - Requires<[HasV4T]>; - -// Rd=memh(Re=#U6) -let mayLoad = 1, neverHasSideEffects = 1 in -def LDrih_abs_set_V4 : LDInst<(outs IntRegs:$dst1, IntRegs:$dst2), - (ins globaladdress:$addr), - "$dst1 = memh($dst2=##$addr)", - []>, - Requires<[HasV4T]>; - -// Rd=memub(Re=#U6) -let mayLoad = 1, neverHasSideEffects = 1 in -def LDriub_abs_set_V4 : LDInst<(outs IntRegs:$dst1, IntRegs:$dst2), - (ins globaladdress:$addr), - "$dst1 = memub($dst2=##$addr)", - []>, - Requires<[HasV4T]>; - -// Rd=memuh(Re=#U6) -let mayLoad = 1, neverHasSideEffects = 1 in -def LDriuh_abs_set_V4 : LDInst<(outs IntRegs:$dst1, IntRegs:$dst2), - (ins globaladdress:$addr), - "$dst1 = memuh($dst2=##$addr)", - []>, - Requires<[HasV4T]>; - -// Rd=memw(Re=#U6) -let mayLoad = 1, neverHasSideEffects = 1 in -def LDriw_abs_set_V4 : LDInst<(outs IntRegs:$dst1, IntRegs:$dst2), - (ins globaladdress:$addr), - "$dst1 = memw($dst2=##$addr)", - []>, - Requires<[HasV4T]>; +/// +/// Make sure that in post increment load, the first operand is always the post +/// increment operand. +/// +//// Load doubleword. +// Rdd=memd(Re=#U6) -// Load doubleword. -// -// Make sure that in post increment load, the first operand is always the post -// increment operand. -// // Rdd=memd(Rs+Rt<<#u2) // Special case pattern for indexed load without offset which is easier to // match. 
AddedComplexity of this pattern should be lower than base+offset load @@ -409,19 +276,17 @@ let AddedComplexity = 10, isPredicable = 1 in def LDrid_indexed_V4 : LDInst<(outs DoubleRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2), "$dst=memd($src1+$src2<<#0)", - [(set (i64 DoubleRegs:$dst), - (i64 (load (add (i32 IntRegs:$src1), - (i32 IntRegs:$src2)))))]>, + [(set DoubleRegs:$dst, (load (add IntRegs:$src1, + IntRegs:$src2)))]>, Requires<[HasV4T]>; let AddedComplexity = 40, isPredicable = 1 in def LDrid_indexed_shl_V4 : LDInst<(outs DoubleRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2, u2Imm:$offset), "$dst=memd($src1+$src2<<#$offset)", - [(set (i64 DoubleRegs:$dst), - (i64 (load (add (i32 IntRegs:$src1), - (shl (i32 IntRegs:$src2), - u2ImmPred:$offset)))))]>, + [(set DoubleRegs:$dst, (load (add IntRegs:$src1, + (shl IntRegs:$src2, + u2ImmPred:$offset))))]>, Requires<[HasV4T]>; //// Load doubleword conditionally. @@ -497,62 +362,60 @@ def LDrid_indexed_shl_cdnNotPt_V4 : LDInst<(outs DoubleRegs:$dst), // Rdd=memd(Rt<<#u2+#U6) //// Load byte. +// Rd=memb(Re=#U6) + // Rd=memb(Rs+Rt<<#u2) let AddedComplexity = 10, isPredicable = 1 in def LDrib_indexed_V4 : LDInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2), "$dst=memb($src1+$src2<<#0)", - [(set (i32 IntRegs:$dst), - (i32 (sextloadi8 (add (i32 IntRegs:$src1), - (i32 IntRegs:$src2)))))]>, + [(set IntRegs:$dst, (sextloadi8 (add IntRegs:$src1, + IntRegs:$src2)))]>, Requires<[HasV4T]>; let AddedComplexity = 10, isPredicable = 1 in def LDriub_indexed_V4 : LDInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2), "$dst=memub($src1+$src2<<#0)", - [(set (i32 IntRegs:$dst), - (i32 (zextloadi8 (add (i32 IntRegs:$src1), - (i32 IntRegs:$src2)))))]>, + [(set IntRegs:$dst, (zextloadi8 (add IntRegs:$src1, + IntRegs:$src2)))]>, Requires<[HasV4T]>; let AddedComplexity = 10, isPredicable = 1 in def LDriub_ae_indexed_V4 : LDInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2), "$dst=memub($src1+$src2<<#0)", - [(set (i32 IntRegs:$dst), - (i32 (extloadi8 (add (i32 IntRegs:$src1), - (i32 IntRegs:$src2)))))]>, + [(set IntRegs:$dst, (extloadi8 (add IntRegs:$src1, + IntRegs:$src2)))]>, Requires<[HasV4T]>; let AddedComplexity = 40, isPredicable = 1 in def LDrib_indexed_shl_V4 : LDInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2, u2Imm:$offset), "$dst=memb($src1+$src2<<#$offset)", - [(set (i32 IntRegs:$dst), - (i32 (sextloadi8 (add (i32 IntRegs:$src1), - (shl (i32 IntRegs:$src2), - u2ImmPred:$offset)))))]>, + [(set IntRegs:$dst, + (sextloadi8 (add IntRegs:$src1, + (shl IntRegs:$src2, + u2ImmPred:$offset))))]>, Requires<[HasV4T]>; let AddedComplexity = 40, isPredicable = 1 in def LDriub_indexed_shl_V4 : LDInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2, u2Imm:$offset), "$dst=memub($src1+$src2<<#$offset)", - [(set (i32 IntRegs:$dst), - (i32 (zextloadi8 (add (i32 IntRegs:$src1), - (shl (i32 IntRegs:$src2), - u2ImmPred:$offset)))))]>, + [(set IntRegs:$dst, + (zextloadi8 (add IntRegs:$src1, + (shl IntRegs:$src2, + u2ImmPred:$offset))))]>, Requires<[HasV4T]>; let AddedComplexity = 40, isPredicable = 1 in def LDriub_ae_indexed_shl_V4 : LDInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2, u2Imm:$offset), "$dst=memub($src1+$src2<<#$offset)", - [(set (i32 IntRegs:$dst), - (i32 (extloadi8 (add (i32 IntRegs:$src1), - (shl (i32 IntRegs:$src2), - u2ImmPred:$offset)))))]>, + [(set IntRegs:$dst, (extloadi8 (add IntRegs:$src1, + (shl IntRegs:$src2, + u2ImmPred:$offset))))]>, Requires<[HasV4T]>; //// Load byte conditionally. 
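The indexed patterns above select loads whose address is a base plus a scaled index: "$dst=memd($src1+$src2<<#$offset)" matches the DAG (add Rs, (shl Rt, #u2)), with the shift limited to 0..3 by u2ImmPred. A small C++ model of the effective-address computation (toy memory, hypothetical names), offered only as an illustration of what the pattern denotes:

    #include <cstdint>
    #include <cstring>

    static uint8_t MEM[(1 << 16) + 8];  // toy flat memory

    // Effective address of memd(Rs+Rt<<#u2): rs + (rt << u2), u2 in [0,3].
    uint64_t loadDoubleIndexed(uint32_t rs, uint32_t rt, unsigned u2) {
      uint32_t ea = rs + (rt << u2);
      uint64_t rdd;
      std::memcpy(&rdd, &MEM[ea & 0xFFFFu], sizeof rdd);
      return rdd;
    }

The <<#0 defs are the special case the comment describes: the same addressing mode with a zero shift, kept as a separate, easier-to-match pattern whose AddedComplexity sits below that of base+offset loads.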
@@ -698,32 +561,31 @@ def LDriub_indexed_shl_cdnNotPt_V4 : LDInst<(outs IntRegs:$dst), // Rd=memb(Rt<<#u2+#U6) //// Load halfword +// Rd=memh(Re=#U6) + // Rd=memh(Rs+Rt<<#u2) let AddedComplexity = 10, isPredicable = 1 in def LDrih_indexed_V4 : LDInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2), "$dst=memh($src1+$src2<<#0)", - [(set (i32 IntRegs:$dst), - (i32 (sextloadi16 (add (i32 IntRegs:$src1), - (i32 IntRegs:$src2)))))]>, + [(set IntRegs:$dst, (sextloadi16 (add IntRegs:$src1, + IntRegs:$src2)))]>, Requires<[HasV4T]>; let AddedComplexity = 10, isPredicable = 1 in def LDriuh_indexed_V4 : LDInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2), "$dst=memuh($src1+$src2<<#0)", - [(set (i32 IntRegs:$dst), - (i32 (zextloadi16 (add (i32 IntRegs:$src1), - (i32 IntRegs:$src2)))))]>, + [(set IntRegs:$dst, (zextloadi16 (add IntRegs:$src1, + IntRegs:$src2)))]>, Requires<[HasV4T]>; let AddedComplexity = 10, isPredicable = 1 in def LDriuh_ae_indexed_V4 : LDInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2), "$dst=memuh($src1+$src2<<#0)", - [(set (i32 IntRegs:$dst), - (i32 (extloadi16 (add (i32 IntRegs:$src1), - (i32 IntRegs:$src2)))))]>, + [(set IntRegs:$dst, (extloadi16 (add IntRegs:$src1, + IntRegs:$src2)))]>, Requires<[HasV4T]>; // Rd=memh(Rs+Rt<<#u2) @@ -731,30 +593,30 @@ let AddedComplexity = 40, isPredicable = 1 in def LDrih_indexed_shl_V4 : LDInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2, u2Imm:$offset), "$dst=memh($src1+$src2<<#$offset)", - [(set (i32 IntRegs:$dst), - (i32 (sextloadi16 (add (i32 IntRegs:$src1), - (shl (i32 IntRegs:$src2), - u2ImmPred:$offset)))))]>, + [(set IntRegs:$dst, + (sextloadi16 (add IntRegs:$src1, + (shl IntRegs:$src2, + u2ImmPred:$offset))))]>, Requires<[HasV4T]>; let AddedComplexity = 40, isPredicable = 1 in def LDriuh_indexed_shl_V4 : LDInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2, u2Imm:$offset), "$dst=memuh($src1+$src2<<#$offset)", - [(set (i32 IntRegs:$dst), - (i32 (zextloadi16 (add (i32 IntRegs:$src1), - (shl (i32 IntRegs:$src2), - u2ImmPred:$offset)))))]>, + [(set IntRegs:$dst, + (zextloadi16 (add IntRegs:$src1, + (shl IntRegs:$src2, + u2ImmPred:$offset))))]>, Requires<[HasV4T]>; let AddedComplexity = 40, isPredicable = 1 in def LDriuh_ae_indexed_shl_V4 : LDInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2, u2Imm:$offset), "$dst=memuh($src1+$src2<<#$offset)", - [(set (i32 IntRegs:$dst), - (i32 (extloadi16 (add (i32 IntRegs:$src1), - (shl (i32 IntRegs:$src2), - u2ImmPred:$offset)))))]>, + [(set IntRegs:$dst, + (extloadi16 (add IntRegs:$src1, + (shl IntRegs:$src2, + u2ImmPred:$offset))))]>, Requires<[HasV4T]>; //// Load halfword conditionally. @@ -900,14 +762,6 @@ def LDriuh_indexed_shl_cdnNotPt_V4 : LDInst<(outs IntRegs:$dst), // Rd=memh(Rt<<#u2+#U6) //// Load word. -// Load predicate: Fix for bug 5279. 
-let mayLoad = 1, neverHasSideEffects = 1 in -def LDriw_pred_V4 : LDInst<(outs PredRegs:$dst), - (ins MEMri:$addr), - "Error; should not emit", - []>, - Requires<[HasV4T]>; - // Rd=memw(Re=#U6) // Rd=memw(Rs+Rt<<#u2) @@ -915,9 +769,8 @@ let AddedComplexity = 10, isPredicable = 1 in def LDriw_indexed_V4 : LDInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2), "$dst=memw($src1+$src2<<#0)", - [(set (i32 IntRegs:$dst), - (i32 (load (add (i32 IntRegs:$src1), - (i32 IntRegs:$src2)))))]>, + [(set IntRegs:$dst, (load (add IntRegs:$src1, + IntRegs:$src2)))]>, Requires<[HasV4T]>; // Rd=memw(Rs+Rt<<#u2) @@ -925,10 +778,9 @@ let AddedComplexity = 40, isPredicable = 1 in def LDriw_indexed_shl_V4 : LDInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2, u2Imm:$offset), "$dst=memw($src1+$src2<<#$offset)", - [(set (i32 IntRegs:$dst), - (i32 (load (add (i32 IntRegs:$src1), - (shl (i32 IntRegs:$src2), - u2ImmPred:$offset)))))]>, + [(set IntRegs:$dst, (load (add IntRegs:$src1, + (shl IntRegs:$src2, + u2ImmPred:$offset))))]>, Requires<[HasV4T]>; //// Load word conditionally. @@ -1103,633 +955,6 @@ def POST_LDriw_cdnNotPt_V4 : LDInstPI<(outs IntRegs:$dst1, IntRegs:$dst2), "$src2 = $dst2">, Requires<[HasV4T]>; -/// Load from global offset - -let isPredicable = 1, mayLoad = 1, neverHasSideEffects = 1 in -def LDrid_GP_V4 : LDInst<(outs DoubleRegs:$dst), - (ins globaladdress:$global, u16Imm:$offset), - "$dst=memd(#$global+$offset)", - []>, - Requires<[HasV4T]>; - -let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in -def LDrid_GP_cPt_V4 : LDInst<(outs DoubleRegs:$dst), - (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset), - "if ($src1) $dst=memd(##$global+$offset)", - []>, - Requires<[HasV4T]>; - -let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in -def LDrid_GP_cNotPt_V4 : LDInst<(outs DoubleRegs:$dst), - (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset), - "if (!$src1) $dst=memd(##$global+$offset)", - []>, - Requires<[HasV4T]>; - -let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in -def LDrid_GP_cdnPt_V4 : LDInst<(outs DoubleRegs:$dst), - (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset), - "if ($src1.new) $dst=memd(##$global+$offset)", - []>, - Requires<[HasV4T]>; - -let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in -def LDrid_GP_cdnNotPt_V4 : LDInst<(outs DoubleRegs:$dst), - (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset), - "if (!$src1.new) $dst=memd(##$global+$offset)", - []>, - Requires<[HasV4T]>; - -let isPredicable = 1, mayLoad = 1, neverHasSideEffects = 1 in -def LDrib_GP_V4 : LDInst<(outs IntRegs:$dst), - (ins globaladdress:$global, u16Imm:$offset), - "$dst=memb(#$global+$offset)", - []>, - Requires<[HasV4T]>; - -let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in -def LDrib_GP_cPt_V4 : LDInst<(outs IntRegs:$dst), - (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset), - "if ($src1) $dst=memb(##$global+$offset)", - []>, - Requires<[HasV4T]>; - -let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in -def LDrib_GP_cNotPt_V4 : LDInst<(outs IntRegs:$dst), - (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset), - "if (!$src1) $dst=memb(##$global+$offset)", - []>, - Requires<[HasV4T]>; - -let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in -def LDrib_GP_cdnPt_V4 : LDInst<(outs IntRegs:$dst), - (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset), - "if ($src1.new) $dst=memb(##$global+$offset)", - []>, - Requires<[HasV4T]>; - -let mayLoad = 1, neverHasSideEffects = 
1, isPredicated = 1 in -def LDrib_GP_cdnNotPt_V4 : LDInst<(outs IntRegs:$dst), - (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset), - "if (!$src1.new) $dst=memb(##$global+$offset)", - []>, - Requires<[HasV4T]>; - - -let isPredicable = 1, mayLoad = 1, neverHasSideEffects = 1 in -def LDriub_GP_V4 : LDInst<(outs IntRegs:$dst), - (ins globaladdress:$global, u16Imm:$offset), - "$dst=memub(#$global+$offset)", - []>, - Requires<[HasV4T]>; - - -let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in -def LDriub_GP_cPt_V4 : LDInst<(outs IntRegs:$dst), - (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset), - "if ($src1) $dst=memub(##$global+$offset)", - []>, - Requires<[HasV4T]>; - -let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in -def LDriub_GP_cNotPt_V4 : LDInst<(outs IntRegs:$dst), - (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset), - "if (!$src1) $dst=memub(##$global+$offset)", - []>, - Requires<[HasV4T]>; - -let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in -def LDriub_GP_cdnPt_V4 : LDInst<(outs IntRegs:$dst), - (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset), - "if ($src1.new) $dst=memub(##$global+$offset)", - []>, - Requires<[HasV4T]>; - -let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in -def LDriub_GP_cdnNotPt_V4 : LDInst<(outs IntRegs:$dst), - (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset), - "if (!$src1.new) $dst=memub(##$global+$offset)", - []>, - Requires<[HasV4T]>; - - -let isPredicable = 1, mayLoad = 1, neverHasSideEffects = 1 in -def LDrih_GP_V4 : LDInst<(outs IntRegs:$dst), - (ins globaladdress:$global, u16Imm:$offset), - "$dst=memh(#$global+$offset)", - []>, - Requires<[HasV4T]>; - - -let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in -def LDrih_GP_cPt_V4 : LDInst<(outs IntRegs:$dst), - (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset), - "if ($src1) $dst=memh(##$global+$offset)", - []>, - Requires<[HasV4T]>; - -let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in -def LDrih_GP_cNotPt_V4 : LDInst<(outs IntRegs:$dst), - (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset), - "if (!$src1) $dst=memh(##$global+$offset)", - []>, - Requires<[HasV4T]>; - -let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in -def LDrih_GP_cdnPt_V4 : LDInst<(outs IntRegs:$dst), - (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset), - "if ($src1.new) $dst=memh(##$global+$offset)", - []>, - Requires<[HasV4T]>; - -let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in -def LDrih_GP_cdnNotPt_V4 : LDInst<(outs IntRegs:$dst), - (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset), - "if (!$src1.new) $dst=memh(##$global+$offset)", - []>, - Requires<[HasV4T]>; - - -let isPredicable = 1, mayLoad = 1, neverHasSideEffects = 1 in -def LDriuh_GP_V4 : LDInst<(outs IntRegs:$dst), - (ins globaladdress:$global, u16Imm:$offset), - "$dst=memuh(#$global+$offset)", - []>, - Requires<[HasV4T]>; - -let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in -def LDriuh_GP_cPt_V4 : LDInst<(outs IntRegs:$dst), - (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset), - "if ($src1) $dst=memuh(##$global+$offset)", - []>, - Requires<[HasV4T]>; - -let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in -def LDriuh_GP_cNotPt_V4 : LDInst<(outs IntRegs:$dst), - (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset), - "if (!$src1) $dst=memuh(##$global+$offset)", - []>, - Requires<[HasV4T]>; - -let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in -def 
LDriuh_GP_cdnPt_V4 : LDInst<(outs IntRegs:$dst), - (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset), - "if ($src1.new) $dst=memuh(##$global+$offset)", - []>, - Requires<[HasV4T]>; - -let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in -def LDriuh_GP_cdnNotPt_V4 : LDInst<(outs IntRegs:$dst), - (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset), - "if (!$src1.new) $dst=memuh(##$global+$offset)", - []>, - Requires<[HasV4T]>; - -let isPredicable = 1, mayLoad = 1, neverHasSideEffects = 1 in -def LDriw_GP_V4 : LDInst<(outs IntRegs:$dst), - (ins globaladdress:$global, u16Imm:$offset), - "$dst=memw(#$global+$offset)", - []>, - Requires<[HasV4T]>; - - -let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in -def LDriw_GP_cPt_V4 : LDInst<(outs IntRegs:$dst), - (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset), - "if ($src1) $dst=memw(##$global+$offset)", - []>, - Requires<[HasV4T]>; - -let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in -def LDriw_GP_cNotPt_V4 : LDInst<(outs IntRegs:$dst), - (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset), - "if (!$src1) $dst=memw(##$global+$offset)", - []>, - Requires<[HasV4T]>; - - -let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in -def LDriw_GP_cdnPt_V4 : LDInst<(outs IntRegs:$dst), - (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset), - "if ($src1.new) $dst=memw(##$global+$offset)", - []>, - Requires<[HasV4T]>; - -let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in -def LDriw_GP_cdnNotPt_V4 : LDInst<(outs IntRegs:$dst), - (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset), - "if (!$src1.new) $dst=memw(##$global+$offset)", - []>, - Requires<[HasV4T]>; - - -let isPredicable = 1, mayLoad = 1, neverHasSideEffects = 1 in -def LDd_GP_V4 : LDInst<(outs DoubleRegs:$dst), - (ins globaladdress:$global), - "$dst=memd(#$global)", - []>, - Requires<[HasV4T]>; - -// if (Pv) Rtt=memd(##global) -let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in -def LDd_GP_cPt_V4 : LDInst<(outs DoubleRegs:$dst), - (ins PredRegs:$src1, globaladdress:$global), - "if ($src1) $dst=memd(##$global)", - []>, - Requires<[HasV4T]>; - - -// if (!Pv) Rtt=memd(##global) -let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in -def LDd_GP_cNotPt_V4 : LDInst<(outs DoubleRegs:$dst), - (ins PredRegs:$src1, globaladdress:$global), - "if (!$src1) $dst=memd(##$global)", - []>, - Requires<[HasV4T]>; - -// if (Pv) Rtt=memd(##global) -let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in -def LDd_GP_cdnPt_V4 : LDInst<(outs DoubleRegs:$dst), - (ins PredRegs:$src1, globaladdress:$global), - "if ($src1.new) $dst=memd(##$global)", - []>, - Requires<[HasV4T]>; - - -// if (!Pv) Rtt=memd(##global) -let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in -def LDd_GP_cdnNotPt_V4 : LDInst<(outs DoubleRegs:$dst), - (ins PredRegs:$src1, globaladdress:$global), - "if (!$src1.new) $dst=memd(##$global)", - []>, - Requires<[HasV4T]>; - -let isPredicable = 1, mayLoad = 1, neverHasSideEffects = 1 in -def LDb_GP_V4 : LDInst<(outs IntRegs:$dst), - (ins globaladdress:$global), - "$dst=memb(#$global)", - []>, - Requires<[HasV4T]>; - -// if (Pv) Rt=memb(##global) -let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in -def LDb_GP_cPt_V4 : LDInst<(outs IntRegs:$dst), - (ins PredRegs:$src1, globaladdress:$global), - "if ($src1) $dst=memb(##$global)", - []>, - Requires<[HasV4T]>; - -// if (!Pv) Rt=memb(##global) -let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in -def LDb_GP_cNotPt_V4 : 
LDInst<(outs IntRegs:$dst), - (ins PredRegs:$src1, globaladdress:$global), - "if (!$src1) $dst=memb(##$global)", - []>, - Requires<[HasV4T]>; - -// if (Pv) Rt=memb(##global) -let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in -def LDb_GP_cdnPt_V4 : LDInst<(outs IntRegs:$dst), - (ins PredRegs:$src1, globaladdress:$global), - "if ($src1.new) $dst=memb(##$global)", - []>, - Requires<[HasV4T]>; - -// if (!Pv) Rt=memb(##global) -let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in -def LDb_GP_cdnNotPt_V4 : LDInst<(outs IntRegs:$dst), - (ins PredRegs:$src1, globaladdress:$global), - "if (!$src1.new) $dst=memb(##$global)", - []>, - Requires<[HasV4T]>; - -let isPredicable = 1, mayLoad = 1, neverHasSideEffects = 1 in -def LDub_GP_V4 : LDInst<(outs IntRegs:$dst), - (ins globaladdress:$global), - "$dst=memub(#$global)", - []>, - Requires<[HasV4T]>; - -// if (Pv) Rt=memub(##global) -let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in -def LDub_GP_cPt_V4 : LDInst<(outs IntRegs:$dst), - (ins PredRegs:$src1, globaladdress:$global), - "if ($src1) $dst=memub(##$global)", - []>, - Requires<[HasV4T]>; - - -// if (!Pv) Rt=memub(##global) -let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in -def LDub_GP_cNotPt_V4 : LDInst<(outs IntRegs:$dst), - (ins PredRegs:$src1, globaladdress:$global), - "if (!$src1) $dst=memub(##$global)", - []>, - Requires<[HasV4T]>; - -// if (Pv) Rt=memub(##global) -let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in -def LDub_GP_cdnPt_V4 : LDInst<(outs IntRegs:$dst), - (ins PredRegs:$src1, globaladdress:$global), - "if ($src1.new) $dst=memub(##$global)", - []>, - Requires<[HasV4T]>; - - -// if (!Pv) Rt=memub(##global) -let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in -def LDub_GP_cdnNotPt_V4 : LDInst<(outs IntRegs:$dst), - (ins PredRegs:$src1, globaladdress:$global), - "if (!$src1.new) $dst=memub(##$global)", - []>, - Requires<[HasV4T]>; - -let isPredicable = 1, mayLoad = 1, neverHasSideEffects = 1 in -def LDh_GP_V4 : LDInst<(outs IntRegs:$dst), - (ins globaladdress:$global), - "$dst=memh(#$global)", - []>, - Requires<[HasV4T]>; - -// if (Pv) Rt=memh(##global) -let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in -def LDh_GP_cPt_V4 : LDInst<(outs IntRegs:$dst), - (ins PredRegs:$src1, globaladdress:$global), - "if ($src1) $dst=memh(##$global)", - []>, - Requires<[HasV4T]>; - -// if (!Pv) Rt=memh(##global) -let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in -def LDh_GP_cNotPt_V4 : LDInst<(outs IntRegs:$dst), - (ins PredRegs:$src1, globaladdress:$global), - "if (!$src1) $dst=memh(##$global)", - []>, - Requires<[HasV4T]>; - -// if (Pv) Rt=memh(##global) -let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in -def LDh_GP_cdnPt_V4 : LDInst<(outs IntRegs:$dst), - (ins PredRegs:$src1, globaladdress:$global), - "if ($src1.new) $dst=memh(##$global)", - []>, - Requires<[HasV4T]>; - -// if (!Pv) Rt=memh(##global) -let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in -def LDh_GP_cdnNotPt_V4 : LDInst<(outs IntRegs:$dst), - (ins PredRegs:$src1, globaladdress:$global), - "if (!$src1.new) $dst=memh(##$global)", - []>, - Requires<[HasV4T]>; - -let isPredicable = 1, mayLoad = 1, neverHasSideEffects = 1 in -def LDuh_GP_V4 : LDInst<(outs IntRegs:$dst), - (ins globaladdress:$global), - "$dst=memuh(#$global)", - []>, - Requires<[HasV4T]>; - -// if (Pv) Rt=memuh(##global) -let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in -def LDuh_GP_cPt_V4 : LDInst<(outs IntRegs:$dst), - (ins PredRegs:$src1, 
globaladdress:$global), - "if ($src1) $dst=memuh(##$global)", - []>, - Requires<[HasV4T]>; - -// if (!Pv) Rt=memuh(##global) -let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in -def LDuh_GP_cNotPt_V4 : LDInst<(outs IntRegs:$dst), - (ins PredRegs:$src1, globaladdress:$global), - "if (!$src1) $dst=memuh(##$global)", - []>, - Requires<[HasV4T]>; - -// if (Pv) Rt=memuh(##global) -let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in -def LDuh_GP_cdnPt_V4 : LDInst<(outs IntRegs:$dst), - (ins PredRegs:$src1, globaladdress:$global), - "if ($src1.new) $dst=memuh(##$global)", - []>, - Requires<[HasV4T]>; - -// if (!Pv) Rt=memuh(##global) -let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in -def LDuh_GP_cdnNotPt_V4 : LDInst<(outs IntRegs:$dst), - (ins PredRegs:$src1, globaladdress:$global), - "if (!$src1.new) $dst=memuh(##$global)", - []>, - Requires<[HasV4T]>; - -let isPredicable = 1, mayLoad = 1, neverHasSideEffects = 1 in -def LDw_GP_V4 : LDInst<(outs IntRegs:$dst), - (ins globaladdress:$global), - "$dst=memw(#$global)", - []>, - Requires<[HasV4T]>; - -// if (Pv) Rt=memw(##global) -let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in -def LDw_GP_cPt_V4 : LDInst<(outs IntRegs:$dst), - (ins PredRegs:$src1, globaladdress:$global), - "if ($src1) $dst=memw(##$global)", - []>, - Requires<[HasV4T]>; - - -// if (!Pv) Rt=memw(##global) -let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in -def LDw_GP_cNotPt_V4 : LDInst<(outs IntRegs:$dst), - (ins PredRegs:$src1, globaladdress:$global), - "if (!$src1) $dst=memw(##$global)", - []>, - Requires<[HasV4T]>; - -// if (Pv) Rt=memw(##global) -let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in -def LDw_GP_cdnPt_V4 : LDInst<(outs IntRegs:$dst), - (ins PredRegs:$src1, globaladdress:$global), - "if ($src1.new) $dst=memw(##$global)", - []>, - Requires<[HasV4T]>; - - -// if (!Pv) Rt=memw(##global) -let mayLoad = 1, neverHasSideEffects = 1, isPredicated = 1 in -def LDw_GP_cdnNotPt_V4 : LDInst<(outs IntRegs:$dst), - (ins PredRegs:$src1, globaladdress:$global), - "if (!$src1.new) $dst=memw(##$global)", - []>, - Requires<[HasV4T]>; - - - -def : Pat <(atomic_load_64 (HexagonCONST32_GP tglobaladdr:$global)), - (i64 (LDd_GP_V4 tglobaladdr:$global))>, - Requires<[HasV4T]>; - -def : Pat <(atomic_load_32 (HexagonCONST32_GP tglobaladdr:$global)), - (i32 (LDw_GP_V4 tglobaladdr:$global))>, - Requires<[HasV4T]>; - -def : Pat <(atomic_load_16 (HexagonCONST32_GP tglobaladdr:$global)), - (i32 (LDuh_GP_V4 tglobaladdr:$global))>, - Requires<[HasV4T]>; - -def : Pat <(atomic_load_8 (HexagonCONST32_GP tglobaladdr:$global)), - (i32 (LDub_GP_V4 tglobaladdr:$global))>, - Requires<[HasV4T]>; - -// Map from load(globaladdress) -> memw(#foo + 0) -let AddedComplexity = 100 in -def : Pat <(i64 (load (HexagonCONST32_GP tglobaladdr:$global))), - (i64 (LDd_GP_V4 tglobaladdr:$global))>, - Requires<[HasV4T]>; - -// Map from Pd = load(globaladdress) -> Rd = memb(globaladdress), Pd = Rd -let AddedComplexity = 100 in -def : Pat <(i1 (load (HexagonCONST32_GP tglobaladdr:$global))), - (i1 (TFR_PdRs (i32 (LDb_GP_V4 tglobaladdr:$global))))>, - Requires<[HasV4T]>; - -// When the Interprocedural Global Variable optimizer realizes that a certain -// global variable takes only two constant values, it shrinks the global to -// a boolean. Catch those loads here in the following 3 patterns. 
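The comment above concerns globals that the interprocedural global-variable optimizer shrinks to i1; the three patterns it refers to continue immediately after this note. At the IR level such a load arrives as extloadi1, sextloadi1, or zextloadi1, and the patterns fold the extension into a byte load (memb or memub). A toy C++ model of the two extension semantics being requested, with hypothetical names:

    #include <cstdint>

    static uint8_t BoolGlobal;  // an i1 global stored as one byte

    // zextloadi1: keep only bit 0, yielding 0 or 1.
    uint32_t loadBoolZext() { return BoolGlobal & 1u; }

    // sextloadi1: replicate bit 0 through the register, yielding 0 or ~0u.
    uint32_t loadBoolSext() { return 0u - (BoolGlobal & 1u); }

How the hardware load itself extends (memb sign-extends, memub zero-extends) is shown by the patterns themselves; the sketch only models the IR-level meaning.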
-let AddedComplexity = 100 in -def : Pat <(i32 (extloadi1 (HexagonCONST32_GP tglobaladdr:$global))), - (i32 (LDb_GP_V4 tglobaladdr:$global))>, - Requires<[HasV4T]>; - -let AddedComplexity = 100 in -def : Pat <(i32 (sextloadi1 (HexagonCONST32_GP tglobaladdr:$global))), - (i32 (LDb_GP_V4 tglobaladdr:$global))>, - Requires<[HasV4T]>; - -// Map from load(globaladdress) -> memb(#foo) -let AddedComplexity = 100 in -def : Pat <(i32 (extloadi8 (HexagonCONST32_GP tglobaladdr:$global))), - (i32 (LDb_GP_V4 tglobaladdr:$global))>, - Requires<[HasV4T]>; - -// Map from load(globaladdress) -> memb(#foo) -let AddedComplexity = 100 in -def : Pat <(i32 (sextloadi8 (HexagonCONST32_GP tglobaladdr:$global))), - (i32 (LDb_GP_V4 tglobaladdr:$global))>, - Requires<[HasV4T]>; - -let AddedComplexity = 100 in -def : Pat <(i32 (zextloadi1 (HexagonCONST32_GP tglobaladdr:$global))), - (i32 (LDub_GP_V4 tglobaladdr:$global))>, - Requires<[HasV4T]>; - -// Map from load(globaladdress) -> memub(#foo) -let AddedComplexity = 100 in -def : Pat <(i32 (zextloadi8 (HexagonCONST32_GP tglobaladdr:$global))), - (i32 (LDub_GP_V4 tglobaladdr:$global))>, - Requires<[HasV4T]>; - -// Map from load(globaladdress) -> memh(#foo) -let AddedComplexity = 100 in -def : Pat <(i32 (extloadi16 (HexagonCONST32_GP tglobaladdr:$global))), - (i32 (LDh_GP_V4 tglobaladdr:$global))>, - Requires<[HasV4T]>; - -// Map from load(globaladdress) -> memh(#foo) -let AddedComplexity = 100 in -def : Pat <(i32 (sextloadi16 (HexagonCONST32_GP tglobaladdr:$global))), - (i32 (LDh_GP_V4 tglobaladdr:$global))>, - Requires<[HasV4T]>; - -// Map from load(globaladdress) -> memuh(#foo) -let AddedComplexity = 100 in -def : Pat <(i32 (zextloadi16 (HexagonCONST32_GP tglobaladdr:$global))), - (i32 (LDuh_GP_V4 tglobaladdr:$global))>, - Requires<[HasV4T]>; - -// Map from load(globaladdress) -> memw(#foo) -let AddedComplexity = 100 in -def : Pat <(i32 (load (HexagonCONST32_GP tglobaladdr:$global))), - (i32 (LDw_GP_V4 tglobaladdr:$global))>, - Requires<[HasV4T]>; - -def : Pat <(atomic_load_64 (add (HexagonCONST32_GP tglobaladdr:$global), - u16ImmPred:$offset)), - (i64 (LDrid_GP_V4 tglobaladdr:$global, u16ImmPred:$offset))>, - Requires<[HasV4T]>; - -def : Pat <(atomic_load_32 (add (HexagonCONST32_GP tglobaladdr:$global), - u16ImmPred:$offset)), - (i32 (LDriw_GP_V4 tglobaladdr:$global, u16ImmPred:$offset))>, - Requires<[HasV4T]>; - -def : Pat <(atomic_load_16 (add (HexagonCONST32_GP tglobaladdr:$global), - u16ImmPred:$offset)), - (i32 (LDriuh_GP_V4 tglobaladdr:$global, u16ImmPred:$offset))>, - Requires<[HasV4T]>; - -def : Pat <(atomic_load_8 (add (HexagonCONST32_GP tglobaladdr:$global), - u16ImmPred:$offset)), - (i32 (LDriub_GP_V4 tglobaladdr:$global, u16ImmPred:$offset))>, - Requires<[HasV4T]>; - -// Map from load(globaladdress + x) -> memd(#foo + x) -let AddedComplexity = 100 in -def : Pat <(i64 (load (add (HexagonCONST32_GP tglobaladdr:$global), - u16ImmPred:$offset))), - (i64 (LDrid_GP_V4 tglobaladdr:$global, u16ImmPred:$offset))>, - Requires<[HasV4T]>; - -// Map from load(globaladdress + x) -> memb(#foo + x) -let AddedComplexity = 100 in -def : Pat <(i32 (extloadi8 (add (HexagonCONST32_GP tglobaladdr:$global), - u16ImmPred:$offset))), - (i32 (LDrib_GP_V4 tglobaladdr:$global, u16ImmPred:$offset))>, - Requires<[HasV4T]>; - -// Map from load(globaladdress + x) -> memb(#foo + x) -let AddedComplexity = 100 in -def : Pat <(i32 (sextloadi8 (add (HexagonCONST32_GP tglobaladdr:$global), - u16ImmPred:$offset))), - (i32 (LDrib_GP_V4 tglobaladdr:$global, u16ImmPred:$offset))>, - 
Requires<[HasV4T]>; - -// Map from load(globaladdress + x) -> memub(#foo + x) -let AddedComplexity = 100 in -def : Pat <(i32 (zextloadi8 (add (HexagonCONST32_GP tglobaladdr:$global), - u16ImmPred:$offset))), - (i32 (LDriub_GP_V4 tglobaladdr:$global, u16ImmPred:$offset))>, - Requires<[HasV4T]>; - -// Map from load(globaladdress + x) -> memuh(#foo + x) -let AddedComplexity = 100 in -def : Pat <(i32 (extloadi16 (add (HexagonCONST32_GP tglobaladdr:$global), - u16ImmPred:$offset))), - (i32 (LDrih_GP_V4 tglobaladdr:$global, u16ImmPred:$offset))>, - Requires<[HasV4T]>; - -// Map from load(globaladdress + x) -> memh(#foo + x) -let AddedComplexity = 100 in -def : Pat <(i32 (sextloadi16 (add (HexagonCONST32_GP tglobaladdr:$global), - u16ImmPred:$offset))), - (i32 (LDrih_GP_V4 tglobaladdr:$global, u16ImmPred:$offset))>, - Requires<[HasV4T]>; - - -// Map from load(globaladdress + x) -> memuh(#foo + x) -let AddedComplexity = 100 in -def : Pat <(i32 (zextloadi16 (add (HexagonCONST32_GP tglobaladdr:$global), - u16ImmPred:$offset))), - (i32 (LDriuh_GP_V4 tglobaladdr:$global, u16ImmPred:$offset))>, - Requires<[HasV4T]>; - -// Map from load(globaladdress + x) -> memw(#foo + x) -let AddedComplexity = 100 in -def : Pat <(i32 (load (add (HexagonCONST32_GP tglobaladdr:$global), - u16ImmPred:$offset))), - (i32 (LDriw_GP_V4 tglobaladdr:$global, u16ImmPred:$offset))>, - Requires<[HasV4T]>; - //===----------------------------------------------------------------------===// // LD - @@ -1746,70 +971,18 @@ def : Pat <(i32 (load (add (HexagonCONST32_GP tglobaladdr:$global), /// last operand. /// +// Store doubleword. // memd(Re=#U6)=Rtt -def STrid_abs_setimm_V4 : STInst<(outs IntRegs:$dst1), - (ins DoubleRegs:$src1, u6Imm:$src2), - "memd($dst1=#$src2) = $src1", - []>, - Requires<[HasV4T]>; - -// memb(Re=#U6)=Rs -def STrib_abs_setimm_V4 : STInst<(outs IntRegs:$dst1), - (ins IntRegs:$src1, u6Imm:$src2), - "memb($dst1=#$src2) = $src1", - []>, - Requires<[HasV4T]>; - -// memh(Re=#U6)=Rs -def STrih_abs_setimm_V4 : STInst<(outs IntRegs:$dst1), - (ins IntRegs:$src1, u6Imm:$src2), - "memh($dst1=#$src2) = $src1", - []>, - Requires<[HasV4T]>; - -// memw(Re=#U6)=Rs -def STriw_abs_setimm_V4 : STInst<(outs IntRegs:$dst1), - (ins IntRegs:$src1, u6Imm:$src2), - "memw($dst1=#$src2) = $src1", - []>, - Requires<[HasV4T]>; - -// memd(Re=#U6)=Rtt -def STrid_abs_set_V4 : STInst<(outs IntRegs:$dst1), - (ins DoubleRegs:$src1, globaladdress:$src2), - "memd($dst1=##$src2) = $src1", - []>, - Requires<[HasV4T]>; - -// memb(Re=#U6)=Rs -def STrib_abs_set_V4 : STInst<(outs IntRegs:$dst1), - (ins IntRegs:$src1, globaladdress:$src2), - "memb($dst1=##$src2) = $src1", - []>, - Requires<[HasV4T]>; - -// memh(Re=#U6)=Rs -def STrih_abs_set_V4 : STInst<(outs IntRegs:$dst1), - (ins IntRegs:$src1, globaladdress:$src2), - "memh($dst1=##$src2) = $src1", - []>, - Requires<[HasV4T]>; - -// memw(Re=#U6)=Rs -def STriw_abs_set_V4 : STInst<(outs IntRegs:$dst1), - (ins IntRegs:$src1, globaladdress:$src2), - "memw($dst1=##$src2) = $src1", - []>, - Requires<[HasV4T]>; +// TODO: needs to be implemented +// memd(Rs+#s11:3)=Rtt // memd(Rs+Ru<<#u2)=Rtt let AddedComplexity = 10, isPredicable = 1 in def STrid_indexed_shl_V4 : STInst<(outs), (ins IntRegs:$src1, IntRegs:$src2, u2Imm:$src3, DoubleRegs:$src4), "memd($src1+$src2<<#$src3) = $src4", - [(store (i64 DoubleRegs:$src4), - (add (i32 IntRegs:$src1), - (shl (i32 IntRegs:$src2), u2ImmPred:$src3)))]>, + [(store DoubleRegs:$src4, (add IntRegs:$src1, + (shl IntRegs:$src2, u2ImmPred:$src3)))]>, Requires<[HasV4T]>; // 
// memd(Ru<<#u2+#U6)=Rtt
@@ -1817,9 +990,9 @@ let AddedComplexity = 10 in
def STrid_shl_V4 : STInst<(outs),
            (ins IntRegs:$src1, u2Imm:$src2, u6Imm:$src3, DoubleRegs:$src4),
            "memd($src1<<#$src2+#$src3) = $src4",
-           [(store (i64 DoubleRegs:$src4),
-                   (add (shl (i32 IntRegs:$src1), u2ImmPred:$src2),
-                        u6ImmPred:$src3))]>,
+           [(store DoubleRegs:$src4, (shl IntRegs:$src1,
+                                          (add u2ImmPred:$src2,
+                                               u6ImmPred:$src3)))]>,
            Requires<[HasV4T]>;

// memd(Rx++#s4:3)=Rtt
@@ -1836,8 +1009,7 @@ def STrid_shl_V4 : STInst<(outs),
// if ([!]Pv[.new]) memd(Rs+#u6:3)=Rtt
// if (Pv) memd(Rs+#u6:3)=Rtt
// if (Pv.new) memd(Rs+#u6:3)=Rtt
-let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1,
-    isPredicated = 1 in
+let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1 in
def STrid_cdnPt_V4 : STInst<(outs),
            (ins PredRegs:$src1, MEMri:$addr, DoubleRegs:$src2),
            "if ($src1.new) memd($addr) = $src2",
@@ -1846,8 +1018,7 @@ def STrid_cdnPt_V4 : STInst<(outs),

// if (!Pv) memd(Rs+#u6:3)=Rtt
// if (!Pv.new) memd(Rs+#u6:3)=Rtt
-let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1,
-    isPredicated = 1 in
+let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1 in
def STrid_cdnNotPt_V4 : STInst<(outs),
            (ins PredRegs:$src1, MEMri:$addr, DoubleRegs:$src2),
            "if (!$src1.new) memd($addr) = $src2",
@@ -1856,8 +1027,7 @@ def STrid_cdnNotPt_V4 : STInst<(outs),

// if (Pv) memd(Rs+#u6:3)=Rtt
// if (Pv.new) memd(Rs+#u6:3)=Rtt
-let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1,
-    isPredicated = 1 in
+let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1 in
def STrid_indexed_cdnPt_V4 : STInst<(outs),
            (ins PredRegs:$src1, IntRegs:$src2, u6_3Imm:$src3,
                 DoubleRegs:$src4),
@@ -1867,8 +1037,7 @@ def STrid_indexed_cdnPt_V4 : STInst<(outs),

// if (!Pv) memd(Rs+#u6:3)=Rtt
// if (!Pv.new) memd(Rs+#u6:3)=Rtt
-let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1,
-    isPredicated = 1 in
+let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1 in
def STrid_indexed_cdnNotPt_V4 : STInst<(outs),
            (ins PredRegs:$src1, IntRegs:$src2, u6_3Imm:$src3,
                 DoubleRegs:$src4),
@@ -1878,8 +1047,7 @@ def STrid_indexed_cdnNotPt_V4 : STInst<(outs),

// if ([!]Pv[.new]) memd(Rs+Ru<<#u2)=Rtt
// if (Pv) memd(Rs+Ru<<#u2)=Rtt
-let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1,
-    isPredicated = 1 in
+let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1 in
def STrid_indexed_shl_cPt_V4 : STInst<(outs),
            (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4,
                 DoubleRegs:$src5),
@@ -1888,17 +1056,15 @@ def STrid_indexed_shl_cPt_V4 : STInst<(outs),
            Requires<[HasV4T]>;

// if (Pv.new) memd(Rs+Ru<<#u2)=Rtt
-let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1,
-    isPredicated = 1 in
+let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1 in
def STrid_indexed_shl_cdnPt_V4 : STInst<(outs),
            (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4,
                 DoubleRegs:$src5),
-           "if ($src1.new) memd($src2+$src3<<#$src4) = $src5",
+           "if ($src1) memd($src2+$src3<<#$src4) = $src5",
            []>,
            Requires<[HasV4T]>;

// if (!Pv) memd(Rs+Ru<<#u2)=Rtt
-let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1,
-    isPredicated = 1 in
+let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1 in
def STrid_indexed_shl_cNotPt_V4 : STInst<(outs),
            (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4,
                 DoubleRegs:$src5),
@@ -1906,8 +1072,7 @@ def STrid_indexed_shl_cNotPt_V4 : STInst<(outs),
            []>,
            Requires<[HasV4T]>;

// if (!Pv.new) memd(Rs+Ru<<#u2)=Rtt
-let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1,
-    isPredicated = 1 in
+let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1 in
def STrid_indexed_shl_cdnNotPt_V4 : STInst<(outs),
            (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4,
                 DoubleRegs:$src5),
@@ -1918,8 +1083,7 @@ def STrid_indexed_shl_cdnNotPt_V4 : STInst<(outs),

// if ([!]Pv[.new]) memd(Rx++#s4:3)=Rtt
// if (Pv) memd(Rx++#s4:3)=Rtt
// if (Pv.new) memd(Rx++#s4:3)=Rtt
-let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1,
-    isPredicated = 1 in
+let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1 in
def POST_STdri_cdnPt_V4 : STInstPI<(outs IntRegs:$dst),
            (ins PredRegs:$src1, DoubleRegs:$src2, IntRegs:$src3,
                 s4_3Imm:$offset),
@@ -1930,8 +1094,7 @@ def POST_STdri_cdnPt_V4 : STInstPI<(outs IntRegs:$dst),

// if (!Pv) memd(Rx++#s4:3)=Rtt
// if (!Pv.new) memd(Rx++#s4:3)=Rtt
-let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1,
-    isPredicated = 1 in
+let AddedComplexity = 10, mayStore = 1, neverHasSideEffects = 1 in
def POST_STdri_cdnNotPt_V4 : STInstPI<(outs IntRegs:$dst),
            (ins PredRegs:$src1, DoubleRegs:$src2, IntRegs:$src3,
                 s4_3Imm:$offset),
@@ -1942,12 +1105,15 @@ def POST_STdri_cdnNotPt_V4 : STInstPI<(outs IntRegs:$dst),

// Store byte.

+// memb(Re=#U6)=Rt
+// TODO: needs to be implemented.
+// memb(Rs+#s11:0)=Rt
// memb(Rs+#u6:0)=#S8
let AddedComplexity = 10, isPredicable = 1 in
def STrib_imm_V4 : STInst<(outs),
            (ins IntRegs:$src1, u6_0Imm:$src2, s8Imm:$src3),
            "memb($src1+#$src2) = #$src3",
-           [(truncstorei8 s8ImmPred:$src3, (add (i32 IntRegs:$src1),
+           [(truncstorei8 s8ImmPred:$src3, (add IntRegs:$src1,
                                                 u6_0ImmPred:$src2))]>,
            Requires<[HasV4T]>;

@@ -1956,10 +1122,9 @@ let AddedComplexity = 10, isPredicable = 1 in
def STrib_indexed_shl_V4 : STInst<(outs),
            (ins IntRegs:$src1, IntRegs:$src2, u2Imm:$src3, IntRegs:$src4),
            "memb($src1+$src2<<#$src3) = $src4",
-           [(truncstorei8 (i32 IntRegs:$src4),
-                          (add (i32 IntRegs:$src1),
-                               (shl (i32 IntRegs:$src2),
-                                    u2ImmPred:$src3)))]>,
+           [(truncstorei8 IntRegs:$src4, (add IntRegs:$src1,
+                                              (shl IntRegs:$src2,
+                                                   u2ImmPred:$src3)))]>,
            Requires<[HasV4T]>;

// memb(Ru<<#u2+#U6)=Rt
@@ -1967,9 +1132,9 @@ let AddedComplexity = 10 in
def STrib_shl_V4 : STInst<(outs),
            (ins IntRegs:$src1, u2Imm:$src2, u6Imm:$src3, IntRegs:$src4),
            "memb($src1<<#$src2+#$src3) = $src4",
-           [(truncstorei8 (i32 IntRegs:$src4),
-                          (add (shl (i32 IntRegs:$src1), u2ImmPred:$src2),
-                               u6ImmPred:$src3))]>,
+           [(truncstorei8 IntRegs:$src4, (shl IntRegs:$src1,
+                                              (add u2ImmPred:$src2,
+                                                   u6ImmPred:$src3)))]>,
            Requires<[HasV4T]>;

// memb(Rx++#s4:0:circ(Mu))=Rt
@@ -1983,8 +1148,7 @@ def STrib_shl_V4 : STInst<(outs),
// if ([!]Pv[.new]) memb(#u6)=Rt
// if ([!]Pv[.new]) memb(Rs+#u6:0)=#S6
// if (Pv) memb(Rs+#u6:0)=#S6
-let mayStore = 1, neverHasSideEffects = 1,
-    isPredicated = 1 in
+let mayStore = 1, neverHasSideEffects = 1 in
def STrib_imm_cPt_V4 : STInst<(outs),
            (ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3, s6Imm:$src4),
            "if ($src1) memb($src2+#$src3) = #$src4",
@@ -1992,8 +1156,7 @@ def STrib_imm_cPt_V4 : STInst<(outs),
            Requires<[HasV4T]>;

// if (Pv.new) memb(Rs+#u6:0)=#S6
-let mayStore = 1, neverHasSideEffects = 1,
-    isPredicated = 1 in
+let mayStore = 1, neverHasSideEffects = 1 in
def STrib_imm_cdnPt_V4 : STInst<(outs),
            (ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3, s6Imm:$src4),
            "if ($src1.new) memb($src2+#$src3) = #$src4",
@@ -2001,8 +1164,7 @@ def STrib_imm_cdnPt_V4 : STInst<(outs),
            Requires<[HasV4T]>;

// if (!Pv) memb(Rs+#u6:0)=#S6
-let mayStore = 1, neverHasSideEffects = 1,
-    isPredicated = 1 in
+let mayStore = 1, neverHasSideEffects = 1 in
def STrib_imm_cNotPt_V4 : STInst<(outs),
            (ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3, s6Imm:$src4),
            "if (!$src1) memb($src2+#$src3) = #$src4",
@@ -2010,8 +1172,7 @@ def STrib_imm_cNotPt_V4 : STInst<(outs),
            Requires<[HasV4T]>;

// if (!Pv.new) memb(Rs+#u6:0)=#S6
-let mayStore = 1, neverHasSideEffects = 1,
-    isPredicated = 1 in
+let mayStore = 1, neverHasSideEffects = 1 in
def STrib_imm_cdnNotPt_V4 : STInst<(outs),
            (ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3, s6Imm:$src4),
            "if (!$src1.new) memb($src2+#$src3) = #$src4",
@@ -2021,8 +1182,7 @@ def STrib_imm_cdnNotPt_V4 : STInst<(outs),

// if ([!]Pv[.new]) memb(Rs+#u6:0)=Rt
// if (Pv) memb(Rs+#u6:0)=Rt
// if (Pv.new) memb(Rs+#u6:0)=Rt
-let mayStore = 1, neverHasSideEffects = 1,
-    isPredicated = 1 in
+let mayStore = 1, neverHasSideEffects = 1 in
def STrib_cdnPt_V4 : STInst<(outs),
            (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
            "if ($src1.new) memb($addr) = $src2",
@@ -2031,8 +1191,7 @@ def STrib_cdnPt_V4 : STInst<(outs),

// if (!Pv) memb(Rs+#u6:0)=Rt
// if (!Pv.new) memb(Rs+#u6:0)=Rt
-let mayStore = 1, neverHasSideEffects = 1,
-    isPredicated = 1 in
+let mayStore = 1, neverHasSideEffects = 1 in
def STrib_cdnNotPt_V4 : STInst<(outs),
            (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
            "if (!$src1.new) memb($addr) = $src2",
@@ -2042,8 +1201,7 @@ def STrib_cdnNotPt_V4 : STInst<(outs),

// if (Pv) memb(Rs+#u6:0)=Rt
// if (!Pv) memb(Rs+#u6:0)=Rt
// if (Pv.new) memb(Rs+#u6:0)=Rt
-let mayStore = 1, neverHasSideEffects = 1,
-    isPredicated = 1 in
+let mayStore = 1, neverHasSideEffects = 1 in
def STrib_indexed_cdnPt_V4 : STInst<(outs),
            (ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3, IntRegs:$src4),
            "if ($src1.new) memb($src2+#$src3) = $src4",
@@ -2051,8 +1209,7 @@ def STrib_indexed_cdnPt_V4 : STInst<(outs),
            Requires<[HasV4T]>;

// if (!Pv.new) memb(Rs+#u6:0)=Rt
-let mayStore = 1, neverHasSideEffects = 1,
-    isPredicated = 1 in
+let mayStore = 1, neverHasSideEffects = 1 in
def STrib_indexed_cdnNotPt_V4 : STInst<(outs),
            (ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3, IntRegs:$src4),
            "if (!$src1.new) memb($src2+#$src3) = $src4",
@@ -2061,8 +1218,7 @@ def STrib_indexed_cdnNotPt_V4 : STInst<(outs),

// if ([!]Pv[.new]) memb(Rs+Ru<<#u2)=Rt
// if (Pv) memb(Rs+Ru<<#u2)=Rt
-let mayStore = 1, AddedComplexity = 10,
-    isPredicated = 1 in
+let mayStore = 1, AddedComplexity = 10 in
def STrib_indexed_shl_cPt_V4 : STInst<(outs),
            (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4,
                 IntRegs:$src5),
@@ -2071,8 +1227,7 @@ def STrib_indexed_shl_cPt_V4 : STInst<(outs),
            Requires<[HasV4T]>;

// if (Pv.new) memb(Rs+Ru<<#u2)=Rt
-let mayStore = 1, AddedComplexity = 10,
-    isPredicated = 1 in
+let mayStore = 1, AddedComplexity = 10 in
def STrib_indexed_shl_cdnPt_V4 : STInst<(outs),
            (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4,
                 IntRegs:$src5),
@@ -2081,8 +1236,7 @@ def STrib_indexed_shl_cdnPt_V4 : STInst<(outs),
            Requires<[HasV4T]>;

// if (!Pv) memb(Rs+Ru<<#u2)=Rt
-let mayStore = 1, AddedComplexity = 10,
-    isPredicated = 1 in
+let mayStore = 1, AddedComplexity = 10 in
def STrib_indexed_shl_cNotPt_V4 : STInst<(outs),
            (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4,
                 IntRegs:$src5),
@@ -2091,8 +1245,7 @@ def STrib_indexed_shl_cNotPt_V4 : STInst<(outs),
            Requires<[HasV4T]>;

// if (!Pv.new) memb(Rs+Ru<<#u2)=Rt
-let mayStore = 1, AddedComplexity = 10,
-    isPredicated = 1 in
+let mayStore = 1, AddedComplexity = 10 in
def STrib_indexed_shl_cdnNotPt_V4 : STInst<(outs),
            (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4,
                 IntRegs:$src5),
@@ -2103,8 +1256,7 @@ def STrib_indexed_shl_cdnNotPt_V4 : STInst<(outs),

// if ([!]Pv[.new]) memb(Rx++#s4:0)=Rt
// if (Pv) memb(Rx++#s4:0)=Rt
// if (Pv.new) memb(Rx++#s4:0)=Rt
-let mayStore = 1, hasCtrlDep = 1,
-    isPredicated = 1 in
+let mayStore = 1, hasCtrlDep = 1 in
def POST_STbri_cdnPt_V4 : STInstPI<(outs IntRegs:$dst),
            (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4_0Imm:$offset),
            "if ($src1.new) memb($src3++#$offset) = $src2",
@@ -2113,8 +1265,7 @@ def POST_STbri_cdnPt_V4 : STInstPI<(outs IntRegs:$dst),

// if (!Pv) memb(Rx++#s4:0)=Rt
// if (!Pv.new) memb(Rx++#s4:0)=Rt
-let mayStore = 1, hasCtrlDep = 1,
-    isPredicated = 1 in
+let mayStore = 1, hasCtrlDep = 1 in
def POST_STbri_cdnNotPt_V4 : STInstPI<(outs IntRegs:$dst),
            (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4_0Imm:$offset),
            "if (!$src1.new) memb($src3++#$offset) = $src2",
@@ -2123,15 +1274,20 @@ def POST_STbri_cdnNotPt_V4 : STInstPI<(outs IntRegs:$dst),

// Store halfword.

-// TODO: needs to be implemented
// memh(Re=#U6)=Rt.H
+// TODO: needs to be implemented
+
+// memh(Re=#U6)=Rt
+// TODO: needs to be implemented
+
// memh(Rs+#s11:1)=Rt.H
+// memh(Rs+#s11:1)=Rt
// memh(Rs+#u6:1)=#S8
let AddedComplexity = 10, isPredicable = 1 in
def STrih_imm_V4 : STInst<(outs),
            (ins IntRegs:$src1, u6_1Imm:$src2, s8Imm:$src3),
            "memh($src1+#$src2) = #$src3",
-           [(truncstorei16 s8ImmPred:$src3, (add (i32 IntRegs:$src1),
+           [(truncstorei16 s8ImmPred:$src3, (add IntRegs:$src1,
                                                  u6_1ImmPred:$src2))]>,
            Requires<[HasV4T]>;

@@ -2143,10 +1299,9 @@ let AddedComplexity = 10, isPredicable = 1 in
def STrih_indexed_shl_V4 : STInst<(outs),
            (ins IntRegs:$src1, IntRegs:$src2, u2Imm:$src3, IntRegs:$src4),
            "memh($src1+$src2<<#$src3) = $src4",
-           [(truncstorei16 (i32 IntRegs:$src4),
-                           (add (i32 IntRegs:$src1),
-                                (shl (i32 IntRegs:$src2),
-                                     u2ImmPred:$src3)))]>,
+           [(truncstorei16 IntRegs:$src4, (add IntRegs:$src1,
+                                               (shl IntRegs:$src2,
+                                                    u2ImmPred:$src3)))]>,
            Requires<[HasV4T]>;

// memh(Ru<<#u2+#U6)=Rt.H
@@ -2155,9 +1310,9 @@ let AddedComplexity = 10 in
def STrih_shl_V4 : STInst<(outs),
            (ins IntRegs:$src1, u2Imm:$src2, u6Imm:$src3, IntRegs:$src4),
            "memh($src1<<#$src2+#$src3) = $src4",
-           [(truncstorei16 (i32 IntRegs:$src4),
-                           (add (shl (i32 IntRegs:$src1), u2ImmPred:$src2),
-                                u6ImmPred:$src3))]>,
+           [(truncstorei16 IntRegs:$src4, (shl IntRegs:$src1,
+                                               (add u2ImmPred:$src2,
+                                                    u6ImmPred:$src3)))]>,
            Requires<[HasV4T]>;

// memh(Rx++#s4:1:circ(Mu))=Rt.H
@@ -2168,14 +1323,17 @@ def STrih_shl_V4 : STInst<(outs),
// memh(Rx++Mu)=Rt
// memh(Rx++Mu:brev)=Rt.H
// memh(Rx++Mu:brev)=Rt
+// memh(gp+#u16:1)=Rt.H
// memh(gp+#u16:1)=Rt
+
+
+// Store halfword conditionally.

// if ([!]Pv[.new]) memh(#u6)=Rt.H
// if ([!]Pv[.new]) memh(#u6)=Rt
// if ([!]Pv[.new]) memh(Rs+#u6:1)=#S6
// if (Pv) memh(Rs+#u6:1)=#S6
-let mayStore = 1, neverHasSideEffects = 1,
-    isPredicated = 1 in
+let mayStore = 1, neverHasSideEffects = 1 in
def STrih_imm_cPt_V4 : STInst<(outs),
            (ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3, s6Imm:$src4),
            "if ($src1) memh($src2+#$src3) = #$src4",
@@ -2183,8 +1341,7 @@ def STrih_imm_cPt_V4 : STInst<(outs),
            Requires<[HasV4T]>;

// if (Pv.new) memh(Rs+#u6:1)=#S6
-let mayStore = 1, neverHasSideEffects = 1,
-    isPredicated = 1 in
+let mayStore = 1, neverHasSideEffects = 1 in
def STrih_imm_cdnPt_V4 : STInst<(outs),
            (ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3, s6Imm:$src4),
            "if ($src1.new) memh($src2+#$src3) = #$src4",
@@ -2192,8 +1349,7 @@ def STrih_imm_cdnPt_V4 : STInst<(outs),
            Requires<[HasV4T]>;

// if (!Pv) memh(Rs+#u6:1)=#S6
-let mayStore = 1, neverHasSideEffects = 1,
-    isPredicated = 1 in
+let mayStore = 1, neverHasSideEffects = 1 in
def STrih_imm_cNotPt_V4 : STInst<(outs),
            (ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3, s6Imm:$src4),
            "if (!$src1) memh($src2+#$src3) = #$src4",
@@ -2201,8 +1357,7 @@ def STrih_imm_cNotPt_V4 : STInst<(outs),
            Requires<[HasV4T]>;

// if (!Pv.new) memh(Rs+#u6:1)=#S6
-let mayStore = 1, neverHasSideEffects = 1,
-    isPredicated = 1 in
+let mayStore = 1, neverHasSideEffects = 1 in
def STrih_imm_cdnNotPt_V4 : STInst<(outs),
            (ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3, s6Imm:$src4),
            "if (!$src1.new) memh($src2+#$src3) = #$src4",
@@ -2215,8 +1370,7 @@ def STrih_imm_cdnNotPt_V4 : STInst<(outs),

// if ([!]Pv[.new]) memh(Rs+#u6:1)=Rt
// if (Pv) memh(Rs+#u6:1)=Rt
// if (Pv.new) memh(Rs+#u6:1)=Rt
-let mayStore = 1, neverHasSideEffects = 1,
-    isPredicated = 1 in
+let mayStore = 1, neverHasSideEffects = 1 in
def STrih_cdnPt_V4 : STInst<(outs),
            (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
            "if ($src1.new) memh($addr) = $src2",
@@ -2225,8 +1379,7 @@ def STrih_cdnPt_V4 : STInst<(outs),

// if (!Pv) memh(Rs+#u6:1)=Rt
// if (!Pv.new) memh(Rs+#u6:1)=Rt
-let mayStore = 1, neverHasSideEffects = 1,
-    isPredicated = 1 in
+let mayStore = 1, neverHasSideEffects = 1 in
def STrih_cdnNotPt_V4 : STInst<(outs),
            (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
            "if (!$src1.new) memh($addr) = $src2",
@@ -2234,8 +1387,7 @@ def STrih_cdnNotPt_V4 : STInst<(outs),
            Requires<[HasV4T]>;

// if (Pv.new) memh(Rs+#u6:1)=Rt
-let mayStore = 1, neverHasSideEffects = 1,
-    isPredicated = 1 in
+let mayStore = 1, neverHasSideEffects = 1 in
def STrih_indexed_cdnPt_V4 : STInst<(outs),
            (ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3, IntRegs:$src4),
            "if ($src1.new) memh($src2+#$src3) = $src4",
@@ -2243,8 +1395,7 @@ def STrih_indexed_cdnPt_V4 : STInst<(outs),
            Requires<[HasV4T]>;

// if (!Pv.new) memh(Rs+#u6:1)=Rt
-let mayStore = 1, neverHasSideEffects = 1,
-    isPredicated = 1 in
+let mayStore = 1, neverHasSideEffects = 1 in
def STrih_indexed_cdnNotPt_V4 : STInst<(outs),
            (ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3, IntRegs:$src4),
            "if (!$src1.new) memh($src2+#$src3) = $src4",
@@ -2254,8 +1405,7 @@ def STrih_indexed_cdnNotPt_V4 : STInst<(outs),

// if ([!]Pv[.new]) memh(Rs+Ru<<#u2)=Rt.H
// if ([!]Pv[.new]) memh(Rs+Ru<<#u2)=Rt
// if (Pv) memh(Rs+Ru<<#u2)=Rt
-let mayStore = 1, AddedComplexity = 10,
-    isPredicated = 1 in
+let mayStore = 1, AddedComplexity = 10 in
def STrih_indexed_shl_cPt_V4 : STInst<(outs),
            (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4,
                 IntRegs:$src5),
@@ -2264,8 +1414,6 @@ def STrih_indexed_shl_cPt_V4 : STInst<(outs),
            Requires<[HasV4T]>;

// if (Pv.new) memh(Rs+Ru<<#u2)=Rt
-let mayStore = 1, AddedComplexity = 10,
-    isPredicated = 1 in
def STrih_indexed_shl_cdnPt_V4 : STInst<(outs),
            (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4,
                 IntRegs:$src5),
@@ -2274,8 +1422,7 @@ def STrih_indexed_shl_cdnPt_V4 : STInst<(outs),
            Requires<[HasV4T]>;

// if (!Pv) memh(Rs+Ru<<#u2)=Rt
-let mayStore = 1, AddedComplexity = 10,
-    isPredicated = 1 in
+let mayStore = 1, AddedComplexity = 10 in
def STrih_indexed_shl_cNotPt_V4 : STInst<(outs),
            (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4,
                 IntRegs:$src5),
@@ -2284,8 +1431,7 @@ def STrih_indexed_shl_cNotPt_V4 : STInst<(outs),
            Requires<[HasV4T]>;

// if (!Pv.new) memh(Rs+Ru<<#u2)=Rt
-let mayStore = 1, AddedComplexity = 10,
-    isPredicated = 1 in
+let mayStore = 1, AddedComplexity = 10 in
def STrih_indexed_shl_cdnNotPt_V4 : STInst<(outs),
            (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4,
                 IntRegs:$src5),
@@ -2299,8 +1445,7 @@ def STrih_indexed_shl_cdnNotPt_V4 : STInst<(outs),

// if ([!]Pv[.new]) memh(Rx++#s4:1)=Rt
// if (Pv) memh(Rx++#s4:1)=Rt
// if (Pv.new) memh(Rx++#s4:1)=Rt
-let mayStore = 1, hasCtrlDep = 1,
-    isPredicated = 1 in
+let mayStore = 1, hasCtrlDep = 1 in
def POST_SThri_cdnPt_V4 : STInstPI<(outs IntRegs:$dst),
            (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4_1Imm:$offset),
            "if ($src1.new) memh($src3++#$offset) = $src2",
@@ -2309,8 +1454,7 @@ def POST_SThri_cdnPt_V4 : STInstPI<(outs IntRegs:$dst),

// if (!Pv) memh(Rx++#s4:1)=Rt
// if (!Pv.new) memh(Rx++#s4:1)=Rt
-let mayStore = 1, hasCtrlDep = 1,
-    isPredicated = 1 in
+let mayStore = 1, hasCtrlDep = 1 in
def POST_SThri_cdnNotPt_V4 : STInstPI<(outs IntRegs:$dst),
            (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4_1Imm:$offset),
            "if (!$src1.new) memh($src3++#$offset) = $src2",
@@ -2322,22 +1466,13 @@ def POST_SThri_cdnNotPt_V4 : STInstPI<(outs IntRegs:$dst),

// memw(Re=#U6)=Rt
// TODO: Needs to be implemented.

-// Store predicate:
-let mayStore = 1, neverHasSideEffects = 1 in
-def STriw_pred_V4 : STInst<(outs),
-            (ins MEMri:$addr, PredRegs:$src1),
-            "Error; should not emit",
-            []>,
-            Requires<[HasV4T]>;
-
-
+// memw(Rs+#s11:2)=Rt
// memw(Rs+#u6:2)=#S8
let AddedComplexity = 10, isPredicable = 1 in
def STriw_imm_V4 : STInst<(outs),
            (ins IntRegs:$src1, u6_2Imm:$src2, s8Imm:$src3),
            "memw($src1+#$src2) = #$src3",
-           [(store s8ImmPred:$src3, (add (i32 IntRegs:$src1),
-                                         u6_2ImmPred:$src2))]>,
+           [(store s8ImmPred:$src3, (add IntRegs:$src1, u6_2ImmPred:$src2))]>,
            Requires<[HasV4T]>;

// memw(Rs+Ru<<#u2)=Rt
@@ -2345,9 +1480,8 @@ let AddedComplexity = 10, isPredicable = 1 in
def STriw_indexed_shl_V4 : STInst<(outs),
            (ins IntRegs:$src1, IntRegs:$src2, u2Imm:$src3, IntRegs:$src4),
            "memw($src1+$src2<<#$src3) = $src4",
-           [(store (i32 IntRegs:$src4), (add (i32 IntRegs:$src1),
-                                             (shl (i32 IntRegs:$src2),
-                                                  u2ImmPred:$src3)))]>,
+           [(store IntRegs:$src4, (add IntRegs:$src1,
+                                       (shl IntRegs:$src2, u2ImmPred:$src3)))]>,
            Requires<[HasV4T]>;

// memw(Ru<<#u2+#U6)=Rt
@@ -2355,9 +1489,8 @@ let AddedComplexity = 10 in
def STriw_shl_V4 : STInst<(outs),
            (ins IntRegs:$src1, u2Imm:$src2, u6Imm:$src3, IntRegs:$src4),
            "memw($src1<<#$src2+#$src3) = $src4",
-           [(store (i32 IntRegs:$src4),
-                   (add (shl (i32 IntRegs:$src1), u2ImmPred:$src2),
-                        u6ImmPred:$src3))]>,
+           [(store IntRegs:$src4, (shl IntRegs:$src1,
+                                       (add u2ImmPred:$src2, u6ImmPred:$src3)))]>,
            Requires<[HasV4T]>;

// memw(Rx++#s4:2)=Rt
@@ -2369,11 +1502,12 @@ def STriw_shl_V4 : STInst<(outs),

// Store word conditionally.

+// if ([!]Pv[.new]) memw(#u6)=Rt
+// TODO: Needs to be implemented.
// if ([!]Pv[.new]) memw(Rs+#u6:2)=#S6
// if (Pv) memw(Rs+#u6:2)=#S6
-let mayStore = 1, neverHasSideEffects = 1,
-    isPredicated = 1 in
+let mayStore = 1, neverHasSideEffects = 1 in
def STriw_imm_cPt_V4 : STInst<(outs),
            (ins PredRegs:$src1, IntRegs:$src2, u6_2Imm:$src3, s6Imm:$src4),
            "if ($src1) memw($src2+#$src3) = #$src4",
@@ -2381,8 +1515,7 @@ def STriw_imm_cPt_V4 : STInst<(outs),
            Requires<[HasV4T]>;

// if (Pv.new) memw(Rs+#u6:2)=#S6
-let mayStore = 1, neverHasSideEffects = 1,
-    isPredicated = 1 in
+let mayStore = 1, neverHasSideEffects = 1 in
def STriw_imm_cdnPt_V4 : STInst<(outs),
            (ins PredRegs:$src1, IntRegs:$src2, u6_2Imm:$src3, s6Imm:$src4),
            "if ($src1.new) memw($src2+#$src3) = #$src4",
@@ -2390,8 +1523,7 @@ def STriw_imm_cdnPt_V4 : STInst<(outs),
            Requires<[HasV4T]>;

// if (!Pv) memw(Rs+#u6:2)=#S6
-let mayStore = 1, neverHasSideEffects = 1,
-    isPredicated = 1 in
+let mayStore = 1, neverHasSideEffects = 1 in
def STriw_imm_cNotPt_V4 : STInst<(outs),
            (ins PredRegs:$src1, IntRegs:$src2, u6_2Imm:$src3, s6Imm:$src4),
            "if (!$src1) memw($src2+#$src3) = #$src4",
@@ -2399,8 +1531,7 @@ def STriw_imm_cNotPt_V4 : STInst<(outs),
            Requires<[HasV4T]>;

// if (!Pv.new) memw(Rs+#u6:2)=#S6
-let mayStore = 1, neverHasSideEffects = 1,
-    isPredicated = 1 in
+let mayStore = 1, neverHasSideEffects = 1 in
def STriw_imm_cdnNotPt_V4 : STInst<(outs),
            (ins PredRegs:$src1, IntRegs:$src2, u6_2Imm:$src3, s6Imm:$src4),
            "if (!$src1.new) memw($src2+#$src3) = #$src4",
@@ -2410,8 +1541,7 @@ def STriw_imm_cdnNotPt_V4 : STInst<(outs),

// if ([!]Pv[.new]) memw(Rs+#u6:2)=Rt
// if (Pv) memw(Rs+#u6:2)=Rt
// if (Pv.new) memw(Rs+#u6:2)=Rt
-let mayStore = 1, neverHasSideEffects = 1,
-    isPredicated = 1 in
+let mayStore = 1, neverHasSideEffects = 1 in
def STriw_cdnPt_V4 : STInst<(outs),
            (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
            "if ($src1.new) memw($addr) = $src2",
@@ -2420,8 +1550,7 @@ def STriw_cdnPt_V4 : STInst<(outs),

// if (!Pv) memw(Rs+#u6:2)=Rt
// if (!Pv.new) memw(Rs+#u6:2)=Rt
-let mayStore = 1, neverHasSideEffects = 1,
-    isPredicated = 1 in
+let mayStore = 1, neverHasSideEffects = 1 in
def STriw_cdnNotPt_V4 : STInst<(outs),
            (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
            "if (!$src1.new) memw($addr) = $src2",
@@ -2431,8 +1560,7 @@ def STriw_cdnNotPt_V4 : STInst<(outs),

// if (Pv) memw(Rs+#u6:2)=Rt
// if (!Pv) memw(Rs+#u6:2)=Rt
// if (Pv.new) memw(Rs+#u6:2)=Rt
-let mayStore = 1, neverHasSideEffects = 1,
-    isPredicated = 1 in
+let mayStore = 1, neverHasSideEffects = 1 in
def STriw_indexed_cdnPt_V4 : STInst<(outs),
            (ins PredRegs:$src1, IntRegs:$src2, u6_2Imm:$src3, IntRegs:$src4),
            "if ($src1.new) memw($src2+#$src3) = $src4",
@@ -2440,8 +1568,7 @@ def STriw_indexed_cdnPt_V4 : STInst<(outs),
            Requires<[HasV4T]>;

// if (!Pv.new) memw(Rs+#u6:2)=Rt
-let mayStore = 1, neverHasSideEffects = 1,
-    isPredicated = 1 in
+let mayStore = 1, neverHasSideEffects = 1 in
def STriw_indexed_cdnNotPt_V4 : STInst<(outs),
            (ins PredRegs:$src1, IntRegs:$src2, u6_2Imm:$src3, IntRegs:$src4),
            "if (!$src1.new) memw($src2+#$src3) = $src4",
@@ -2450,8 +1577,7 @@ def STriw_indexed_cdnNotPt_V4 : STInst<(outs),

// if ([!]Pv[.new]) memw(Rs+Ru<<#u2)=Rt
// if (Pv) memw(Rs+Ru<<#u2)=Rt
-let mayStore = 1, AddedComplexity = 10,
-    isPredicated = 1 in
+let mayStore = 1, AddedComplexity = 10 in
def STriw_indexed_shl_cPt_V4 : STInst<(outs),
            (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4,
                 IntRegs:$src5),
@@ -2460,8 +1586,7 @@ def STriw_indexed_shl_cPt_V4 : STInst<(outs),
            Requires<[HasV4T]>;

// if (Pv.new) memw(Rs+Ru<<#u2)=Rt
-let mayStore = 1, AddedComplexity = 10,
-    isPredicated = 1 in
+let mayStore = 1, AddedComplexity = 10 in
def STriw_indexed_shl_cdnPt_V4 : STInst<(outs),
            (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4,
                 IntRegs:$src5),
@@ -2470,8 +1595,7 @@ def STriw_indexed_shl_cdnPt_V4 : STInst<(outs),
            Requires<[HasV4T]>;

// if (!Pv) memw(Rs+Ru<<#u2)=Rt
-let mayStore = 1, AddedComplexity = 10,
-    isPredicated = 1 in
+let mayStore = 1, AddedComplexity = 10 in
def STriw_indexed_shl_cNotPt_V4 : STInst<(outs),
            (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4,
                 IntRegs:$src5),
@@ -2480,8 +1604,7 @@ def STriw_indexed_shl_cNotPt_V4 : STInst<(outs),
            Requires<[HasV4T]>;

// if (!Pv.new) memw(Rs+Ru<<#u2)=Rt
-let mayStore = 1, AddedComplexity = 10,
-    isPredicated = 1 in
+let mayStore = 1, AddedComplexity = 10 in
def STriw_indexed_shl_cdnNotPt_V4 : STInst<(outs),
            (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4,
                 IntRegs:$src5),
@@ -2492,8 +1615,7 @@ def STriw_indexed_shl_cdnNotPt_V4 : STInst<(outs),

// if ([!]Pv[.new]) memw(Rx++#s4:2)=Rt
// if (Pv) memw(Rx++#s4:2)=Rt
// if (Pv.new) memw(Rx++#s4:2)=Rt
-let mayStore = 1, hasCtrlDep = 1,
-    isPredicated = 1 in
+let mayStore = 1, hasCtrlDep = 1 in
def POST_STwri_cdnPt_V4 : STInstPI<(outs IntRegs:$dst),
            (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4_2Imm:$offset),
            "if ($src1.new) memw($src3++#$offset) = $src2",
@@ -2502,8 +1624,7 @@ def POST_STwri_cdnPt_V4 : STInstPI<(outs IntRegs:$dst),

// if (!Pv) memw(Rx++#s4:2)=Rt
// if (!Pv.new) memw(Rx++#s4:2)=Rt
-let mayStore = 1, hasCtrlDep = 1,
-    isPredicated = 1 in
+let mayStore = 1, hasCtrlDep = 1 in
def POST_STwri_cdnNotPt_V4 : STInstPI<(outs IntRegs:$dst),
            (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4_2Imm:$offset),
            "if (!$src1.new) memw($src3++#$offset) = $src2",
@@ -2511,439 +1632,6 @@ def POST_STwri_cdnNotPt_V4 : STInstPI<(outs IntRegs:$dst),
            Requires<[HasV4T]>;

-/// store to global address
-
-let isPredicable = 1, mayStore = 1, neverHasSideEffects = 1 in
-def STrid_GP_V4 : STInst<(outs),
-            (ins globaladdress:$global, u16Imm:$offset, DoubleRegs:$src),
-            "memd(#$global+$offset) = $src",
-            []>,
-            Requires<[HasV4T]>;
-
-let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
-def STrid_GP_cPt_V4 : STInst<(outs),
-            (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset,
-                 DoubleRegs:$src2),
-            "if ($src1) memd(##$global+$offset) = $src2",
-            []>,
-            Requires<[HasV4T]>;
-
-let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
-def STrid_GP_cNotPt_V4 : STInst<(outs),
-            (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset,
-                 DoubleRegs:$src2),
-            "if (!$src1) memd(##$global+$offset) = $src2",
-            []>,
-            Requires<[HasV4T]>;
-
-let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
-def STrid_GP_cdnPt_V4 : STInst<(outs),
-            (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset,
-                 DoubleRegs:$src2),
-            "if ($src1.new) memd(##$global+$offset) = $src2",
-            []>,
-            Requires<[HasV4T]>;
-
-let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
-def STrid_GP_cdnNotPt_V4 : STInst<(outs),
-            (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset,
-                 DoubleRegs:$src2),
-            "if (!$src1.new) memd(##$global+$offset) = $src2",
-            []>,
-            Requires<[HasV4T]>;
-
-let isPredicable = 1, mayStore = 1, neverHasSideEffects = 1 in
-def STrib_GP_V4 : STInst<(outs),
-            (ins globaladdress:$global, u16Imm:$offset, IntRegs:$src),
-            "memb(#$global+$offset) = $src",
-            []>,
-            Requires<[HasV4T]>;
-
-let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
-def STrib_GP_cPt_V4 : STInst<(outs),
-            (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset,
-                 IntRegs:$src2),
-            "if ($src1) memb(##$global+$offset) = $src2",
-            []>,
-            Requires<[HasV4T]>;
-
-let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
-def STrib_GP_cNotPt_V4 : STInst<(outs),
-            (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset,
-                 IntRegs:$src2),
-            "if (!$src1) memb(##$global+$offset) = $src2",
-            []>,
-            Requires<[HasV4T]>;
-
-let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
-def STrib_GP_cdnPt_V4 : STInst<(outs),
-            (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset,
-                 IntRegs:$src2),
-            "if ($src1.new) memb(##$global+$offset) = $src2",
-            []>,
-            Requires<[HasV4T]>;
-
-let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
-def STrib_GP_cdnNotPt_V4 : STInst<(outs),
-            (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset,
-                 IntRegs:$src2),
-            "if (!$src1.new) memb(##$global+$offset) = $src2",
-            []>,
-            Requires<[HasV4T]>;
-
-let isPredicable = 1, mayStore = 1, neverHasSideEffects = 1 in
-def STrih_GP_V4 : STInst<(outs),
-            (ins globaladdress:$global, u16Imm:$offset, IntRegs:$src),
-            "memh(#$global+$offset) = $src",
-            []>,
-            Requires<[HasV4T]>;
-
-let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
-def STrih_GP_cPt_V4 : STInst<(outs),
-            (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset,
-                 IntRegs:$src2),
-            "if ($src1) memh(##$global+$offset) = $src2",
-            []>,
-            Requires<[HasV4T]>;
-
-let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
-def STrih_GP_cNotPt_V4 : STInst<(outs),
-            (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset,
-                 IntRegs:$src2),
-            "if (!$src1) memh(##$global+$offset) = $src2",
-            []>,
-            Requires<[HasV4T]>;
-
-let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
-def STrih_GP_cdnPt_V4 : STInst<(outs),
-            (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset,
-                 IntRegs:$src2),
-            "if ($src1.new) memh(##$global+$offset) = $src2",
-            []>,
-            Requires<[HasV4T]>;
-
-let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
-def STrih_GP_cdnNotPt_V4 : STInst<(outs),
-            (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset,
-                 IntRegs:$src2),
-            "if (!$src1.new) memh(##$global+$offset) = $src2",
-            []>,
-            Requires<[HasV4T]>;
-
-let isPredicable = 1, mayStore = 1, neverHasSideEffects = 1 in
-def STriw_GP_V4 : STInst<(outs),
-            (ins globaladdress:$global, u16Imm:$offset, IntRegs:$src),
-            "memw(#$global+$offset) = $src",
-            []>,
-            Requires<[HasV4T]>;
-
-let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
-def STriw_GP_cPt_V4 : STInst<(outs),
-            (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset,
-                 IntRegs:$src2),
-            "if ($src1) memw(##$global+$offset) = $src2",
-            []>,
-            Requires<[HasV4T]>;
-
-let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
-def STriw_GP_cNotPt_V4 : STInst<(outs),
-            (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset,
-                 IntRegs:$src2),
-            "if (!$src1) memw(##$global+$offset) = $src2",
-            []>,
-            Requires<[HasV4T]>;
-
-let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
-def STriw_GP_cdnPt_V4 : STInst<(outs),
-            (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset,
-                 IntRegs:$src2),
-            "if ($src1.new) memw(##$global+$offset) = $src2",
-            []>,
-            Requires<[HasV4T]>;
-
-let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
-def STriw_GP_cdnNotPt_V4 : STInst<(outs),
-            (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset,
-                 IntRegs:$src2),
-            "if (!$src1.new) memw(##$global+$offset) = $src2",
-            []>,
-            Requires<[HasV4T]>;
-
-// memd(#global)=Rtt
-let isPredicable = 1, mayStore = 1, neverHasSideEffects = 1 in
-def STd_GP_V4 : STInst<(outs),
-            (ins globaladdress:$global, DoubleRegs:$src),
-            "memd(#$global) = $src",
-            []>,
-            Requires<[HasV4T]>;
-
-// if (Pv) memd(##global) = Rtt
-let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
-def STd_GP_cPt_V4 : STInst<(outs),
-            (ins PredRegs:$src1, globaladdress:$global, DoubleRegs:$src2),
-            "if ($src1) memd(##$global) = $src2",
-            []>,
-            Requires<[HasV4T]>;
-
-// if (!Pv) memd(##global) = Rtt
-let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
-def STd_GP_cNotPt_V4 : STInst<(outs),
-            (ins PredRegs:$src1, globaladdress:$global, DoubleRegs:$src2),
-            "if (!$src1) memd(##$global) = $src2",
-            []>,
-            Requires<[HasV4T]>;
-
-// if (Pv) memd(##global) = Rtt
-let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
-def STd_GP_cdnPt_V4 : STInst<(outs),
-            (ins PredRegs:$src1, globaladdress:$global, DoubleRegs:$src2),
-            "if ($src1.new) memd(##$global) = $src2",
-            []>,
-            Requires<[HasV4T]>;
-
-// if (!Pv) memd(##global) = Rtt
-let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
-def STd_GP_cdnNotPt_V4 : STInst<(outs),
-            (ins PredRegs:$src1, globaladdress:$global, DoubleRegs:$src2),
-            "if (!$src1.new) memd(##$global) = $src2",
-            []>,
-            Requires<[HasV4T]>;
-
-// memb(#global)=Rt
-let isPredicable = 1, mayStore = 1, neverHasSideEffects = 1 in
-def STb_GP_V4 : STInst<(outs),
-            (ins globaladdress:$global, IntRegs:$src),
-            "memb(#$global) = $src",
-            []>,
-            Requires<[HasV4T]>;
-
-// if (Pv) memb(##global) = Rt
-let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
-def STb_GP_cPt_V4 : STInst<(outs),
-            (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2),
-            "if ($src1) memb(##$global) = $src2",
-            []>,
-            Requires<[HasV4T]>;
-
-// if (!Pv) memb(##global) = Rt
-let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
-def STb_GP_cNotPt_V4 : STInst<(outs),
-            (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2),
-            "if (!$src1) memb(##$global) = $src2",
-            []>,
-            Requires<[HasV4T]>;
-
-// if (Pv) memb(##global) = Rt
-let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
-def STb_GP_cdnPt_V4 : STInst<(outs),
-            (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2),
-            "if ($src1.new) memb(##$global) = $src2",
-            []>,
-            Requires<[HasV4T]>;
-
-// if (!Pv) memb(##global) = Rt
-let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
-def STb_GP_cdnNotPt_V4 : STInst<(outs),
-            (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2),
-            "if (!$src1.new) memb(##$global) = $src2",
-            []>,
-            Requires<[HasV4T]>;
-
-// memh(#global)=Rt
-let isPredicable = 1, mayStore = 1, neverHasSideEffects = 1 in
-def STh_GP_V4 : STInst<(outs),
-            (ins globaladdress:$global, IntRegs:$src),
-            "memh(#$global) = $src",
-            []>,
-            Requires<[HasV4T]>;
-
-// if (Pv) memh(##global) = Rt
-let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
-def STh_GP_cPt_V4 : STInst<(outs),
-            (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2),
-            "if ($src1) memh(##$global) = $src2",
-            []>,
-            Requires<[HasV4T]>;
-
-// if (!Pv) memh(##global) = Rt
-let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
-def STh_GP_cNotPt_V4 : STInst<(outs),
-            (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2),
-            "if (!$src1) memh(##$global) = $src2",
-            []>,
-            Requires<[HasV4T]>;
-
-// if (Pv) memh(##global) = Rt
-let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
-def STh_GP_cdnPt_V4 : STInst<(outs),
-            (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2),
-            "if ($src1.new) memh(##$global) = $src2",
-            []>,
-            Requires<[HasV4T]>;
-
-// if (!Pv) memh(##global) = Rt
-let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
-def STh_GP_cdnNotPt_V4 : STInst<(outs),
-            (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2),
-            "if (!$src1.new) memh(##$global) = $src2",
-            []>,
-            Requires<[HasV4T]>;
-
-// memw(#global)=Rt
-let isPredicable = 1, mayStore = 1, neverHasSideEffects = 1 in
-def STw_GP_V4 : STInst<(outs),
-            (ins globaladdress:$global, IntRegs:$src),
-            "memw(#$global) = $src",
-            []>,
-            Requires<[HasV4T]>;
-
-// if (Pv) memw(##global) = Rt
-let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
-def STw_GP_cPt_V4 : STInst<(outs),
-            (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2),
-            "if ($src1) memw(##$global) = $src2",
-            []>,
-            Requires<[HasV4T]>;
-
-// if (!Pv) memw(##global) = Rt
-let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
-def STw_GP_cNotPt_V4 : STInst<(outs),
-            (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2),
-            "if (!$src1) memw(##$global) = $src2",
-            []>,
-            Requires<[HasV4T]>;
-
-// if (Pv) memw(##global) = Rt
-let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
-def STw_GP_cdnPt_V4 : STInst<(outs),
-            (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2),
-            "if ($src1.new) memw(##$global) = $src2",
-            []>,
-            Requires<[HasV4T]>;
-
-// if (!Pv) memw(##global) = Rt
-let mayStore = 1, neverHasSideEffects = 1, isPredicated = 1 in
-def STw_GP_cdnNotPt_V4 : STInst<(outs),
-            (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2),
-            "if (!$src1.new) memw(##$global) = $src2",
-            []>,
-            Requires<[HasV4T]>;
-
-// 64 bit atomic store
-def : Pat <(atomic_store_64 (HexagonCONST32_GP tglobaladdr:$global),
-                            (i64 DoubleRegs:$src1)),
-            (STd_GP_V4 tglobaladdr:$global, (i64 DoubleRegs:$src1))>,
-            Requires<[HasV4T]>;
-
-// Map from store(globaladdress) -> memd(#foo)
-let AddedComplexity = 100 in
-def : Pat <(store (i64 DoubleRegs:$src1), (HexagonCONST32_GP tglobaladdr:$global)),
-            (STd_GP_V4 tglobaladdr:$global, (i64 DoubleRegs:$src1))>,
-            Requires<[HasV4T]>;
-
-// 8 bit atomic store
-def : Pat < (atomic_store_8 (HexagonCONST32_GP tglobaladdr:$global),
-                            (i32 IntRegs:$src1)),
-            (STb_GP_V4 tglobaladdr:$global, (i32 IntRegs:$src1))>,
-            Requires<[HasV4T]>;
-
-// Map from store(globaladdress) -> memb(#foo)
-let AddedComplexity = 100 in
-def : Pat<(truncstorei8 (i32 IntRegs:$src1),
-                        (HexagonCONST32_GP tglobaladdr:$global)),
-            (STb_GP_V4 tglobaladdr:$global, (i32 IntRegs:$src1))>,
-            Requires<[HasV4T]>;
-
-// Map from "i1 = constant<-1>; memw(CONST32(#foo)) = i1"
-// to "r0 = 1; memw(#foo) = r0"
-let AddedComplexity = 100 in
-def : Pat<(store (i1 -1), (HexagonCONST32_GP tglobaladdr:$global)),
-            (STb_GP_V4 tglobaladdr:$global, (TFRI 1))>,
-            Requires<[HasV4T]>;
-
-def : Pat<(atomic_store_16 (HexagonCONST32_GP tglobaladdr:$global),
-                           (i32 IntRegs:$src1)),
-            (STh_GP_V4 tglobaladdr:$global, (i32 IntRegs:$src1))>,
-            Requires<[HasV4T]>;
-
-// Map from store(globaladdress) -> memh(#foo)
-let AddedComplexity = 100 in
-def : Pat<(truncstorei16 (i32 IntRegs:$src1),
-                         (HexagonCONST32_GP tglobaladdr:$global)),
-            (STh_GP_V4 tglobaladdr:$global, (i32 IntRegs:$src1))>,
-            Requires<[HasV4T]>;
-
-// 32 bit atomic store
-def : Pat<(atomic_store_32 (HexagonCONST32_GP tglobaladdr:$global),
-                           (i32 IntRegs:$src1)),
-            (STw_GP_V4 tglobaladdr:$global, (i32 IntRegs:$src1))>,
-            Requires<[HasV4T]>;
-
-// Map from store(globaladdress) -> memw(#foo)
-let AddedComplexity = 100 in
-def : Pat<(store (i32 IntRegs:$src1), (HexagonCONST32_GP tglobaladdr:$global)),
-            (STw_GP_V4 tglobaladdr:$global, (i32 IntRegs:$src1))>,
-            Requires<[HasV4T]>;
-
-def : Pat<(atomic_store_64 (add (HexagonCONST32_GP tglobaladdr:$global),
-                                u16ImmPred:$offset),
-                           (i64 DoubleRegs:$src1)),
-            (STrid_GP_V4 tglobaladdr:$global, u16ImmPred:$offset,
-                         (i64 DoubleRegs:$src1))>,
-            Requires<[HasV4T]>;
-
-def : Pat<(atomic_store_32 (add (HexagonCONST32_GP tglobaladdr:$global),
-                                u16ImmPred:$offset),
-                           (i32 IntRegs:$src1)),
-            (STriw_GP_V4 tglobaladdr:$global, u16ImmPred:$offset, (i32 IntRegs:$src1))>,
-            Requires<[HasV4T]>;
-
-def : Pat<(atomic_store_16 (add (HexagonCONST32_GP tglobaladdr:$global),
-                                u16ImmPred:$offset),
-                           (i32 IntRegs:$src1)),
-            (STrih_GP_V4 tglobaladdr:$global, u16ImmPred:$offset, (i32 IntRegs:$src1))>,
-            Requires<[HasV4T]>;
-
-def : Pat<(atomic_store_8 (add (HexagonCONST32_GP tglobaladdr:$global),
-                               u16ImmPred:$offset),
-                          (i32 IntRegs:$src1)),
-            (STrib_GP_V4 tglobaladdr:$global, u16ImmPred:$offset, (i32 IntRegs:$src1))>,
-            Requires<[HasV4T]>;
-
-// Map from store(globaladdress + x) -> memd(#foo + x)
-let AddedComplexity = 100 in
-def : Pat<(store (i64 DoubleRegs:$src1), (add (HexagonCONST32_GP tglobaladdr:$global),
-                                              u16ImmPred:$offset)),
-            (STrid_GP_V4 tglobaladdr:$global, u16ImmPred:$offset,
-                         (i64 DoubleRegs:$src1))>,
-            Requires<[HasV4T]>;
-
-// Map from store(globaladdress + x) -> memb(#foo + x)
-let AddedComplexity = 100 in
-def : Pat<(truncstorei8 (i32 IntRegs:$src1),
-                        (add (HexagonCONST32_GP tglobaladdr:$global),
-                             u16ImmPred:$offset)),
-            (STrib_GP_V4 tglobaladdr:$global, u16ImmPred:$offset, (i32 IntRegs:$src1))>,
-            Requires<[HasV4T]>;
-
-// Map from store(globaladdress + x) -> memh(#foo + x)
-let AddedComplexity = 100 in
-def : Pat<(truncstorei16 (i32 IntRegs:$src1),
-                         (add (HexagonCONST32_GP tglobaladdr:$global),
-                              u16ImmPred:$offset)),
-            (STrih_GP_V4 tglobaladdr:$global, u16ImmPred:$offset, (i32 IntRegs:$src1))>,
-            Requires<[HasV4T]>;
-
-// Map from store(globaladdress + x) -> memw(#foo + x)
-let AddedComplexity = 100 in
-def : Pat<(store (i32 IntRegs:$src1),
-                 (add (HexagonCONST32_GP tglobaladdr:$global),
-                      u16ImmPred:$offset)),
-            (STriw_GP_V4 tglobaladdr:$global, u16ImmPred:$offset, (i32 IntRegs:$src1))>,
-            Requires<[HasV4T]>;
-
-
-
//===----------------------------------------------------------------------===//
// ST -
//===----------------------------------------------------------------------===//
@@ -3008,19 +1696,11 @@ def STrib_GP_nv_V4 : NVInst_V4<(outs),
            []>,
            Requires<[HasV4T]>;

-// memb(#global)=Nt.new
-let mayStore = 1, neverHasSideEffects = 1 in
-def STb_GP_nv_V4 : NVInst_V4<(outs),
-            (ins globaladdress:$global, IntRegs:$src),
-            "memb(#$global) = $src.new",
-            []>,
-            Requires<[HasV4T]>;

// Store new-value byte conditionally.

// if ([!]Pv[.new]) memb(#u6)=Nt.new
// if (Pv) memb(Rs+#u6:0)=Nt.new
-let mayStore = 1, neverHasSideEffects = 1,
-    isPredicated = 1 in
+let mayStore = 1, neverHasSideEffects = 1 in
def STrib_cPt_nv_V4 : NVInst_V4<(outs),
            (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
            "if ($src1) memb($addr) = $src2.new",
@@ -3028,8 +1708,7 @@ def STrib_cPt_nv_V4 : NVInst_V4<(outs),
            Requires<[HasV4T]>;

// if (Pv.new) memb(Rs+#u6:0)=Nt.new
-let mayStore = 1, neverHasSideEffects = 1,
-    isPredicated = 1 in
+let mayStore = 1, neverHasSideEffects = 1 in
def STrib_cdnPt_nv_V4 : NVInst_V4<(outs),
            (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
            "if ($src1.new) memb($addr) = $src2.new",
@@ -3037,8 +1716,7 @@ def STrib_cdnPt_nv_V4 : NVInst_V4<(outs),
            Requires<[HasV4T]>;

// if (!Pv) memb(Rs+#u6:0)=Nt.new
-let mayStore = 1, neverHasSideEffects = 1,
-    isPredicated = 1 in
+let mayStore = 1, neverHasSideEffects = 1 in
def STrib_cNotPt_nv_V4 : NVInst_V4<(outs),
            (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
            "if (!$src1) memb($addr) = $src2.new",
@@ -3046,8 +1724,7 @@ def STrib_cNotPt_nv_V4 : NVInst_V4<(outs),
            Requires<[HasV4T]>;

// if (!Pv.new) memb(Rs+#u6:0)=Nt.new
-let mayStore = 1, neverHasSideEffects = 1,
-    isPredicated = 1 in
+let mayStore = 1, neverHasSideEffects = 1 in
def STrib_cdnNotPt_nv_V4 : NVInst_V4<(outs),
            (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
            "if (!$src1.new) memb($addr) = $src2.new",
@@ -3055,8 +1732,7 @@ def STrib_cdnNotPt_nv_V4 : NVInst_V4<(outs),
            Requires<[HasV4T]>;

// if (Pv) memb(Rs+#u6:0)=Nt.new
-let mayStore = 1, neverHasSideEffects = 1,
-    isPredicated = 1 in
+let mayStore = 1, neverHasSideEffects = 1 in
def STrib_indexed_cPt_nv_V4 : NVInst_V4<(outs),
            (ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3, IntRegs:$src4),
            "if ($src1) memb($src2+#$src3) = $src4.new",
@@ -3064,8 +1740,7 @@ def STrib_indexed_cPt_nv_V4 : NVInst_V4<(outs),
            Requires<[HasV4T]>;

// if (Pv.new) memb(Rs+#u6:0)=Nt.new
-let mayStore = 1, neverHasSideEffects = 1,
-    isPredicated = 1 in
+let mayStore = 1, neverHasSideEffects = 1 in
def STrib_indexed_cdnPt_nv_V4 : NVInst_V4<(outs),
            (ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3, IntRegs:$src4),
            "if ($src1.new) memb($src2+#$src3) = $src4.new",
@@ -3073,8 +1748,7 @@ def STrib_indexed_cdnPt_nv_V4 : NVInst_V4<(outs),
            Requires<[HasV4T]>;

// if (!Pv) memb(Rs+#u6:0)=Nt.new
-let mayStore = 1, neverHasSideEffects = 1,
-    isPredicated = 1 in
+let mayStore = 1, neverHasSideEffects = 1 in
def STrib_indexed_cNotPt_nv_V4 : NVInst_V4<(outs),
            (ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3, IntRegs:$src4),
            "if (!$src1) memb($src2+#$src3) = $src4.new",
@@ -3082,8 +1756,7 @@ def STrib_indexed_cNotPt_nv_V4 : NVInst_V4<(outs),
            Requires<[HasV4T]>;

// if (!Pv.new) memb(Rs+#u6:0)=Nt.new
-let mayStore = 1, neverHasSideEffects = 1,
-    isPredicated = 1 in
+let mayStore = 1, neverHasSideEffects = 1 in
def STrib_indexed_cdnNotPt_nv_V4 : NVInst_V4<(outs),
            (ins PredRegs:$src1, IntRegs:$src2, u6_0Imm:$src3, IntRegs:$src4),
            "if (!$src1.new) memb($src2+#$src3) = $src4.new",
@@ -3093,8 +1766,7 @@ def STrib_indexed_cdnNotPt_nv_V4 : NVInst_V4<(outs),

// if ([!]Pv[.new]) memb(Rs+Ru<<#u2)=Nt.new
// if (Pv) memb(Rs+Ru<<#u2)=Nt.new
-let mayStore = 1, AddedComplexity = 10,
-    isPredicated = 1 in
+let mayStore = 1, AddedComplexity = 10 in
def STrib_indexed_shl_cPt_nv_V4 : NVInst_V4<(outs),
            (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4,
                 IntRegs:$src5),
@@ -3103,8 +1775,7 @@ def STrib_indexed_shl_cPt_nv_V4 : NVInst_V4<(outs),
            Requires<[HasV4T]>;

// if (Pv.new) memb(Rs+Ru<<#u2)=Nt.new
-let mayStore = 1, AddedComplexity = 10,
-    isPredicated = 1 in
+let mayStore = 1, AddedComplexity = 10 in
def STrib_indexed_shl_cdnPt_nv_V4 : NVInst_V4<(outs),
            (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4,
                 IntRegs:$src5),
@@ -3113,8 +1784,7 @@ def STrib_indexed_shl_cdnPt_nv_V4 : NVInst_V4<(outs),
            Requires<[HasV4T]>;

// if (!Pv) memb(Rs+Ru<<#u2)=Nt.new
-let mayStore = 1, AddedComplexity = 10,
-    isPredicated = 1 in
+let mayStore = 1, AddedComplexity = 10 in
def STrib_indexed_shl_cNotPt_nv_V4 : NVInst_V4<(outs),
            (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4,
                 IntRegs:$src5),
@@ -3123,8 +1793,7 @@ def STrib_indexed_shl_cNotPt_nv_V4 : NVInst_V4<(outs),
            Requires<[HasV4T]>;

// if (!Pv.new) memb(Rs+Ru<<#u2)=Nt.new
-let mayStore = 1, AddedComplexity = 10,
-    isPredicated = 1 in
+let mayStore = 1, AddedComplexity = 10 in
def STrib_indexed_shl_cdnNotPt_nv_V4 : NVInst_V4<(outs),
            (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4,
                 IntRegs:$src5),
@@ -3134,8 +1803,7 @@ def STrib_indexed_shl_cdnNotPt_nv_V4 : NVInst_V4<(outs),

// if ([!]Pv[.new]) memb(Rx++#s4:0)=Nt.new
// if (Pv) memb(Rx++#s4:0)=Nt.new
-let mayStore = 1, hasCtrlDep = 1,
-    isPredicated = 1 in
+let mayStore = 1, hasCtrlDep = 1 in
def POST_STbri_cPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst),
            (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4_0Imm:$offset),
            "if ($src1) memb($src3++#$offset) = $src2.new",
@@ -3143,8 +1811,7 @@ def POST_STbri_cPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst),
            Requires<[HasV4T]>;

// if (Pv.new) memb(Rx++#s4:0)=Nt.new
-let mayStore = 1, hasCtrlDep = 1,
-    isPredicated = 1 in
+let mayStore = 1, hasCtrlDep = 1 in
def POST_STbri_cdnPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst),
            (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4_0Imm:$offset),
            "if ($src1.new) memb($src3++#$offset) = $src2.new",
@@ -3152,8 +1819,7 @@ def POST_STbri_cdnPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst),
            Requires<[HasV4T]>;

// if (!Pv) memb(Rx++#s4:0)=Nt.new
-let mayStore = 1, hasCtrlDep = 1,
-    isPredicated = 1 in
+let mayStore = 1, hasCtrlDep = 1 in
def POST_STbri_cNotPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst),
            (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4_0Imm:$offset),
            "if (!$src1) memb($src3++#$offset) = $src2.new",
@@ -3161,8 +1827,7 @@ def POST_STbri_cNotPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst),
            Requires<[HasV4T]>;

// if (!Pv.new) memb(Rx++#s4:0)=Nt.new
-let mayStore = 1, hasCtrlDep = 1,
-    isPredicated = 1 in
+let mayStore = 1, hasCtrlDep = 1 in
def POST_STbri_cdnNotPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst),
            (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4_0Imm:$offset),
            "if (!$src1.new) memb($src3++#$offset) = $src2.new",
@@ -3224,14 +1889,6 @@ def STrih_GP_nv_V4 : NVInst_V4<(outs),
            []>,
            Requires<[HasV4T]>;

-// memh(#global)=Nt.new
-let mayStore = 1, neverHasSideEffects = 1 in
-def STh_GP_nv_V4 : NVInst_V4<(outs),
-            (ins globaladdress:$global, IntRegs:$src),
-            "memh(#$global) = $src.new",
-            []>,
-            Requires<[HasV4T]>;
-
// Store new-value halfword conditionally.

@@ -3239,8 +1896,7 @@ def STh_GP_nv_V4 : NVInst_V4<(outs),

// if ([!]Pv[.new]) memh(Rs+#u6:1)=Nt.new
// if (Pv) memh(Rs+#u6:1)=Nt.new
-let mayStore = 1, neverHasSideEffects = 1,
-    isPredicated = 1 in
+let mayStore = 1, neverHasSideEffects = 1 in
def STrih_cPt_nv_V4 : NVInst_V4<(outs),
            (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
            "if ($src1) memh($addr) = $src2.new",
@@ -3248,8 +1904,7 @@ def STrih_cPt_nv_V4 : NVInst_V4<(outs),
            Requires<[HasV4T]>;

// if (Pv.new) memh(Rs+#u6:1)=Nt.new
-let mayStore = 1, neverHasSideEffects = 1,
-    isPredicated = 1 in
+let mayStore = 1, neverHasSideEffects = 1 in
def STrih_cdnPt_nv_V4 : NVInst_V4<(outs),
            (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
            "if ($src1.new) memh($addr) = $src2.new",
@@ -3257,8 +1912,7 @@ def STrih_cdnPt_nv_V4 : NVInst_V4<(outs),
            Requires<[HasV4T]>;

// if (!Pv) memh(Rs+#u6:1)=Nt.new
-let mayStore = 1, neverHasSideEffects = 1,
-    isPredicated = 1 in
+let mayStore = 1, neverHasSideEffects = 1 in
def STrih_cNotPt_nv_V4 : NVInst_V4<(outs),
            (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
            "if (!$src1) memh($addr) = $src2.new",
@@ -3266,8 +1920,7 @@ def STrih_cNotPt_nv_V4 : NVInst_V4<(outs),
            Requires<[HasV4T]>;

// if (!Pv.new) memh(Rs+#u6:1)=Nt.new
-let mayStore = 1, neverHasSideEffects = 1,
-    isPredicated = 1 in
+let mayStore = 1, neverHasSideEffects = 1 in
def STrih_cdnNotPt_nv_V4 : NVInst_V4<(outs),
            (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
            "if (!$src1.new) memh($addr) = $src2.new",
@@ -3275,8 +1928,7 @@ def STrih_cdnNotPt_nv_V4 : NVInst_V4<(outs),
            Requires<[HasV4T]>;

// if (Pv) memh(Rs+#u6:1)=Nt.new
-let mayStore = 1, neverHasSideEffects = 1,
-    isPredicated = 1 in
+let mayStore = 1, neverHasSideEffects = 1 in
def STrih_indexed_cPt_nv_V4 : NVInst_V4<(outs),
            (ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3, IntRegs:$src4),
            "if ($src1) memh($src2+#$src3) = $src4.new",
@@ -3284,8 +1936,7 @@ def STrih_indexed_cPt_nv_V4 : NVInst_V4<(outs),
            Requires<[HasV4T]>;

// if (Pv.new) memh(Rs+#u6:1)=Nt.new
-let mayStore = 1, neverHasSideEffects = 1,
-    isPredicated = 1 in
+let mayStore = 1, neverHasSideEffects = 1 in
def STrih_indexed_cdnPt_nv_V4 : NVInst_V4<(outs),
            (ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3, IntRegs:$src4),
            "if ($src1.new) memh($src2+#$src3) = $src4.new",
@@ -3293,8 +1944,7 @@ def STrih_indexed_cdnPt_nv_V4 : NVInst_V4<(outs),
            Requires<[HasV4T]>;

// if (!Pv) memh(Rs+#u6:1)=Nt.new
-let mayStore = 1, neverHasSideEffects = 1,
-    isPredicated = 1 in
+let mayStore = 1, neverHasSideEffects = 1 in
def STrih_indexed_cNotPt_nv_V4 : NVInst_V4<(outs),
            (ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3, IntRegs:$src4),
            "if (!$src1) memh($src2+#$src3) = $src4.new",
@@ -3302,8 +1952,7 @@ def STrih_indexed_cNotPt_nv_V4 : NVInst_V4<(outs),
            Requires<[HasV4T]>;

// if (!Pv.new) memh(Rs+#u6:1)=Nt.new
-let mayStore = 1, neverHasSideEffects = 1,
-    isPredicated = 1 in
+let mayStore = 1, neverHasSideEffects = 1 in
def STrih_indexed_cdnNotPt_nv_V4 : NVInst_V4<(outs),
            (ins PredRegs:$src1, IntRegs:$src2, u6_1Imm:$src3, IntRegs:$src4),
            "if (!$src1.new) memh($src2+#$src3) = $src4.new",
@@ -3312,8 +1961,7 @@ def STrih_indexed_cdnNotPt_nv_V4 : NVInst_V4<(outs),

// if ([!]Pv[.new]) memh(Rs+Ru<<#u2)=Nt.new
// if (Pv) memh(Rs+Ru<<#u2)=Nt.new
-let mayStore = 1, AddedComplexity = 10,
-    isPredicated = 1 in
+let mayStore = 1, AddedComplexity = 10 in
def STrih_indexed_shl_cPt_nv_V4 : NVInst_V4<(outs),
            (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4,
                 IntRegs:$src5),
@@ -3322,8 +1970,7 @@ def STrih_indexed_shl_cPt_nv_V4 : NVInst_V4<(outs),
            Requires<[HasV4T]>;

// if (Pv.new) memh(Rs+Ru<<#u2)=Nt.new
-let mayStore = 1, AddedComplexity = 10,
-    isPredicated = 1 in
+let mayStore = 1, AddedComplexity = 10 in
def STrih_indexed_shl_cdnPt_nv_V4 : NVInst_V4<(outs),
            (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4,
                 IntRegs:$src5),
@@ -3332,8 +1979,7 @@ def STrih_indexed_shl_cdnPt_nv_V4 : NVInst_V4<(outs),
            Requires<[HasV4T]>;

// if (!Pv) memh(Rs+Ru<<#u2)=Nt.new
-let mayStore = 1, AddedComplexity = 10,
-    isPredicated = 1 in
+let mayStore = 1, AddedComplexity = 10 in
def STrih_indexed_shl_cNotPt_nv_V4 : NVInst_V4<(outs),
            (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4,
                 IntRegs:$src5),
@@ -3342,8 +1988,7 @@ def STrih_indexed_shl_cNotPt_nv_V4 : NVInst_V4<(outs),
            Requires<[HasV4T]>;

// if (!Pv.new) memh(Rs+Ru<<#u2)=Nt.new
-let mayStore = 1, AddedComplexity = 10,
-    isPredicated = 1 in
+let mayStore = 1, AddedComplexity = 10 in
def STrih_indexed_shl_cdnNotPt_nv_V4 : NVInst_V4<(outs),
            (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4,
                 IntRegs:$src5),
@@ -3353,8 +1998,7 @@ def STrih_indexed_shl_cdnNotPt_nv_V4 : NVInst_V4<(outs),

// if ([!]Pv[]) memh(Rx++#s4:1)=Nt.new
// if (Pv) memh(Rx++#s4:1)=Nt.new
-let mayStore = 1, hasCtrlDep = 1,
-    isPredicated = 1 in
+let mayStore = 1, hasCtrlDep = 1 in
def POST_SThri_cPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst),
            (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4_1Imm:$offset),
            "if ($src1) memh($src3++#$offset) = $src2.new",
@@ -3362,8 +2006,7 @@ def POST_SThri_cPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst),
            Requires<[HasV4T]>;

// if (Pv.new) memh(Rx++#s4:1)=Nt.new
-let mayStore = 1, hasCtrlDep = 1,
-    isPredicated = 1 in
+let mayStore = 1, hasCtrlDep = 1 in
def POST_SThri_cdnPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst),
            (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4_1Imm:$offset),
            "if ($src1.new) memh($src3++#$offset) = $src2.new",
@@ -3371,8 +2014,7 @@ def POST_SThri_cdnPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst),
            Requires<[HasV4T]>;

// if (!Pv) memh(Rx++#s4:1)=Nt.new
-let mayStore = 1, hasCtrlDep = 1,
-    isPredicated = 1 in
+let mayStore = 1, hasCtrlDep = 1 in
def POST_SThri_cNotPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst),
            (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4_1Imm:$offset),
            "if (!$src1) memh($src3++#$offset) = $src2.new",
@@ -3380,8 +2022,7 @@ def POST_SThri_cNotPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst),
            Requires<[HasV4T]>;

// if (!Pv.new) memh(Rx++#s4:1)=Nt.new
-let mayStore = 1, hasCtrlDep = 1,
-    isPredicated = 1 in
+let mayStore = 1, hasCtrlDep = 1 in
def POST_SThri_cdnNotPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst),
            (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4_1Imm:$offset),
            "if (!$src1.new) memh($src3++#$offset) = $src2.new",
@@ -3444,12 +2085,6 @@ def STriw_GP_nv_V4 : NVInst_V4<(outs),
            []>,
            Requires<[HasV4T]>;

-let mayStore = 1, neverHasSideEffects = 1 in
-def STw_GP_nv_V4 : NVInst_V4<(outs),
-            (ins globaladdress:$global, IntRegs:$src),
-            "memw(#$global) = $src.new",
-            []>,
-            Requires<[HasV4T]>;

// Store new-value word conditionally.

@@ -3457,8 +2092,7 @@ def STw_GP_nv_V4 : NVInst_V4<(outs),

// if ([!]Pv[.new]) memw(Rs+#u6:2)=Nt.new
// if (Pv) memw(Rs+#u6:2)=Nt.new
-let mayStore = 1, neverHasSideEffects = 1,
-    isPredicated = 1 in
+let mayStore = 1, neverHasSideEffects = 1 in
def STriw_cPt_nv_V4 : NVInst_V4<(outs),
            (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
            "if ($src1) memw($addr) = $src2.new",
@@ -3466,8 +2100,7 @@ def STriw_cPt_nv_V4 : NVInst_V4<(outs),
            Requires<[HasV4T]>;

// if (Pv.new) memw(Rs+#u6:2)=Nt.new
-let mayStore = 1, neverHasSideEffects = 1,
-    isPredicated = 1 in
+let mayStore = 1, neverHasSideEffects = 1 in
def STriw_cdnPt_nv_V4 : NVInst_V4<(outs),
            (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
            "if ($src1.new) memw($addr) = $src2.new",
@@ -3475,8 +2108,7 @@ def STriw_cdnPt_nv_V4 : NVInst_V4<(outs),
            Requires<[HasV4T]>;

// if (!Pv) memw(Rs+#u6:2)=Nt.new
-let mayStore = 1, neverHasSideEffects = 1,
-    isPredicated = 1 in
+let mayStore = 1, neverHasSideEffects = 1 in
def STriw_cNotPt_nv_V4 : NVInst_V4<(outs),
            (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
            "if (!$src1) memw($addr) = $src2.new",
@@ -3484,8 +2116,7 @@ def STriw_cNotPt_nv_V4 : NVInst_V4<(outs),
            Requires<[HasV4T]>;

// if (!Pv.new) memw(Rs+#u6:2)=Nt.new
-let mayStore = 1, neverHasSideEffects = 1,
-    isPredicated = 1 in
+let mayStore = 1, neverHasSideEffects = 1 in
def STriw_cdnNotPt_nv_V4 : NVInst_V4<(outs),
            (ins PredRegs:$src1, MEMri:$addr, IntRegs:$src2),
            "if (!$src1.new) memw($addr) = $src2.new",
@@ -3493,8 +2124,7 @@ def STriw_cdnNotPt_nv_V4 : NVInst_V4<(outs),
            Requires<[HasV4T]>;

// if (Pv) memw(Rs+#u6:2)=Nt.new
-let mayStore = 1, neverHasSideEffects = 1,
-    isPredicated = 1 in
+let mayStore = 1, neverHasSideEffects = 1 in
def STriw_indexed_cPt_nv_V4 : NVInst_V4<(outs),
            (ins PredRegs:$src1, IntRegs:$src2, u6_2Imm:$src3, IntRegs:$src4),
            "if ($src1) memw($src2+#$src3) = $src4.new",
@@ -3502,8 +2132,7 @@ def STriw_indexed_cPt_nv_V4 : NVInst_V4<(outs),
            Requires<[HasV4T]>;

// if (Pv.new) memw(Rs+#u6:2)=Nt.new
-let mayStore = 1, neverHasSideEffects = 1,
-    isPredicated = 1 in
+let mayStore = 1, neverHasSideEffects = 1 in
def STriw_indexed_cdnPt_nv_V4 : NVInst_V4<(outs),
            (ins PredRegs:$src1, IntRegs:$src2, u6_2Imm:$src3, IntRegs:$src4),
            "if ($src1.new) memw($src2+#$src3) = $src4.new",
@@ -3511,8 +2140,7 @@ def STriw_indexed_cdnPt_nv_V4 : NVInst_V4<(outs),
            Requires<[HasV4T]>;

// if (!Pv) memw(Rs+#u6:2)=Nt.new
-let mayStore = 1, neverHasSideEffects = 1,
-    isPredicated = 1 in
+let mayStore = 1, neverHasSideEffects = 1 in
def STriw_indexed_cNotPt_nv_V4 : NVInst_V4<(outs),
            (ins PredRegs:$src1, IntRegs:$src2, u6_2Imm:$src3, IntRegs:$src4),
            "if (!$src1) memw($src2+#$src3) = $src4.new",
@@ -3520,8 +2148,7 @@ def STriw_indexed_cNotPt_nv_V4 : NVInst_V4<(outs),
            Requires<[HasV4T]>;

// if (!Pv.new) memw(Rs+#u6:2)=Nt.new
-let mayStore = 1, neverHasSideEffects = 1,
-    isPredicated = 1 in
+let mayStore = 1, neverHasSideEffects = 1 in
def STriw_indexed_cdnNotPt_nv_V4 : NVInst_V4<(outs),
            (ins PredRegs:$src1, IntRegs:$src2, u6_2Imm:$src3, IntRegs:$src4),
            "if (!$src1.new) memw($src2+#$src3) = $src4.new",
@@ -3531,8 +2158,7 @@ def STriw_indexed_cdnNotPt_nv_V4 : NVInst_V4<(outs),

// if ([!]Pv[.new]) memw(Rs+Ru<<#u2)=Nt.new
// if (Pv) memw(Rs+Ru<<#u2)=Nt.new
-let mayStore = 1, AddedComplexity = 10,
-    isPredicated = 1 in
+let mayStore = 1, AddedComplexity = 10 in
def STriw_indexed_shl_cPt_nv_V4 : NVInst_V4<(outs),
            (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4,
                 IntRegs:$src5),
@@ -3541,8 +2167,7 @@ def STriw_indexed_shl_cPt_nv_V4 : NVInst_V4<(outs),
            Requires<[HasV4T]>;

// if (Pv.new) memw(Rs+Ru<<#u2)=Nt.new
-let mayStore = 1, AddedComplexity = 10,
-    isPredicated = 1 in
+let mayStore = 1, AddedComplexity = 10 in
def STriw_indexed_shl_cdnPt_nv_V4 : NVInst_V4<(outs),
            (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4,
                 IntRegs:$src5),
@@ -3551,8 +2176,7 @@ def STriw_indexed_shl_cdnPt_nv_V4 : NVInst_V4<(outs),
            Requires<[HasV4T]>;

// if (!Pv) memw(Rs+Ru<<#u2)=Nt.new
-let mayStore = 1, AddedComplexity = 10,
-    isPredicated = 1 in
+let mayStore = 1, AddedComplexity = 10 in
def STriw_indexed_shl_cNotPt_nv_V4 : NVInst_V4<(outs),
            (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4,
                 IntRegs:$src5),
@@ -3561,8 +2185,7 @@ def STriw_indexed_shl_cNotPt_nv_V4 : NVInst_V4<(outs),
            Requires<[HasV4T]>;

// if (!Pv.new) memw(Rs+Ru<<#u2)=Nt.new
-let mayStore = 1, AddedComplexity = 10,
-    isPredicated = 1 in
+let mayStore = 1, AddedComplexity = 10 in
def STriw_indexed_shl_cdnNotPt_nv_V4 : NVInst_V4<(outs),
            (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, u2Imm:$src4,
                 IntRegs:$src5),
@@ -3572,8 +2195,7 @@ def STriw_indexed_shl_cdnNotPt_nv_V4 : NVInst_V4<(outs),

// if ([!]Pv[.new]) memw(Rx++#s4:2)=Nt.new
// if (Pv) memw(Rx++#s4:2)=Nt.new
-let mayStore = 1, hasCtrlDep = 1,
-    isPredicated = 1 in
+let mayStore = 1, hasCtrlDep = 1 in
def POST_STwri_cPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst),
            (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4_2Imm:$offset),
            "if ($src1) memw($src3++#$offset) = $src2.new",
@@ -3581,8 +2203,7 @@ def POST_STwri_cPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst),
            Requires<[HasV4T]>;

// if (Pv.new) memw(Rx++#s4:2)=Nt.new
-let mayStore = 1, hasCtrlDep = 1,
-    isPredicated = 1 in
+let mayStore = 1, hasCtrlDep = 1 in
def POST_STwri_cdnPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst),
            (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4_2Imm:$offset),
            "if ($src1.new) memw($src3++#$offset) = $src2.new",
@@ -3590,8 +2211,7 @@ def POST_STwri_cdnPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst),
            Requires<[HasV4T]>;

// if (!Pv) memw(Rx++#s4:2)=Nt.new
-let mayStore = 1, hasCtrlDep = 1,
-    isPredicated = 1 in
+let mayStore = 1, hasCtrlDep = 1 in
def POST_STwri_cNotPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst),
            (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4_2Imm:$offset),
            "if (!$src1) memw($src3++#$offset) = $src2.new",
@@ -3599,8 +2219,7 @@ def POST_STwri_cNotPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst),
            Requires<[HasV4T]>;

// if (!Pv.new) memw(Rx++#s4:2)=Nt.new
-let mayStore = 1, hasCtrlDep = 1,
-    isPredicated = 1 in
+let mayStore = 1, hasCtrlDep = 1 in
def POST_STwri_cdnNotPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst),
            (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3, s4_2Imm:$offset),
            "if (!$src1.new) memw($src3++#$offset) = $src2.new",
@@ -3608,199 +2227,6 @@ def POST_STwri_cdnNotPt_nv_V4 : NVInstPI_V4<(outs IntRegs:$dst),
            Requires<[HasV4T]>;

-
-// if (Pv) memb(##global) = Rt
-let mayStore = 1, neverHasSideEffects = 1 in
-def STb_GP_cPt_nv_V4 : NVInst_V4<(outs),
-            (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2),
-            "if ($src1) memb(##$global) = $src2.new",
-            []>,
-            Requires<[HasV4T]>;
-
-// if (!Pv) memb(##global) = Rt
-let mayStore = 1, neverHasSideEffects = 1 in
-def STb_GP_cNotPt_nv_V4 : NVInst_V4<(outs),
-            (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2),
-            "if (!$src1) memb(##$global) = $src2.new",
-            []>,
-            Requires<[HasV4T]>;
-
-// if (Pv) memb(##global) = Rt
-let mayStore = 1, neverHasSideEffects = 1 in
-def STb_GP_cdnPt_nv_V4 : NVInst_V4<(outs),
-            (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2),
-            "if ($src1.new) memb(##$global) = $src2.new",
-            []>,
-            Requires<[HasV4T]>;
-
-// if (!Pv) memb(##global) = Rt
-let mayStore = 1, neverHasSideEffects = 1 in
-def STb_GP_cdnNotPt_nv_V4 : NVInst_V4<(outs),
-            (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2),
-            "if (!$src1.new) memb(##$global) = $src2.new",
-            []>,
-            Requires<[HasV4T]>;
-
-// if (Pv) memh(##global) = Rt
-let mayStore = 1, neverHasSideEffects = 1 in
-def STh_GP_cPt_nv_V4 : NVInst_V4<(outs),
-            (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2),
-            "if ($src1) memh(##$global) = $src2.new",
-            []>,
-            Requires<[HasV4T]>;
-
-// if (!Pv) memh(##global) = Rt
-let mayStore = 1, neverHasSideEffects = 1 in
-def STh_GP_cNotPt_nv_V4 : NVInst_V4<(outs),
-            (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2),
-            "if (!$src1) memh(##$global) = $src2.new",
-            []>,
-            Requires<[HasV4T]>;
-
-// if (Pv) memh(##global) = Rt
-let mayStore = 1, neverHasSideEffects = 1 in
-def STh_GP_cdnPt_nv_V4 : NVInst_V4<(outs),
-            (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2),
-            "if ($src1.new) memh(##$global) = $src2.new",
-            []>,
-            Requires<[HasV4T]>;
-
-// if (!Pv) memh(##global) = Rt
-let mayStore = 1, neverHasSideEffects = 1 in
-def STh_GP_cdnNotPt_nv_V4 : NVInst_V4<(outs),
-            (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2),
-            "if (!$src1.new) memh(##$global) = $src2.new",
-            []>,
-            Requires<[HasV4T]>;
-
-// if (Pv) memw(##global) = Rt
-let mayStore = 1, neverHasSideEffects = 1 in
-def STw_GP_cPt_nv_V4 : NVInst_V4<(outs),
-            (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2),
-            "if ($src1) memw(##$global) = $src2.new",
-            []>,
-            Requires<[HasV4T]>;
-
-// if (!Pv) memw(##global) = Rt
-let mayStore = 1, neverHasSideEffects = 1 in
-def STw_GP_cNotPt_nv_V4 : NVInst_V4<(outs),
-            (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2),
-            "if (!$src1) memw(##$global) = $src2.new",
-            []>,
-            Requires<[HasV4T]>;
-
-// if (Pv) memw(##global) = Rt
-let mayStore = 1, neverHasSideEffects = 1 in
-def STw_GP_cdnPt_nv_V4 : NVInst_V4<(outs),
-            (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2),
-            "if ($src1.new) memw(##$global) = $src2.new",
-            []>,
-            Requires<[HasV4T]>;
-
-// if (!Pv) memw(##global) = Rt
-let mayStore = 1, neverHasSideEffects = 1 in
-def STw_GP_cdnNotPt_nv_V4 : NVInst_V4<(outs),
-            (ins PredRegs:$src1, globaladdress:$global, IntRegs:$src2),
-            "if (!$src1.new) memw(##$global) = $src2.new",
-            []>,
-            Requires<[HasV4T]>;
-
-let mayStore = 1, neverHasSideEffects = 1 in
-def STrib_GP_cPt_nv_V4 : NVInst_V4<(outs),
-            (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset,
-                 IntRegs:$src2),
-            "if ($src1) memb(##$global+$offset) = $src2.new",
-            []>,
-            Requires<[HasV4T]>;
-
-let mayStore = 1, neverHasSideEffects = 1 in
-def STrib_GP_cNotPt_nv_V4 : NVInst_V4<(outs),
-            (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset,
-                 IntRegs:$src2),
-            "if (!$src1) memb(##$global+$offset) = $src2.new",
-            []>,
-            Requires<[HasV4T]>;
-
-let mayStore = 1, neverHasSideEffects = 1 in
-def STrib_GP_cdnPt_nv_V4 : NVInst_V4<(outs),
-            (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset,
-                 IntRegs:$src2),
-            "if ($src1.new) memb(##$global+$offset) = $src2.new",
-            []>,
-            Requires<[HasV4T]>;
-
-let mayStore = 1, neverHasSideEffects = 1 in
-def STrib_GP_cdnNotPt_nv_V4 : NVInst_V4<(outs),
-            (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset,
-                 IntRegs:$src2),
-            "if (!$src1.new) memb(##$global+$offset) = $src2.new",
-            []>,
-            Requires<[HasV4T]>;
-
-let mayStore = 1, neverHasSideEffects = 1 in
-def STrih_GP_cPt_nv_V4 : NVInst_V4<(outs),
-            (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset,
-                 IntRegs:$src2),
-            "if ($src1) memh(##$global+$offset) = $src2.new",
-            []>,
-            Requires<[HasV4T]>;
-
-let mayStore = 1, neverHasSideEffects = 1 in
-def STrih_GP_cNotPt_nv_V4 : NVInst_V4<(outs),
-            (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset,
-                 IntRegs:$src2),
-            "if (!$src1) memh(##$global+$offset) = $src2.new",
-            []>,
-            Requires<[HasV4T]>;
-
-let mayStore = 1, neverHasSideEffects = 1 in
-def STrih_GP_cdnPt_nv_V4 : NVInst_V4<(outs),
-            (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset,
-                 IntRegs:$src2),
-            "if ($src1.new) memh(##$global+$offset) = $src2.new",
-            []>,
-            Requires<[HasV4T]>;
-
-let mayStore = 1, neverHasSideEffects = 1 in
-def STrih_GP_cdnNotPt_nv_V4 : NVInst_V4<(outs),
-            (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset,
-                 IntRegs:$src2),
-            "if (!$src1.new) memh(##$global+$offset) = $src2.new",
-            []>,
-            Requires<[HasV4T]>;
-
-let mayStore = 1, neverHasSideEffects = 1 in
-def STriw_GP_cPt_nv_V4 : NVInst_V4<(outs),
-            (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset,
-                 IntRegs:$src2),
-            "if ($src1) memw(##$global+$offset) = $src2.new",
-            []>,
-            Requires<[HasV4T]>;
-
-let mayStore = 1, neverHasSideEffects = 1 in
-def STriw_GP_cNotPt_nv_V4 : NVInst_V4<(outs),
-            (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset,
-                 IntRegs:$src2),
-            "if (!$src1) memw(##$global+$offset) = $src2.new",
-            []>,
-            Requires<[HasV4T]>;
-
-let mayStore = 1, neverHasSideEffects = 1 in
-def STriw_GP_cdnPt_nv_V4 : NVInst_V4<(outs),
-            (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset,
-                 IntRegs:$src2),
-            "if ($src1.new) memw(##$global+$offset) = $src2.new",
-            []>,
-            Requires<[HasV4T]>;
-
-let mayStore = 1, neverHasSideEffects = 1 in
-def STriw_GP_cdnNotPt_nv_V4 : NVInst_V4<(outs),
-            (ins PredRegs:$src1, globaladdress:$global, u16Imm:$offset,
-                 IntRegs:$src2),
-            "if (!$src1.new) memw(##$global+$offset) = $src2.new",
-            []>,
-            Requires<[HasV4T]>;
-
//===----------------------------------------------------------------------===//
// NV/ST -
//===----------------------------------------------------------------------===//
@@ -3990,18 +2416,16 @@ let isBranch = 1, isTerminator=1, neverHasSideEffects = 1, Defs = [PC] in {
def ADDr_ADDri_V4 : MInst<(outs IntRegs:$dst),
            (ins IntRegs:$src1, IntRegs:$src2, s6Imm:$src3),
            "$dst = add($src1, add($src2, #$src3))",
-           [(set (i32 IntRegs:$dst),
-                 (add (i32 IntRegs:$src1), (add (i32 IntRegs:$src2),
-                                                s6ImmPred:$src3)))]>,
+           [(set IntRegs:$dst,
+                 (add IntRegs:$src1, (add IntRegs:$src2, s6ImmPred:$src3)))]>,
            Requires<[HasV4T]>;

// Rd=add(Rs,sub(#s6,Ru))
def ADDr_SUBri_V4 : MInst<(outs IntRegs:$dst),
            (ins IntRegs:$src1, s6Imm:$src2, IntRegs:$src3),
            "$dst = add($src1, sub(#$src2, $src3))",
-           [(set (i32 IntRegs:$dst),
-                 (add (i32 IntRegs:$src1), (sub s6ImmPred:$src2,
-                                                (i32 IntRegs:$src3))))]>,
+           [(set IntRegs:$dst,
+                 (add IntRegs:$src1, (sub s6ImmPred:$src2, IntRegs:$src3)))]>,
            Requires<[HasV4T]>;

// Generates the same instruction as ADDr_SUBri_V4 but matches different
@@ -4010,9 +2434,8 @@ def ADDr_SUBri_V4 : MInst<(outs IntRegs:$dst),
def ADDri_SUBr_V4 : MInst<(outs IntRegs:$dst),
            (ins IntRegs:$src1, s6Imm:$src2, IntRegs:$src3),
            "$dst = add($src1, sub(#$src2, $src3))",
-           [(set (i32 IntRegs:$dst),
-                 (sub (add (i32 IntRegs:$src1), s6ImmPred:$src2),
-                      (i32 IntRegs:$src3)))]>,
+           [(set IntRegs:$dst,
+                 (sub (add IntRegs:$src1, s6ImmPred:$src2), IntRegs:$src3))]>,
            Requires<[HasV4T]>;

@@ -4028,16 +2451,16 @@ def ADDri_SUBr_V4 : MInst<(outs IntRegs:$dst),
def
ANDd_NOTd_V4 : MInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2), "$dst = and($src1, ~$src2)", - [(set (i64 DoubleRegs:$dst), (and (i64 DoubleRegs:$src1), - (not (i64 DoubleRegs:$src2))))]>, + [(set DoubleRegs:$dst, (and DoubleRegs:$src1, + (not DoubleRegs:$src2)))]>, Requires<[HasV4T]>; // Rdd=or(Rtt,~Rss) def ORd_NOTd_V4 : MInst<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2), "$dst = or($src1, ~$src2)", - [(set (i64 DoubleRegs:$dst), - (or (i64 DoubleRegs:$src1), (not (i64 DoubleRegs:$src2))))]>, + [(set DoubleRegs:$dst, + (or DoubleRegs:$src1, (not DoubleRegs:$src2)))]>, Requires<[HasV4T]>; @@ -4046,9 +2469,8 @@ def ORd_NOTd_V4 : MInst<(outs DoubleRegs:$dst), def XORd_XORdd: MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2, DoubleRegs:$src3), "$dst ^= xor($src2, $src3)", - [(set (i64 DoubleRegs:$dst), - (xor (i64 DoubleRegs:$src1), (xor (i64 DoubleRegs:$src2), - (i64 DoubleRegs:$src3))))], + [(set DoubleRegs:$dst, + (xor DoubleRegs:$src1, (xor DoubleRegs:$src2, DoubleRegs:$src3)))], "$src1 = $dst">, Requires<[HasV4T]>; @@ -4058,9 +2480,8 @@ def XORd_XORdd: MInst_acc<(outs DoubleRegs:$dst), def ORr_ANDri_V4 : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs: $src2, s10Imm:$src3), "$dst = or($src1, and($src2, #$src3))", - [(set (i32 IntRegs:$dst), - (or (i32 IntRegs:$src1), (and (i32 IntRegs:$src2), - s10ImmPred:$src3)))], + [(set IntRegs:$dst, + (or IntRegs:$src1, (and IntRegs:$src2, s10ImmPred:$src3)))], "$src2 = $dst">, Requires<[HasV4T]>; @@ -4069,9 +2490,8 @@ def ORr_ANDri_V4 : MInst_acc<(outs IntRegs:$dst), def ANDr_ANDrr_V4 : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs: $src2, IntRegs:$src3), "$dst &= and($src2, $src3)", - [(set (i32 IntRegs:$dst), - (and (i32 IntRegs:$src1), (and (i32 IntRegs:$src2), - (i32 IntRegs:$src3))))], + [(set IntRegs:$dst, + (and IntRegs:$src1, (and IntRegs:$src2, IntRegs:$src3)))], "$src1 = $dst">, Requires<[HasV4T]>; @@ -4079,9 +2499,8 @@ def ANDr_ANDrr_V4 : MInst_acc<(outs IntRegs:$dst), def ORr_ANDrr_V4 : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs: $src2, IntRegs:$src3), "$dst |= and($src2, $src3)", - [(set (i32 IntRegs:$dst), - (or (i32 IntRegs:$src1), (and (i32 IntRegs:$src2), - (i32 IntRegs:$src3))))], + [(set IntRegs:$dst, + (or IntRegs:$src1, (and IntRegs:$src2, IntRegs:$src3)))], "$src1 = $dst">, Requires<[HasV4T]>; @@ -4089,9 +2508,8 @@ def ORr_ANDrr_V4 : MInst_acc<(outs IntRegs:$dst), def XORr_ANDrr_V4 : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs: $src2, IntRegs:$src3), "$dst ^= and($src2, $src3)", - [(set (i32 IntRegs:$dst), - (xor (i32 IntRegs:$src1), (and (i32 IntRegs:$src2), - (i32 IntRegs:$src3))))], + [(set IntRegs:$dst, + (xor IntRegs:$src1, (and IntRegs:$src2, IntRegs:$src3)))], "$src1 = $dst">, Requires<[HasV4T]>; @@ -4100,9 +2518,8 @@ def XORr_ANDrr_V4 : MInst_acc<(outs IntRegs:$dst), def ANDr_ANDr_NOTr_V4 : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs: $src2, IntRegs:$src3), "$dst &= and($src2, ~$src3)", - [(set (i32 IntRegs:$dst), - (and (i32 IntRegs:$src1), (and (i32 IntRegs:$src2), - (not (i32 IntRegs:$src3)))))], + [(set IntRegs:$dst, + (and IntRegs:$src1, (and IntRegs:$src2, (not IntRegs:$src3))))], "$src1 = $dst">, Requires<[HasV4T]>; @@ -4110,9 +2527,8 @@ def ANDr_ANDr_NOTr_V4 : MInst_acc<(outs IntRegs:$dst), def ORr_ANDr_NOTr_V4 : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs: $src2, IntRegs:$src3), "$dst |= and($src2, ~$src3)", - [(set (i32 IntRegs:$dst), - (or (i32 
IntRegs:$src1), (and (i32 IntRegs:$src2), - (not (i32 IntRegs:$src3)))))], + [(set IntRegs:$dst, + (or IntRegs:$src1, (and IntRegs:$src2, (not IntRegs:$src3))))], "$src1 = $dst">, Requires<[HasV4T]>; @@ -4120,9 +2536,8 @@ def ORr_ANDr_NOTr_V4 : MInst_acc<(outs IntRegs:$dst), def XORr_ANDr_NOTr_V4 : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs: $src2, IntRegs:$src3), "$dst ^= and($src2, ~$src3)", - [(set (i32 IntRegs:$dst), - (xor (i32 IntRegs:$src1), (and (i32 IntRegs:$src2), - (not (i32 IntRegs:$src3)))))], + [(set IntRegs:$dst, + (xor IntRegs:$src1, (and IntRegs:$src2, (not IntRegs:$src3))))], "$src1 = $dst">, Requires<[HasV4T]>; @@ -4131,9 +2546,8 @@ def XORr_ANDr_NOTr_V4 : MInst_acc<(outs IntRegs:$dst), def ANDr_ORrr_V4 : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs: $src2, IntRegs:$src3), "$dst &= or($src2, $src3)", - [(set (i32 IntRegs:$dst), - (and (i32 IntRegs:$src1), (or (i32 IntRegs:$src2), - (i32 IntRegs:$src3))))], + [(set IntRegs:$dst, + (and IntRegs:$src1, (or IntRegs:$src2, IntRegs:$src3)))], "$src1 = $dst">, Requires<[HasV4T]>; @@ -4141,9 +2555,8 @@ def ANDr_ORrr_V4 : MInst_acc<(outs IntRegs:$dst), def ORr_ORrr_V4 : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs: $src2, IntRegs:$src3), "$dst |= or($src2, $src3)", - [(set (i32 IntRegs:$dst), - (or (i32 IntRegs:$src1), (or (i32 IntRegs:$src2), - (i32 IntRegs:$src3))))], + [(set IntRegs:$dst, + (or IntRegs:$src1, (or IntRegs:$src2, IntRegs:$src3)))], "$src1 = $dst">, Requires<[HasV4T]>; @@ -4151,9 +2564,8 @@ def ORr_ORrr_V4 : MInst_acc<(outs IntRegs:$dst), def XORr_ORrr_V4 : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs: $src2, IntRegs:$src3), "$dst ^= or($src2, $src3)", - [(set (i32 IntRegs:$dst), - (xor (i32 IntRegs:$src1), (or (i32 IntRegs:$src2), - (i32 IntRegs:$src3))))], + [(set IntRegs:$dst, + (xor IntRegs:$src1, (or IntRegs:$src2, IntRegs:$src3)))], "$src1 = $dst">, Requires<[HasV4T]>; @@ -4162,9 +2574,8 @@ def XORr_ORrr_V4 : MInst_acc<(outs IntRegs:$dst), def ANDr_XORrr_V4 : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs: $src2, IntRegs:$src3), "$dst &= xor($src2, $src3)", - [(set (i32 IntRegs:$dst), - (and (i32 IntRegs:$src1), (xor (i32 IntRegs:$src2), - (i32 IntRegs:$src3))))], + [(set IntRegs:$dst, + (and IntRegs:$src1, (xor IntRegs:$src2, IntRegs:$src3)))], "$src1 = $dst">, Requires<[HasV4T]>; @@ -4172,9 +2583,8 @@ def ANDr_XORrr_V4 : MInst_acc<(outs IntRegs:$dst), def ORr_XORrr_V4 : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs: $src2, IntRegs:$src3), "$dst |= xor($src2, $src3)", - [(set (i32 IntRegs:$dst), - (and (i32 IntRegs:$src1), (xor (i32 IntRegs:$src2), - (i32 IntRegs:$src3))))], + [(set IntRegs:$dst, + (and IntRegs:$src1, (xor IntRegs:$src2, IntRegs:$src3)))], "$src1 = $dst">, Requires<[HasV4T]>; @@ -4182,9 +2592,8 @@ def ORr_XORrr_V4 : MInst_acc<(outs IntRegs:$dst), def XORr_XORrr_V4 : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs: $src2, IntRegs:$src3), "$dst ^= xor($src2, $src3)", - [(set (i32 IntRegs:$dst), - (and (i32 IntRegs:$src1), (xor (i32 IntRegs:$src2), - (i32 IntRegs:$src3))))], + [(set IntRegs:$dst, + (and IntRegs:$src1, (xor IntRegs:$src2, IntRegs:$src3)))], "$src1 = $dst">, Requires<[HasV4T]>; @@ -4192,9 +2601,8 @@ def XORr_XORrr_V4 : MInst_acc<(outs IntRegs:$dst), def ORr_ANDri2_V4 : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs: $src2, s10Imm:$src3), "$dst |= and($src2, #$src3)", - [(set (i32 IntRegs:$dst), - (or (i32 IntRegs:$src1), (and (i32 IntRegs:$src2), - s10ImmPred:$src3)))], + 
[(set IntRegs:$dst, + (or IntRegs:$src1, (and IntRegs:$src2, s10ImmPred:$src3)))], "$src1 = $dst">, Requires<[HasV4T]>; @@ -4202,9 +2610,8 @@ def ORr_ANDri2_V4 : MInst_acc<(outs IntRegs:$dst), def ORr_ORri_V4 : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs: $src2, s10Imm:$src3), "$dst |= or($src2, #$src3)", - [(set (i32 IntRegs:$dst), - (or (i32 IntRegs:$src1), (and (i32 IntRegs:$src2), - s10ImmPred:$src3)))], + [(set IntRegs:$dst, + (or IntRegs:$src1, (and IntRegs:$src2, s10ImmPred:$src3)))], "$src1 = $dst">, Requires<[HasV4T]>; @@ -4256,9 +2663,8 @@ def ORr_ORri_V4 : MInst_acc<(outs IntRegs:$dst), def ADDi_MPYri_V4 : MInst<(outs IntRegs:$dst), (ins u6Imm:$src1, IntRegs:$src2, u6Imm:$src3), "$dst = add(#$src1, mpyi($src2, #$src3))", - [(set (i32 IntRegs:$dst), - (add (mul (i32 IntRegs:$src2), u6ImmPred:$src3), - u6ImmPred:$src1))]>, + [(set IntRegs:$dst, + (add (mul IntRegs:$src2, u6ImmPred:$src3), u6ImmPred:$src1))]>, Requires<[HasV4T]>; // Rd=add(#u6,mpyi(Rs,Rt)) @@ -4266,36 +2672,32 @@ def ADDi_MPYri_V4 : MInst<(outs IntRegs:$dst), def ADDi_MPYrr_V4 : MInst<(outs IntRegs:$dst), (ins u6Imm:$src1, IntRegs:$src2, IntRegs:$src3), "$dst = add(#$src1, mpyi($src2, $src3))", - [(set (i32 IntRegs:$dst), - (add (mul (i32 IntRegs:$src2), (i32 IntRegs:$src3)), - u6ImmPred:$src1))]>, + [(set IntRegs:$dst, + (add (mul IntRegs:$src2, IntRegs:$src3), u6ImmPred:$src1))]>, Requires<[HasV4T]>; // Rd=add(Ru,mpyi(#u6:2,Rs)) def ADDr_MPYir_V4 : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, u6Imm:$src2, IntRegs:$src3), "$dst = add($src1, mpyi(#$src2, $src3))", - [(set (i32 IntRegs:$dst), - (add (i32 IntRegs:$src1), (mul (i32 IntRegs:$src3), - u6_2ImmPred:$src2)))]>, + [(set IntRegs:$dst, + (add IntRegs:$src1, (mul IntRegs:$src3, u6_2ImmPred:$src2)))]>, Requires<[HasV4T]>; // Rd=add(Ru,mpyi(Rs,#u6)) def ADDr_MPYri_V4 : MInst<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2, u6Imm:$src3), "$dst = add($src1, mpyi($src2, #$src3))", - [(set (i32 IntRegs:$dst), - (add (i32 IntRegs:$src1), (mul (i32 IntRegs:$src2), - u6ImmPred:$src3)))]>, + [(set IntRegs:$dst, + (add IntRegs:$src1, (mul IntRegs:$src2, u6ImmPred:$src3)))]>, Requires<[HasV4T]>; // Rx=add(Ru,mpyi(Rx,Rs)) def ADDr_MPYrr_V4 : MInst_acc<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2, IntRegs:$src3), "$dst = add($src1, mpyi($src2, $src3))", - [(set (i32 IntRegs:$dst), - (add (i32 IntRegs:$src1), (mul (i32 IntRegs:$src2), - (i32 IntRegs:$src3))))], + [(set IntRegs:$dst, + (add IntRegs:$src1, (mul IntRegs:$src2, IntRegs:$src3)))], "$src2 = $dst">, Requires<[HasV4T]>; @@ -4343,9 +2745,8 @@ def ADDr_MPYrr_V4 : MInst_acc<(outs IntRegs:$dst), def ADDi_ASLri_V4 : MInst_acc<(outs IntRegs:$dst), (ins u8Imm:$src1, IntRegs:$src2, u5Imm:$src3), "$dst = add(#$src1, asl($src2, #$src3))", - [(set (i32 IntRegs:$dst), - (add (shl (i32 IntRegs:$src2), u5ImmPred:$src3), - u8ImmPred:$src1))], + [(set IntRegs:$dst, + (add (shl IntRegs:$src2, u5ImmPred:$src3), u8ImmPred:$src1))], "$src2 = $dst">, Requires<[HasV4T]>; @@ -4353,9 +2754,8 @@ def ADDi_ASLri_V4 : MInst_acc<(outs IntRegs:$dst), def ADDi_LSRri_V4 : MInst_acc<(outs IntRegs:$dst), (ins u8Imm:$src1, IntRegs:$src2, u5Imm:$src3), "$dst = add(#$src1, lsr($src2, #$src3))", - [(set (i32 IntRegs:$dst), - (add (srl (i32 IntRegs:$src2), u5ImmPred:$src3), - u8ImmPred:$src1))], + [(set IntRegs:$dst, + (add (srl IntRegs:$src2, u5ImmPred:$src3), u8ImmPred:$src1))], "$src2 = $dst">, Requires<[HasV4T]>; @@ -4363,9 +2763,8 @@ def ADDi_LSRri_V4 : MInst_acc<(outs IntRegs:$dst), def SUBi_ASLri_V4 : 
MInst_acc<(outs IntRegs:$dst), (ins u8Imm:$src1, IntRegs:$src2, u5Imm:$src3), "$dst = sub(#$src1, asl($src2, #$src3))", - [(set (i32 IntRegs:$dst), - (sub (shl (i32 IntRegs:$src2), u5ImmPred:$src3), - u8ImmPred:$src1))], + [(set IntRegs:$dst, + (sub (shl IntRegs:$src2, u5ImmPred:$src3), u8ImmPred:$src1))], "$src2 = $dst">, Requires<[HasV4T]>; @@ -4373,9 +2772,8 @@ def SUBi_ASLri_V4 : MInst_acc<(outs IntRegs:$dst), def SUBi_LSRri_V4 : MInst_acc<(outs IntRegs:$dst), (ins u8Imm:$src1, IntRegs:$src2, u5Imm:$src3), "$dst = sub(#$src1, lsr($src2, #$src3))", - [(set (i32 IntRegs:$dst), - (sub (srl (i32 IntRegs:$src2), u5ImmPred:$src3), - u8ImmPred:$src1))], + [(set IntRegs:$dst, + (sub (srl IntRegs:$src2, u5ImmPred:$src3), u8ImmPred:$src1))], "$src2 = $dst">, Requires<[HasV4T]>; @@ -4385,9 +2783,8 @@ def SUBi_LSRri_V4 : MInst_acc<(outs IntRegs:$dst), def ANDi_ASLri_V4 : MInst_acc<(outs IntRegs:$dst), (ins u8Imm:$src1, IntRegs:$src2, u5Imm:$src3), "$dst = and(#$src1, asl($src2, #$src3))", - [(set (i32 IntRegs:$dst), - (and (shl (i32 IntRegs:$src2), u5ImmPred:$src3), - u8ImmPred:$src1))], + [(set IntRegs:$dst, + (and (shl IntRegs:$src2, u5ImmPred:$src3), u8ImmPred:$src1))], "$src2 = $dst">, Requires<[HasV4T]>; @@ -4395,31 +2792,26 @@ def ANDi_ASLri_V4 : MInst_acc<(outs IntRegs:$dst), def ANDi_LSRri_V4 : MInst_acc<(outs IntRegs:$dst), (ins u8Imm:$src1, IntRegs:$src2, u5Imm:$src3), "$dst = and(#$src1, lsr($src2, #$src3))", - [(set (i32 IntRegs:$dst), - (and (srl (i32 IntRegs:$src2), u5ImmPred:$src3), - u8ImmPred:$src1))], + [(set IntRegs:$dst, + (and (srl IntRegs:$src2, u5ImmPred:$src3), u8ImmPred:$src1))], "$src2 = $dst">, Requires<[HasV4T]>; //Rx=or(#u8,asl(Rx,#U5)) -let AddedComplexity = 30 in def ORi_ASLri_V4 : MInst_acc<(outs IntRegs:$dst), (ins u8Imm:$src1, IntRegs:$src2, u5Imm:$src3), "$dst = or(#$src1, asl($src2, #$src3))", - [(set (i32 IntRegs:$dst), - (or (shl (i32 IntRegs:$src2), u5ImmPred:$src3), - u8ImmPred:$src1))], + [(set IntRegs:$dst, + (or (shl IntRegs:$src2, u5ImmPred:$src3), u8ImmPred:$src1))], "$src2 = $dst">, Requires<[HasV4T]>; //Rx=or(#u8,lsr(Rx,#U5)) -let AddedComplexity = 30 in def ORi_LSRri_V4 : MInst_acc<(outs IntRegs:$dst), (ins u8Imm:$src1, IntRegs:$src2, u5Imm:$src3), "$dst = or(#$src1, lsr($src2, #$src3))", - [(set (i32 IntRegs:$dst), - (or (srl (i32 IntRegs:$src2), u5ImmPred:$src3), - u8ImmPred:$src1))], + [(set IntRegs:$dst, + (or (srl IntRegs:$src2, u5ImmPred:$src3), u8ImmPred:$src1))], "$src2 = $dst">, Requires<[HasV4T]>; @@ -4428,8 +2820,7 @@ def ORi_LSRri_V4 : MInst_acc<(outs IntRegs:$dst), //Rd=lsl(#s6,Rt) def LSLi_V4 : MInst<(outs IntRegs:$dst), (ins s6Imm:$src1, IntRegs:$src2), "$dst = lsl(#$src1, $src2)", - [(set (i32 IntRegs:$dst), (shl s6ImmPred:$src1, - (i32 IntRegs:$src2)))]>, + [(set IntRegs:$dst, (shl s6ImmPred:$src1, IntRegs:$src2))]>, Requires<[HasV4T]>; @@ -4438,9 +2829,8 @@ def LSLi_V4 : MInst<(outs IntRegs:$dst), (ins s6Imm:$src1, IntRegs:$src2), def ASLd_rr_xor_V4 : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2, IntRegs:$src3), "$dst ^= asl($src2, $src3)", - [(set (i64 DoubleRegs:$dst), - (xor (i64 DoubleRegs:$src1), (shl (i64 DoubleRegs:$src2), - (i32 IntRegs:$src3))))], + [(set DoubleRegs:$dst, + (xor DoubleRegs:$src1, (shl DoubleRegs:$src2, IntRegs:$src3)))], "$src1 = $dst">, Requires<[HasV4T]>; @@ -4448,9 +2838,8 @@ def ASLd_rr_xor_V4 : MInst_acc<(outs DoubleRegs:$dst), def ASRd_rr_xor_V4 : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2, IntRegs:$src3), "$dst ^= asr($src2, $src3)", - [(set 
(i64 DoubleRegs:$dst), - (xor (i64 DoubleRegs:$src1), (sra (i64 DoubleRegs:$src2), - (i32 IntRegs:$src3))))], + [(set DoubleRegs:$dst, + (xor DoubleRegs:$src1, (sra DoubleRegs:$src2, IntRegs:$src3)))], "$src1 = $dst">, Requires<[HasV4T]>; @@ -4458,9 +2847,8 @@ def ASRd_rr_xor_V4 : MInst_acc<(outs DoubleRegs:$dst), def LSLd_rr_xor_V4 : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2, IntRegs:$src3), "$dst ^= lsl($src2, $src3)", - [(set (i64 DoubleRegs:$dst), (xor (i64 DoubleRegs:$src1), - (shl (i64 DoubleRegs:$src2), - (i32 IntRegs:$src3))))], + [(set DoubleRegs:$dst, + (xor DoubleRegs:$src1, (shl DoubleRegs:$src2, IntRegs:$src3)))], "$src1 = $dst">, Requires<[HasV4T]>; @@ -4468,9 +2856,8 @@ def LSLd_rr_xor_V4 : MInst_acc<(outs DoubleRegs:$dst), def LSRd_rr_xor_V4 : MInst_acc<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1, DoubleRegs:$src2, IntRegs:$src3), "$dst ^= lsr($src2, $src3)", - [(set (i64 DoubleRegs:$dst), - (xor (i64 DoubleRegs:$src1), (srl (i64 DoubleRegs:$src2), - (i32 IntRegs:$src3))))], + [(set DoubleRegs:$dst, + (xor DoubleRegs:$src1, (srl DoubleRegs:$src2, IntRegs:$src3)))], "$src1 = $dst">, Requires<[HasV4T]>; @@ -4516,16 +2903,16 @@ let AddedComplexity = 30 in def MEMw_ADDSUBi_indexed_MEM_V4 : MEMInst_V4<(outs), (ins IntRegs:$base, u6_2Imm:$offset, m6Imm:$addend), "Error; should not emit", - [(store (add (load (add (i32 IntRegs:$base), u6_2ImmPred:$offset)), - m6ImmPred:$addend), - (add (i32 IntRegs:$base), u6_2ImmPred:$offset))]>, + [(store (add (load (add IntRegs:$base, u6_2ImmPred:$offset)), +m6ImmPred:$addend), + (add IntRegs:$base, u6_2ImmPred:$offset))]>, Requires<[HasV4T, UseMEMOP]>; // memw(Rs+#u6:2) += #U5 let AddedComplexity = 30 in def MEMw_ADDi_indexed_MEM_V4 : MEMInst_V4<(outs), (ins IntRegs:$base, u6_2Imm:$offset, u5Imm:$addend), - "memw($base+#$offset) += #$addend", + "memw($base+#$offset) += $addend", []>, Requires<[HasV4T, UseMEMOP]>; @@ -4533,7 +2920,7 @@ def MEMw_ADDi_indexed_MEM_V4 : MEMInst_V4<(outs), let AddedComplexity = 30 in def MEMw_SUBi_indexed_MEM_V4 : MEMInst_V4<(outs), (ins IntRegs:$base, u6_2Imm:$offset, u5Imm:$subend), - "memw($base+#$offset) -= #$subend", + "memw($base+#$offset) -= $subend", []>, Requires<[HasV4T, UseMEMOP]>; @@ -4542,9 +2929,9 @@ let AddedComplexity = 30 in def MEMw_ADDr_indexed_MEM_V4 : MEMInst_V4<(outs), (ins IntRegs:$base, u6_2Imm:$offset, IntRegs:$addend), "memw($base+#$offset) += $addend", - [(store (add (load (add (i32 IntRegs:$base), u6_2ImmPred:$offset)), - (i32 IntRegs:$addend)), - (add (i32 IntRegs:$base), u6_2ImmPred:$offset))]>, + [(store (add (load (add IntRegs:$base, u6_2ImmPred:$offset)), +IntRegs:$addend), + (add IntRegs:$base, u6_2ImmPred:$offset))]>, Requires<[HasV4T, UseMEMOP]>; // memw(Rs+#u6:2) -= Rt @@ -4552,19 +2939,19 @@ let AddedComplexity = 30 in def MEMw_SUBr_indexed_MEM_V4 : MEMInst_V4<(outs), (ins IntRegs:$base, u6_2Imm:$offset, IntRegs:$subend), "memw($base+#$offset) -= $subend", - [(store (sub (load (add (i32 IntRegs:$base), u6_2ImmPred:$offset)), - (i32 IntRegs:$subend)), - (add (i32 IntRegs:$base), u6_2ImmPred:$offset))]>, + [(store (sub (load (add IntRegs:$base, u6_2ImmPred:$offset)), +IntRegs:$subend), + (add IntRegs:$base, u6_2ImmPred:$offset))]>, Requires<[HasV4T, UseMEMOP]>; // memw(Rs+#u6:2) &= Rt let AddedComplexity = 30 in def MEMw_ANDr_indexed_MEM_V4 : MEMInst_V4<(outs), (ins IntRegs:$base, u6_2Imm:$offset, IntRegs:$andend), - "memw($base+#$offset) &= $andend", - [(store (and (load (add (i32 IntRegs:$base), u6_2ImmPred:$offset)), - (i32 IntRegs:$andend)), - 
(add (i32 IntRegs:$base), u6_2ImmPred:$offset))]>, + "memw($base+#$offset) += $andend", + [(store (and (load (add IntRegs:$base, u6_2ImmPred:$offset)), +IntRegs:$andend), + (add IntRegs:$base, u6_2ImmPred:$offset))]>, Requires<[HasV4T, UseMEMOP]>; // memw(Rs+#u6:2) |= Rt @@ -4572,9 +2959,9 @@ let AddedComplexity = 30 in def MEMw_ORr_indexed_MEM_V4 : MEMInst_V4<(outs), (ins IntRegs:$base, u6_2Imm:$offset, IntRegs:$orend), "memw($base+#$offset) |= $orend", - [(store (or (load (add (i32 IntRegs:$base), u6_2ImmPred:$offset)), - (i32 IntRegs:$orend)), - (add (i32 IntRegs:$base), u6_2ImmPred:$offset))]>, + [(store (or (load (add IntRegs:$base, u6_2ImmPred:$offset)), + IntRegs:$orend), + (add IntRegs:$base, u6_2ImmPred:$offset))]>, Requires<[HasV4T, UseMEMOP]>; // MEMw_ADDSUBi_V4: @@ -4609,7 +2996,7 @@ let AddedComplexity = 30 in def MEMw_ADDr_MEM_V4 : MEMInst_V4<(outs), (ins MEMri:$addr, IntRegs:$addend), "memw($addr) += $addend", - [(store (add (load ADDRriU6_2:$addr), (i32 IntRegs:$addend)), + [(store (add (load ADDRriU6_2:$addr), IntRegs:$addend), ADDRriU6_2:$addr)]>, Requires<[HasV4T, UseMEMOP]>; @@ -4618,7 +3005,7 @@ let AddedComplexity = 30 in def MEMw_SUBr_MEM_V4 : MEMInst_V4<(outs), (ins MEMri:$addr, IntRegs:$subend), "memw($addr) -= $subend", - [(store (sub (load ADDRriU6_2:$addr), (i32 IntRegs:$subend)), + [(store (sub (load ADDRriU6_2:$addr), IntRegs:$subend), ADDRriU6_2:$addr)]>, Requires<[HasV4T, UseMEMOP]>; @@ -4627,7 +3014,7 @@ let AddedComplexity = 30 in def MEMw_ANDr_MEM_V4 : MEMInst_V4<(outs), (ins MEMri:$addr, IntRegs:$andend), "memw($addr) &= $andend", - [(store (and (load ADDRriU6_2:$addr), (i32 IntRegs:$andend)), + [(store (and (load ADDRriU6_2:$addr), IntRegs:$andend), ADDRriU6_2:$addr)]>, Requires<[HasV4T, UseMEMOP]>; @@ -4636,8 +3023,8 @@ let AddedComplexity = 30 in def MEMw_ORr_MEM_V4 : MEMInst_V4<(outs), (ins MEMri:$addr, IntRegs:$orend), "memw($addr) |= $orend", - [(store (or (load ADDRriU6_2:$addr), (i32 IntRegs:$orend)), - ADDRriU6_2:$addr)]>, + [(store (or (load ADDRriU6_2:$addr), IntRegs:$orend), +ADDRriU6_2:$addr)]>, Requires<[HasV4T, UseMEMOP]>; //===----------------------------------------------------------------------===// @@ -4673,10 +3060,10 @@ let AddedComplexity = 30 in def MEMh_ADDSUBi_indexed_MEM_V4 : MEMInst_V4<(outs), (ins IntRegs:$base, u6_1Imm:$offset, m6Imm:$addend), "Error; should not emit", - [(truncstorei16 (add (sextloadi16 (add (i32 IntRegs:$base), + [(truncstorei16 (add (sextloadi16 (add IntRegs:$base, u6_1ImmPred:$offset)), m6ImmPred:$addend), - (add (i32 IntRegs:$base), u6_1ImmPred:$offset))]>, + (add IntRegs:$base, u6_1ImmPred:$offset))]>, Requires<[HasV4T, UseMEMOP]>; // memh(Rs+#u6:1) += #U5 @@ -4700,10 +3087,10 @@ let AddedComplexity = 30 in def MEMh_ADDr_indexed_MEM_V4 : MEMInst_V4<(outs), (ins IntRegs:$base, u6_1Imm:$offset, IntRegs:$addend), "memh($base+#$offset) += $addend", - [(truncstorei16 (add (sextloadi16 (add (i32 IntRegs:$base), + [(truncstorei16 (add (sextloadi16 (add IntRegs:$base, u6_1ImmPred:$offset)), - (i32 IntRegs:$addend)), - (add (i32 IntRegs:$base), u6_1ImmPred:$offset))]>, + IntRegs:$addend), + (add IntRegs:$base, u6_1ImmPred:$offset))]>, Requires<[HasV4T, UseMEMOP]>; // memh(Rs+#u6:1) -= Rt @@ -4711,10 +3098,10 @@ let AddedComplexity = 30 in def MEMh_SUBr_indexed_MEM_V4 : MEMInst_V4<(outs), (ins IntRegs:$base, u6_1Imm:$offset, IntRegs:$subend), "memh($base+#$offset) -= $subend", - [(truncstorei16 (sub (sextloadi16 (add (i32 IntRegs:$base), + [(truncstorei16 (sub (sextloadi16 (add IntRegs:$base, 
u6_1ImmPred:$offset)), - (i32 IntRegs:$subend)), - (add (i32 IntRegs:$base), u6_1ImmPred:$offset))]>, + IntRegs:$subend), + (add IntRegs:$base, u6_1ImmPred:$offset))]>, Requires<[HasV4T, UseMEMOP]>; // memh(Rs+#u6:1) &= Rt @@ -4722,10 +3109,10 @@ let AddedComplexity = 30 in def MEMh_ANDr_indexed_MEM_V4 : MEMInst_V4<(outs), (ins IntRegs:$base, u6_1Imm:$offset, IntRegs:$andend), "memh($base+#$offset) += $andend", - [(truncstorei16 (and (sextloadi16 (add (i32 IntRegs:$base), + [(truncstorei16 (and (sextloadi16 (add IntRegs:$base, u6_1ImmPred:$offset)), - (i32 IntRegs:$andend)), - (add (i32 IntRegs:$base), u6_1ImmPred:$offset))]>, + IntRegs:$andend), + (add IntRegs:$base, u6_1ImmPred:$offset))]>, Requires<[HasV4T, UseMEMOP]>; // memh(Rs+#u6:1) |= Rt @@ -4733,10 +3120,10 @@ let AddedComplexity = 30 in def MEMh_ORr_indexed_MEM_V4 : MEMInst_V4<(outs), (ins IntRegs:$base, u6_1Imm:$offset, IntRegs:$orend), "memh($base+#$offset) |= $orend", - [(truncstorei16 (or (sextloadi16 (add (i32 IntRegs:$base), + [(truncstorei16 (or (sextloadi16 (add IntRegs:$base, u6_1ImmPred:$offset)), - (i32 IntRegs:$orend)), - (add (i32 IntRegs:$base), u6_1ImmPred:$offset))]>, + IntRegs:$orend), + (add IntRegs:$base, u6_1ImmPred:$offset))]>, Requires<[HasV4T, UseMEMOP]>; // MEMh_ADDSUBi_V4: @@ -4772,7 +3159,7 @@ def MEMh_ADDr_MEM_V4 : MEMInst_V4<(outs), (ins MEMri:$addr, IntRegs:$addend), "memh($addr) += $addend", [(truncstorei16 (add (sextloadi16 ADDRriU6_1:$addr), - (i32 IntRegs:$addend)), ADDRriU6_1:$addr)]>, + IntRegs:$addend), ADDRriU6_1:$addr)]>, Requires<[HasV4T, UseMEMOP]>; // memh(Rs+#u6:1) -= Rt @@ -4781,7 +3168,7 @@ def MEMh_SUBr_MEM_V4 : MEMInst_V4<(outs), (ins MEMri:$addr, IntRegs:$subend), "memh($addr) -= $subend", [(truncstorei16 (sub (sextloadi16 ADDRriU6_1:$addr), - (i32 IntRegs:$subend)), ADDRriU6_1:$addr)]>, + IntRegs:$subend), ADDRriU6_1:$addr)]>, Requires<[HasV4T, UseMEMOP]>; // memh(Rs+#u6:1) &= Rt @@ -4790,7 +3177,7 @@ def MEMh_ANDr_MEM_V4 : MEMInst_V4<(outs), (ins MEMri:$addr, IntRegs:$andend), "memh($addr) &= $andend", [(truncstorei16 (and (sextloadi16 ADDRriU6_1:$addr), - (i32 IntRegs:$andend)), ADDRriU6_1:$addr)]>, + IntRegs:$andend), ADDRriU6_1:$addr)]>, Requires<[HasV4T, UseMEMOP]>; // memh(Rs+#u6:1) |= Rt @@ -4799,7 +3186,7 @@ def MEMh_ORr_MEM_V4 : MEMInst_V4<(outs), (ins MEMri:$addr, IntRegs:$orend), "memh($addr) |= $orend", [(truncstorei16 (or (sextloadi16 ADDRriU6_1:$addr), - (i32 IntRegs:$orend)), ADDRriU6_1:$addr)]>, + IntRegs:$orend), ADDRriU6_1:$addr)]>, Requires<[HasV4T, UseMEMOP]>; @@ -4836,10 +3223,10 @@ let AddedComplexity = 30 in def MEMb_ADDSUBi_indexed_MEM_V4 : MEMInst_V4<(outs), (ins IntRegs:$base, u6_0Imm:$offset, m6Imm:$addend), "Error; should not emit", - [(truncstorei8 (add (sextloadi8 (add (i32 IntRegs:$base), + [(truncstorei8 (add (sextloadi8 (add IntRegs:$base, u6_0ImmPred:$offset)), m6ImmPred:$addend), - (add (i32 IntRegs:$base), u6_0ImmPred:$offset))]>, + (add IntRegs:$base, u6_0ImmPred:$offset))]>, Requires<[HasV4T, UseMEMOP]>; // memb(Rs+#u6:0) += #U5 @@ -4863,10 +3250,10 @@ let AddedComplexity = 30 in def MEMb_ADDr_indexed_MEM_V4 : MEMInst_V4<(outs), (ins IntRegs:$base, u6_0Imm:$offset, IntRegs:$addend), "memb($base+#$offset) += $addend", - [(truncstorei8 (add (sextloadi8 (add (i32 IntRegs:$base), + [(truncstorei8 (add (sextloadi8 (add IntRegs:$base, u6_0ImmPred:$offset)), - (i32 IntRegs:$addend)), - (add (i32 IntRegs:$base), u6_0ImmPred:$offset))]>, + IntRegs:$addend), + (add IntRegs:$base, u6_0ImmPred:$offset))]>, Requires<[HasV4T, UseMEMOP]>; // memb(Rs+#u6:0) -= 
Rt @@ -4874,10 +3261,10 @@ let AddedComplexity = 30 in def MEMb_SUBr_indexed_MEM_V4 : MEMInst_V4<(outs), (ins IntRegs:$base, u6_0Imm:$offset, IntRegs:$subend), "memb($base+#$offset) -= $subend", - [(truncstorei8 (sub (sextloadi8 (add (i32 IntRegs:$base), + [(truncstorei8 (sub (sextloadi8 (add IntRegs:$base, u6_0ImmPred:$offset)), - (i32 IntRegs:$subend)), - (add (i32 IntRegs:$base), u6_0ImmPred:$offset))]>, + IntRegs:$subend), + (add IntRegs:$base, u6_0ImmPred:$offset))]>, Requires<[HasV4T, UseMEMOP]>; // memb(Rs+#u6:0) &= Rt @@ -4885,10 +3272,10 @@ let AddedComplexity = 30 in def MEMb_ANDr_indexed_MEM_V4 : MEMInst_V4<(outs), (ins IntRegs:$base, u6_0Imm:$offset, IntRegs:$andend), "memb($base+#$offset) += $andend", - [(truncstorei8 (and (sextloadi8 (add (i32 IntRegs:$base), + [(truncstorei8 (and (sextloadi8 (add IntRegs:$base, u6_0ImmPred:$offset)), - (i32 IntRegs:$andend)), - (add (i32 IntRegs:$base), u6_0ImmPred:$offset))]>, + IntRegs:$andend), + (add IntRegs:$base, u6_0ImmPred:$offset))]>, Requires<[HasV4T, UseMEMOP]>; // memb(Rs+#u6:0) |= Rt @@ -4896,10 +3283,10 @@ let AddedComplexity = 30 in def MEMb_ORr_indexed_MEM_V4 : MEMInst_V4<(outs), (ins IntRegs:$base, u6_0Imm:$offset, IntRegs:$orend), "memb($base+#$offset) |= $orend", - [(truncstorei8 (or (sextloadi8 (add (i32 IntRegs:$base), + [(truncstorei8 (or (sextloadi8 (add IntRegs:$base, u6_0ImmPred:$offset)), - (i32 IntRegs:$orend)), - (add (i32 IntRegs:$base), u6_0ImmPred:$offset))]>, + IntRegs:$orend), + (add IntRegs:$base, u6_0ImmPred:$offset))]>, Requires<[HasV4T, UseMEMOP]>; // MEMb_ADDSUBi_V4: @@ -4935,7 +3322,7 @@ def MEMb_ADDr_MEM_V4 : MEMInst_V4<(outs), (ins MEMri:$addr, IntRegs:$addend), "memb($addr) += $addend", [(truncstorei8 (add (sextloadi8 ADDRriU6_0:$addr), - (i32 IntRegs:$addend)), ADDRriU6_0:$addr)]>, + IntRegs:$addend), ADDRriU6_0:$addr)]>, Requires<[HasV4T, UseMEMOP]>; // memb(Rs+#u6:0) -= Rt @@ -4944,7 +3331,7 @@ def MEMb_SUBr_MEM_V4 : MEMInst_V4<(outs), (ins MEMri:$addr, IntRegs:$subend), "memb($addr) -= $subend", [(truncstorei8 (sub (sextloadi8 ADDRriU6_0:$addr), - (i32 IntRegs:$subend)), ADDRriU6_0:$addr)]>, + IntRegs:$subend), ADDRriU6_0:$addr)]>, Requires<[HasV4T, UseMEMOP]>; // memb(Rs+#u6:0) &= Rt @@ -4953,7 +3340,7 @@ def MEMb_ANDr_MEM_V4 : MEMInst_V4<(outs), (ins MEMri:$addr, IntRegs:$andend), "memb($addr) &= $andend", [(truncstorei8 (and (sextloadi8 ADDRriU6_0:$addr), - (i32 IntRegs:$andend)), ADDRriU6_0:$addr)]>, + IntRegs:$andend), ADDRriU6_0:$addr)]>, Requires<[HasV4T, UseMEMOP]>; // memb(Rs+#u6:0) |= Rt @@ -4962,7 +3349,7 @@ def MEMb_ORr_MEM_V4 : MEMInst_V4<(outs), (ins MEMri:$addr, IntRegs:$orend), "memb($addr) |= $orend", [(truncstorei8 (or (sextloadi8 ADDRriU6_0:$addr), - (i32 IntRegs:$orend)), ADDRriU6_0:$addr)]>, + IntRegs:$orend), ADDRriU6_0:$addr)]>, Requires<[HasV4T, UseMEMOP]>; @@ -4977,16 +3364,13 @@ def MEMb_ORr_MEM_V4 : MEMInst_V4<(outs), // The implemented patterns are: EQ/GT/GTU. // Missing patterns are: GE/GEU/LT/LTU/LE/LEU. -// Following instruction is not being extended as it results into the -// incorrect code for negative numbers. 
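The cmpb.* selection patterns below all reduce a byte comparison to 32-bit operations: equality masks the low byte (or xors and masks), signed greater-than shifts both operands left by 24 so each byte's sign bit lands in bit 31, and unsigned greater-than masks with 255. This is also why the previously commented-out cmpb.gt immediate pattern was wrong and why the re-enabled form matches through s32_24ImmPred, which presumably accepts only immediates already in the shifted form. The following standalone C++ sketch (an illustration added for clarity, not part of the patch; two's-complement int32_t conversion is assumed) checks those equivalences:

#include <cassert>
#include <cstdint>

// Pd = cmpb.eq(Rs, #u8): equality on the low byte (CMPbEQri_V4 form).
bool cmpb_eq_ri(uint32_t rs, uint8_t u8) { return (rs & 255) == u8; }

// Pd = cmpb.eq(Rs, Rt): xor-then-mask form (CMPbEQrr_ubub_V4).
bool cmpb_eq_rr(uint32_t rs, uint32_t rt) { return ((rs ^ rt) & 255) == 0; }

// Pd = cmpb.gt(Rs, Rt): shl-by-24 puts each byte's sign bit into bit 31,
// so a plain signed i32 compare orders the bytes as signed (CMPbGTrr_V4).
bool cmpb_gt_rr(uint32_t rs, uint32_t rt) {
  return static_cast<int32_t>(rs << 24) > static_cast<int32_t>(rt << 24);
}

// Pd = cmpb.gtu(Rs, #u7): unsigned compare of the low byte (CMPbGTUri_V4).
bool cmpb_gtu_ri(uint32_t rs, uint8_t u7) { return (rs & 255) > u7; }

int main() {
  assert(cmpb_eq_ri(0x12345680u, 0x80));
  assert(cmpb_eq_rr(0xAA00FF42u, 0xBB11CC42u)); // low bytes match
  assert(cmpb_gt_rr(0x01u, 0xFFu));   // 1 > -1 once the bytes are signed
  assert(!cmpb_gt_rr(0xFFu, 0x01u));  // -1 > 1 is false
  assert(cmpb_gtu_ri(0xFFu, 0x7Fu));  // 255 >u 127 as unsigned bytes
  return 0;
}
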
// Pd=cmpb.eq(Rs,#u8) - let isCompare = 1 in def CMPbEQri_V4 : MInst<(outs PredRegs:$dst), (ins IntRegs:$src1, u8Imm:$src2), "$dst = cmpb.eq($src1, #$src2)", - [(set (i1 PredRegs:$dst), - (seteq (and (i32 IntRegs:$src1), 255), u8ImmPred:$src2))]>, + [(set PredRegs:$dst, (seteq (and IntRegs:$src1, 255), + u8ImmPred:$src2))]>, Requires<[HasV4T]>; // Pd=cmpb.eq(Rs,Rt) @@ -4994,9 +3378,10 @@ let isCompare = 1 in def CMPbEQrr_ubub_V4 : MInst<(outs PredRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2), "$dst = cmpb.eq($src1, $src2)", - [(set (i1 PredRegs:$dst), - (seteq (and (xor (i32 IntRegs:$src1), - (i32 IntRegs:$src2)), 255), 0))]>, + [(set PredRegs:$dst, (seteq (and (xor IntRegs:$src1, + IntRegs:$src2), + 255), + 0))]>, Requires<[HasV4T]>; // Pd=cmpb.eq(Rs,Rt) @@ -5004,31 +3389,26 @@ let isCompare = 1 in def CMPbEQrr_sbsb_V4 : MInst<(outs PredRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2), "$dst = cmpb.eq($src1, $src2)", - [(set (i1 PredRegs:$dst), - (seteq (shl (i32 IntRegs:$src1), (i32 24)), - (shl (i32 IntRegs:$src2), (i32 24))))]>, + [(set PredRegs:$dst, (seteq (shl IntRegs:$src1, (i32 24)), + (shl IntRegs:$src2, (i32 24))))]>, Requires<[HasV4T]>; -/* Incorrect Pattern -- immediate should be right shifted before being -used in the cmpb.gt instruction. // Pd=cmpb.gt(Rs,#s8) let isCompare = 1 in def CMPbGTri_V4 : MInst<(outs PredRegs:$dst), - (ins IntRegs:$src1, s8Imm:$src2), + (ins IntRegs:$src1, s32Imm:$src2), "$dst = cmpb.gt($src1, #$src2)", - [(set (i1 PredRegs:$dst), (setgt (shl (i32 IntRegs:$src1), (i32 24)), - s8ImmPred:$src2))]>, + [(set PredRegs:$dst, (setgt (shl IntRegs:$src1, (i32 24)), + s32_24ImmPred:$src2))]>, Requires<[HasV4T]>; -*/ // Pd=cmpb.gt(Rs,Rt) let isCompare = 1 in def CMPbGTrr_V4 : MInst<(outs PredRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2), "$dst = cmpb.gt($src1, $src2)", - [(set (i1 PredRegs:$dst), - (setgt (shl (i32 IntRegs:$src1), (i32 24)), - (shl (i32 IntRegs:$src2), (i32 24))))]>, + [(set PredRegs:$dst, (setgt (shl IntRegs:$src1, (i32 24)), + (shl IntRegs:$src2, (i32 24))))]>, Requires<[HasV4T]>; // Pd=cmpb.gtu(Rs,#u7) @@ -5036,8 +3416,8 @@ let isCompare = 1 in def CMPbGTUri_V4 : MInst<(outs PredRegs:$dst), (ins IntRegs:$src1, u7Imm:$src2), "$dst = cmpb.gtu($src1, #$src2)", - [(set (i1 PredRegs:$dst), (setugt (and (i32 IntRegs:$src1), 255), - u7ImmPred:$src2))]>, + [(set PredRegs:$dst, (setugt (and IntRegs:$src1, 255), + u7ImmPred:$src2))]>, Requires<[HasV4T]>; // Pd=cmpb.gtu(Rs,Rt) @@ -5045,21 +3425,18 @@ let isCompare = 1 in def CMPbGTUrr_V4 : MInst<(outs PredRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2), "$dst = cmpb.gtu($src1, $src2)", - [(set (i1 PredRegs:$dst), (setugt (and (i32 IntRegs:$src1), 255), - (and (i32 IntRegs:$src2), 255)))]>, + [(set PredRegs:$dst, (setugt (and IntRegs:$src1, 255), + (and IntRegs:$src2, 255)))]>, Requires<[HasV4T]>; -// Following instruction is not being extended as it results into the incorrect -// code for negative numbers. - // Signed half compare(.eq) ri. // Pd=cmph.eq(Rs,#s8) let isCompare = 1 in def CMPhEQri_V4 : MInst<(outs PredRegs:$dst), - (ins IntRegs:$src1, s8Imm:$src2), + (ins IntRegs:$src1, u16Imm:$src2), "$dst = cmph.eq($src1, #$src2)", - [(set (i1 PredRegs:$dst), (seteq (and (i32 IntRegs:$src1), 65535), - s8ImmPred:$src2))]>, + [(set PredRegs:$dst, (seteq (and IntRegs:$src1, 65535), + u16_s8ImmPred:$src2))]>, Requires<[HasV4T]>; // Signed half compare(.eq) rr. 
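The cmph.* hunks that follow apply the same idea to halfwords: mask with 65535 for the equality and unsigned forms, shift left by 16 for the signed form. The ri variants are widened here from s8Imm to u16Imm/s32Imm with matching u16_s8ImmPred/s32_16s8ImmPred predicates, again apparently so that only suitably pre-encoded immediates are matched. A standalone C++ sketch of the register-register equivalences, analogous to the byte version above (illustrative only, not part of the patch):

#include <cassert>
#include <cstdint>

bool cmph_eq_rr_xor(uint32_t rs, uint32_t rt) {  // CMPhEQrr_xor_V4 form
  return ((rs ^ rt) & 65535) == 0;
}
bool cmph_eq_rr_shl(uint32_t rs, uint32_t rt) {  // CMPhEQrr_shl_V4 form
  return (rs << 16) == (rt << 16);
}
bool cmph_gt_rr(uint32_t rs, uint32_t rt) {      // CMPhGTrr_shl_V4 form
  return static_cast<int32_t>(rs << 16) > static_cast<int32_t>(rt << 16);
}
bool cmph_gtu_ri(uint32_t rs, uint8_t u7) {      // CMPhGTUri_V4 form
  return (rs & 65535) > u7;
}

int main() {
  assert(cmph_eq_rr_xor(0xAAAA1234u, 0xBBBB1234u)); // low halfwords match
  assert(cmph_eq_rr_shl(0xAAAA1234u, 0xBBBB1234u));
  assert(cmph_gt_rr(0x0001u, 0xFFFFu)); // 1 > -1 on signed halfwords
  assert(cmph_gtu_ri(0x8000u, 0x7Fu));  // 32768 >u 127 as unsigned halfwords
  return 0;
}
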
@@ -5072,9 +3449,10 @@ let isCompare = 1 in def CMPhEQrr_xor_V4 : MInst<(outs PredRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2), "$dst = cmph.eq($src1, $src2)", - [(set (i1 PredRegs:$dst), (seteq (and (xor (i32 IntRegs:$src1), - (i32 IntRegs:$src2)), - 65535), 0))]>, + [(set PredRegs:$dst, (seteq (and (xor IntRegs:$src1, + IntRegs:$src2), + 65535), + 0))]>, Requires<[HasV4T]>; // Signed half compare(.eq) rr. @@ -5087,25 +3465,19 @@ let isCompare = 1 in def CMPhEQrr_shl_V4 : MInst<(outs PredRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2), "$dst = cmph.eq($src1, $src2)", - [(set (i1 PredRegs:$dst), - (seteq (shl (i32 IntRegs:$src1), (i32 16)), - (shl (i32 IntRegs:$src2), (i32 16))))]>, + [(set PredRegs:$dst, (seteq (shl IntRegs:$src1, (i32 16)), + (shl IntRegs:$src2, (i32 16))))]>, Requires<[HasV4T]>; -/* Incorrect Pattern -- immediate should be right shifted before being -used in the cmph.gt instruction. // Signed half compare(.gt) ri. // Pd=cmph.gt(Rs,#s8) - let isCompare = 1 in def CMPhGTri_V4 : MInst<(outs PredRegs:$dst), - (ins IntRegs:$src1, s8Imm:$src2), + (ins IntRegs:$src1, s32Imm:$src2), "$dst = cmph.gt($src1, #$src2)", - [(set (i1 PredRegs:$dst), - (setgt (shl (i32 IntRegs:$src1), (i32 16)), - s8ImmPred:$src2))]>, + [(set PredRegs:$dst, (setgt (shl IntRegs:$src1, (i32 16)), + s32_16s8ImmPred:$src2))]>, Requires<[HasV4T]>; -*/ // Signed half compare(.gt) rr. // Pd=cmph.gt(Rs,Rt) @@ -5113,9 +3485,8 @@ let isCompare = 1 in def CMPhGTrr_shl_V4 : MInst<(outs PredRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2), "$dst = cmph.gt($src1, $src2)", - [(set (i1 PredRegs:$dst), - (setgt (shl (i32 IntRegs:$src1), (i32 16)), - (shl (i32 IntRegs:$src2), (i32 16))))]>, + [(set PredRegs:$dst, (setgt (shl IntRegs:$src1, (i32 16)), + (shl IntRegs:$src2, (i32 16))))]>, Requires<[HasV4T]>; // Unsigned half compare rr (.gtu). @@ -5124,9 +3495,8 @@ let isCompare = 1 in def CMPhGTUrr_V4 : MInst<(outs PredRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2), "$dst = cmph.gtu($src1, $src2)", - [(set (i1 PredRegs:$dst), - (setugt (and (i32 IntRegs:$src1), 65535), - (and (i32 IntRegs:$src2), 65535)))]>, + [(set PredRegs:$dst, (setugt (and IntRegs:$src1, 65535), + (and IntRegs:$src2, 65535)))]>, Requires<[HasV4T]>; // Unsigned half compare ri (.gtu). @@ -5135,8 +3505,8 @@ let isCompare = 1 in def CMPhGTUri_V4 : MInst<(outs PredRegs:$dst), (ins IntRegs:$src1, u7Imm:$src2), "$dst = cmph.gtu($src1, #$src2)", - [(set (i1 PredRegs:$dst), (setugt (and (i32 IntRegs:$src1), 65535), - u7ImmPred:$src2))]>, + [(set PredRegs:$dst, (setugt (and IntRegs:$src1, 65535), + u7ImmPred:$src2))]>, Requires<[HasV4T]>; //===----------------------------------------------------------------------===// @@ -5153,37 +3523,9 @@ let isReturn = 1, isTerminator = 1, isBarrier = 1, isPredicable = 1, Requires<[HasV4T]>; } -// Restore registers and dealloc return function call. -let isCall = 1, isBarrier = 1, isReturn = 1, isTerminator = 1, - Defs = [R29, R30, R31, PC] in { - def RESTORE_DEALLOC_RET_JMP_V4 : JInst<(outs), (ins calltarget:$dst, variable_ops), - "jump $dst // Restore_and_dealloc_return", - []>, - Requires<[HasV4T]>; -} - -// Restore registers and dealloc frame before a tail call. -let isCall = 1, isBarrier = 1, - Defs = [R29, R30, R31, PC] in { - def RESTORE_DEALLOC_BEFORE_TAILCALL_V4 : JInst<(outs), (ins calltarget:$dst, variable_ops), - "call $dst // Restore_and_dealloc_before_tailcall", - []>, - Requires<[HasV4T]>; -} - -// Save registers function call. 
-let isCall = 1, isBarrier = 1, - Uses = [R29, R31] in { - def SAVE_REGISTERS_CALL_V4 : JInst<(outs), (ins calltarget:$dst, variable_ops), - "call $dst // Save_calle_saved_registers", - []>, - Requires<[HasV4T]>; -} - // if (Ps) dealloc_return let isReturn = 1, isTerminator = 1, - Defs = [R29, R30, R31, PC], Uses = [R29, R31], neverHasSideEffects = 1, - isPredicated = 1 in { + Defs = [R29, R30, R31, PC], Uses = [R29, R31], neverHasSideEffects = 1 in { def DEALLOC_RET_cPt_V4 : NVInst_V4<(outs), (ins PredRegs:$src1, i32imm:$amt1), "if ($src1) dealloc_return", []>, @@ -5192,8 +3534,7 @@ let isReturn = 1, isTerminator = 1, // if (!Ps) dealloc_return let isReturn = 1, isTerminator = 1, - Defs = [R29, R30, R31, PC], Uses = [R29, R31], neverHasSideEffects = 1, - isPredicated = 1 in { + Defs = [R29, R30, R31, PC], Uses = [R29, R31], neverHasSideEffects = 1 in { def DEALLOC_RET_cNotPt_V4 : NVInst_V4<(outs), (ins PredRegs:$src1, i32imm:$amt1), "if (!$src1) dealloc_return", @@ -5203,8 +3544,7 @@ let isReturn = 1, isTerminator = 1, // if (Ps.new) dealloc_return:nt let isReturn = 1, isTerminator = 1, - Defs = [R29, R30, R31, PC], Uses = [R29, R31], neverHasSideEffects = 1, - isPredicated = 1 in { + Defs = [R29, R30, R31, PC], Uses = [R29, R31], neverHasSideEffects = 1 in { def DEALLOC_RET_cdnPnt_V4 : NVInst_V4<(outs), (ins PredRegs:$src1, i32imm:$amt1), "if ($src1.new) dealloc_return:nt", @@ -5214,8 +3554,7 @@ let isReturn = 1, isTerminator = 1, // if (!Ps.new) dealloc_return:nt let isReturn = 1, isTerminator = 1, - Defs = [R29, R30, R31, PC], Uses = [R29, R31], neverHasSideEffects = 1, - isPredicated = 1 in { + Defs = [R29, R30, R31, PC], Uses = [R29, R31], neverHasSideEffects = 1 in { def DEALLOC_RET_cNotdnPnt_V4 : NVInst_V4<(outs), (ins PredRegs:$src1, i32imm:$amt1), "if (!$src1.new) dealloc_return:nt", @@ -5225,8 +3564,7 @@ let isReturn = 1, isTerminator = 1, // if (Ps.new) dealloc_return:t let isReturn = 1, isTerminator = 1, - Defs = [R29, R30, R31, PC], Uses = [R29, R31], neverHasSideEffects = 1, - isPredicated = 1 in { + Defs = [R29, R30, R31, PC], Uses = [R29, R31], neverHasSideEffects = 1 in { def DEALLOC_RET_cdnPt_V4 : NVInst_V4<(outs), (ins PredRegs:$src1, i32imm:$amt1), "if ($src1.new) dealloc_return:t", @@ -5236,511 +3574,10 @@ let isReturn = 1, isTerminator = 1, // if (!Ps.new) dealloc_return:nt let isReturn = 1, isTerminator = 1, - Defs = [R29, R30, R31, PC], Uses = [R29, R31], neverHasSideEffects = 1, - isPredicated = 1 in { + Defs = [R29, R30, R31, PC], Uses = [R29, R31], neverHasSideEffects = 1 in { def DEALLOC_RET_cNotdnPt_V4 : NVInst_V4<(outs), (ins PredRegs:$src1, i32imm:$amt1), "if (!$src1.new) dealloc_return:t", []>, Requires<[HasV4T]>; } - - -// Load/Store with absolute addressing mode -// memw(#u6)=Rt - -multiclass ST_abs<string OpcStr> { - let isPredicable = 1 in - def _abs_V4 : STInst<(outs), - (ins globaladdress:$absaddr, IntRegs:$src), - !strconcat(OpcStr, "(##$absaddr) = $src"), - []>, - Requires<[HasV4T]>; - - let isPredicated = 1 in - def _abs_cPt_V4 : STInst<(outs), - (ins PredRegs:$src1, globaladdress:$absaddr, IntRegs:$src2), - !strconcat("if ($src1)", !strconcat(OpcStr, "(##$absaddr) = $src2")), - []>, - Requires<[HasV4T]>; - - let isPredicated = 1 in - def _abs_cNotPt_V4 : STInst<(outs), - (ins PredRegs:$src1, globaladdress:$absaddr, IntRegs:$src2), - !strconcat("if (!$src1)", !strconcat(OpcStr, "(##$absaddr) = $src2")), - []>, - Requires<[HasV4T]>; - - let isPredicated = 1 in - def _abs_cdnPt_V4 : STInst<(outs), - (ins PredRegs:$src1, globaladdress:$absaddr, 
IntRegs:$src2), - !strconcat("if ($src1.new)", !strconcat(OpcStr, "(##$absaddr) = $src2")), - []>, - Requires<[HasV4T]>; - - let isPredicated = 1 in - def _abs_cdnNotPt_V4 : STInst<(outs), - (ins PredRegs:$src1, globaladdress:$absaddr, IntRegs:$src2), - !strconcat("if (!$src1.new)", !strconcat(OpcStr, "(##$absaddr) = $src2")), - []>, - Requires<[HasV4T]>; - - def _abs_nv_V4 : STInst<(outs), - (ins globaladdress:$absaddr, IntRegs:$src), - !strconcat(OpcStr, "(##$absaddr) = $src.new"), - []>, - Requires<[HasV4T]>; - - let isPredicated = 1 in - def _abs_cPt_nv_V4 : STInst<(outs), - (ins PredRegs:$src1, globaladdress:$absaddr, IntRegs:$src2), - !strconcat("if ($src1)", !strconcat(OpcStr, "(##$absaddr) = $src2.new")), - []>, - Requires<[HasV4T]>; - - let isPredicated = 1 in - def _abs_cNotPt_nv_V4 : STInst<(outs), - (ins PredRegs:$src1, globaladdress:$absaddr, IntRegs:$src2), - !strconcat("if (!$src1)", !strconcat(OpcStr, "(##$absaddr) = $src2.new")), - []>, - Requires<[HasV4T]>; - - let isPredicated = 1 in - def _abs_cdnPt_nv_V4 : STInst<(outs), - (ins PredRegs:$src1, globaladdress:$absaddr, IntRegs:$src2), - !strconcat("if ($src1.new)", !strconcat(OpcStr, "(##$absaddr) = $src2.new")), - []>, - Requires<[HasV4T]>; - - let isPredicated = 1 in - def _abs_cdnNotPt_nv_V4 : STInst<(outs), - (ins PredRegs:$src1, globaladdress:$absaddr, IntRegs:$src2), - !strconcat("if (!$src1.new)", !strconcat(OpcStr, "(##$absaddr) = $src2.new")), - []>, - Requires<[HasV4T]>; -} - -let AddedComplexity = 30, isPredicable = 1 in -def STrid_abs_V4 : STInst<(outs), - (ins globaladdress:$absaddr, DoubleRegs:$src), - "memd(##$absaddr) = $src", - [(store (i64 DoubleRegs:$src), (HexagonCONST32 tglobaladdr:$absaddr))]>, - Requires<[HasV4T]>; - -let AddedComplexity = 30, isPredicated = 1 in -def STrid_abs_cPt_V4 : STInst<(outs), - (ins PredRegs:$src1, globaladdress:$absaddr, DoubleRegs:$src2), - "if ($src1) memd(##$absaddr) = $src2", - []>, - Requires<[HasV4T]>; - -let AddedComplexity = 30, isPredicated = 1 in -def STrid_abs_cNotPt_V4 : STInst<(outs), - (ins PredRegs:$src1, globaladdress:$absaddr, DoubleRegs:$src2), - "if (!$src1) memd(##$absaddr) = $src2", - []>, - Requires<[HasV4T]>; - -let AddedComplexity = 30, isPredicated = 1 in -def STrid_abs_cdnPt_V4 : STInst<(outs), - (ins PredRegs:$src1, globaladdress:$absaddr, DoubleRegs:$src2), - "if ($src1.new) memd(##$absaddr) = $src2", - []>, - Requires<[HasV4T]>; - -let AddedComplexity = 30, isPredicated = 1 in -def STrid_abs_cdnNotPt_V4 : STInst<(outs), - (ins PredRegs:$src1, globaladdress:$absaddr, DoubleRegs:$src2), - "if (!$src1.new) memd(##$absaddr) = $src2", - []>, - Requires<[HasV4T]>; - -defm STrib : ST_abs<"memb">; -defm STrih : ST_abs<"memh">; -defm STriw : ST_abs<"memw">; - -let Predicates = [HasV4T], AddedComplexity = 30 in -def : Pat<(truncstorei8 (i32 IntRegs:$src1), (HexagonCONST32 tglobaladdr:$absaddr)), - (STrib_abs_V4 tglobaladdr: $absaddr, IntRegs: $src1)>; - -let Predicates = [HasV4T], AddedComplexity = 30 in -def : Pat<(truncstorei16 (i32 IntRegs:$src1), (HexagonCONST32 tglobaladdr:$absaddr)), - (STrih_abs_V4 tglobaladdr: $absaddr, IntRegs: $src1)>; - -let Predicates = [HasV4T], AddedComplexity = 30 in -def : Pat<(store (i32 IntRegs:$src1), (HexagonCONST32 tglobaladdr:$absaddr)), - (STriw_abs_V4 tglobaladdr: $absaddr, IntRegs: $src1)>; - - -multiclass LD_abs<string OpcStr> { - let isPredicable = 1 in - def _abs_V4 : LDInst<(outs IntRegs:$dst), - (ins globaladdress:$absaddr), - !strconcat("$dst = ", !strconcat(OpcStr, "(##$absaddr)")), - []>, - 
Requires<[HasV4T]>; - - let isPredicated = 1 in - def _abs_cPt_V4 : LDInst<(outs IntRegs:$dst), - (ins PredRegs:$src1, globaladdress:$absaddr), - !strconcat("if ($src1) $dst = ", !strconcat(OpcStr, "(##$absaddr)")), - []>, - Requires<[HasV4T]>; - - let isPredicated = 1 in - def _abs_cNotPt_V4 : LDInst<(outs IntRegs:$dst), - (ins PredRegs:$src1, globaladdress:$absaddr), - !strconcat("if (!$src1) $dst = ", !strconcat(OpcStr, "(##$absaddr)")), - []>, - Requires<[HasV4T]>; - - let isPredicated = 1 in - def _abs_cdnPt_V4 : LDInst<(outs IntRegs:$dst), - (ins PredRegs:$src1, globaladdress:$absaddr), - !strconcat("if ($src1.new) $dst = ", !strconcat(OpcStr, "(##$absaddr)")), - []>, - Requires<[HasV4T]>; - - let isPredicated = 1 in - def _abs_cdnNotPt_V4 : LDInst<(outs IntRegs:$dst), - (ins PredRegs:$src1, globaladdress:$absaddr), - !strconcat("if (!$src1.new) $dst = ", !strconcat(OpcStr, "(##$absaddr)")), - []>, - Requires<[HasV4T]>; -} - -let AddedComplexity = 30 in -def LDrid_abs_V4 : LDInst<(outs DoubleRegs:$dst), - (ins globaladdress:$absaddr), - "$dst = memd(##$absaddr)", - [(set (i64 DoubleRegs:$dst), (load (HexagonCONST32 tglobaladdr:$absaddr)))]>, - Requires<[HasV4T]>; - -let AddedComplexity = 30, isPredicated = 1 in -def LDrid_abs_cPt_V4 : LDInst<(outs DoubleRegs:$dst), - (ins PredRegs:$src1, globaladdress:$absaddr), - "if ($src1) $dst = memd(##$absaddr)", - []>, - Requires<[HasV4T]>; - -let AddedComplexity = 30, isPredicated = 1 in -def LDrid_abs_cNotPt_V4 : LDInst<(outs DoubleRegs:$dst), - (ins PredRegs:$src1, globaladdress:$absaddr), - "if (!$src1) $dst = memd(##$absaddr)", - []>, - Requires<[HasV4T]>; - -let AddedComplexity = 30, isPredicated = 1 in -def LDrid_abs_cdnPt_V4 : LDInst<(outs DoubleRegs:$dst), - (ins PredRegs:$src1, globaladdress:$absaddr), - "if ($src1.new) $dst = memd(##$absaddr)", - []>, - Requires<[HasV4T]>; - -let AddedComplexity = 30, isPredicated = 1 in -def LDrid_abs_cdnNotPt_V4 : LDInst<(outs DoubleRegs:$dst), - (ins PredRegs:$src1, globaladdress:$absaddr), - "if (!$src1.new) $dst = memd(##$absaddr)", - []>, - Requires<[HasV4T]>; - -defm LDrib : LD_abs<"memb">; -defm LDriub : LD_abs<"memub">; -defm LDrih : LD_abs<"memh">; -defm LDriuh : LD_abs<"memuh">; -defm LDriw : LD_abs<"memw">; - - -let Predicates = [HasV4T], AddedComplexity = 30 in -def : Pat<(i32 (load (HexagonCONST32 tglobaladdr:$absaddr))), - (LDriw_abs_V4 tglobaladdr: $absaddr)>; - -let Predicates = [HasV4T], AddedComplexity=30 in -def : Pat<(i32 (sextloadi8 (HexagonCONST32 tglobaladdr:$absaddr))), - (LDrib_abs_V4 tglobaladdr:$absaddr)>; - -let Predicates = [HasV4T], AddedComplexity=30 in -def : Pat<(i32 (zextloadi8 (HexagonCONST32 tglobaladdr:$absaddr))), - (LDriub_abs_V4 tglobaladdr:$absaddr)>; - -let Predicates = [HasV4T], AddedComplexity=30 in -def : Pat<(i32 (sextloadi16 (HexagonCONST32 tglobaladdr:$absaddr))), - (LDrih_abs_V4 tglobaladdr:$absaddr)>; - -let Predicates = [HasV4T], AddedComplexity=30 in -def : Pat<(i32 (zextloadi16 (HexagonCONST32 tglobaladdr:$absaddr))), - (LDriuh_abs_V4 tglobaladdr:$absaddr)>; - -// Transfer global address into a register -let AddedComplexity=50, isMoveImm = 1, isReMaterializable = 1 in -def TFRI_V4 : ALU32_ri<(outs IntRegs:$dst), (ins globaladdress:$src1), - "$dst = ##$src1", - [(set IntRegs:$dst, (HexagonCONST32 tglobaladdr:$src1))]>, - Requires<[HasV4T]>; - -let AddedComplexity=50, neverHasSideEffects = 1, isPredicated = 1 in -def TFRI_cPt_V4 : ALU32_ri<(outs IntRegs:$dst), - (ins PredRegs:$src1, globaladdress:$src2), - "if($src1) $dst = ##$src2", - []>, - 
Requires<[HasV4T]>; - -let AddedComplexity=50, neverHasSideEffects = 1, isPredicated = 1 in -def TFRI_cNotPt_V4 : ALU32_ri<(outs IntRegs:$dst), - (ins PredRegs:$src1, globaladdress:$src2), - "if(!$src1) $dst = ##$src2", - []>, - Requires<[HasV4T]>; - -let AddedComplexity=50, neverHasSideEffects = 1, isPredicated = 1 in -def TFRI_cdnPt_V4 : ALU32_ri<(outs IntRegs:$dst), - (ins PredRegs:$src1, globaladdress:$src2), - "if($src1.new) $dst = ##$src2", - []>, - Requires<[HasV4T]>; - -let AddedComplexity=50, neverHasSideEffects = 1, isPredicated = 1 in -def TFRI_cdnNotPt_V4 : ALU32_ri<(outs IntRegs:$dst), - (ins PredRegs:$src1, globaladdress:$src2), - "if(!$src1.new) $dst = ##$src2", - []>, - Requires<[HasV4T]>; - -let AddedComplexity = 50, Predicates = [HasV4T] in -def : Pat<(HexagonCONST32_GP tglobaladdr:$src1), - (TFRI_V4 tglobaladdr:$src1)>; - - -// Load - Indirect with long offset: These instructions take global address -// as an operand -let AddedComplexity = 10 in -def LDrid_ind_lo_V4 : LDInst<(outs DoubleRegs:$dst), - (ins IntRegs:$src1, u2Imm:$src2, globaladdress:$offset), - "$dst=memd($src1<<#$src2+##$offset)", - [(set (i64 DoubleRegs:$dst), - (load (add (shl IntRegs:$src1, u2ImmPred:$src2), - (HexagonCONST32 tglobaladdr:$offset))))]>, - Requires<[HasV4T]>; - -let AddedComplexity = 10 in -multiclass LD_indirect_lo<string OpcStr, PatFrag OpNode> { - def _lo_V4 : LDInst<(outs IntRegs:$dst), - (ins IntRegs:$src1, u2Imm:$src2, globaladdress:$offset), - !strconcat("$dst = ", !strconcat(OpcStr, "($src1<<#$src2+##$offset)")), - [(set IntRegs:$dst, - (i32 (OpNode (add (shl IntRegs:$src1, u2ImmPred:$src2), - (HexagonCONST32 tglobaladdr:$offset)))))]>, - Requires<[HasV4T]>; -} - -defm LDrib_ind : LD_indirect_lo<"memb", sextloadi8>; -defm LDriub_ind : LD_indirect_lo<"memub", zextloadi8>; -defm LDrih_ind : LD_indirect_lo<"memh", sextloadi16>; -defm LDriuh_ind : LD_indirect_lo<"memuh", zextloadi16>; -defm LDriw_ind : LD_indirect_lo<"memw", load>; - -// Store - Indirect with long offset: These instructions take global address -// as an operand -let AddedComplexity = 10 in -def STrid_ind_lo_V4 : STInst<(outs), - (ins IntRegs:$src1, u2Imm:$src2, globaladdress:$src3, - DoubleRegs:$src4), - "memd($src1<<#$src2+#$src3) = $src4", - [(store (i64 DoubleRegs:$src4), - (add (shl IntRegs:$src1, u2ImmPred:$src2), - (HexagonCONST32 tglobaladdr:$src3)))]>, - Requires<[HasV4T]>; - -let AddedComplexity = 10 in -multiclass ST_indirect_lo<string OpcStr, PatFrag OpNode> { - def _lo_V4 : STInst<(outs), - (ins IntRegs:$src1, u2Imm:$src2, globaladdress:$src3, - IntRegs:$src4), - !strconcat(OpcStr, "($src1<<#$src2+##$src3) = $src4"), - [(OpNode (i32 IntRegs:$src4), - (add (shl IntRegs:$src1, u2ImmPred:$src2), - (HexagonCONST32 tglobaladdr:$src3)))]>, - Requires<[HasV4T]>; -} - -defm STrib_ind : ST_indirect_lo<"memb", truncstorei8>; -defm STrih_ind : ST_indirect_lo<"memh", truncstorei16>; -defm STriw_ind : ST_indirect_lo<"memw", store>; - -// Store - absolute addressing mode: These instruction take constant -// value as the extended operand -multiclass ST_absimm<string OpcStr> { - let isPredicable = 1 in - def _abs_V4 : STInst<(outs), - (ins u6Imm:$src1, IntRegs:$src2), - !strconcat(OpcStr, "(#$src1) = $src2"), - []>, - Requires<[HasV4T]>; - - let isPredicated = 1 in - def _abs_cPt_V4 : STInst<(outs), - (ins PredRegs:$src1, u6Imm:$src2, IntRegs:$src3), - !strconcat("if ($src1)", !strconcat(OpcStr, "(#$src2) = $src3")), - []>, - Requires<[HasV4T]>; - - let isPredicated = 1 in - def _abs_cNotPt_V4 : STInst<(outs), - (ins 
PredRegs:$src1, u6Imm:$src2, IntRegs:$src3), - !strconcat("if (!$src1)", !strconcat(OpcStr, "(#$src2) = $src3")), - []>, - Requires<[HasV4T]>; - - let isPredicated = 1 in - def _abs_cdnPt_V4 : STInst<(outs), - (ins PredRegs:$src1, u6Imm:$src2, IntRegs:$src3), - !strconcat("if ($src1.new)", !strconcat(OpcStr, "(#$src2) = $src3")), - []>, - Requires<[HasV4T]>; - - let isPredicated = 1 in - def _abs_cdnNotPt_V4 : STInst<(outs), - (ins PredRegs:$src1, u6Imm:$src2, IntRegs:$src3), - !strconcat("if (!$src1.new)", !strconcat(OpcStr, "(#$src2) = $src3")), - []>, - Requires<[HasV4T]>; - - def _abs_nv_V4 : STInst<(outs), - (ins u6Imm:$src1, IntRegs:$src2), - !strconcat(OpcStr, "(#$src1) = $src2.new"), - []>, - Requires<[HasV4T]>; - - let isPredicated = 1 in - def _abs_cPt_nv_V4 : STInst<(outs), - (ins PredRegs:$src1, u6Imm:$src2, IntRegs:$src3), - !strconcat("if ($src1)", !strconcat(OpcStr, "(#$src2) = $src3.new")), - []>, - Requires<[HasV4T]>; - - let isPredicated = 1 in - def _abs_cNotPt_nv_V4 : STInst<(outs), - (ins PredRegs:$src1, u6Imm:$src2, IntRegs:$src3), - !strconcat("if (!$src1)", !strconcat(OpcStr, "(#$src2) = $src3.new")), - []>, - Requires<[HasV4T]>; - - let isPredicated = 1 in - def _abs_cdnPt_nv_V4 : STInst<(outs), - (ins PredRegs:$src1, u6Imm:$src2, IntRegs:$src3), - !strconcat("if ($src1.new)", !strconcat(OpcStr, "(#$src2) = $src3.new")), - []>, - Requires<[HasV4T]>; - - let isPredicated = 1 in - def _abs_cdnNotPt_nv_V4 : STInst<(outs), - (ins PredRegs:$src1, u6Imm:$src2, IntRegs:$src3), - !strconcat("if (!$src1.new)", !strconcat(OpcStr, "(#$src2) = $src3.new")), - []>, - Requires<[HasV4T]>; -} - -defm STrib_imm : ST_absimm<"memb">; -defm STrih_imm : ST_absimm<"memh">; -defm STriw_imm : ST_absimm<"memw">; - -let Predicates = [HasV4T], AddedComplexity = 30 in -def : Pat<(truncstorei8 (i32 IntRegs:$src1), u6ImmPred:$src2), - (STrib_imm_abs_V4 u6ImmPred:$src2, IntRegs: $src1)>; - -let Predicates = [HasV4T], AddedComplexity = 30 in -def : Pat<(truncstorei16 (i32 IntRegs:$src1), u6ImmPred:$src2), - (STrih_imm_abs_V4 u6ImmPred:$src2, IntRegs: $src1)>; - -let Predicates = [HasV4T], AddedComplexity = 30 in -def : Pat<(store (i32 IntRegs:$src1), u6ImmPred:$src2), - (STriw_imm_abs_V4 u6ImmPred:$src2, IntRegs: $src1)>; - - -// Load - absolute addressing mode: These instruction take constant -// value as the extended operand - -multiclass LD_absimm<string OpcStr> { - let isPredicable = 1 in - def _abs_V4 : LDInst<(outs IntRegs:$dst), - (ins u6Imm:$src), - !strconcat("$dst = ", !strconcat(OpcStr, "(#$src)")), - []>, - Requires<[HasV4T]>; - - let isPredicated = 1 in - def _abs_cPt_V4 : LDInst<(outs IntRegs:$dst), - (ins PredRegs:$src1, u6Imm:$src2), - !strconcat("if ($src1) $dst = ", !strconcat(OpcStr, "(#$src2)")), - []>, - Requires<[HasV4T]>; - - let isPredicated = 1 in - def _abs_cNotPt_V4 : LDInst<(outs IntRegs:$dst), - (ins PredRegs:$src1, u6Imm:$src2), - !strconcat("if (!$src1) $dst = ", !strconcat(OpcStr, "(#$src2)")), - []>, - Requires<[HasV4T]>; - - let isPredicated = 1 in - def _abs_cdnPt_V4 : LDInst<(outs IntRegs:$dst), - (ins PredRegs:$src1, u6Imm:$src2), - !strconcat("if ($src1.new) $dst = ", !strconcat(OpcStr, "(#$src2)")), - []>, - Requires<[HasV4T]>; - - let isPredicated = 1 in - def _abs_cdnNotPt_V4 : LDInst<(outs IntRegs:$dst), - (ins PredRegs:$src1, u6Imm:$src2), - !strconcat("if (!$src1.new) $dst = ", !strconcat(OpcStr, "(#$src2)")), - []>, - Requires<[HasV4T]>; -} - -defm LDrib_imm : LD_absimm<"memb">; -defm LDriub_imm : LD_absimm<"memub">; -defm LDrih_imm : 
LD_absimm<"memh">; -defm LDriuh_imm : LD_absimm<"memuh">; -defm LDriw_imm : LD_absimm<"memw">; - -let Predicates = [HasV4T], AddedComplexity = 30 in -def : Pat<(i32 (load u6ImmPred:$src)), - (LDriw_imm_abs_V4 u6ImmPred:$src)>; - -let Predicates = [HasV4T], AddedComplexity=30 in -def : Pat<(i32 (sextloadi8 u6ImmPred:$src)), - (LDrib_imm_abs_V4 u6ImmPred:$src)>; - -let Predicates = [HasV4T], AddedComplexity=30 in -def : Pat<(i32 (zextloadi8 u6ImmPred:$src)), - (LDriub_imm_abs_V4 u6ImmPred:$src)>; - -let Predicates = [HasV4T], AddedComplexity=30 in -def : Pat<(i32 (sextloadi16 u6ImmPred:$src)), - (LDrih_imm_abs_V4 u6ImmPred:$src)>; - -let Predicates = [HasV4T], AddedComplexity=30 in -def : Pat<(i32 (zextloadi16 u6ImmPred:$src)), - (LDriuh_imm_abs_V4 u6ImmPred:$src)>; - - -// Indexed store double word - global address. -// memw(Rs+#u6:2)=#S8 -let AddedComplexity = 10 in -def STriw_offset_ext_V4 : STInst<(outs), - (ins IntRegs:$src1, u6_2Imm:$src2, globaladdress:$src3), - "memw($src1+#$src2) = ##$src3", - [(store (HexagonCONST32 tglobaladdr:$src3), - (add IntRegs:$src1, u6_2ImmPred:$src2))]>, - Requires<[HasV4T]>; - - -// Indexed store double word - global address. -// memw(Rs+#u6:2)=#S8 -let AddedComplexity = 10 in -def STrih_offset_ext_V4 : STInst<(outs), - (ins IntRegs:$src1, u6_1Imm:$src2, globaladdress:$src3), - "memh($src1+#$src2) = ##$src3", - [(truncstorei16 (HexagonCONST32 tglobaladdr:$src3), - (add IntRegs:$src1, u6_1ImmPred:$src2))]>, - Requires<[HasV4T]>; diff --git a/lib/Target/Hexagon/HexagonMCInst.h b/lib/Target/Hexagon/HexagonMCInst.h deleted file mode 100644 index 16ea7cf6ed7f..000000000000 --- a/lib/Target/Hexagon/HexagonMCInst.h +++ /dev/null @@ -1,41 +0,0 @@ -//===- HexagonMCInst.h - Hexagon sub-class of MCInst ----------------------===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This class extends MCInst to allow some VLIW annotation. -// -//===----------------------------------------------------------------------===// - -#ifndef HEXAGONMCINST_H -#define HEXAGONMCINST_H - -#include "llvm/MC/MCInst.h" -#include "llvm/CodeGen/MachineInstr.h" - -namespace llvm { - class HexagonMCInst: public MCInst { - // Packet start and end markers - unsigned startPacket: 1, endPacket: 1; - const MachineInstr *MachineI; - public: - explicit HexagonMCInst(): MCInst(), - startPacket(0), endPacket(0) {} - - const MachineInstr* getMI() const { return MachineI; }; - - void setMI(const MachineInstr *MI) { MachineI = MI; }; - - bool isStartPacket() const { return (startPacket); }; - bool isEndPacket() const { return (endPacket); }; - - void setStartPacket(bool yes) { startPacket = yes; }; - void setEndPacket(bool yes) { endPacket = yes; }; - }; -} - -#endif diff --git a/lib/Target/Hexagon/HexagonMCInstLower.cpp b/lib/Target/Hexagon/HexagonMCInstLower.cpp index 70bddcc76a59..fbb331bdd8bf 100644 --- a/lib/Target/Hexagon/HexagonMCInstLower.cpp +++ b/lib/Target/Hexagon/HexagonMCInstLower.cpp @@ -49,7 +49,7 @@ void llvm::HexagonLowerToMC(const MachineInstr* MI, MCInst& MCI, switch (MO.getType()) { default: MI->dump(); - llvm_unreachable("unknown operand type"); + assert(0 && "unknown operand type"); case MachineOperand::MO_Register: // Ignore all implicit register operands. 
 if (MO.isImplicit()) continue;
diff --git a/lib/Target/Hexagon/HexagonSchedule.td b/lib/Target/Hexagon/HexagonSchedule.td
index c4887963895c..fbea4452ec6c 100644
--- a/lib/Target/Hexagon/HexagonSchedule.td
+++ b/lib/Target/Hexagon/HexagonSchedule.td
@@ -13,6 +13,7 @@ def LSUNIT : FuncUnit;
 def MUNIT : FuncUnit;
 def SUNIT : FuncUnit;
+
 // Itinerary classes
 def ALU32 : InstrItinClass;
 def ALU64 : InstrItinClass;
@@ -23,25 +24,23 @@ def LD : InstrItinClass;
 def M : InstrItinClass;
 def ST : InstrItinClass;
 def S : InstrItinClass;
-def SYS : InstrItinClass;
-def MARKER : InstrItinClass;
 def PSEUDO : InstrItinClass;
+
 def HexagonItineraries :
-  ProcessorItineraries<[LUNIT, LSUNIT, MUNIT, SUNIT], [], [
-  InstrItinData<ALU32 , [InstrStage<1, [LUNIT, LSUNIT, MUNIT, SUNIT]>]>,
-  InstrItinData<ALU64 , [InstrStage<1, [MUNIT, SUNIT]>]>,
-  InstrItinData<CR , [InstrStage<1, [SUNIT]>]>,
-  InstrItinData<J , [InstrStage<1, [SUNIT, MUNIT]>]>,
-  InstrItinData<JR , [InstrStage<1, [MUNIT]>]>,
-  InstrItinData<LD , [InstrStage<1, [LUNIT, LSUNIT]>]>,
-  InstrItinData<M , [InstrStage<1, [MUNIT, SUNIT]>]>,
-  InstrItinData<ST , [InstrStage<1, [LSUNIT]>]>,
-  InstrItinData<S , [InstrStage<1, [SUNIT, MUNIT]>]>,
-  InstrItinData<SYS , [InstrStage<1, [LSUNIT]>]>,
-  InstrItinData<MARKER , [InstrStage<1, [LUNIT, LSUNIT, MUNIT, SUNIT]>]>,
-  InstrItinData<PSEUDO , [InstrStage<1, [LUNIT, LSUNIT, MUNIT, SUNIT]>]>
-  ]>;
+  ProcessorItineraries<[LUNIT, LSUNIT, MUNIT, SUNIT], [], [
+  InstrItinData<ALU32 , [InstrStage<1, [LUNIT, LSUNIT, MUNIT, SUNIT]>]>,
+  InstrItinData<ALU64 , [InstrStage<1, [MUNIT, SUNIT]>]>,
+  InstrItinData<CR , [InstrStage<1, [SUNIT]>]>,
+  InstrItinData<J , [InstrStage<1, [SUNIT, MUNIT]>]>,
+  InstrItinData<JR , [InstrStage<1, [MUNIT]>]>,
+  InstrItinData<LD , [InstrStage<1, [LUNIT, LSUNIT]>]>,
+  InstrItinData<M , [InstrStage<1, [MUNIT, SUNIT]>]>,
+  InstrItinData<ST , [InstrStage<1, [LSUNIT]>]>,
+  InstrItinData<S , [InstrStage<1, [SUNIT, MUNIT]>]>,
+  InstrItinData<PSEUDO , [InstrStage<1, [LUNIT, LSUNIT, MUNIT, SUNIT]>]>
+]>;
+
 //===----------------------------------------------------------------------===//
 // V4 Machine Info
+
diff --git a/lib/Target/Hexagon/HexagonScheduleV4.td b/lib/Target/Hexagon/HexagonScheduleV4.td
index 1d82dbb90e91..4cf66fe74342 100644
--- a/lib/Target/Hexagon/HexagonScheduleV4.td
+++ b/lib/Target/Hexagon/HexagonScheduleV4.td
@@ -23,6 +23,7 @@
 // |   SLOT3   | XTYPE ALU32 J CR                                 |
 // |===========|==================================================|
+
 // Functional Units.
 def SLOT0 : FuncUnit;
 def SLOT1 : FuncUnit;
@@ -33,26 +34,22 @@ def SLOT3 : FuncUnit;
 def NV_V4 : InstrItinClass;
 def MEM_V4 : InstrItinClass;
 // ALU64/M/S Instruction classes of V2 are collectively known as XTYPE in V4.
-def PREFIX : InstrItinClass; -def HexagonItinerariesV4 : - ProcessorItineraries<[SLOT0, SLOT1, SLOT2, SLOT3], [], [ - InstrItinData<ALU32 , [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>, - InstrItinData<ALU64 , [InstrStage<1, [SLOT2, SLOT3]>]>, - InstrItinData<CR , [InstrStage<1, [SLOT3]>]>, - InstrItinData<J , [InstrStage<1, [SLOT2, SLOT3]>]>, - InstrItinData<JR , [InstrStage<1, [SLOT2]>]>, - InstrItinData<LD , [InstrStage<1, [SLOT0, SLOT1]>]>, - InstrItinData<M , [InstrStage<1, [SLOT2, SLOT3]>]>, - InstrItinData<ST , [InstrStage<1, [SLOT0, SLOT1]>]>, - InstrItinData<S , [InstrStage<1, [SLOT2, SLOT3]>]>, - InstrItinData<SYS , [InstrStage<1, [SLOT0]>]>, - InstrItinData<NV_V4 , [InstrStage<1, [SLOT0]>]>, - InstrItinData<MEM_V4 , [InstrStage<1, [SLOT0]>]>, - InstrItinData<MARKER , [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>, - InstrItinData<PREFIX , [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>, - InstrItinData<PSEUDO , [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]> - ]>; +def HexagonItinerariesV4 : ProcessorItineraries< + [SLOT0, SLOT1, SLOT2, SLOT3], [], [ + InstrItinData<LD , [InstrStage<1, [SLOT0, SLOT1]>]>, + InstrItinData<ST , [InstrStage<1, [SLOT0, SLOT1]>]>, + InstrItinData<ALU32 , [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>, + InstrItinData<NV_V4 , [InstrStage<1, [SLOT0]>]>, + InstrItinData<MEM_V4 , [InstrStage<1, [SLOT0]>]>, + InstrItinData<J , [InstrStage<1, [SLOT2, SLOT3]>]>, + InstrItinData<JR , [InstrStage<1, [SLOT2]>]>, + InstrItinData<CR , [InstrStage<1, [SLOT3]>]>, + InstrItinData<PSEUDO , [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>, + InstrItinData<ALU64 , [InstrStage<1, [SLOT2, SLOT3]>]>, + InstrItinData<M , [InstrStage<1, [SLOT2, SLOT3]>]>, + InstrItinData<S , [InstrStage<1, [SLOT2, SLOT3]>]> +]>; //===----------------------------------------------------------------------===// // Hexagon V4 Resource Definitions - diff --git a/lib/Target/Hexagon/HexagonTargetMachine.cpp b/lib/Target/Hexagon/HexagonTargetMachine.cpp index 411325bf963e..55bbba7251a7 100644 --- a/lib/Target/Hexagon/HexagonTargetMachine.cpp +++ b/lib/Target/Hexagon/HexagonTargetMachine.cpp @@ -100,23 +100,23 @@ TargetPassConfig *HexagonTargetMachine::createPassConfig(PassManagerBase &PM) { } bool HexagonPassConfig::addInstSelector() { - PM.add(createHexagonRemoveExtendOps(getHexagonTargetMachine())); - PM.add(createHexagonISelDag(getHexagonTargetMachine())); - PM.add(createHexagonPeephole()); + PM->add(createHexagonRemoveExtendOps(getHexagonTargetMachine())); + PM->add(createHexagonISelDag(getHexagonTargetMachine())); + PM->add(createHexagonPeephole()); return false; } bool HexagonPassConfig::addPreRegAlloc() { if (!DisableHardwareLoops) { - PM.add(createHexagonHardwareLoops()); + PM->add(createHexagonHardwareLoops()); } return false; } bool HexagonPassConfig::addPostRegAlloc() { - PM.add(createHexagonCFGOptimizer(getHexagonTargetMachine())); + PM->add(createHexagonCFGOptimizer(getHexagonTargetMachine())); return true; } @@ -129,17 +129,14 @@ bool HexagonPassConfig::addPreSched2() { bool HexagonPassConfig::addPreEmitPass() { if (!DisableHardwareLoops) { - PM.add(createHexagonFixupHwLoops()); + PM->add(createHexagonFixupHwLoops()); } // Expand Spill code for predicate registers. - PM.add(createHexagonExpandPredSpillCode(getHexagonTargetMachine())); + PM->add(createHexagonExpandPredSpillCode(getHexagonTargetMachine())); // Split up TFRcondsets into conditional transfers. - PM.add(createHexagonSplitTFRCondSets(getHexagonTargetMachine())); - - // Create Packets. 
- PM.add(createHexagonPacketizer()); + PM->add(createHexagonSplitTFRCondSets(getHexagonTargetMachine())); return false; } diff --git a/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp b/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp deleted file mode 100644 index c6e7bd1f53d6..000000000000 --- a/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp +++ /dev/null @@ -1,3642 +0,0 @@ -//===----- HexagonPacketizer.cpp - vliw packetizer ---------------------===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This implements a simple VLIW packetizer using DFA. The packetizer works on -// machine basic blocks. For each instruction I in BB, the packetizer consults -// the DFA to see if machine resources are available to execute I. If so, the -// packetizer checks if I depends on any instruction J in the current packet. -// If no dependency is found, I is added to current packet and machine resource -// is marked as taken. If any dependency is found, a target API call is made to -// prune the dependence. -// -//===----------------------------------------------------------------------===// -#define DEBUG_TYPE "packets" -#include "llvm/CodeGen/DFAPacketizer.h" -#include "llvm/CodeGen/Passes.h" -#include "llvm/CodeGen/MachineDominators.h" -#include "llvm/CodeGen/MachineFunctionPass.h" -#include "llvm/CodeGen/MachineLoopInfo.h" -#include "llvm/CodeGen/ScheduleDAG.h" -#include "llvm/CodeGen/ScheduleDAGInstrs.h" -#include "llvm/CodeGen/LatencyPriorityQueue.h" -#include "llvm/CodeGen/SchedulerRegistry.h" -#include "llvm/CodeGen/MachineFrameInfo.h" -#include "llvm/CodeGen/MachineInstrBuilder.h" -#include "llvm/CodeGen/MachineRegisterInfo.h" -#include "llvm/CodeGen/MachineFunctionAnalysis.h" -#include "llvm/CodeGen/ScheduleHazardRecognizer.h" -#include "llvm/Target/TargetMachine.h" -#include "llvm/Target/TargetInstrInfo.h" -#include "llvm/Target/TargetRegisterInfo.h" -#include "llvm/ADT/DenseMap.h" -#include "llvm/ADT/Statistic.h" -#include "llvm/Support/MathExtras.h" -#include "llvm/MC/MCInstrItineraries.h" -#include "llvm/Support/Compiler.h" -#include "llvm/Support/CommandLine.h" -#include "llvm/Support/Debug.h" -#include "Hexagon.h" -#include "HexagonTargetMachine.h" -#include "HexagonRegisterInfo.h" -#include "HexagonSubtarget.h" -#include "HexagonMachineFunctionInfo.h" - -#include <map> - -using namespace llvm; - -namespace { - class HexagonPacketizer : public MachineFunctionPass { - - public: - static char ID; - HexagonPacketizer() : MachineFunctionPass(ID) {} - - void getAnalysisUsage(AnalysisUsage &AU) const { - AU.setPreservesCFG(); - AU.addRequired<MachineDominatorTree>(); - AU.addPreserved<MachineDominatorTree>(); - AU.addRequired<MachineLoopInfo>(); - AU.addPreserved<MachineLoopInfo>(); - MachineFunctionPass::getAnalysisUsage(AU); - } - - const char *getPassName() const { - return "Hexagon Packetizer"; - } - - bool runOnMachineFunction(MachineFunction &Fn); - }; - char HexagonPacketizer::ID = 0; - - class HexagonPacketizerList : public VLIWPacketizerList { - - private: - - // Has the instruction been promoted to a dot-new instruction. - bool PromotedToDotNew; - - // Has the instruction been glued to allocframe. - bool GlueAllocframeStore; - - // Has the feeder instruction been glued to new value jump. 
-    bool GlueToNewValueJump;
-
-    // Check if there is a dependence between some instruction already in this
-    // packet and this instruction.
-    bool Dependence;
-
-    // Only check for dependence if there are resources available to
-    // schedule this instruction.
-    bool FoundSequentialDependence;
-
-  public:
-    // Ctor.
-    HexagonPacketizerList(MachineFunction &MF, MachineLoopInfo &MLI,
-                          MachineDominatorTree &MDT);
-
-    // initPacketizerState - initialize some internal flags.
-    void initPacketizerState();
-
-    // ignorePseudoInstruction - Ignore bundling of pseudo instructions.
-    bool ignorePseudoInstruction(MachineInstr *MI, MachineBasicBlock *MBB);
-
-    // isSoloInstruction - return true if instruction MI cannot be packetized
-    // with any other instruction, which means that MI itself is a packet.
-    bool isSoloInstruction(MachineInstr *MI);
-
-    // isLegalToPacketizeTogether - Is it legal to packetize SUI and SUJ
-    // together.
-    bool isLegalToPacketizeTogether(SUnit *SUI, SUnit *SUJ);
-
-    // isLegalToPruneDependencies - Is it legal to prune dependence between SUI
-    // and SUJ.
-    bool isLegalToPruneDependencies(SUnit *SUI, SUnit *SUJ);
-
-    MachineBasicBlock::iterator addToPacket(MachineInstr *MI);
-  private:
-    bool IsCallDependent(MachineInstr* MI, SDep::Kind DepType, unsigned DepReg);
-    bool PromoteToDotNew(MachineInstr* MI, SDep::Kind DepType,
-                         MachineBasicBlock::iterator &MII,
-                         const TargetRegisterClass* RC);
-    bool CanPromoteToDotNew(MachineInstr* MI, SUnit* PacketSU,
-                            unsigned DepReg,
-                            std::map <MachineInstr*, SUnit*> MIToSUnit,
-                            MachineBasicBlock::iterator &MII,
-                            const TargetRegisterClass* RC);
-    bool CanPromoteToNewValue(MachineInstr* MI, SUnit* PacketSU,
-                              unsigned DepReg,
-                              std::map <MachineInstr*, SUnit*> MIToSUnit,
-                              MachineBasicBlock::iterator &MII);
-    bool CanPromoteToNewValueStore(MachineInstr* MI, MachineInstr* PacketMI,
-                                   unsigned DepReg,
-                                   std::map <MachineInstr*, SUnit*> MIToSUnit);
-    bool DemoteToDotOld(MachineInstr* MI);
-    bool ArePredicatesComplements(MachineInstr* MI1, MachineInstr* MI2,
-                                  std::map <MachineInstr*, SUnit*> MIToSUnit);
-    bool RestrictingDepExistInPacket(MachineInstr*,
-                                     unsigned, std::map <MachineInstr*, SUnit*>);
-    bool isNewifiable(MachineInstr* MI);
-    bool isCondInst(MachineInstr* MI);
-    bool IsNewifyStore (MachineInstr* MI);
-    bool tryAllocateResourcesForConstExt(MachineInstr* MI);
-    bool canReserveResourcesForConstExt(MachineInstr *MI);
-    void reserveResourcesForConstExt(MachineInstr* MI);
-    bool isNewValueInst(MachineInstr* MI);
-    bool isDotNewInst(MachineInstr* MI);
-  };
-}
-
-// HexagonPacketizerList Ctor.
-HexagonPacketizerList::HexagonPacketizerList(
-  MachineFunction &MF, MachineLoopInfo &MLI,MachineDominatorTree &MDT)
-  : VLIWPacketizerList(MF, MLI, MDT, true){
-}
-
-bool HexagonPacketizer::runOnMachineFunction(MachineFunction &Fn) {
-  const TargetInstrInfo *TII = Fn.getTarget().getInstrInfo();
-  MachineLoopInfo &MLI = getAnalysis<MachineLoopInfo>();
-  MachineDominatorTree &MDT = getAnalysis<MachineDominatorTree>();
-
-  // Instantiate the packetizer.
-  HexagonPacketizerList Packetizer(Fn, MLI, MDT);
-
-  // DFA state table should not be empty.
-  assert(Packetizer.getResourceTracker() && "Empty DFA table!");
-
-  //
-  // Loop over all basic blocks and remove KILL pseudo-instructions.
-  // These instructions confuse the dependence analysis. Consider:
-  // D0 = ...   (Insn 0)
-  // R0 = KILL R0, D0   (Insn 1)
-  // R0 = ...   (Insn 2)
-  // Here, Insn 1 will result in the dependence graph not emitting an output
-  // dependence between Insn 0 and Insn 2. This can lead to incorrect
-  // packetization.
-  //
-  for (MachineFunction::iterator MBB = Fn.begin(), MBBe = Fn.end();
-       MBB != MBBe; ++MBB) {
-    MachineBasicBlock::iterator End = MBB->end();
-    MachineBasicBlock::iterator MI = MBB->begin();
-    while (MI != End) {
-      if (MI->isKill()) {
-        MachineBasicBlock::iterator DeleteMI = MI;
-        ++MI;
-        MBB->erase(DeleteMI);
-        End = MBB->end();
-        continue;
-      }
-      ++MI;
-    }
-  }
-
-  // Loop over all of the basic blocks.
-  for (MachineFunction::iterator MBB = Fn.begin(), MBBe = Fn.end();
-       MBB != MBBe; ++MBB) {
-    // Find scheduling regions and schedule / packetize each region.
-    unsigned RemainingCount = MBB->size();
-    for(MachineBasicBlock::iterator RegionEnd = MBB->end();
-        RegionEnd != MBB->begin();) {
-      // The next region starts above the previous region. Look backward in the
-      // instruction stream until we find the nearest boundary.
-      MachineBasicBlock::iterator I = RegionEnd;
-      for(;I != MBB->begin(); --I, --RemainingCount) {
-        if (TII->isSchedulingBoundary(llvm::prior(I), MBB, Fn))
-          break;
-      }
-      I = MBB->begin();
-
-      // Skip empty scheduling regions.
-      if (I == RegionEnd) {
-        RegionEnd = llvm::prior(RegionEnd);
-        --RemainingCount;
-        continue;
-      }
-      // Skip regions with one instruction.
-      if (I == llvm::prior(RegionEnd)) {
-        RegionEnd = llvm::prior(RegionEnd);
-        continue;
-      }
-
-      Packetizer.PacketizeMIs(MBB, I, RegionEnd);
-      RegionEnd = I;
-    }
-  }
-
-  return true;
-}
-
-
-static bool IsIndirectCall(MachineInstr* MI) {
-  return ((MI->getOpcode() == Hexagon::CALLR) ||
-          (MI->getOpcode() == Hexagon::CALLRv3));
-}
-
-// Reserve resources for the constant extender. Trigger an assertion if the
-// reservation fails.
-void HexagonPacketizerList::reserveResourcesForConstExt(MachineInstr* MI) {
-  const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII;
-  MachineInstr *PseudoMI = MI->getParent()->getParent()->CreateMachineInstr(
-    QII->get(Hexagon::IMMEXT), MI->getDebugLoc());
-
-  if (ResourceTracker->canReserveResources(PseudoMI)) {
-    ResourceTracker->reserveResources(PseudoMI);
-    MI->getParent()->getParent()->DeleteMachineInstr(PseudoMI);
-  } else {
-    MI->getParent()->getParent()->DeleteMachineInstr(PseudoMI);
-    llvm_unreachable("can not reserve resources for constant extender.");
-  }
-  return;
-}
-
-bool HexagonPacketizerList::canReserveResourcesForConstExt(MachineInstr *MI) {
-  const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII;
-  assert(QII->isExtended(MI) &&
-         "Should only be called for constant extended instructions");
-  MachineFunction *MF = MI->getParent()->getParent();
-  MachineInstr *PseudoMI = MF->CreateMachineInstr(QII->get(Hexagon::IMMEXT),
-                                                  MI->getDebugLoc());
-  bool CanReserve = ResourceTracker->canReserveResources(PseudoMI);
-  MF->DeleteMachineInstr(PseudoMI);
-  return CanReserve;
-}
-
-// Allocate resources (i.e. 4 bytes) for the constant extender. If it
-// succeeds, return true; otherwise, return false.
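An editorial aside, not part of the original commit: the two reservation helpers above and the tryAllocateResourcesForConstExt routine that follows all share one probe pattern. They build a throwaway IMMEXT pseudo-instruction, ask the DFA resource tracker whether it fits in the current packet, and then delete the pseudo, committing the reservation only in the allocating variants. A minimal standalone sketch of that pattern, with a toy slot counter standing in for LLVM's DFA-based tracker; every name here is invented for illustration.

#include <cassert>
#include <cstdio>

// Toy stand-in for the DFA resource tracker (not LLVM's API): a packet
// has four slots, and a constant extender consumes one of them.
struct ToyResourceTracker {
  int SlotsLeft = 4;
  bool canReserve(int Slots) const { return Slots <= SlotsLeft; }
  void reserve(int Slots) { assert(canReserve(Slots)); SlotsLeft -= Slots; }
};

// Query-only probe: would an extender fit? No packet state is mutated.
// (cf. canReserveResourcesForConstExt)
bool canReserveConstExt(const ToyResourceTracker &RT) {
  return RT.canReserve(/*extender slots=*/1);
}

// Probe and commit: reserve the extender slot if possible.
// (cf. tryAllocateResourcesForConstExt)
bool tryAllocateConstExt(ToyResourceTracker &RT) {
  if (!RT.canReserve(1))
    return false;
  RT.reserve(1);
  return true;
}

int main() {
  ToyResourceTracker RT;
  // An extended instruction costs its own slot plus the extender word.
  RT.reserve(1);                     // the instruction itself
  if (canReserveConstExt(RT))
    tryAllocateConstExt(RT);         // the 4-byte extender
  std::printf("slots left: %d\n", RT.SlotsLeft); // prints 2
  return 0;
}

The point of keeping a query-only variant is that callers can test feasibility before deciding to promote an instruction, without having to undo a committed reservation.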
-bool HexagonPacketizerList::tryAllocateResourcesForConstExt(MachineInstr* MI) { - const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII; - MachineInstr *PseudoMI = MI->getParent()->getParent()->CreateMachineInstr( - QII->get(Hexagon::IMMEXT), MI->getDebugLoc()); - - if (ResourceTracker->canReserveResources(PseudoMI)) { - ResourceTracker->reserveResources(PseudoMI); - MI->getParent()->getParent()->DeleteMachineInstr(PseudoMI); - return true; - } else { - MI->getParent()->getParent()->DeleteMachineInstr(PseudoMI); - return false; - } -} - - -bool HexagonPacketizerList::IsCallDependent(MachineInstr* MI, - SDep::Kind DepType, - unsigned DepReg) { - - const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII; - const HexagonRegisterInfo* QRI = (const HexagonRegisterInfo *) TM.getRegisterInfo(); - - // Check for lr dependence - if (DepReg == QRI->getRARegister()) { - return true; - } - - if (QII->isDeallocRet(MI)) { - if (DepReg == QRI->getFrameRegister() || - DepReg == QRI->getStackRegister()) - return true; - } - - // Check if this is a predicate dependence - const TargetRegisterClass* RC = QRI->getMinimalPhysRegClass(DepReg); - if (RC == Hexagon::PredRegsRegisterClass) { - return true; - } - - // - // Lastly check for an operand used in an indirect call - // If we had an attribute for checking if an instruction is an indirect call, - // then we could have avoided this relatively brittle implementation of - // IsIndirectCall() - // - // Assumes that the first operand of the CALLr is the function address - // - if (IsIndirectCall(MI) && (DepType == SDep::Data)) { - MachineOperand MO = MI->getOperand(0); - if (MO.isReg() && MO.isUse() && (MO.getReg() == DepReg)) { - return true; - } - } - - return false; -} - -static bool IsRegDependence(const SDep::Kind DepType) { - return (DepType == SDep::Data || DepType == SDep::Anti || - DepType == SDep::Output); -} - -static bool IsDirectJump(MachineInstr* MI) { - return (MI->getOpcode() == Hexagon::JMP); -} - -static bool IsSchedBarrier(MachineInstr* MI) { - switch (MI->getOpcode()) { - case Hexagon::BARRIER: - return true; - } - return false; -} - -static bool IsControlFlow(MachineInstr* MI) { - return (MI->getDesc().isTerminator() || MI->getDesc().isCall()); -} - -bool HexagonPacketizerList::isNewValueInst(MachineInstr* MI) { - const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII; - if (QII->isNewValueJump(MI)) - return true; - - if (QII->isNewValueStore(MI)) - return true; - - return false; -} - -// Function returns true if an instruction can be promoted to the new-value -// store. It will always return false for v2 and v3. -// It lists all the conditional and unconditional stores that can be promoted -// to the new-value stores. 
- -bool HexagonPacketizerList::IsNewifyStore (MachineInstr* MI) { - const HexagonRegisterInfo* QRI = (const HexagonRegisterInfo *) TM.getRegisterInfo(); - switch (MI->getOpcode()) - { - // store byte - case Hexagon::STrib: - case Hexagon::STrib_indexed: - case Hexagon::STrib_indexed_shl_V4: - case Hexagon::STrib_shl_V4: - case Hexagon::STrib_GP_V4: - case Hexagon::STb_GP_V4: - case Hexagon::POST_STbri: - case Hexagon::STrib_cPt: - case Hexagon::STrib_cdnPt_V4: - case Hexagon::STrib_cNotPt: - case Hexagon::STrib_cdnNotPt_V4: - case Hexagon::STrib_indexed_cPt: - case Hexagon::STrib_indexed_cdnPt_V4: - case Hexagon::STrib_indexed_cNotPt: - case Hexagon::STrib_indexed_cdnNotPt_V4: - case Hexagon::STrib_indexed_shl_cPt_V4: - case Hexagon::STrib_indexed_shl_cdnPt_V4: - case Hexagon::STrib_indexed_shl_cNotPt_V4: - case Hexagon::STrib_indexed_shl_cdnNotPt_V4: - case Hexagon::POST_STbri_cPt: - case Hexagon::POST_STbri_cdnPt_V4: - case Hexagon::POST_STbri_cNotPt: - case Hexagon::POST_STbri_cdnNotPt_V4: - case Hexagon::STb_GP_cPt_V4: - case Hexagon::STb_GP_cNotPt_V4: - case Hexagon::STb_GP_cdnPt_V4: - case Hexagon::STb_GP_cdnNotPt_V4: - case Hexagon::STrib_GP_cPt_V4: - case Hexagon::STrib_GP_cNotPt_V4: - case Hexagon::STrib_GP_cdnPt_V4: - case Hexagon::STrib_GP_cdnNotPt_V4: - - // store halfword - case Hexagon::STrih: - case Hexagon::STrih_indexed: - case Hexagon::STrih_indexed_shl_V4: - case Hexagon::STrih_shl_V4: - case Hexagon::STrih_GP_V4: - case Hexagon::STh_GP_V4: - case Hexagon::POST_SThri: - case Hexagon::STrih_cPt: - case Hexagon::STrih_cdnPt_V4: - case Hexagon::STrih_cNotPt: - case Hexagon::STrih_cdnNotPt_V4: - case Hexagon::STrih_indexed_cPt: - case Hexagon::STrih_indexed_cdnPt_V4: - case Hexagon::STrih_indexed_cNotPt: - case Hexagon::STrih_indexed_cdnNotPt_V4: - case Hexagon::STrih_indexed_shl_cPt_V4: - case Hexagon::STrih_indexed_shl_cdnPt_V4: - case Hexagon::STrih_indexed_shl_cNotPt_V4: - case Hexagon::STrih_indexed_shl_cdnNotPt_V4: - case Hexagon::POST_SThri_cPt: - case Hexagon::POST_SThri_cdnPt_V4: - case Hexagon::POST_SThri_cNotPt: - case Hexagon::POST_SThri_cdnNotPt_V4: - case Hexagon::STh_GP_cPt_V4: - case Hexagon::STh_GP_cNotPt_V4: - case Hexagon::STh_GP_cdnPt_V4: - case Hexagon::STh_GP_cdnNotPt_V4: - case Hexagon::STrih_GP_cPt_V4: - case Hexagon::STrih_GP_cNotPt_V4: - case Hexagon::STrih_GP_cdnPt_V4: - case Hexagon::STrih_GP_cdnNotPt_V4: - - // store word - case Hexagon::STriw: - case Hexagon::STriw_indexed: - case Hexagon::STriw_indexed_shl_V4: - case Hexagon::STriw_shl_V4: - case Hexagon::STriw_GP_V4: - case Hexagon::STw_GP_V4: - case Hexagon::POST_STwri: - case Hexagon::STriw_cPt: - case Hexagon::STriw_cdnPt_V4: - case Hexagon::STriw_cNotPt: - case Hexagon::STriw_cdnNotPt_V4: - case Hexagon::STriw_indexed_cPt: - case Hexagon::STriw_indexed_cdnPt_V4: - case Hexagon::STriw_indexed_cNotPt: - case Hexagon::STriw_indexed_cdnNotPt_V4: - case Hexagon::STriw_indexed_shl_cPt_V4: - case Hexagon::STriw_indexed_shl_cdnPt_V4: - case Hexagon::STriw_indexed_shl_cNotPt_V4: - case Hexagon::STriw_indexed_shl_cdnNotPt_V4: - case Hexagon::POST_STwri_cPt: - case Hexagon::POST_STwri_cdnPt_V4: - case Hexagon::POST_STwri_cNotPt: - case Hexagon::POST_STwri_cdnNotPt_V4: - case Hexagon::STw_GP_cPt_V4: - case Hexagon::STw_GP_cNotPt_V4: - case Hexagon::STw_GP_cdnPt_V4: - case Hexagon::STw_GP_cdnNotPt_V4: - case Hexagon::STriw_GP_cPt_V4: - case Hexagon::STriw_GP_cNotPt_V4: - case Hexagon::STriw_GP_cdnPt_V4: - case Hexagon::STriw_GP_cdnNotPt_V4: - return QRI->Subtarget.hasV4TOps(); - } - return false; -} - 
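IsNewifyStore above is a flat enumeration of every store opcode that has a new-value form. Conceptually it is one table answering two questions: is this store newifiable at all, and, together with GetDotNewOp below, what is its .new opcode. A hedged sketch of that table view, using invented opcodes rather than the real Hexagon:: enumerators listed above:

#include <cstdio>
#include <map>

// Toy opcode space; the real lists are the Hexagon::ST* enumerators above.
enum ToyOpcode { STrib, STrih, STriw, STrib_nv, STrih_nv, STriw_nv, JMP };

// One table answers both questions the packetizer asks of a store:
// is it newifiable, and what is its new-value form?
static const std::map<ToyOpcode, ToyOpcode> NewValueForm = {
  {STrib, STrib_nv}, {STrih, STrih_nv}, {STriw, STriw_nv},
};

// cf. IsNewifyStore: eligibility is just membership in the table.
bool isNewifyStore(ToyOpcode Op) { return NewValueForm.count(Op) != 0; }

// cf. GetDotNewOp: the promoted opcode is the mapped value.
ToyOpcode getDotNewOp(ToyOpcode Op) {
  std::map<ToyOpcode, ToyOpcode>::const_iterator It = NewValueForm.find(Op);
  return It != NewValueForm.end() ? It->second : Op;
}

int main() {
  std::printf("STriw newifiable: %d, .new form: %d\n",
              isNewifyStore(STriw), getDotNewOp(STriw));
  std::printf("JMP   newifiable: %d\n", isNewifyStore(JMP));
  return 0;
}

The real code spells the mapping out as switches, one opcode per case, which keeps it a compile-time jump table at the cost of the long listings that follow.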
-static bool IsLoopN(MachineInstr *MI) { - return (MI->getOpcode() == Hexagon::LOOP0_i || - MI->getOpcode() == Hexagon::LOOP0_r); -} - -/// DoesModifyCalleeSavedReg - Returns true if the instruction modifies a -/// callee-saved register. -static bool DoesModifyCalleeSavedReg(MachineInstr *MI, - const TargetRegisterInfo *TRI) { - for (const uint16_t *CSR = TRI->getCalleeSavedRegs(); *CSR; ++CSR) { - unsigned CalleeSavedReg = *CSR; - if (MI->modifiesRegister(CalleeSavedReg, TRI)) - return true; - } - return false; -} - -// Return the new value instruction for a given store. -static int GetDotNewOp(const int opc) { - switch (opc) { - default: llvm_unreachable("Unknown .new type"); - - // store new value byte - case Hexagon::STrib: - return Hexagon::STrib_nv_V4; - - case Hexagon::STrib_indexed: - return Hexagon::STrib_indexed_nv_V4; - - case Hexagon::STrib_indexed_shl_V4: - return Hexagon::STrib_indexed_shl_nv_V4; - - case Hexagon::STrib_shl_V4: - return Hexagon::STrib_shl_nv_V4; - - case Hexagon::STrib_GP_V4: - return Hexagon::STrib_GP_nv_V4; - - case Hexagon::STb_GP_V4: - return Hexagon::STb_GP_nv_V4; - - case Hexagon::POST_STbri: - return Hexagon::POST_STbri_nv_V4; - - case Hexagon::STrib_cPt: - return Hexagon::STrib_cPt_nv_V4; - - case Hexagon::STrib_cdnPt_V4: - return Hexagon::STrib_cdnPt_nv_V4; - - case Hexagon::STrib_cNotPt: - return Hexagon::STrib_cNotPt_nv_V4; - - case Hexagon::STrib_cdnNotPt_V4: - return Hexagon::STrib_cdnNotPt_nv_V4; - - case Hexagon::STrib_indexed_cPt: - return Hexagon::STrib_indexed_cPt_nv_V4; - - case Hexagon::STrib_indexed_cdnPt_V4: - return Hexagon::STrib_indexed_cdnPt_nv_V4; - - case Hexagon::STrib_indexed_cNotPt: - return Hexagon::STrib_indexed_cNotPt_nv_V4; - - case Hexagon::STrib_indexed_cdnNotPt_V4: - return Hexagon::STrib_indexed_cdnNotPt_nv_V4; - - case Hexagon::STrib_indexed_shl_cPt_V4: - return Hexagon::STrib_indexed_shl_cPt_nv_V4; - - case Hexagon::STrib_indexed_shl_cdnPt_V4: - return Hexagon::STrib_indexed_shl_cdnPt_nv_V4; - - case Hexagon::STrib_indexed_shl_cNotPt_V4: - return Hexagon::STrib_indexed_shl_cNotPt_nv_V4; - - case Hexagon::STrib_indexed_shl_cdnNotPt_V4: - return Hexagon::STrib_indexed_shl_cdnNotPt_nv_V4; - - case Hexagon::POST_STbri_cPt: - return Hexagon::POST_STbri_cPt_nv_V4; - - case Hexagon::POST_STbri_cdnPt_V4: - return Hexagon::POST_STbri_cdnPt_nv_V4; - - case Hexagon::POST_STbri_cNotPt: - return Hexagon::POST_STbri_cNotPt_nv_V4; - - case Hexagon::POST_STbri_cdnNotPt_V4: - return Hexagon::POST_STbri_cdnNotPt_nv_V4; - - case Hexagon::STb_GP_cPt_V4: - return Hexagon::STb_GP_cPt_nv_V4; - - case Hexagon::STb_GP_cNotPt_V4: - return Hexagon::STb_GP_cNotPt_nv_V4; - - case Hexagon::STb_GP_cdnPt_V4: - return Hexagon::STb_GP_cdnPt_nv_V4; - - case Hexagon::STb_GP_cdnNotPt_V4: - return Hexagon::STb_GP_cdnNotPt_nv_V4; - - case Hexagon::STrib_GP_cPt_V4: - return Hexagon::STrib_GP_cPt_nv_V4; - - case Hexagon::STrib_GP_cNotPt_V4: - return Hexagon::STrib_GP_cNotPt_nv_V4; - - case Hexagon::STrib_GP_cdnPt_V4: - return Hexagon::STrib_GP_cdnPt_nv_V4; - - case Hexagon::STrib_GP_cdnNotPt_V4: - return Hexagon::STrib_GP_cdnNotPt_nv_V4; - - // store new value halfword - case Hexagon::STrih: - return Hexagon::STrih_nv_V4; - - case Hexagon::STrih_indexed: - return Hexagon::STrih_indexed_nv_V4; - - case Hexagon::STrih_indexed_shl_V4: - return Hexagon::STrih_indexed_shl_nv_V4; - - case Hexagon::STrih_shl_V4: - return Hexagon::STrih_shl_nv_V4; - - case Hexagon::STrih_GP_V4: - return Hexagon::STrih_GP_nv_V4; - - case Hexagon::STh_GP_V4: - return 
Hexagon::STh_GP_nv_V4; - - case Hexagon::POST_SThri: - return Hexagon::POST_SThri_nv_V4; - - case Hexagon::STrih_cPt: - return Hexagon::STrih_cPt_nv_V4; - - case Hexagon::STrih_cdnPt_V4: - return Hexagon::STrih_cdnPt_nv_V4; - - case Hexagon::STrih_cNotPt: - return Hexagon::STrih_cNotPt_nv_V4; - - case Hexagon::STrih_cdnNotPt_V4: - return Hexagon::STrih_cdnNotPt_nv_V4; - - case Hexagon::STrih_indexed_cPt: - return Hexagon::STrih_indexed_cPt_nv_V4; - - case Hexagon::STrih_indexed_cdnPt_V4: - return Hexagon::STrih_indexed_cdnPt_nv_V4; - - case Hexagon::STrih_indexed_cNotPt: - return Hexagon::STrih_indexed_cNotPt_nv_V4; - - case Hexagon::STrih_indexed_cdnNotPt_V4: - return Hexagon::STrih_indexed_cdnNotPt_nv_V4; - - case Hexagon::STrih_indexed_shl_cPt_V4: - return Hexagon::STrih_indexed_shl_cPt_nv_V4; - - case Hexagon::STrih_indexed_shl_cdnPt_V4: - return Hexagon::STrih_indexed_shl_cdnPt_nv_V4; - - case Hexagon::STrih_indexed_shl_cNotPt_V4: - return Hexagon::STrih_indexed_shl_cNotPt_nv_V4; - - case Hexagon::STrih_indexed_shl_cdnNotPt_V4: - return Hexagon::STrih_indexed_shl_cdnNotPt_nv_V4; - - case Hexagon::POST_SThri_cPt: - return Hexagon::POST_SThri_cPt_nv_V4; - - case Hexagon::POST_SThri_cdnPt_V4: - return Hexagon::POST_SThri_cdnPt_nv_V4; - - case Hexagon::POST_SThri_cNotPt: - return Hexagon::POST_SThri_cNotPt_nv_V4; - - case Hexagon::POST_SThri_cdnNotPt_V4: - return Hexagon::POST_SThri_cdnNotPt_nv_V4; - - case Hexagon::STh_GP_cPt_V4: - return Hexagon::STh_GP_cPt_nv_V4; - - case Hexagon::STh_GP_cNotPt_V4: - return Hexagon::STh_GP_cNotPt_nv_V4; - - case Hexagon::STh_GP_cdnPt_V4: - return Hexagon::STh_GP_cdnPt_nv_V4; - - case Hexagon::STh_GP_cdnNotPt_V4: - return Hexagon::STh_GP_cdnNotPt_nv_V4; - - case Hexagon::STrih_GP_cPt_V4: - return Hexagon::STrih_GP_cPt_nv_V4; - - case Hexagon::STrih_GP_cNotPt_V4: - return Hexagon::STrih_GP_cNotPt_nv_V4; - - case Hexagon::STrih_GP_cdnPt_V4: - return Hexagon::STrih_GP_cdnPt_nv_V4; - - case Hexagon::STrih_GP_cdnNotPt_V4: - return Hexagon::STrih_GP_cdnNotPt_nv_V4; - - // store new value word - case Hexagon::STriw: - return Hexagon::STriw_nv_V4; - - case Hexagon::STriw_indexed: - return Hexagon::STriw_indexed_nv_V4; - - case Hexagon::STriw_indexed_shl_V4: - return Hexagon::STriw_indexed_shl_nv_V4; - - case Hexagon::STriw_shl_V4: - return Hexagon::STriw_shl_nv_V4; - - case Hexagon::STriw_GP_V4: - return Hexagon::STriw_GP_nv_V4; - - case Hexagon::STw_GP_V4: - return Hexagon::STw_GP_nv_V4; - - case Hexagon::POST_STwri: - return Hexagon::POST_STwri_nv_V4; - - case Hexagon::STriw_cPt: - return Hexagon::STriw_cPt_nv_V4; - - case Hexagon::STriw_cdnPt_V4: - return Hexagon::STriw_cdnPt_nv_V4; - - case Hexagon::STriw_cNotPt: - return Hexagon::STriw_cNotPt_nv_V4; - - case Hexagon::STriw_cdnNotPt_V4: - return Hexagon::STriw_cdnNotPt_nv_V4; - - case Hexagon::STriw_indexed_cPt: - return Hexagon::STriw_indexed_cPt_nv_V4; - - case Hexagon::STriw_indexed_cdnPt_V4: - return Hexagon::STriw_indexed_cdnPt_nv_V4; - - case Hexagon::STriw_indexed_cNotPt: - return Hexagon::STriw_indexed_cNotPt_nv_V4; - - case Hexagon::STriw_indexed_cdnNotPt_V4: - return Hexagon::STriw_indexed_cdnNotPt_nv_V4; - - case Hexagon::STriw_indexed_shl_cPt_V4: - return Hexagon::STriw_indexed_shl_cPt_nv_V4; - - case Hexagon::STriw_indexed_shl_cdnPt_V4: - return Hexagon::STriw_indexed_shl_cdnPt_nv_V4; - - case Hexagon::STriw_indexed_shl_cNotPt_V4: - return Hexagon::STriw_indexed_shl_cNotPt_nv_V4; - - case Hexagon::STriw_indexed_shl_cdnNotPt_V4: - return Hexagon::STriw_indexed_shl_cdnNotPt_nv_V4; - - case 
Hexagon::POST_STwri_cPt: - return Hexagon::POST_STwri_cPt_nv_V4; - - case Hexagon::POST_STwri_cdnPt_V4: - return Hexagon::POST_STwri_cdnPt_nv_V4; - - case Hexagon::POST_STwri_cNotPt: - return Hexagon::POST_STwri_cNotPt_nv_V4; - - case Hexagon::POST_STwri_cdnNotPt_V4: - return Hexagon::POST_STwri_cdnNotPt_nv_V4; - - case Hexagon::STw_GP_cPt_V4: - return Hexagon::STw_GP_cPt_nv_V4; - - case Hexagon::STw_GP_cNotPt_V4: - return Hexagon::STw_GP_cNotPt_nv_V4; - - case Hexagon::STw_GP_cdnPt_V4: - return Hexagon::STw_GP_cdnPt_nv_V4; - - case Hexagon::STw_GP_cdnNotPt_V4: - return Hexagon::STw_GP_cdnNotPt_nv_V4; - - case Hexagon::STriw_GP_cPt_V4: - return Hexagon::STriw_GP_cPt_nv_V4; - - case Hexagon::STriw_GP_cNotPt_V4: - return Hexagon::STriw_GP_cNotPt_nv_V4; - - case Hexagon::STriw_GP_cdnPt_V4: - return Hexagon::STriw_GP_cdnPt_nv_V4; - - case Hexagon::STriw_GP_cdnNotPt_V4: - return Hexagon::STriw_GP_cdnNotPt_nv_V4; - } -} - -// Return .new predicate version for an instruction -static int GetDotNewPredOp(const int opc) { - switch (opc) { - default: llvm_unreachable("Unknown .new type"); - - // Conditional stores - // Store byte conditionally - case Hexagon::STrib_cPt : - return Hexagon::STrib_cdnPt_V4; - - case Hexagon::STrib_cNotPt : - return Hexagon::STrib_cdnNotPt_V4; - - case Hexagon::STrib_indexed_cPt : - return Hexagon::STrib_indexed_cdnPt_V4; - - case Hexagon::STrib_indexed_cNotPt : - return Hexagon::STrib_indexed_cdnNotPt_V4; - - case Hexagon::STrib_imm_cPt_V4 : - return Hexagon::STrib_imm_cdnPt_V4; - - case Hexagon::STrib_imm_cNotPt_V4 : - return Hexagon::STrib_imm_cdnNotPt_V4; - - case Hexagon::POST_STbri_cPt : - return Hexagon::POST_STbri_cdnPt_V4; - - case Hexagon::POST_STbri_cNotPt : - return Hexagon::POST_STbri_cdnNotPt_V4; - - case Hexagon::STrib_indexed_shl_cPt_V4 : - return Hexagon::STrib_indexed_shl_cdnPt_V4; - - case Hexagon::STrib_indexed_shl_cNotPt_V4 : - return Hexagon::STrib_indexed_shl_cdnNotPt_V4; - - case Hexagon::STb_GP_cPt_V4 : - return Hexagon::STb_GP_cdnPt_V4; - - case Hexagon::STb_GP_cNotPt_V4 : - return Hexagon::STb_GP_cdnNotPt_V4; - - case Hexagon::STrib_GP_cPt_V4 : - return Hexagon::STrib_GP_cdnPt_V4; - - case Hexagon::STrib_GP_cNotPt_V4 : - return Hexagon::STrib_GP_cdnNotPt_V4; - - // Store doubleword conditionally - case Hexagon::STrid_cPt : - return Hexagon::STrid_cdnPt_V4; - - case Hexagon::STrid_cNotPt : - return Hexagon::STrid_cdnNotPt_V4; - - case Hexagon::STrid_indexed_cPt : - return Hexagon::STrid_indexed_cdnPt_V4; - - case Hexagon::STrid_indexed_cNotPt : - return Hexagon::STrid_indexed_cdnNotPt_V4; - - case Hexagon::STrid_indexed_shl_cPt_V4 : - return Hexagon::STrid_indexed_shl_cdnPt_V4; - - case Hexagon::STrid_indexed_shl_cNotPt_V4 : - return Hexagon::STrid_indexed_shl_cdnNotPt_V4; - - case Hexagon::POST_STdri_cPt : - return Hexagon::POST_STdri_cdnPt_V4; - - case Hexagon::POST_STdri_cNotPt : - return Hexagon::POST_STdri_cdnNotPt_V4; - - case Hexagon::STd_GP_cPt_V4 : - return Hexagon::STd_GP_cdnPt_V4; - - case Hexagon::STd_GP_cNotPt_V4 : - return Hexagon::STd_GP_cdnNotPt_V4; - - case Hexagon::STrid_GP_cPt_V4 : - return Hexagon::STrid_GP_cdnPt_V4; - - case Hexagon::STrid_GP_cNotPt_V4 : - return Hexagon::STrid_GP_cdnNotPt_V4; - - // Store halfword conditionally - case Hexagon::STrih_cPt : - return Hexagon::STrih_cdnPt_V4; - - case Hexagon::STrih_cNotPt : - return Hexagon::STrih_cdnNotPt_V4; - - case Hexagon::STrih_indexed_cPt : - return Hexagon::STrih_indexed_cdnPt_V4; - - case Hexagon::STrih_indexed_cNotPt : - return Hexagon::STrih_indexed_cdnNotPt_V4; - - 
case Hexagon::STrih_imm_cPt_V4 : - return Hexagon::STrih_imm_cdnPt_V4; - - case Hexagon::STrih_imm_cNotPt_V4 : - return Hexagon::STrih_imm_cdnNotPt_V4; - - case Hexagon::STrih_indexed_shl_cPt_V4 : - return Hexagon::STrih_indexed_shl_cdnPt_V4; - - case Hexagon::STrih_indexed_shl_cNotPt_V4 : - return Hexagon::STrih_indexed_shl_cdnNotPt_V4; - - case Hexagon::POST_SThri_cPt : - return Hexagon::POST_SThri_cdnPt_V4; - - case Hexagon::POST_SThri_cNotPt : - return Hexagon::POST_SThri_cdnNotPt_V4; - - case Hexagon::STh_GP_cPt_V4 : - return Hexagon::STh_GP_cdnPt_V4; - - case Hexagon::STh_GP_cNotPt_V4 : - return Hexagon::STh_GP_cdnNotPt_V4; - - case Hexagon::STrih_GP_cPt_V4 : - return Hexagon::STrih_GP_cdnPt_V4; - - case Hexagon::STrih_GP_cNotPt_V4 : - return Hexagon::STrih_GP_cdnNotPt_V4; - - // Store word conditionally - case Hexagon::STriw_cPt : - return Hexagon::STriw_cdnPt_V4; - - case Hexagon::STriw_cNotPt : - return Hexagon::STriw_cdnNotPt_V4; - - case Hexagon::STriw_indexed_cPt : - return Hexagon::STriw_indexed_cdnPt_V4; - - case Hexagon::STriw_indexed_cNotPt : - return Hexagon::STriw_indexed_cdnNotPt_V4; - - case Hexagon::STriw_imm_cPt_V4 : - return Hexagon::STriw_imm_cdnPt_V4; - - case Hexagon::STriw_imm_cNotPt_V4 : - return Hexagon::STriw_imm_cdnNotPt_V4; - - case Hexagon::STriw_indexed_shl_cPt_V4 : - return Hexagon::STriw_indexed_shl_cdnPt_V4; - - case Hexagon::STriw_indexed_shl_cNotPt_V4 : - return Hexagon::STriw_indexed_shl_cdnNotPt_V4; - - case Hexagon::POST_STwri_cPt : - return Hexagon::POST_STwri_cdnPt_V4; - - case Hexagon::POST_STwri_cNotPt : - return Hexagon::POST_STwri_cdnNotPt_V4; - - case Hexagon::STw_GP_cPt_V4 : - return Hexagon::STw_GP_cdnPt_V4; - - case Hexagon::STw_GP_cNotPt_V4 : - return Hexagon::STw_GP_cdnNotPt_V4; - - case Hexagon::STriw_GP_cPt_V4 : - return Hexagon::STriw_GP_cdnPt_V4; - - case Hexagon::STriw_GP_cNotPt_V4 : - return Hexagon::STriw_GP_cdnNotPt_V4; - - // Condtional Jumps - case Hexagon::JMP_c: - return Hexagon::JMP_cdnPt; - - case Hexagon::JMP_cNot: - return Hexagon::JMP_cdnNotPt; - - case Hexagon::JMPR_cPt: - return Hexagon::JMPR_cdnPt_V3; - - case Hexagon::JMPR_cNotPt: - return Hexagon::JMPR_cdnNotPt_V3; - - // Conditional Transfers - case Hexagon::TFR_cPt: - return Hexagon::TFR_cdnPt; - - case Hexagon::TFR_cNotPt: - return Hexagon::TFR_cdnNotPt; - - case Hexagon::TFRI_cPt: - return Hexagon::TFRI_cdnPt; - - case Hexagon::TFRI_cNotPt: - return Hexagon::TFRI_cdnNotPt; - - // Load double word - case Hexagon::LDrid_cPt : - return Hexagon::LDrid_cdnPt; - - case Hexagon::LDrid_cNotPt : - return Hexagon::LDrid_cdnNotPt; - - case Hexagon::LDrid_indexed_cPt : - return Hexagon::LDrid_indexed_cdnPt; - - case Hexagon::LDrid_indexed_cNotPt : - return Hexagon::LDrid_indexed_cdnNotPt; - - case Hexagon::POST_LDrid_cPt : - return Hexagon::POST_LDrid_cdnPt_V4; - - case Hexagon::POST_LDrid_cNotPt : - return Hexagon::POST_LDrid_cdnNotPt_V4; - - // Load word - case Hexagon::LDriw_cPt : - return Hexagon::LDriw_cdnPt; - - case Hexagon::LDriw_cNotPt : - return Hexagon::LDriw_cdnNotPt; - - case Hexagon::LDriw_indexed_cPt : - return Hexagon::LDriw_indexed_cdnPt; - - case Hexagon::LDriw_indexed_cNotPt : - return Hexagon::LDriw_indexed_cdnNotPt; - - case Hexagon::POST_LDriw_cPt : - return Hexagon::POST_LDriw_cdnPt_V4; - - case Hexagon::POST_LDriw_cNotPt : - return Hexagon::POST_LDriw_cdnNotPt_V4; - - // Load halfword - case Hexagon::LDrih_cPt : - return Hexagon::LDrih_cdnPt; - - case Hexagon::LDrih_cNotPt : - return Hexagon::LDrih_cdnNotPt; - - case Hexagon::LDrih_indexed_cPt : - 
return Hexagon::LDrih_indexed_cdnPt; - - case Hexagon::LDrih_indexed_cNotPt : - return Hexagon::LDrih_indexed_cdnNotPt; - - case Hexagon::POST_LDrih_cPt : - return Hexagon::POST_LDrih_cdnPt_V4; - - case Hexagon::POST_LDrih_cNotPt : - return Hexagon::POST_LDrih_cdnNotPt_V4; - - // Load byte - case Hexagon::LDrib_cPt : - return Hexagon::LDrib_cdnPt; - - case Hexagon::LDrib_cNotPt : - return Hexagon::LDrib_cdnNotPt; - - case Hexagon::LDrib_indexed_cPt : - return Hexagon::LDrib_indexed_cdnPt; - - case Hexagon::LDrib_indexed_cNotPt : - return Hexagon::LDrib_indexed_cdnNotPt; - - case Hexagon::POST_LDrib_cPt : - return Hexagon::POST_LDrib_cdnPt_V4; - - case Hexagon::POST_LDrib_cNotPt : - return Hexagon::POST_LDrib_cdnNotPt_V4; - - // Load unsigned halfword - case Hexagon::LDriuh_cPt : - return Hexagon::LDriuh_cdnPt; - - case Hexagon::LDriuh_cNotPt : - return Hexagon::LDriuh_cdnNotPt; - - case Hexagon::LDriuh_indexed_cPt : - return Hexagon::LDriuh_indexed_cdnPt; - - case Hexagon::LDriuh_indexed_cNotPt : - return Hexagon::LDriuh_indexed_cdnNotPt; - - case Hexagon::POST_LDriuh_cPt : - return Hexagon::POST_LDriuh_cdnPt_V4; - - case Hexagon::POST_LDriuh_cNotPt : - return Hexagon::POST_LDriuh_cdnNotPt_V4; - - // Load unsigned byte - case Hexagon::LDriub_cPt : - return Hexagon::LDriub_cdnPt; - - case Hexagon::LDriub_cNotPt : - return Hexagon::LDriub_cdnNotPt; - - case Hexagon::LDriub_indexed_cPt : - return Hexagon::LDriub_indexed_cdnPt; - - case Hexagon::LDriub_indexed_cNotPt : - return Hexagon::LDriub_indexed_cdnNotPt; - - case Hexagon::POST_LDriub_cPt : - return Hexagon::POST_LDriub_cdnPt_V4; - - case Hexagon::POST_LDriub_cNotPt : - return Hexagon::POST_LDriub_cdnNotPt_V4; - - // V4 indexed+scaled load - - case Hexagon::LDrid_indexed_cPt_V4 : - return Hexagon::LDrid_indexed_cdnPt_V4; - - case Hexagon::LDrid_indexed_cNotPt_V4 : - return Hexagon::LDrid_indexed_cdnNotPt_V4; - - case Hexagon::LDrid_indexed_shl_cPt_V4 : - return Hexagon::LDrid_indexed_shl_cdnPt_V4; - - case Hexagon::LDrid_indexed_shl_cNotPt_V4 : - return Hexagon::LDrid_indexed_shl_cdnNotPt_V4; - - case Hexagon::LDrib_indexed_cPt_V4 : - return Hexagon::LDrib_indexed_cdnPt_V4; - - case Hexagon::LDrib_indexed_cNotPt_V4 : - return Hexagon::LDrib_indexed_cdnNotPt_V4; - - case Hexagon::LDrib_indexed_shl_cPt_V4 : - return Hexagon::LDrib_indexed_shl_cdnPt_V4; - - case Hexagon::LDrib_indexed_shl_cNotPt_V4 : - return Hexagon::LDrib_indexed_shl_cdnNotPt_V4; - - case Hexagon::LDriub_indexed_cPt_V4 : - return Hexagon::LDriub_indexed_cdnPt_V4; - - case Hexagon::LDriub_indexed_cNotPt_V4 : - return Hexagon::LDriub_indexed_cdnNotPt_V4; - - case Hexagon::LDriub_indexed_shl_cPt_V4 : - return Hexagon::LDriub_indexed_shl_cdnPt_V4; - - case Hexagon::LDriub_indexed_shl_cNotPt_V4 : - return Hexagon::LDriub_indexed_shl_cdnNotPt_V4; - - case Hexagon::LDrih_indexed_cPt_V4 : - return Hexagon::LDrih_indexed_cdnPt_V4; - - case Hexagon::LDrih_indexed_cNotPt_V4 : - return Hexagon::LDrih_indexed_cdnNotPt_V4; - - case Hexagon::LDrih_indexed_shl_cPt_V4 : - return Hexagon::LDrih_indexed_shl_cdnPt_V4; - - case Hexagon::LDrih_indexed_shl_cNotPt_V4 : - return Hexagon::LDrih_indexed_shl_cdnNotPt_V4; - - case Hexagon::LDriuh_indexed_cPt_V4 : - return Hexagon::LDriuh_indexed_cdnPt_V4; - - case Hexagon::LDriuh_indexed_cNotPt_V4 : - return Hexagon::LDriuh_indexed_cdnNotPt_V4; - - case Hexagon::LDriuh_indexed_shl_cPt_V4 : - return Hexagon::LDriuh_indexed_shl_cdnPt_V4; - - case Hexagon::LDriuh_indexed_shl_cNotPt_V4 : - return Hexagon::LDriuh_indexed_shl_cdnNotPt_V4; - - case 
Hexagon::LDriw_indexed_cPt_V4 : - return Hexagon::LDriw_indexed_cdnPt_V4; - - case Hexagon::LDriw_indexed_cNotPt_V4 : - return Hexagon::LDriw_indexed_cdnNotPt_V4; - - case Hexagon::LDriw_indexed_shl_cPt_V4 : - return Hexagon::LDriw_indexed_shl_cdnPt_V4; - - case Hexagon::LDriw_indexed_shl_cNotPt_V4 : - return Hexagon::LDriw_indexed_shl_cdnNotPt_V4; - - // V4 global address load - - case Hexagon::LDd_GP_cPt_V4: - return Hexagon::LDd_GP_cdnPt_V4; - - case Hexagon::LDd_GP_cNotPt_V4: - return Hexagon::LDd_GP_cdnNotPt_V4; - - case Hexagon::LDb_GP_cPt_V4: - return Hexagon::LDb_GP_cdnPt_V4; - - case Hexagon::LDb_GP_cNotPt_V4: - return Hexagon::LDb_GP_cdnNotPt_V4; - - case Hexagon::LDub_GP_cPt_V4: - return Hexagon::LDub_GP_cdnPt_V4; - - case Hexagon::LDub_GP_cNotPt_V4: - return Hexagon::LDub_GP_cdnNotPt_V4; - - case Hexagon::LDh_GP_cPt_V4: - return Hexagon::LDh_GP_cdnPt_V4; - - case Hexagon::LDh_GP_cNotPt_V4: - return Hexagon::LDh_GP_cdnNotPt_V4; - - case Hexagon::LDuh_GP_cPt_V4: - return Hexagon::LDuh_GP_cdnPt_V4; - - case Hexagon::LDuh_GP_cNotPt_V4: - return Hexagon::LDuh_GP_cdnNotPt_V4; - - case Hexagon::LDw_GP_cPt_V4: - return Hexagon::LDw_GP_cdnPt_V4; - - case Hexagon::LDw_GP_cNotPt_V4: - return Hexagon::LDw_GP_cdnNotPt_V4; - - case Hexagon::LDrid_GP_cPt_V4: - return Hexagon::LDrid_GP_cdnPt_V4; - - case Hexagon::LDrid_GP_cNotPt_V4: - return Hexagon::LDrid_GP_cdnNotPt_V4; - - case Hexagon::LDrib_GP_cPt_V4: - return Hexagon::LDrib_GP_cdnPt_V4; - - case Hexagon::LDrib_GP_cNotPt_V4: - return Hexagon::LDrib_GP_cdnNotPt_V4; - - case Hexagon::LDriub_GP_cPt_V4: - return Hexagon::LDriub_GP_cdnPt_V4; - - case Hexagon::LDriub_GP_cNotPt_V4: - return Hexagon::LDriub_GP_cdnNotPt_V4; - - case Hexagon::LDrih_GP_cPt_V4: - return Hexagon::LDrih_GP_cdnPt_V4; - - case Hexagon::LDrih_GP_cNotPt_V4: - return Hexagon::LDrih_GP_cdnNotPt_V4; - - case Hexagon::LDriuh_GP_cPt_V4: - return Hexagon::LDriuh_GP_cdnPt_V4; - - case Hexagon::LDriuh_GP_cNotPt_V4: - return Hexagon::LDriuh_GP_cdnNotPt_V4; - - case Hexagon::LDriw_GP_cPt_V4: - return Hexagon::LDriw_GP_cdnPt_V4; - - case Hexagon::LDriw_GP_cNotPt_V4: - return Hexagon::LDriw_GP_cdnNotPt_V4; - - // Conditional store new-value byte - case Hexagon::STrib_cPt_nv_V4 : - return Hexagon::STrib_cdnPt_nv_V4; - case Hexagon::STrib_cNotPt_nv_V4 : - return Hexagon::STrib_cdnNotPt_nv_V4; - - case Hexagon::STrib_indexed_cPt_nv_V4 : - return Hexagon::STrib_indexed_cdnPt_nv_V4; - case Hexagon::STrib_indexed_cNotPt_nv_V4 : - return Hexagon::STrib_indexed_cdnNotPt_nv_V4; - - case Hexagon::STrib_indexed_shl_cPt_nv_V4 : - return Hexagon::STrib_indexed_shl_cdnPt_nv_V4; - case Hexagon::STrib_indexed_shl_cNotPt_nv_V4 : - return Hexagon::STrib_indexed_shl_cdnNotPt_nv_V4; - - case Hexagon::POST_STbri_cPt_nv_V4 : - return Hexagon::POST_STbri_cdnPt_nv_V4; - case Hexagon::POST_STbri_cNotPt_nv_V4 : - return Hexagon::POST_STbri_cdnNotPt_nv_V4; - - case Hexagon::STb_GP_cPt_nv_V4 : - return Hexagon::STb_GP_cdnPt_nv_V4; - - case Hexagon::STb_GP_cNotPt_nv_V4 : - return Hexagon::STb_GP_cdnNotPt_nv_V4; - - case Hexagon::STrib_GP_cPt_nv_V4 : - return Hexagon::STrib_GP_cdnPt_nv_V4; - - case Hexagon::STrib_GP_cNotPt_nv_V4 : - return Hexagon::STrib_GP_cdnNotPt_nv_V4; - - // Conditional store new-value halfword - case Hexagon::STrih_cPt_nv_V4 : - return Hexagon::STrih_cdnPt_nv_V4; - case Hexagon::STrih_cNotPt_nv_V4 : - return Hexagon::STrih_cdnNotPt_nv_V4; - - case Hexagon::STrih_indexed_cPt_nv_V4 : - return Hexagon::STrih_indexed_cdnPt_nv_V4; - case Hexagon::STrih_indexed_cNotPt_nv_V4 : - return 
Hexagon::STrih_indexed_cdnNotPt_nv_V4; - - case Hexagon::STrih_indexed_shl_cPt_nv_V4 : - return Hexagon::STrih_indexed_shl_cdnPt_nv_V4; - case Hexagon::STrih_indexed_shl_cNotPt_nv_V4 : - return Hexagon::STrih_indexed_shl_cdnNotPt_nv_V4; - - case Hexagon::POST_SThri_cPt_nv_V4 : - return Hexagon::POST_SThri_cdnPt_nv_V4; - case Hexagon::POST_SThri_cNotPt_nv_V4 : - return Hexagon::POST_SThri_cdnNotPt_nv_V4; - - case Hexagon::STh_GP_cPt_nv_V4 : - return Hexagon::STh_GP_cdnPt_nv_V4; - - case Hexagon::STh_GP_cNotPt_nv_V4 : - return Hexagon::STh_GP_cdnNotPt_nv_V4; - - case Hexagon::STrih_GP_cPt_nv_V4 : - return Hexagon::STrih_GP_cdnPt_nv_V4; - - case Hexagon::STrih_GP_cNotPt_nv_V4 : - return Hexagon::STrih_GP_cdnNotPt_nv_V4; - - // Conditional store new-value word - case Hexagon::STriw_cPt_nv_V4 : - return Hexagon::STriw_cdnPt_nv_V4; - case Hexagon::STriw_cNotPt_nv_V4 : - return Hexagon::STriw_cdnNotPt_nv_V4; - - case Hexagon::STriw_indexed_cPt_nv_V4 : - return Hexagon::STriw_indexed_cdnPt_nv_V4; - case Hexagon::STriw_indexed_cNotPt_nv_V4 : - return Hexagon::STriw_indexed_cdnNotPt_nv_V4; - - case Hexagon::STriw_indexed_shl_cPt_nv_V4 : - return Hexagon::STriw_indexed_shl_cdnPt_nv_V4; - case Hexagon::STriw_indexed_shl_cNotPt_nv_V4 : - return Hexagon::STriw_indexed_shl_cdnNotPt_nv_V4; - - case Hexagon::POST_STwri_cPt_nv_V4 : - return Hexagon::POST_STwri_cdnPt_nv_V4; - case Hexagon::POST_STwri_cNotPt_nv_V4: - return Hexagon::POST_STwri_cdnNotPt_nv_V4; - - case Hexagon::STw_GP_cPt_nv_V4 : - return Hexagon::STw_GP_cdnPt_nv_V4; - - case Hexagon::STw_GP_cNotPt_nv_V4 : - return Hexagon::STw_GP_cdnNotPt_nv_V4; - - case Hexagon::STriw_GP_cPt_nv_V4 : - return Hexagon::STriw_GP_cdnPt_nv_V4; - - case Hexagon::STriw_GP_cNotPt_nv_V4 : - return Hexagon::STriw_GP_cdnNotPt_nv_V4; - - // Conditional add - case Hexagon::ADD_ri_cPt : - return Hexagon::ADD_ri_cdnPt; - case Hexagon::ADD_ri_cNotPt : - return Hexagon::ADD_ri_cdnNotPt; - - case Hexagon::ADD_rr_cPt : - return Hexagon::ADD_rr_cdnPt; - case Hexagon::ADD_rr_cNotPt : - return Hexagon::ADD_rr_cdnNotPt; - - // Conditional logical Operations - case Hexagon::XOR_rr_cPt : - return Hexagon::XOR_rr_cdnPt; - case Hexagon::XOR_rr_cNotPt : - return Hexagon::XOR_rr_cdnNotPt; - - case Hexagon::AND_rr_cPt : - return Hexagon::AND_rr_cdnPt; - case Hexagon::AND_rr_cNotPt : - return Hexagon::AND_rr_cdnNotPt; - - case Hexagon::OR_rr_cPt : - return Hexagon::OR_rr_cdnPt; - case Hexagon::OR_rr_cNotPt : - return Hexagon::OR_rr_cdnNotPt; - - // Conditional Subtract - case Hexagon::SUB_rr_cPt : - return Hexagon::SUB_rr_cdnPt; - case Hexagon::SUB_rr_cNotPt : - return Hexagon::SUB_rr_cdnNotPt; - - // Conditional combine - case Hexagon::COMBINE_rr_cPt : - return Hexagon::COMBINE_rr_cdnPt; - case Hexagon::COMBINE_rr_cNotPt : - return Hexagon::COMBINE_rr_cdnNotPt; - - case Hexagon::ASLH_cPt_V4 : - return Hexagon::ASLH_cdnPt_V4; - case Hexagon::ASLH_cNotPt_V4 : - return Hexagon::ASLH_cdnNotPt_V4; - - case Hexagon::ASRH_cPt_V4 : - return Hexagon::ASRH_cdnPt_V4; - case Hexagon::ASRH_cNotPt_V4 : - return Hexagon::ASRH_cdnNotPt_V4; - - case Hexagon::SXTB_cPt_V4 : - return Hexagon::SXTB_cdnPt_V4; - case Hexagon::SXTB_cNotPt_V4 : - return Hexagon::SXTB_cdnNotPt_V4; - - case Hexagon::SXTH_cPt_V4 : - return Hexagon::SXTH_cdnPt_V4; - case Hexagon::SXTH_cNotPt_V4 : - return Hexagon::SXTH_cdnNotPt_V4; - - case Hexagon::ZXTB_cPt_V4 : - return Hexagon::ZXTB_cdnPt_V4; - case Hexagon::ZXTB_cNotPt_V4 : - return Hexagon::ZXTB_cdnNotPt_V4; - - case Hexagon::ZXTH_cPt_V4 : - return Hexagon::ZXTH_cdnPt_V4; - 
case Hexagon::ZXTH_cNotPt_V4 :
-    return Hexagon::ZXTH_cdnNotPt_V4;
-  }
-}
-
-// Returns true if an instruction can be promoted to .new predicate
-// or new-value store.
-bool HexagonPacketizerList::isNewifiable(MachineInstr* MI) {
-  if ( isCondInst(MI) || IsNewifyStore(MI))
-    return true;
-  else
-    return false;
-}
-
-bool HexagonPacketizerList::isCondInst (MachineInstr* MI) {
-  const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII;
-  const MCInstrDesc& TID = MI->getDesc();
-  // bug 5670: until that is fixed,
-  // this portion is disabled.
-  if (   TID.isConditionalBranch()  // && !IsRegisterJump(MI)) ||
-      || QII->isConditionalTransfer(MI)
-      || QII->isConditionalALU32(MI)
-      || QII->isConditionalLoad(MI)
-      || QII->isConditionalStore(MI)) {
-    return true;
-  }
-  return false;
-}
-
-
-// Promote an instruction to its .new form.
-// At this time, we have already made a call to CanPromoteToDotNew
-// and made sure that it can *indeed* be promoted.
-bool HexagonPacketizerList::PromoteToDotNew(MachineInstr* MI,
-                        SDep::Kind DepType, MachineBasicBlock::iterator &MII,
-                        const TargetRegisterClass* RC) {
-
-  assert (DepType == SDep::Data);
-  const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII;
-
-  int NewOpcode;
-  if (RC == Hexagon::PredRegsRegisterClass)
-    NewOpcode = GetDotNewPredOp(MI->getOpcode());
-  else
-    NewOpcode = GetDotNewOp(MI->getOpcode());
-  MI->setDesc(QII->get(NewOpcode));
-
-  return true;
-}
-
-// Returns the most basic instruction for the .new predicated instructions and
-// new-value stores.
-// For example, all of the following instructions will be converted back to the
-// same instruction:
-// 1) if (p0.new) memw(R0+#0) = R1.new  --->
-// 2) if (p0) memw(R0+#0) = R1.new      -------> if (p0) memw(R0+#0) = R1
-// 3) if (p0.new) memw(R0+#0) = R1      --->
-//
-// To understand the translation of instruction 1 to its original form,
-// consider a packet with 3 instructions.
-// { p0 = cmp.eq(R0,R1)
-//   if (p0.new) R2 = add(R3, R4)
-//   R5 = add (R3, R1)
-// }
-// if (p0) memw(R5+#0) = R2   <--- trying to include it in the previous packet
-//
-// This instruction can be part of the previous packet only if both p0 and R2
-// are promoted to .new values. This promotion happens in steps: first the
-// predicate register is promoted to .new, and in the next iteration R2 is
-// promoted. Therefore, in case of a dependence check failure (due to R5)
-// during the next iteration, it should be converted back to its most basic
-// form.
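In other words, promotion and demotion are inverse opcode tables, and the packetizer relies on the round trip being lossless. A toy illustration of that contract, using invented enumerators rather than the real Hexagon:: opcode set; GetDotOldOp, which implements the real demotion table, follows.

#include <cassert>
#include <cstdio>

// Invented opcodes standing in for the Hexagon:: enumerators.
enum ToyOpcode { TFR_cPt, TFR_cdnPt, STriw_cPt, STriw_cdnPt };

// Promotion: predicated form -> .new predicated form (cf. GetDotNewPredOp).
ToyOpcode getDotNewPredOp(ToyOpcode Op) {
  switch (Op) {
  case TFR_cPt:   return TFR_cdnPt;
  case STriw_cPt: return STriw_cdnPt;
  default:        return Op;
  }
}

// Demotion: .new form -> most basic form (cf. GetDotOldOp below).
ToyOpcode getDotOldOp(ToyOpcode Op) {
  switch (Op) {
  case TFR_cdnPt:   return TFR_cPt;
  case STriw_cdnPt: return STriw_cPt;
  default:          return Op;
  }
}

int main() {
  // Try to add a promoted instruction to the packet; if a later dependence
  // check fails, the packetizer must be able to recover the original opcode.
  ToyOpcode Op = STriw_cPt;
  ToyOpcode Promoted = getDotNewPredOp(Op);
  bool PacketizationFailed = true;   // pretend a dependence was found
  if (PacketizationFailed)
    Op = getDotOldOp(Promoted);      // round-trips to the basic form
  assert(Op == STriw_cPt);
  std::printf("round-trip ok\n");
  return 0;
}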
- -static int GetDotOldOp(const int opc) { - switch (opc) { - default: llvm_unreachable("Unknown .old type"); - - case Hexagon::TFR_cdnPt: - return Hexagon::TFR_cPt; - - case Hexagon::TFR_cdnNotPt: - return Hexagon::TFR_cNotPt; - - case Hexagon::TFRI_cdnPt: - return Hexagon::TFRI_cPt; - - case Hexagon::TFRI_cdnNotPt: - return Hexagon::TFRI_cNotPt; - - case Hexagon::JMP_cdnPt: - return Hexagon::JMP_c; - - case Hexagon::JMP_cdnNotPt: - return Hexagon::JMP_cNot; - - case Hexagon::JMPR_cdnPt_V3: - return Hexagon::JMPR_cPt; - - case Hexagon::JMPR_cdnNotPt_V3: - return Hexagon::JMPR_cNotPt; - - // Load double word - - case Hexagon::LDrid_cdnPt : - return Hexagon::LDrid_cPt; - - case Hexagon::LDrid_cdnNotPt : - return Hexagon::LDrid_cNotPt; - - case Hexagon::LDrid_indexed_cdnPt : - return Hexagon::LDrid_indexed_cPt; - - case Hexagon::LDrid_indexed_cdnNotPt : - return Hexagon::LDrid_indexed_cNotPt; - - case Hexagon::POST_LDrid_cdnPt_V4 : - return Hexagon::POST_LDrid_cPt; - - case Hexagon::POST_LDrid_cdnNotPt_V4 : - return Hexagon::POST_LDrid_cNotPt; - - // Load word - - case Hexagon::LDriw_cdnPt : - return Hexagon::LDriw_cPt; - - case Hexagon::LDriw_cdnNotPt : - return Hexagon::LDriw_cNotPt; - - case Hexagon::LDriw_indexed_cdnPt : - return Hexagon::LDriw_indexed_cPt; - - case Hexagon::LDriw_indexed_cdnNotPt : - return Hexagon::LDriw_indexed_cNotPt; - - case Hexagon::POST_LDriw_cdnPt_V4 : - return Hexagon::POST_LDriw_cPt; - - case Hexagon::POST_LDriw_cdnNotPt_V4 : - return Hexagon::POST_LDriw_cNotPt; - - // Load half - - case Hexagon::LDrih_cdnPt : - return Hexagon::LDrih_cPt; - - case Hexagon::LDrih_cdnNotPt : - return Hexagon::LDrih_cNotPt; - - case Hexagon::LDrih_indexed_cdnPt : - return Hexagon::LDrih_indexed_cPt; - - case Hexagon::LDrih_indexed_cdnNotPt : - return Hexagon::LDrih_indexed_cNotPt; - - case Hexagon::POST_LDrih_cdnPt_V4 : - return Hexagon::POST_LDrih_cPt; - - case Hexagon::POST_LDrih_cdnNotPt_V4 : - return Hexagon::POST_LDrih_cNotPt; - - // Load byte - - case Hexagon::LDrib_cdnPt : - return Hexagon::LDrib_cPt; - - case Hexagon::LDrib_cdnNotPt : - return Hexagon::LDrib_cNotPt; - - case Hexagon::LDrib_indexed_cdnPt : - return Hexagon::LDrib_indexed_cPt; - - case Hexagon::LDrib_indexed_cdnNotPt : - return Hexagon::LDrib_indexed_cNotPt; - - case Hexagon::POST_LDrib_cdnPt_V4 : - return Hexagon::POST_LDrib_cPt; - - case Hexagon::POST_LDrib_cdnNotPt_V4 : - return Hexagon::POST_LDrib_cNotPt; - - // Load unsigned half - - case Hexagon::LDriuh_cdnPt : - return Hexagon::LDriuh_cPt; - - case Hexagon::LDriuh_cdnNotPt : - return Hexagon::LDriuh_cNotPt; - - case Hexagon::LDriuh_indexed_cdnPt : - return Hexagon::LDriuh_indexed_cPt; - - case Hexagon::LDriuh_indexed_cdnNotPt : - return Hexagon::LDriuh_indexed_cNotPt; - - case Hexagon::POST_LDriuh_cdnPt_V4 : - return Hexagon::POST_LDriuh_cPt; - - case Hexagon::POST_LDriuh_cdnNotPt_V4 : - return Hexagon::POST_LDriuh_cNotPt; - - // Load unsigned byte - case Hexagon::LDriub_cdnPt : - return Hexagon::LDriub_cPt; - - case Hexagon::LDriub_cdnNotPt : - return Hexagon::LDriub_cNotPt; - - case Hexagon::LDriub_indexed_cdnPt : - return Hexagon::LDriub_indexed_cPt; - - case Hexagon::LDriub_indexed_cdnNotPt : - return Hexagon::LDriub_indexed_cNotPt; - - case Hexagon::POST_LDriub_cdnPt_V4 : - return Hexagon::POST_LDriub_cPt; - - case Hexagon::POST_LDriub_cdnNotPt_V4 : - return Hexagon::POST_LDriub_cNotPt; - - // V4 indexed+scaled Load - - case Hexagon::LDrid_indexed_cdnPt_V4 : - return Hexagon::LDrid_indexed_cPt_V4; - - case Hexagon::LDrid_indexed_cdnNotPt_V4 : - 
return Hexagon::LDrid_indexed_cNotPt_V4; - - case Hexagon::LDrid_indexed_shl_cdnPt_V4 : - return Hexagon::LDrid_indexed_shl_cPt_V4; - - case Hexagon::LDrid_indexed_shl_cdnNotPt_V4 : - return Hexagon::LDrid_indexed_shl_cNotPt_V4; - - case Hexagon::LDrib_indexed_cdnPt_V4 : - return Hexagon::LDrib_indexed_cPt_V4; - - case Hexagon::LDrib_indexed_cdnNotPt_V4 : - return Hexagon::LDrib_indexed_cNotPt_V4; - - case Hexagon::LDrib_indexed_shl_cdnPt_V4 : - return Hexagon::LDrib_indexed_shl_cPt_V4; - - case Hexagon::LDrib_indexed_shl_cdnNotPt_V4 : - return Hexagon::LDrib_indexed_shl_cNotPt_V4; - - case Hexagon::LDriub_indexed_cdnPt_V4 : - return Hexagon::LDriub_indexed_cPt_V4; - - case Hexagon::LDriub_indexed_cdnNotPt_V4 : - return Hexagon::LDriub_indexed_cNotPt_V4; - - case Hexagon::LDriub_indexed_shl_cdnPt_V4 : - return Hexagon::LDriub_indexed_shl_cPt_V4; - - case Hexagon::LDriub_indexed_shl_cdnNotPt_V4 : - return Hexagon::LDriub_indexed_shl_cNotPt_V4; - - case Hexagon::LDrih_indexed_cdnPt_V4 : - return Hexagon::LDrih_indexed_cPt_V4; - - case Hexagon::LDrih_indexed_cdnNotPt_V4 : - return Hexagon::LDrih_indexed_cNotPt_V4; - - case Hexagon::LDrih_indexed_shl_cdnPt_V4 : - return Hexagon::LDrih_indexed_shl_cPt_V4; - - case Hexagon::LDrih_indexed_shl_cdnNotPt_V4 : - return Hexagon::LDrih_indexed_shl_cNotPt_V4; - - case Hexagon::LDriuh_indexed_cdnPt_V4 : - return Hexagon::LDriuh_indexed_cPt_V4; - - case Hexagon::LDriuh_indexed_cdnNotPt_V4 : - return Hexagon::LDriuh_indexed_cNotPt_V4; - - case Hexagon::LDriuh_indexed_shl_cdnPt_V4 : - return Hexagon::LDriuh_indexed_shl_cPt_V4; - - case Hexagon::LDriuh_indexed_shl_cdnNotPt_V4 : - return Hexagon::LDriuh_indexed_shl_cNotPt_V4; - - case Hexagon::LDriw_indexed_cdnPt_V4 : - return Hexagon::LDriw_indexed_cPt_V4; - - case Hexagon::LDriw_indexed_cdnNotPt_V4 : - return Hexagon::LDriw_indexed_cNotPt_V4; - - case Hexagon::LDriw_indexed_shl_cdnPt_V4 : - return Hexagon::LDriw_indexed_shl_cPt_V4; - - case Hexagon::LDriw_indexed_shl_cdnNotPt_V4 : - return Hexagon::LDriw_indexed_shl_cNotPt_V4; - - // V4 global address load - - case Hexagon::LDd_GP_cdnPt_V4: - return Hexagon::LDd_GP_cPt_V4; - - case Hexagon::LDd_GP_cdnNotPt_V4: - return Hexagon::LDd_GP_cNotPt_V4; - - case Hexagon::LDb_GP_cdnPt_V4: - return Hexagon::LDb_GP_cPt_V4; - - case Hexagon::LDb_GP_cdnNotPt_V4: - return Hexagon::LDb_GP_cNotPt_V4; - - case Hexagon::LDub_GP_cdnPt_V4: - return Hexagon::LDub_GP_cPt_V4; - - case Hexagon::LDub_GP_cdnNotPt_V4: - return Hexagon::LDub_GP_cNotPt_V4; - - case Hexagon::LDh_GP_cdnPt_V4: - return Hexagon::LDh_GP_cPt_V4; - - case Hexagon::LDh_GP_cdnNotPt_V4: - return Hexagon::LDh_GP_cNotPt_V4; - - case Hexagon::LDuh_GP_cdnPt_V4: - return Hexagon::LDuh_GP_cPt_V4; - - case Hexagon::LDuh_GP_cdnNotPt_V4: - return Hexagon::LDuh_GP_cNotPt_V4; - - case Hexagon::LDw_GP_cdnPt_V4: - return Hexagon::LDw_GP_cPt_V4; - - case Hexagon::LDw_GP_cdnNotPt_V4: - return Hexagon::LDw_GP_cNotPt_V4; - - case Hexagon::LDrid_GP_cdnPt_V4: - return Hexagon::LDrid_GP_cPt_V4; - - case Hexagon::LDrid_GP_cdnNotPt_V4: - return Hexagon::LDrid_GP_cNotPt_V4; - - case Hexagon::LDrib_GP_cdnPt_V4: - return Hexagon::LDrib_GP_cPt_V4; - - case Hexagon::LDrib_GP_cdnNotPt_V4: - return Hexagon::LDrib_GP_cNotPt_V4; - - case Hexagon::LDriub_GP_cdnPt_V4: - return Hexagon::LDriub_GP_cPt_V4; - - case Hexagon::LDriub_GP_cdnNotPt_V4: - return Hexagon::LDriub_GP_cNotPt_V4; - - case Hexagon::LDrih_GP_cdnPt_V4: - return Hexagon::LDrih_GP_cPt_V4; - - case Hexagon::LDrih_GP_cdnNotPt_V4: - return Hexagon::LDrih_GP_cNotPt_V4; - - case 
Hexagon::LDriuh_GP_cdnPt_V4: - return Hexagon::LDriuh_GP_cPt_V4; - - case Hexagon::LDriuh_GP_cdnNotPt_V4: - return Hexagon::LDriuh_GP_cNotPt_V4; - - case Hexagon::LDriw_GP_cdnPt_V4: - return Hexagon::LDriw_GP_cPt_V4; - - case Hexagon::LDriw_GP_cdnNotPt_V4: - return Hexagon::LDriw_GP_cNotPt_V4; - - // Conditional add - - case Hexagon::ADD_ri_cdnPt : - return Hexagon::ADD_ri_cPt; - case Hexagon::ADD_ri_cdnNotPt : - return Hexagon::ADD_ri_cNotPt; - - case Hexagon::ADD_rr_cdnPt : - return Hexagon::ADD_rr_cPt; - case Hexagon::ADD_rr_cdnNotPt: - return Hexagon::ADD_rr_cNotPt; - - // Conditional logical Operations - - case Hexagon::XOR_rr_cdnPt : - return Hexagon::XOR_rr_cPt; - case Hexagon::XOR_rr_cdnNotPt : - return Hexagon::XOR_rr_cNotPt; - - case Hexagon::AND_rr_cdnPt : - return Hexagon::AND_rr_cPt; - case Hexagon::AND_rr_cdnNotPt : - return Hexagon::AND_rr_cNotPt; - - case Hexagon::OR_rr_cdnPt : - return Hexagon::OR_rr_cPt; - case Hexagon::OR_rr_cdnNotPt : - return Hexagon::OR_rr_cNotPt; - - // Conditional Subtract - - case Hexagon::SUB_rr_cdnPt : - return Hexagon::SUB_rr_cPt; - case Hexagon::SUB_rr_cdnNotPt : - return Hexagon::SUB_rr_cNotPt; - - // Conditional combine - - case Hexagon::COMBINE_rr_cdnPt : - return Hexagon::COMBINE_rr_cPt; - case Hexagon::COMBINE_rr_cdnNotPt : - return Hexagon::COMBINE_rr_cNotPt; - -// Conditional shift operations - - case Hexagon::ASLH_cdnPt_V4 : - return Hexagon::ASLH_cPt_V4; - case Hexagon::ASLH_cdnNotPt_V4 : - return Hexagon::ASLH_cNotPt_V4; - - case Hexagon::ASRH_cdnPt_V4 : - return Hexagon::ASRH_cPt_V4; - case Hexagon::ASRH_cdnNotPt_V4 : - return Hexagon::ASRH_cNotPt_V4; - - case Hexagon::SXTB_cdnPt_V4 : - return Hexagon::SXTB_cPt_V4; - case Hexagon::SXTB_cdnNotPt_V4 : - return Hexagon::SXTB_cNotPt_V4; - - case Hexagon::SXTH_cdnPt_V4 : - return Hexagon::SXTH_cPt_V4; - case Hexagon::SXTH_cdnNotPt_V4 : - return Hexagon::SXTH_cNotPt_V4; - - case Hexagon::ZXTB_cdnPt_V4 : - return Hexagon::ZXTB_cPt_V4; - case Hexagon::ZXTB_cdnNotPt_V4 : - return Hexagon::ZXTB_cNotPt_V4; - - case Hexagon::ZXTH_cdnPt_V4 : - return Hexagon::ZXTH_cPt_V4; - case Hexagon::ZXTH_cdnNotPt_V4 : - return Hexagon::ZXTH_cNotPt_V4; - - // Store byte - - case Hexagon::STrib_imm_cdnPt_V4 : - return Hexagon::STrib_imm_cPt_V4; - - case Hexagon::STrib_imm_cdnNotPt_V4 : - return Hexagon::STrib_imm_cNotPt_V4; - - case Hexagon::STrib_cdnPt_nv_V4 : - case Hexagon::STrib_cPt_nv_V4 : - case Hexagon::STrib_cdnPt_V4 : - return Hexagon::STrib_cPt; - - case Hexagon::STrib_cdnNotPt_nv_V4 : - case Hexagon::STrib_cNotPt_nv_V4 : - case Hexagon::STrib_cdnNotPt_V4 : - return Hexagon::STrib_cNotPt; - - case Hexagon::STrib_indexed_cdnPt_V4 : - case Hexagon::STrib_indexed_cPt_nv_V4 : - case Hexagon::STrib_indexed_cdnPt_nv_V4 : - return Hexagon::STrib_indexed_cPt; - - case Hexagon::STrib_indexed_cdnNotPt_V4 : - case Hexagon::STrib_indexed_cNotPt_nv_V4 : - case Hexagon::STrib_indexed_cdnNotPt_nv_V4 : - return Hexagon::STrib_indexed_cNotPt; - - case Hexagon::STrib_indexed_shl_cdnPt_nv_V4: - case Hexagon::STrib_indexed_shl_cPt_nv_V4 : - case Hexagon::STrib_indexed_shl_cdnPt_V4 : - return Hexagon::STrib_indexed_shl_cPt_V4; - - case Hexagon::STrib_indexed_shl_cdnNotPt_nv_V4: - case Hexagon::STrib_indexed_shl_cNotPt_nv_V4 : - case Hexagon::STrib_indexed_shl_cdnNotPt_V4 : - return Hexagon::STrib_indexed_shl_cNotPt_V4; - - case Hexagon::POST_STbri_cdnPt_nv_V4 : - case Hexagon::POST_STbri_cPt_nv_V4 : - case Hexagon::POST_STbri_cdnPt_V4 : - return Hexagon::POST_STbri_cPt; - - case Hexagon::POST_STbri_cdnNotPt_nv_V4 : - 
case Hexagon::POST_STbri_cNotPt_nv_V4: - case Hexagon::POST_STbri_cdnNotPt_V4 : - return Hexagon::POST_STbri_cNotPt; - - case Hexagon::STb_GP_cdnPt_nv_V4: - case Hexagon::STb_GP_cdnPt_V4: - case Hexagon::STb_GP_cPt_nv_V4: - return Hexagon::STb_GP_cPt_V4; - - case Hexagon::STb_GP_cdnNotPt_nv_V4: - case Hexagon::STb_GP_cdnNotPt_V4: - case Hexagon::STb_GP_cNotPt_nv_V4: - return Hexagon::STb_GP_cNotPt_V4; - - case Hexagon::STrib_GP_cdnPt_nv_V4: - case Hexagon::STrib_GP_cdnPt_V4: - case Hexagon::STrib_GP_cPt_nv_V4: - return Hexagon::STrib_GP_cPt_V4; - - case Hexagon::STrib_GP_cdnNotPt_nv_V4: - case Hexagon::STrib_GP_cdnNotPt_V4: - case Hexagon::STrib_GP_cNotPt_nv_V4: - return Hexagon::STrib_GP_cNotPt_V4; - - // Store new-value byte - unconditional - case Hexagon::STrib_nv_V4: - return Hexagon::STrib; - - case Hexagon::STrib_indexed_nv_V4: - return Hexagon::STrib_indexed; - - case Hexagon::STrib_indexed_shl_nv_V4: - return Hexagon::STrib_indexed_shl_V4; - - case Hexagon::STrib_shl_nv_V4: - return Hexagon::STrib_shl_V4; - - case Hexagon::STrib_GP_nv_V4: - return Hexagon::STrib_GP_V4; - - case Hexagon::STb_GP_nv_V4: - return Hexagon::STb_GP_V4; - - case Hexagon::POST_STbri_nv_V4: - return Hexagon::POST_STbri; - - // Store halfword - case Hexagon::STrih_imm_cdnPt_V4 : - return Hexagon::STrih_imm_cPt_V4; - - case Hexagon::STrih_imm_cdnNotPt_V4 : - return Hexagon::STrih_imm_cNotPt_V4; - - case Hexagon::STrih_cdnPt_nv_V4 : - case Hexagon::STrih_cPt_nv_V4 : - case Hexagon::STrih_cdnPt_V4 : - return Hexagon::STrih_cPt; - - case Hexagon::STrih_cdnNotPt_nv_V4 : - case Hexagon::STrih_cNotPt_nv_V4 : - case Hexagon::STrih_cdnNotPt_V4 : - return Hexagon::STrih_cNotPt; - - case Hexagon::STrih_indexed_cdnPt_nv_V4: - case Hexagon::STrih_indexed_cPt_nv_V4 : - case Hexagon::STrih_indexed_cdnPt_V4 : - return Hexagon::STrih_indexed_cPt; - - case Hexagon::STrih_indexed_cdnNotPt_nv_V4: - case Hexagon::STrih_indexed_cNotPt_nv_V4 : - case Hexagon::STrih_indexed_cdnNotPt_V4 : - return Hexagon::STrih_indexed_cNotPt; - - case Hexagon::STrih_indexed_shl_cdnPt_nv_V4 : - case Hexagon::STrih_indexed_shl_cPt_nv_V4 : - case Hexagon::STrih_indexed_shl_cdnPt_V4 : - return Hexagon::STrih_indexed_shl_cPt_V4; - - case Hexagon::STrih_indexed_shl_cdnNotPt_nv_V4 : - case Hexagon::STrih_indexed_shl_cNotPt_nv_V4 : - case Hexagon::STrih_indexed_shl_cdnNotPt_V4 : - return Hexagon::STrih_indexed_shl_cNotPt_V4; - - case Hexagon::POST_SThri_cdnPt_nv_V4 : - case Hexagon::POST_SThri_cPt_nv_V4 : - case Hexagon::POST_SThri_cdnPt_V4 : - return Hexagon::POST_SThri_cPt; - - case Hexagon::POST_SThri_cdnNotPt_nv_V4 : - case Hexagon::POST_SThri_cNotPt_nv_V4 : - case Hexagon::POST_SThri_cdnNotPt_V4 : - return Hexagon::POST_SThri_cNotPt; - - case Hexagon::STh_GP_cdnPt_nv_V4: - case Hexagon::STh_GP_cdnPt_V4: - case Hexagon::STh_GP_cPt_nv_V4: - return Hexagon::STh_GP_cPt_V4; - - case Hexagon::STh_GP_cdnNotPt_nv_V4: - case Hexagon::STh_GP_cdnNotPt_V4: - case Hexagon::STh_GP_cNotPt_nv_V4: - return Hexagon::STh_GP_cNotPt_V4; - - case Hexagon::STrih_GP_cdnPt_nv_V4: - case Hexagon::STrih_GP_cdnPt_V4: - case Hexagon::STrih_GP_cPt_nv_V4: - return Hexagon::STrih_GP_cPt_V4; - - case Hexagon::STrih_GP_cdnNotPt_nv_V4: - case Hexagon::STrih_GP_cdnNotPt_V4: - case Hexagon::STrih_GP_cNotPt_nv_V4: - return Hexagon::STrih_GP_cNotPt_V4; - - // Store new-value halfword - unconditional - - case Hexagon::STrih_nv_V4: - return Hexagon::STrih; - - case Hexagon::STrih_indexed_nv_V4: - return Hexagon::STrih_indexed; - - case Hexagon::STrih_indexed_shl_nv_V4: - return 
Hexagon::STrih_indexed_shl_V4; - - case Hexagon::STrih_shl_nv_V4: - return Hexagon::STrih_shl_V4; - - case Hexagon::STrih_GP_nv_V4: - return Hexagon::STrih_GP_V4; - - case Hexagon::STh_GP_nv_V4: - return Hexagon::STh_GP_V4; - - case Hexagon::POST_SThri_nv_V4: - return Hexagon::POST_SThri; - - // Store word - - case Hexagon::STriw_imm_cdnPt_V4 : - return Hexagon::STriw_imm_cPt_V4; - - case Hexagon::STriw_imm_cdnNotPt_V4 : - return Hexagon::STriw_imm_cNotPt_V4; - - case Hexagon::STriw_cdnPt_nv_V4 : - case Hexagon::STriw_cPt_nv_V4 : - case Hexagon::STriw_cdnPt_V4 : - return Hexagon::STriw_cPt; - - case Hexagon::STriw_cdnNotPt_nv_V4 : - case Hexagon::STriw_cNotPt_nv_V4 : - case Hexagon::STriw_cdnNotPt_V4 : - return Hexagon::STriw_cNotPt; - - case Hexagon::STriw_indexed_cdnPt_nv_V4 : - case Hexagon::STriw_indexed_cPt_nv_V4 : - case Hexagon::STriw_indexed_cdnPt_V4 : - return Hexagon::STriw_indexed_cPt; - - case Hexagon::STriw_indexed_cdnNotPt_nv_V4 : - case Hexagon::STriw_indexed_cNotPt_nv_V4 : - case Hexagon::STriw_indexed_cdnNotPt_V4 : - return Hexagon::STriw_indexed_cNotPt; - - case Hexagon::STriw_indexed_shl_cdnPt_nv_V4 : - case Hexagon::STriw_indexed_shl_cPt_nv_V4 : - case Hexagon::STriw_indexed_shl_cdnPt_V4 : - return Hexagon::STriw_indexed_shl_cPt_V4; - - case Hexagon::STriw_indexed_shl_cdnNotPt_nv_V4 : - case Hexagon::STriw_indexed_shl_cNotPt_nv_V4 : - case Hexagon::STriw_indexed_shl_cdnNotPt_V4 : - return Hexagon::STriw_indexed_shl_cNotPt_V4; - - case Hexagon::POST_STwri_cdnPt_nv_V4 : - case Hexagon::POST_STwri_cPt_nv_V4 : - case Hexagon::POST_STwri_cdnPt_V4 : - return Hexagon::POST_STwri_cPt; - - case Hexagon::POST_STwri_cdnNotPt_nv_V4 : - case Hexagon::POST_STwri_cNotPt_nv_V4 : - case Hexagon::POST_STwri_cdnNotPt_V4 : - return Hexagon::POST_STwri_cNotPt; - - case Hexagon::STw_GP_cdnPt_nv_V4: - case Hexagon::STw_GP_cdnPt_V4: - case Hexagon::STw_GP_cPt_nv_V4: - return Hexagon::STw_GP_cPt_V4; - - case Hexagon::STw_GP_cdnNotPt_nv_V4: - case Hexagon::STw_GP_cdnNotPt_V4: - case Hexagon::STw_GP_cNotPt_nv_V4: - return Hexagon::STw_GP_cNotPt_V4; - - case Hexagon::STriw_GP_cdnPt_nv_V4: - case Hexagon::STriw_GP_cdnPt_V4: - case Hexagon::STriw_GP_cPt_nv_V4: - return Hexagon::STriw_GP_cPt_V4; - - case Hexagon::STriw_GP_cdnNotPt_nv_V4: - case Hexagon::STriw_GP_cdnNotPt_V4: - case Hexagon::STriw_GP_cNotPt_nv_V4: - return Hexagon::STriw_GP_cNotPt_V4; - - // Store new-value word - unconditional - - case Hexagon::STriw_nv_V4: - return Hexagon::STriw; - - case Hexagon::STriw_indexed_nv_V4: - return Hexagon::STriw_indexed; - - case Hexagon::STriw_indexed_shl_nv_V4: - return Hexagon::STriw_indexed_shl_V4; - - case Hexagon::STriw_shl_nv_V4: - return Hexagon::STriw_shl_V4; - - case Hexagon::STriw_GP_nv_V4: - return Hexagon::STriw_GP_V4; - - case Hexagon::STw_GP_nv_V4: - return Hexagon::STw_GP_V4; - - case Hexagon::POST_STwri_nv_V4: - return Hexagon::POST_STwri; - - // Store doubleword - - case Hexagon::STrid_cdnPt_V4 : - return Hexagon::STrid_cPt; - - case Hexagon::STrid_cdnNotPt_V4 : - return Hexagon::STrid_cNotPt; - - case Hexagon::STrid_indexed_cdnPt_V4 : - return Hexagon::STrid_indexed_cPt; - - case Hexagon::STrid_indexed_cdnNotPt_V4 : - return Hexagon::STrid_indexed_cNotPt; - - case Hexagon::STrid_indexed_shl_cdnPt_V4 : - return Hexagon::STrid_indexed_shl_cPt_V4; - - case Hexagon::STrid_indexed_shl_cdnNotPt_V4 : - return Hexagon::STrid_indexed_shl_cNotPt_V4; - - case Hexagon::POST_STdri_cdnPt_V4 : - return Hexagon::POST_STdri_cPt; - - case Hexagon::POST_STdri_cdnNotPt_V4 : - return 
Hexagon::POST_STdri_cNotPt; - - case Hexagon::STd_GP_cdnPt_V4 : - return Hexagon::STd_GP_cPt_V4; - - case Hexagon::STd_GP_cdnNotPt_V4 : - return Hexagon::STd_GP_cNotPt_V4; - - case Hexagon::STrid_GP_cdnPt_V4 : - return Hexagon::STrid_GP_cPt_V4; - - case Hexagon::STrid_GP_cdnNotPt_V4 : - return Hexagon::STrid_GP_cNotPt_V4; - } -} - -bool HexagonPacketizerList::DemoteToDotOld(MachineInstr* MI) { - const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII; - int NewOpcode = GetDotOldOp(MI->getOpcode()); - MI->setDesc(QII->get(NewOpcode)); - return true; -} - -// Returns true if an instruction is predicated on p0 and false if it's -// predicated on !p0. - -static bool GetPredicateSense(MachineInstr* MI, - const HexagonInstrInfo *QII) { - - switch (MI->getOpcode()) { - case Hexagon::TFR_cPt: - case Hexagon::TFR_cdnPt: - case Hexagon::TFRI_cPt: - case Hexagon::TFRI_cdnPt: - case Hexagon::STrib_cPt : - case Hexagon::STrib_cdnPt_V4 : - case Hexagon::STrib_indexed_cPt : - case Hexagon::STrib_indexed_cdnPt_V4 : - case Hexagon::STrib_indexed_shl_cPt_V4 : - case Hexagon::STrib_indexed_shl_cdnPt_V4 : - case Hexagon::POST_STbri_cPt : - case Hexagon::POST_STbri_cdnPt_V4 : - case Hexagon::STrih_cPt : - case Hexagon::STrih_cdnPt_V4 : - case Hexagon::STrih_indexed_cPt : - case Hexagon::STrih_indexed_cdnPt_V4 : - case Hexagon::STrih_indexed_shl_cPt_V4 : - case Hexagon::STrih_indexed_shl_cdnPt_V4 : - case Hexagon::POST_SThri_cPt : - case Hexagon::POST_SThri_cdnPt_V4 : - case Hexagon::STriw_cPt : - case Hexagon::STriw_cdnPt_V4 : - case Hexagon::STriw_indexed_cPt : - case Hexagon::STriw_indexed_cdnPt_V4 : - case Hexagon::STriw_indexed_shl_cPt_V4 : - case Hexagon::STriw_indexed_shl_cdnPt_V4 : - case Hexagon::POST_STwri_cPt : - case Hexagon::POST_STwri_cdnPt_V4 : - case Hexagon::STrib_imm_cPt_V4 : - case Hexagon::STrib_imm_cdnPt_V4 : - case Hexagon::STrid_cPt : - case Hexagon::STrid_cdnPt_V4 : - case Hexagon::STrid_indexed_cPt : - case Hexagon::STrid_indexed_cdnPt_V4 : - case Hexagon::STrid_indexed_shl_cPt_V4 : - case Hexagon::STrid_indexed_shl_cdnPt_V4 : - case Hexagon::POST_STdri_cPt : - case Hexagon::POST_STdri_cdnPt_V4 : - case Hexagon::STrih_imm_cPt_V4 : - case Hexagon::STrih_imm_cdnPt_V4 : - case Hexagon::STriw_imm_cPt_V4 : - case Hexagon::STriw_imm_cdnPt_V4 : - case Hexagon::JMP_cdnPt : - case Hexagon::LDrid_cPt : - case Hexagon::LDrid_cdnPt : - case Hexagon::LDrid_indexed_cPt : - case Hexagon::LDrid_indexed_cdnPt : - case Hexagon::POST_LDrid_cPt : - case Hexagon::POST_LDrid_cdnPt_V4 : - case Hexagon::LDriw_cPt : - case Hexagon::LDriw_cdnPt : - case Hexagon::LDriw_indexed_cPt : - case Hexagon::LDriw_indexed_cdnPt : - case Hexagon::POST_LDriw_cPt : - case Hexagon::POST_LDriw_cdnPt_V4 : - case Hexagon::LDrih_cPt : - case Hexagon::LDrih_cdnPt : - case Hexagon::LDrih_indexed_cPt : - case Hexagon::LDrih_indexed_cdnPt : - case Hexagon::POST_LDrih_cPt : - case Hexagon::POST_LDrih_cdnPt_V4 : - case Hexagon::LDrib_cPt : - case Hexagon::LDrib_cdnPt : - case Hexagon::LDrib_indexed_cPt : - case Hexagon::LDrib_indexed_cdnPt : - case Hexagon::POST_LDrib_cPt : - case Hexagon::POST_LDrib_cdnPt_V4 : - case Hexagon::LDriuh_cPt : - case Hexagon::LDriuh_cdnPt : - case Hexagon::LDriuh_indexed_cPt : - case Hexagon::LDriuh_indexed_cdnPt : - case Hexagon::POST_LDriuh_cPt : - case Hexagon::POST_LDriuh_cdnPt_V4 : - case Hexagon::LDriub_cPt : - case Hexagon::LDriub_cdnPt : - case Hexagon::LDriub_indexed_cPt : - case Hexagon::LDriub_indexed_cdnPt : - case Hexagon::POST_LDriub_cPt : - case Hexagon::POST_LDriub_cdnPt_V4 : - case 
Hexagon::LDrid_indexed_cPt_V4 : - case Hexagon::LDrid_indexed_cdnPt_V4 : - case Hexagon::LDrid_indexed_shl_cPt_V4 : - case Hexagon::LDrid_indexed_shl_cdnPt_V4 : - case Hexagon::LDrib_indexed_cPt_V4 : - case Hexagon::LDrib_indexed_cdnPt_V4 : - case Hexagon::LDrib_indexed_shl_cPt_V4 : - case Hexagon::LDrib_indexed_shl_cdnPt_V4 : - case Hexagon::LDriub_indexed_cPt_V4 : - case Hexagon::LDriub_indexed_cdnPt_V4 : - case Hexagon::LDriub_indexed_shl_cPt_V4 : - case Hexagon::LDriub_indexed_shl_cdnPt_V4 : - case Hexagon::LDrih_indexed_cPt_V4 : - case Hexagon::LDrih_indexed_cdnPt_V4 : - case Hexagon::LDrih_indexed_shl_cPt_V4 : - case Hexagon::LDrih_indexed_shl_cdnPt_V4 : - case Hexagon::LDriuh_indexed_cPt_V4 : - case Hexagon::LDriuh_indexed_cdnPt_V4 : - case Hexagon::LDriuh_indexed_shl_cPt_V4 : - case Hexagon::LDriuh_indexed_shl_cdnPt_V4 : - case Hexagon::LDriw_indexed_cPt_V4 : - case Hexagon::LDriw_indexed_cdnPt_V4 : - case Hexagon::LDriw_indexed_shl_cPt_V4 : - case Hexagon::LDriw_indexed_shl_cdnPt_V4 : - case Hexagon::ADD_ri_cPt : - case Hexagon::ADD_ri_cdnPt : - case Hexagon::ADD_rr_cPt : - case Hexagon::ADD_rr_cdnPt : - case Hexagon::XOR_rr_cPt : - case Hexagon::XOR_rr_cdnPt : - case Hexagon::AND_rr_cPt : - case Hexagon::AND_rr_cdnPt : - case Hexagon::OR_rr_cPt : - case Hexagon::OR_rr_cdnPt : - case Hexagon::SUB_rr_cPt : - case Hexagon::SUB_rr_cdnPt : - case Hexagon::COMBINE_rr_cPt : - case Hexagon::COMBINE_rr_cdnPt : - case Hexagon::ASLH_cPt_V4 : - case Hexagon::ASLH_cdnPt_V4 : - case Hexagon::ASRH_cPt_V4 : - case Hexagon::ASRH_cdnPt_V4 : - case Hexagon::SXTB_cPt_V4 : - case Hexagon::SXTB_cdnPt_V4 : - case Hexagon::SXTH_cPt_V4 : - case Hexagon::SXTH_cdnPt_V4 : - case Hexagon::ZXTB_cPt_V4 : - case Hexagon::ZXTB_cdnPt_V4 : - case Hexagon::ZXTH_cPt_V4 : - case Hexagon::ZXTH_cdnPt_V4 : - case Hexagon::LDrid_GP_cPt_V4 : - case Hexagon::LDrib_GP_cPt_V4 : - case Hexagon::LDriub_GP_cPt_V4 : - case Hexagon::LDrih_GP_cPt_V4 : - case Hexagon::LDriuh_GP_cPt_V4 : - case Hexagon::LDriw_GP_cPt_V4 : - case Hexagon::LDd_GP_cPt_V4 : - case Hexagon::LDb_GP_cPt_V4 : - case Hexagon::LDub_GP_cPt_V4 : - case Hexagon::LDh_GP_cPt_V4 : - case Hexagon::LDuh_GP_cPt_V4 : - case Hexagon::LDw_GP_cPt_V4 : - case Hexagon::STrid_GP_cPt_V4 : - case Hexagon::STrib_GP_cPt_V4 : - case Hexagon::STrih_GP_cPt_V4 : - case Hexagon::STriw_GP_cPt_V4 : - case Hexagon::STd_GP_cPt_V4 : - case Hexagon::STb_GP_cPt_V4 : - case Hexagon::STh_GP_cPt_V4 : - case Hexagon::STw_GP_cPt_V4 : - case Hexagon::LDrid_GP_cdnPt_V4 : - case Hexagon::LDrib_GP_cdnPt_V4 : - case Hexagon::LDriub_GP_cdnPt_V4 : - case Hexagon::LDrih_GP_cdnPt_V4 : - case Hexagon::LDriuh_GP_cdnPt_V4 : - case Hexagon::LDriw_GP_cdnPt_V4 : - case Hexagon::LDd_GP_cdnPt_V4 : - case Hexagon::LDb_GP_cdnPt_V4 : - case Hexagon::LDub_GP_cdnPt_V4 : - case Hexagon::LDh_GP_cdnPt_V4 : - case Hexagon::LDuh_GP_cdnPt_V4 : - case Hexagon::LDw_GP_cdnPt_V4 : - case Hexagon::STrid_GP_cdnPt_V4 : - case Hexagon::STrib_GP_cdnPt_V4 : - case Hexagon::STrih_GP_cdnPt_V4 : - case Hexagon::STriw_GP_cdnPt_V4 : - case Hexagon::STd_GP_cdnPt_V4 : - case Hexagon::STb_GP_cdnPt_V4 : - case Hexagon::STh_GP_cdnPt_V4 : - case Hexagon::STw_GP_cdnPt_V4 : - return true; - - case Hexagon::TFR_cNotPt: - case Hexagon::TFR_cdnNotPt: - case Hexagon::TFRI_cNotPt: - case Hexagon::TFRI_cdnNotPt: - case Hexagon::STrib_cNotPt : - case Hexagon::STrib_cdnNotPt_V4 : - case Hexagon::STrib_indexed_cNotPt : - case Hexagon::STrib_indexed_cdnNotPt_V4 : - case Hexagon::STrib_indexed_shl_cNotPt_V4 : - case Hexagon::STrib_indexed_shl_cdnNotPt_V4 : 
- case Hexagon::POST_STbri_cNotPt : - case Hexagon::POST_STbri_cdnNotPt_V4 : - case Hexagon::STrih_cNotPt : - case Hexagon::STrih_cdnNotPt_V4 : - case Hexagon::STrih_indexed_cNotPt : - case Hexagon::STrih_indexed_cdnNotPt_V4 : - case Hexagon::STrih_indexed_shl_cNotPt_V4 : - case Hexagon::STrih_indexed_shl_cdnNotPt_V4 : - case Hexagon::POST_SThri_cNotPt : - case Hexagon::POST_SThri_cdnNotPt_V4 : - case Hexagon::STriw_cNotPt : - case Hexagon::STriw_cdnNotPt_V4 : - case Hexagon::STriw_indexed_cNotPt : - case Hexagon::STriw_indexed_cdnNotPt_V4 : - case Hexagon::STriw_indexed_shl_cNotPt_V4 : - case Hexagon::STriw_indexed_shl_cdnNotPt_V4 : - case Hexagon::POST_STwri_cNotPt : - case Hexagon::POST_STwri_cdnNotPt_V4 : - case Hexagon::STrib_imm_cNotPt_V4 : - case Hexagon::STrib_imm_cdnNotPt_V4 : - case Hexagon::STrid_cNotPt : - case Hexagon::STrid_cdnNotPt_V4 : - case Hexagon::STrid_indexed_cdnNotPt_V4 : - case Hexagon::STrid_indexed_cNotPt : - case Hexagon::STrid_indexed_shl_cNotPt_V4 : - case Hexagon::STrid_indexed_shl_cdnNotPt_V4 : - case Hexagon::POST_STdri_cNotPt : - case Hexagon::POST_STdri_cdnNotPt_V4 : - case Hexagon::STrih_imm_cNotPt_V4 : - case Hexagon::STrih_imm_cdnNotPt_V4 : - case Hexagon::STriw_imm_cNotPt_V4 : - case Hexagon::STriw_imm_cdnNotPt_V4 : - case Hexagon::JMP_cdnNotPt : - case Hexagon::LDrid_cNotPt : - case Hexagon::LDrid_cdnNotPt : - case Hexagon::LDrid_indexed_cNotPt : - case Hexagon::LDrid_indexed_cdnNotPt : - case Hexagon::POST_LDrid_cNotPt : - case Hexagon::POST_LDrid_cdnNotPt_V4 : - case Hexagon::LDriw_cNotPt : - case Hexagon::LDriw_cdnNotPt : - case Hexagon::LDriw_indexed_cNotPt : - case Hexagon::LDriw_indexed_cdnNotPt : - case Hexagon::POST_LDriw_cNotPt : - case Hexagon::POST_LDriw_cdnNotPt_V4 : - case Hexagon::LDrih_cNotPt : - case Hexagon::LDrih_cdnNotPt : - case Hexagon::LDrih_indexed_cNotPt : - case Hexagon::LDrih_indexed_cdnNotPt : - case Hexagon::POST_LDrih_cNotPt : - case Hexagon::POST_LDrih_cdnNotPt_V4 : - case Hexagon::LDrib_cNotPt : - case Hexagon::LDrib_cdnNotPt : - case Hexagon::LDrib_indexed_cNotPt : - case Hexagon::LDrib_indexed_cdnNotPt : - case Hexagon::POST_LDrib_cNotPt : - case Hexagon::POST_LDrib_cdnNotPt_V4 : - case Hexagon::LDriuh_cNotPt : - case Hexagon::LDriuh_cdnNotPt : - case Hexagon::LDriuh_indexed_cNotPt : - case Hexagon::LDriuh_indexed_cdnNotPt : - case Hexagon::POST_LDriuh_cNotPt : - case Hexagon::POST_LDriuh_cdnNotPt_V4 : - case Hexagon::LDriub_cNotPt : - case Hexagon::LDriub_cdnNotPt : - case Hexagon::LDriub_indexed_cNotPt : - case Hexagon::LDriub_indexed_cdnNotPt : - case Hexagon::POST_LDriub_cNotPt : - case Hexagon::POST_LDriub_cdnNotPt_V4 : - case Hexagon::LDrid_indexed_cNotPt_V4 : - case Hexagon::LDrid_indexed_cdnNotPt_V4 : - case Hexagon::LDrid_indexed_shl_cNotPt_V4 : - case Hexagon::LDrid_indexed_shl_cdnNotPt_V4 : - case Hexagon::LDrib_indexed_cNotPt_V4 : - case Hexagon::LDrib_indexed_cdnNotPt_V4 : - case Hexagon::LDrib_indexed_shl_cNotPt_V4 : - case Hexagon::LDrib_indexed_shl_cdnNotPt_V4 : - case Hexagon::LDriub_indexed_cNotPt_V4 : - case Hexagon::LDriub_indexed_cdnNotPt_V4 : - case Hexagon::LDriub_indexed_shl_cNotPt_V4 : - case Hexagon::LDriub_indexed_shl_cdnNotPt_V4 : - case Hexagon::LDrih_indexed_cNotPt_V4 : - case Hexagon::LDrih_indexed_cdnNotPt_V4 : - case Hexagon::LDrih_indexed_shl_cNotPt_V4 : - case Hexagon::LDrih_indexed_shl_cdnNotPt_V4 : - case Hexagon::LDriuh_indexed_cNotPt_V4 : - case Hexagon::LDriuh_indexed_cdnNotPt_V4 : - case Hexagon::LDriuh_indexed_shl_cNotPt_V4 : - case Hexagon::LDriuh_indexed_shl_cdnNotPt_V4 : - 
case Hexagon::LDriw_indexed_cNotPt_V4 : - case Hexagon::LDriw_indexed_cdnNotPt_V4 : - case Hexagon::LDriw_indexed_shl_cNotPt_V4 : - case Hexagon::LDriw_indexed_shl_cdnNotPt_V4 : - case Hexagon::ADD_ri_cNotPt : - case Hexagon::ADD_ri_cdnNotPt : - case Hexagon::ADD_rr_cNotPt : - case Hexagon::ADD_rr_cdnNotPt : - case Hexagon::XOR_rr_cNotPt : - case Hexagon::XOR_rr_cdnNotPt : - case Hexagon::AND_rr_cNotPt : - case Hexagon::AND_rr_cdnNotPt : - case Hexagon::OR_rr_cNotPt : - case Hexagon::OR_rr_cdnNotPt : - case Hexagon::SUB_rr_cNotPt : - case Hexagon::SUB_rr_cdnNotPt : - case Hexagon::COMBINE_rr_cNotPt : - case Hexagon::COMBINE_rr_cdnNotPt : - case Hexagon::ASLH_cNotPt_V4 : - case Hexagon::ASLH_cdnNotPt_V4 : - case Hexagon::ASRH_cNotPt_V4 : - case Hexagon::ASRH_cdnNotPt_V4 : - case Hexagon::SXTB_cNotPt_V4 : - case Hexagon::SXTB_cdnNotPt_V4 : - case Hexagon::SXTH_cNotPt_V4 : - case Hexagon::SXTH_cdnNotPt_V4 : - case Hexagon::ZXTB_cNotPt_V4 : - case Hexagon::ZXTB_cdnNotPt_V4 : - case Hexagon::ZXTH_cNotPt_V4 : - case Hexagon::ZXTH_cdnNotPt_V4 : - - case Hexagon::LDrid_GP_cNotPt_V4 : - case Hexagon::LDrib_GP_cNotPt_V4 : - case Hexagon::LDriub_GP_cNotPt_V4 : - case Hexagon::LDrih_GP_cNotPt_V4 : - case Hexagon::LDriuh_GP_cNotPt_V4 : - case Hexagon::LDriw_GP_cNotPt_V4 : - case Hexagon::LDd_GP_cNotPt_V4 : - case Hexagon::LDb_GP_cNotPt_V4 : - case Hexagon::LDub_GP_cNotPt_V4 : - case Hexagon::LDh_GP_cNotPt_V4 : - case Hexagon::LDuh_GP_cNotPt_V4 : - case Hexagon::LDw_GP_cNotPt_V4 : - case Hexagon::STrid_GP_cNotPt_V4 : - case Hexagon::STrib_GP_cNotPt_V4 : - case Hexagon::STrih_GP_cNotPt_V4 : - case Hexagon::STriw_GP_cNotPt_V4 : - case Hexagon::STd_GP_cNotPt_V4 : - case Hexagon::STb_GP_cNotPt_V4 : - case Hexagon::STh_GP_cNotPt_V4 : - case Hexagon::STw_GP_cNotPt_V4 : - case Hexagon::LDrid_GP_cdnNotPt_V4 : - case Hexagon::LDrib_GP_cdnNotPt_V4 : - case Hexagon::LDriub_GP_cdnNotPt_V4 : - case Hexagon::LDrih_GP_cdnNotPt_V4 : - case Hexagon::LDriuh_GP_cdnNotPt_V4 : - case Hexagon::LDriw_GP_cdnNotPt_V4 : - case Hexagon::LDd_GP_cdnNotPt_V4 : - case Hexagon::LDb_GP_cdnNotPt_V4 : - case Hexagon::LDub_GP_cdnNotPt_V4 : - case Hexagon::LDh_GP_cdnNotPt_V4 : - case Hexagon::LDuh_GP_cdnNotPt_V4 : - case Hexagon::LDw_GP_cdnNotPt_V4 : - case Hexagon::STrid_GP_cdnNotPt_V4 : - case Hexagon::STrib_GP_cdnNotPt_V4 : - case Hexagon::STrih_GP_cdnNotPt_V4 : - case Hexagon::STriw_GP_cdnNotPt_V4 : - case Hexagon::STd_GP_cdnNotPt_V4 : - case Hexagon::STb_GP_cdnNotPt_V4 : - case Hexagon::STh_GP_cdnNotPt_V4 : - case Hexagon::STw_GP_cdnNotPt_V4 : - return false; - - default: - assert (false && "Unknown predicate sense of the instruction"); - } - // return *some value* to avoid compiler warning - return false; -} - -bool HexagonPacketizerList::isDotNewInst(MachineInstr* MI) { - if (isNewValueInst(MI)) - return true; - - switch (MI->getOpcode()) { - case Hexagon::TFR_cdnNotPt: - case Hexagon::TFR_cdnPt: - case Hexagon::TFRI_cdnNotPt: - case Hexagon::TFRI_cdnPt: - case Hexagon::LDrid_cdnPt : - case Hexagon::LDrid_cdnNotPt : - case Hexagon::LDrid_indexed_cdnPt : - case Hexagon::LDrid_indexed_cdnNotPt : - case Hexagon::POST_LDrid_cdnPt_V4 : - case Hexagon::POST_LDrid_cdnNotPt_V4 : - case Hexagon::LDriw_cdnPt : - case Hexagon::LDriw_cdnNotPt : - case Hexagon::LDriw_indexed_cdnPt : - case Hexagon::LDriw_indexed_cdnNotPt : - case Hexagon::POST_LDriw_cdnPt_V4 : - case Hexagon::POST_LDriw_cdnNotPt_V4 : - case Hexagon::LDrih_cdnPt : - case Hexagon::LDrih_cdnNotPt : - case Hexagon::LDrih_indexed_cdnPt : - case Hexagon::LDrih_indexed_cdnNotPt : - 
case Hexagon::POST_LDrih_cdnPt_V4 : - case Hexagon::POST_LDrih_cdnNotPt_V4 : - case Hexagon::LDrib_cdnPt : - case Hexagon::LDrib_cdnNotPt : - case Hexagon::LDrib_indexed_cdnPt : - case Hexagon::LDrib_indexed_cdnNotPt : - case Hexagon::POST_LDrib_cdnPt_V4 : - case Hexagon::POST_LDrib_cdnNotPt_V4 : - case Hexagon::LDriuh_cdnPt : - case Hexagon::LDriuh_cdnNotPt : - case Hexagon::LDriuh_indexed_cdnPt : - case Hexagon::LDriuh_indexed_cdnNotPt : - case Hexagon::POST_LDriuh_cdnPt_V4 : - case Hexagon::POST_LDriuh_cdnNotPt_V4 : - case Hexagon::LDriub_cdnPt : - case Hexagon::LDriub_cdnNotPt : - case Hexagon::LDriub_indexed_cdnPt : - case Hexagon::LDriub_indexed_cdnNotPt : - case Hexagon::POST_LDriub_cdnPt_V4 : - case Hexagon::POST_LDriub_cdnNotPt_V4 : - - case Hexagon::LDrid_indexed_cdnPt_V4 : - case Hexagon::LDrid_indexed_cdnNotPt_V4 : - case Hexagon::LDrid_indexed_shl_cdnPt_V4 : - case Hexagon::LDrid_indexed_shl_cdnNotPt_V4 : - case Hexagon::LDrib_indexed_cdnPt_V4 : - case Hexagon::LDrib_indexed_cdnNotPt_V4 : - case Hexagon::LDrib_indexed_shl_cdnPt_V4 : - case Hexagon::LDrib_indexed_shl_cdnNotPt_V4 : - case Hexagon::LDriub_indexed_cdnPt_V4 : - case Hexagon::LDriub_indexed_cdnNotPt_V4 : - case Hexagon::LDriub_indexed_shl_cdnPt_V4 : - case Hexagon::LDriub_indexed_shl_cdnNotPt_V4 : - case Hexagon::LDrih_indexed_cdnPt_V4 : - case Hexagon::LDrih_indexed_cdnNotPt_V4 : - case Hexagon::LDrih_indexed_shl_cdnPt_V4 : - case Hexagon::LDrih_indexed_shl_cdnNotPt_V4 : - case Hexagon::LDriuh_indexed_cdnPt_V4 : - case Hexagon::LDriuh_indexed_cdnNotPt_V4 : - case Hexagon::LDriuh_indexed_shl_cdnPt_V4 : - case Hexagon::LDriuh_indexed_shl_cdnNotPt_V4 : - case Hexagon::LDriw_indexed_cdnPt_V4 : - case Hexagon::LDriw_indexed_cdnNotPt_V4 : - case Hexagon::LDriw_indexed_shl_cdnPt_V4 : - case Hexagon::LDriw_indexed_shl_cdnNotPt_V4 : - -// Coditional add - case Hexagon::ADD_ri_cdnPt: - case Hexagon::ADD_ri_cdnNotPt: - case Hexagon::ADD_rr_cdnPt: - case Hexagon::ADD_rr_cdnNotPt: - - // Conditional logical operations - case Hexagon::XOR_rr_cdnPt : - case Hexagon::XOR_rr_cdnNotPt : - case Hexagon::AND_rr_cdnPt : - case Hexagon::AND_rr_cdnNotPt : - case Hexagon::OR_rr_cdnPt : - case Hexagon::OR_rr_cdnNotPt : - - // Conditonal subtract - case Hexagon::SUB_rr_cdnPt : - case Hexagon::SUB_rr_cdnNotPt : - - // Conditional combine - case Hexagon::COMBINE_rr_cdnPt : - case Hexagon::COMBINE_rr_cdnNotPt : - - // Conditional shift operations - case Hexagon::ASLH_cdnPt_V4: - case Hexagon::ASLH_cdnNotPt_V4: - case Hexagon::ASRH_cdnPt_V4: - case Hexagon::ASRH_cdnNotPt_V4: - case Hexagon::SXTB_cdnPt_V4: - case Hexagon::SXTB_cdnNotPt_V4: - case Hexagon::SXTH_cdnPt_V4: - case Hexagon::SXTH_cdnNotPt_V4: - case Hexagon::ZXTB_cdnPt_V4: - case Hexagon::ZXTB_cdnNotPt_V4: - case Hexagon::ZXTH_cdnPt_V4: - case Hexagon::ZXTH_cdnNotPt_V4: - - // Conditional stores - case Hexagon::STrib_imm_cdnPt_V4 : - case Hexagon::STrib_imm_cdnNotPt_V4 : - case Hexagon::STrib_cdnPt_V4 : - case Hexagon::STrib_cdnNotPt_V4 : - case Hexagon::STrib_indexed_cdnPt_V4 : - case Hexagon::STrib_indexed_cdnNotPt_V4 : - case Hexagon::POST_STbri_cdnPt_V4 : - case Hexagon::POST_STbri_cdnNotPt_V4 : - case Hexagon::STrib_indexed_shl_cdnPt_V4 : - case Hexagon::STrib_indexed_shl_cdnNotPt_V4 : - - // Store doubleword conditionally - case Hexagon::STrid_indexed_cdnPt_V4 : - case Hexagon::STrid_indexed_cdnNotPt_V4 : - case Hexagon::STrid_indexed_shl_cdnPt_V4 : - case Hexagon::STrid_indexed_shl_cdnNotPt_V4 : - case Hexagon::POST_STdri_cdnPt_V4 : - case Hexagon::POST_STdri_cdnNotPt_V4 : - - 
// Store halfword conditionally - case Hexagon::STrih_cdnPt_V4 : - case Hexagon::STrih_cdnNotPt_V4 : - case Hexagon::STrih_indexed_cdnPt_V4 : - case Hexagon::STrih_indexed_cdnNotPt_V4 : - case Hexagon::STrih_imm_cdnPt_V4 : - case Hexagon::STrih_imm_cdnNotPt_V4 : - case Hexagon::STrih_indexed_shl_cdnPt_V4 : - case Hexagon::STrih_indexed_shl_cdnNotPt_V4 : - case Hexagon::POST_SThri_cdnPt_V4 : - case Hexagon::POST_SThri_cdnNotPt_V4 : - - // Store word conditionally - case Hexagon::STriw_cdnPt_V4 : - case Hexagon::STriw_cdnNotPt_V4 : - case Hexagon::STriw_indexed_cdnPt_V4 : - case Hexagon::STriw_indexed_cdnNotPt_V4 : - case Hexagon::STriw_imm_cdnPt_V4 : - case Hexagon::STriw_imm_cdnNotPt_V4 : - case Hexagon::STriw_indexed_shl_cdnPt_V4 : - case Hexagon::STriw_indexed_shl_cdnNotPt_V4 : - case Hexagon::POST_STwri_cdnPt_V4 : - case Hexagon::POST_STwri_cdnNotPt_V4 : - - case Hexagon::LDd_GP_cdnPt_V4: - case Hexagon::LDd_GP_cdnNotPt_V4: - case Hexagon::LDb_GP_cdnPt_V4: - case Hexagon::LDb_GP_cdnNotPt_V4: - case Hexagon::LDub_GP_cdnPt_V4: - case Hexagon::LDub_GP_cdnNotPt_V4: - case Hexagon::LDh_GP_cdnPt_V4: - case Hexagon::LDh_GP_cdnNotPt_V4: - case Hexagon::LDuh_GP_cdnPt_V4: - case Hexagon::LDuh_GP_cdnNotPt_V4: - case Hexagon::LDw_GP_cdnPt_V4: - case Hexagon::LDw_GP_cdnNotPt_V4: - case Hexagon::LDrid_GP_cdnPt_V4: - case Hexagon::LDrid_GP_cdnNotPt_V4: - case Hexagon::LDrib_GP_cdnPt_V4: - case Hexagon::LDrib_GP_cdnNotPt_V4: - case Hexagon::LDriub_GP_cdnPt_V4: - case Hexagon::LDriub_GP_cdnNotPt_V4: - case Hexagon::LDrih_GP_cdnPt_V4: - case Hexagon::LDrih_GP_cdnNotPt_V4: - case Hexagon::LDriuh_GP_cdnPt_V4: - case Hexagon::LDriuh_GP_cdnNotPt_V4: - case Hexagon::LDriw_GP_cdnPt_V4: - case Hexagon::LDriw_GP_cdnNotPt_V4: - - case Hexagon::STrid_GP_cdnPt_V4: - case Hexagon::STrid_GP_cdnNotPt_V4: - case Hexagon::STrib_GP_cdnPt_V4: - case Hexagon::STrib_GP_cdnNotPt_V4: - case Hexagon::STrih_GP_cdnPt_V4: - case Hexagon::STrih_GP_cdnNotPt_V4: - case Hexagon::STriw_GP_cdnPt_V4: - case Hexagon::STriw_GP_cdnNotPt_V4: - case Hexagon::STd_GP_cdnPt_V4: - case Hexagon::STd_GP_cdnNotPt_V4: - case Hexagon::STb_GP_cdnPt_V4: - case Hexagon::STb_GP_cdnNotPt_V4: - case Hexagon::STh_GP_cdnPt_V4: - case Hexagon::STh_GP_cdnNotPt_V4: - case Hexagon::STw_GP_cdnPt_V4: - case Hexagon::STw_GP_cdnNotPt_V4: - - return true; - } - return false; -} - -static MachineOperand& GetPostIncrementOperand(MachineInstr *MI, - const HexagonInstrInfo *QII) { - assert(QII->isPostIncrement(MI) && "Not a post increment operation."); -#ifndef NDEBUG - // Post Increment means duplicates. Use dense map to find duplicates in the - // list. Caution: Densemap initializes with the minimum of 64 buckets, - // whereas there are at most 5 operands in the post increment. - DenseMap<unsigned, unsigned> DefRegsSet; - for(unsigned opNum = 0; opNum < MI->getNumOperands(); opNum++) - if (MI->getOperand(opNum).isReg() && - MI->getOperand(opNum).isDef()) { - DefRegsSet[MI->getOperand(opNum).getReg()] = 1; - } - - for(unsigned opNum = 0; opNum < MI->getNumOperands(); opNum++) - if (MI->getOperand(opNum).isReg() && - MI->getOperand(opNum).isUse()) { - if (DefRegsSet[MI->getOperand(opNum).getReg()]) { - return MI->getOperand(opNum); - } - } -#else - if (MI->getDesc().mayLoad()) { - // The 2nd operand is always the post increment operand in load. - assert(MI->getOperand(1).isReg() && - "Post increment operand has be to a register."); - return (MI->getOperand(1)); - } - if (MI->getDesc().mayStore()) { - // The 1st operand is always the post increment operand in store. 
- assert(MI->getOperand(0).isReg() && - "Post increment operand has be to a register."); - return (MI->getOperand(0)); - } -#endif - // we should never come here. - llvm_unreachable("mayLoad or mayStore not set for Post Increment operation"); -} - -// get the value being stored -static MachineOperand& GetStoreValueOperand(MachineInstr *MI) { - // value being stored is always the last operand. - return (MI->getOperand(MI->getNumOperands()-1)); -} - -// can be new value store? -// Following restrictions are to be respected in convert a store into -// a new value store. -// 1. If an instruction uses auto-increment, its address register cannot -// be a new-value register. Arch Spec 5.4.2.1 -// 2. If an instruction uses absolute-set addressing mode, -// its address register cannot be a new-value register. -// Arch Spec 5.4.2.1.TODO: This is not enabled as -// as absolute-set address mode patters are not implemented. -// 3. If an instruction produces a 64-bit result, its registers cannot be used -// as new-value registers. Arch Spec 5.4.2.2. -// 4. If the instruction that sets a new-value register is conditional, then -// the instruction that uses the new-value register must also be conditional, -// and both must always have their predicates evaluate identically. -// Arch Spec 5.4.2.3. -// 5. There is an implied restriction of a packet can not have another store, -// if there is a new value store in the packet. Corollary, if there is -// already a store in a packet, there can not be a new value store. -// Arch Spec: 3.4.4.2 -bool HexagonPacketizerList::CanPromoteToNewValueStore( MachineInstr *MI, - MachineInstr *PacketMI, unsigned DepReg, - std::map <MachineInstr*, SUnit*> MIToSUnit) -{ - // Make sure we are looking at the store - if (!IsNewifyStore(MI)) - return false; - - // Make sure there is dependency and can be new value'ed - if (GetStoreValueOperand(MI).isReg() && - GetStoreValueOperand(MI).getReg() != DepReg) - return false; - - const HexagonRegisterInfo* QRI = (const HexagonRegisterInfo *) TM.getRegisterInfo(); - const MCInstrDesc& MCID = PacketMI->getDesc(); - // first operand is always the result - - const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII; - const TargetRegisterClass* PacketRC = QII->getRegClass(MCID, 0, QRI); - - // if there is already an store in the packet, no can do new value store - // Arch Spec 3.4.4.2. - for (std::vector<MachineInstr*>::iterator VI = CurrentPacketMIs.begin(), - VE = CurrentPacketMIs.end(); - (VI != VE); ++VI) { - SUnit* PacketSU = MIToSUnit[*VI]; - if (PacketSU->getInstr()->getDesc().mayStore() || - // if we have mayStore = 1 set on ALLOCFRAME and DEALLOCFRAME, - // then we don't need this - PacketSU->getInstr()->getOpcode() == Hexagon::ALLOCFRAME || - PacketSU->getInstr()->getOpcode() == Hexagon::DEALLOCFRAME) - return false; - } - - if (PacketRC == Hexagon::DoubleRegsRegisterClass) { - // new value store constraint: double regs can not feed into new value store - // arch spec section: 5.4.2.2 - return false; - } - - // Make sure it's NOT the post increment register that we are going to - // new value. 
- if (QII->isPostIncrement(MI) && - MI->getDesc().mayStore() && - GetPostIncrementOperand(MI, QII).getReg() == DepReg) { - return false; - } - - if (QII->isPostIncrement(PacketMI) && - PacketMI->getDesc().mayLoad() && - GetPostIncrementOperand(PacketMI, QII).getReg() == DepReg) { - // if source is post_inc, or absolute-set addressing, - // it can not feed into new value store - // r3 = memw(r2++#4) - // memw(r30 + #-1404) = r2.new -> can not be new value store - // arch spec section: 5.4.2.1 - return false; - } - - // If the source that feeds the store is predicated, new value store must also be - // also predicated. - if (QII->isPredicated(PacketMI)) { - if (!QII->isPredicated(MI)) - return false; - - // Check to make sure that they both will have their predicates - // evaluate identically - unsigned predRegNumSrc; - unsigned predRegNumDst; - const TargetRegisterClass* predRegClass; - - // Get predicate register used in the source instruction - for(unsigned opNum = 0; opNum < PacketMI->getNumOperands(); opNum++) { - if ( PacketMI->getOperand(opNum).isReg()) - predRegNumSrc = PacketMI->getOperand(opNum).getReg(); - predRegClass = QRI->getMinimalPhysRegClass(predRegNumSrc); - if (predRegClass == Hexagon::PredRegsRegisterClass) { - break; - } - } - assert ((predRegClass == Hexagon::PredRegsRegisterClass ) && - ("predicate register not found in a predicated PacketMI instruction")); - - // Get predicate register used in new-value store instruction - for(unsigned opNum = 0; opNum < MI->getNumOperands(); opNum++) { - if ( MI->getOperand(opNum).isReg()) - predRegNumDst = MI->getOperand(opNum).getReg(); - predRegClass = QRI->getMinimalPhysRegClass(predRegNumDst); - if (predRegClass == Hexagon::PredRegsRegisterClass) { - break; - } - } - assert ((predRegClass == Hexagon::PredRegsRegisterClass ) && - ("predicate register not found in a predicated MI instruction")); - - // New-value register producer and user (store) need to satisfy these - // constraints: - // 1) Both instructions should be predicated on the same register. - // 2) If producer of the new-value register is .new predicated then store - // should also be .new predicated and if producer is not .new predicated - // then store should not be .new predicated. - // 3) Both new-value register producer and user should have same predicate - // sense, i.e, either both should be negated or both should be none negated. - - if (( predRegNumDst != predRegNumSrc) || - isDotNewInst(PacketMI) != isDotNewInst(MI) || - GetPredicateSense(MI, QII) != GetPredicateSense(PacketMI, QII)) { - return false; - } - } - - // Make sure that other than the new-value register no other store instruction - // register has been modified in the same packet. Predicate registers can be - // modified by they should not be modified between the producer and the store - // instruction as it will make them both conditional on different values. - // We already know this to be true for all the instructions before and - // including PacketMI. Howerver, we need to perform the check for the - // remaining instructions in the packet. - - std::vector<MachineInstr*>::iterator VI; - std::vector<MachineInstr*>::iterator VE; - unsigned StartCheck = 0; - - for (VI=CurrentPacketMIs.begin(), VE = CurrentPacketMIs.end(); - (VI != VE); ++VI) { - SUnit* TempSU = MIToSUnit[*VI]; - MachineInstr* TempMI = TempSU->getInstr(); - - // Following condition is true for all the instructions until PacketMI is - // reached (StartCheck is set to 0 before the for loop). 
- // StartCheck flag is 1 for all the instructions after PacketMI. - if (TempMI != PacketMI && !StartCheck) // start processing only after - continue; // encountering PacketMI - - StartCheck = 1; - if (TempMI == PacketMI) // We don't want to check PacketMI for dependence - continue; - - for(unsigned opNum = 0; opNum < MI->getNumOperands(); opNum++) { - if (MI->getOperand(opNum).isReg() && - TempSU->getInstr()->modifiesRegister(MI->getOperand(opNum).getReg(), QRI)) - return false; - } - } - - // Make sure that for non POST_INC stores: - // 1. The only use of reg is DepReg and no other registers. - // This handles V4 base+index registers. - // The following store can not be dot new. - // Eg. r0 = add(r0, #3)a - // memw(r1+r0<<#2) = r0 - if (!QII->isPostIncrement(MI) && - GetStoreValueOperand(MI).isReg() && - GetStoreValueOperand(MI).getReg() == DepReg) { - for(unsigned opNum = 0; opNum < MI->getNumOperands()-1; opNum++) { - if (MI->getOperand(opNum).isReg() && - MI->getOperand(opNum).getReg() == DepReg) { - return false; - } - } - // 2. If data definition is because of implicit definition of the register, - // do not newify the store. Eg. - // %R9<def> = ZXTH %R12, %D6<imp-use>, %R12<imp-def> - // STrih_indexed %R8, 2, %R12<kill>; mem:ST2[%scevgep343] - for(unsigned opNum = 0; opNum < PacketMI->getNumOperands(); opNum++) { - if (PacketMI->getOperand(opNum).isReg() && - PacketMI->getOperand(opNum).getReg() == DepReg && - PacketMI->getOperand(opNum).isDef() && - PacketMI->getOperand(opNum).isImplicit()) { - return false; - } - } - } - - // Can be dot new store. - return true; -} - -// can this MI to promoted to either -// new value store or new value jump -bool HexagonPacketizerList::CanPromoteToNewValue( MachineInstr *MI, - SUnit *PacketSU, unsigned DepReg, - std::map <MachineInstr*, SUnit*> MIToSUnit, - MachineBasicBlock::iterator &MII) -{ - - const HexagonRegisterInfo* QRI = (const HexagonRegisterInfo *) TM.getRegisterInfo(); - if (!QRI->Subtarget.hasV4TOps() || - !IsNewifyStore(MI)) - return false; - - MachineInstr *PacketMI = PacketSU->getInstr(); - - // Check to see the store can be new value'ed. - if (CanPromoteToNewValueStore(MI, PacketMI, DepReg, MIToSUnit)) - return true; - - // Check to see the compare/jump can be new value'ed. - // This is done as a pass on its own. Don't need to check it here. - return false; -} - -// Check to see if an instruction can be dot new -// There are three kinds. -// 1. dot new on predicate - V2/V3/V4 -// 2. dot new on stores NV/ST - V4 -// 3. dot new on jump NV/J - V4 -- This is generated in a pass. -bool HexagonPacketizerList::CanPromoteToDotNew( MachineInstr *MI, - SUnit *PacketSU, unsigned DepReg, - std::map <MachineInstr*, SUnit*> MIToSUnit, - MachineBasicBlock::iterator &MII, - const TargetRegisterClass* RC ) -{ - // already a dot new instruction - if (isDotNewInst(MI) && !IsNewifyStore(MI)) - return false; - - if (!isNewifiable(MI)) - return false; - - // predicate .new - if (RC == Hexagon::PredRegsRegisterClass && isCondInst(MI)) - return true; - else if (RC != Hexagon::PredRegsRegisterClass && - !IsNewifyStore(MI)) // MI is not a new-value store - return false; - else { - // Create a dot new machine instruction to see if resources can be - // allocated. If not, bail out now. 
- const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII; - int NewOpcode = GetDotNewOp(MI->getOpcode()); - const MCInstrDesc &desc = QII->get(NewOpcode); - DebugLoc dl; - MachineInstr *NewMI = MI->getParent()->getParent()->CreateMachineInstr(desc, dl); - bool ResourcesAvailable = ResourceTracker->canReserveResources(NewMI); - MI->getParent()->getParent()->DeleteMachineInstr(NewMI); - - if (!ResourcesAvailable) - return false; - - // new value store only - // new new value jump generated as a passes - if (!CanPromoteToNewValue(MI, PacketSU, DepReg, MIToSUnit, MII)) { - return false; - } - } - return true; -} - -// Go through the packet instructions and search for anti dependency -// between them and DepReg from MI -// Consider this case: -// Trying to add -// a) %R1<def> = TFRI_cdNotPt %P3, 2 -// to this packet: -// { -// b) %P0<def> = OR_pp %P3<kill>, %P0<kill> -// c) %P3<def> = TFR_PdRs %R23 -// d) %R1<def> = TFRI_cdnPt %P3, 4 -// } -// The P3 from a) and d) will be complements after -// a)'s P3 is converted to .new form -// Anti Dep between c) and b) is irrelevant for this case -bool HexagonPacketizerList::RestrictingDepExistInPacket (MachineInstr* MI, - unsigned DepReg, - std::map <MachineInstr*, SUnit*> MIToSUnit) { - - const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII; - SUnit* PacketSUDep = MIToSUnit[MI]; - - for (std::vector<MachineInstr*>::iterator VIN = CurrentPacketMIs.begin(), - VEN = CurrentPacketMIs.end(); (VIN != VEN); ++VIN) { - - // We only care for dependencies to predicated instructions - if(!QII->isPredicated(*VIN)) continue; - - // Scheduling Unit for current insn in the packet - SUnit* PacketSU = MIToSUnit[*VIN]; - - // Look at dependencies between current members of the packet - // and predicate defining instruction MI. - // Make sure that dependency is on the exact register - // we care about. - if (PacketSU->isSucc(PacketSUDep)) { - for (unsigned i = 0; i < PacketSU->Succs.size(); ++i) { - if ((PacketSU->Succs[i].getSUnit() == PacketSUDep) && - (PacketSU->Succs[i].getKind() == SDep::Anti) && - (PacketSU->Succs[i].getReg() == DepReg)) { - return true; - } - } - } - } - - return false; -} - - -// Given two predicated instructions, this function detects whether -// the predicates are complements -bool HexagonPacketizerList::ArePredicatesComplements (MachineInstr* MI1, - MachineInstr* MI2, std::map <MachineInstr*, SUnit*> MIToSUnit) { - - const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII; - // Currently can only reason about conditional transfers - if (!QII->isConditionalTransfer(MI1) || !QII->isConditionalTransfer(MI2)) { - return false; - } - - // Scheduling unit for candidate - SUnit* SU = MIToSUnit[MI1]; - - // One corner case deals with the following scenario: - // Trying to add - // a) %R24<def> = TFR_cPt %P0, %R25 - // to this packet: - // - // { - // b) %R25<def> = TFR_cNotPt %P0, %R24 - // c) %P0<def> = CMPEQri %R26, 1 - // } - // - // On general check a) and b) are complements, but - // presence of c) will convert a) to .new form, and - // then it is not a complement - // We attempt to detect it by analyzing existing - // dependencies in the packet - - // Analyze relationships between all existing members of the packet. 
- // Look for Anti dependecy on the same predicate reg - // as used in the candidate - for (std::vector<MachineInstr*>::iterator VIN = CurrentPacketMIs.begin(), - VEN = CurrentPacketMIs.end(); (VIN != VEN); ++VIN) { - - // Scheduling Unit for current insn in the packet - SUnit* PacketSU = MIToSUnit[*VIN]; - - // If this instruction in the packet is succeeded by the candidate... - if (PacketSU->isSucc(SU)) { - for (unsigned i = 0; i < PacketSU->Succs.size(); ++i) { - // The corner case exist when there is true data - // dependency between candidate and one of current - // packet members, this dep is on predicate reg, and - // there already exist anti dep on the same pred in - // the packet. - if (PacketSU->Succs[i].getSUnit() == SU && - Hexagon::PredRegsRegisterClass->contains( - PacketSU->Succs[i].getReg()) && - PacketSU->Succs[i].getKind() == SDep::Data && - // Here I know that *VIN is predicate setting instruction - // with true data dep to candidate on the register - // we care about - c) in the above example. - // Now I need to see if there is an anti dependency - // from c) to any other instruction in the - // same packet on the pred reg of interest - RestrictingDepExistInPacket(*VIN,PacketSU->Succs[i].getReg(), - MIToSUnit)) { - return false; - } - } - } - } - - // If the above case does not apply, check regular - // complement condition. - // Check that the predicate register is the same and - // that the predicate sense is different - // We also need to differentiate .old vs. .new: - // !p0 is not complimentary to p0.new - return ((MI1->getOperand(1).getReg() == MI2->getOperand(1).getReg()) && - (GetPredicateSense(MI1, QII) != GetPredicateSense(MI2, QII)) && - (isDotNewInst(MI1) == isDotNewInst(MI2))); -} - -// initPacketizerState - Initialize packetizer flags -void HexagonPacketizerList::initPacketizerState() { - - Dependence = false; - PromotedToDotNew = false; - GlueToNewValueJump = false; - GlueAllocframeStore = false; - FoundSequentialDependence = false; - - return; -} - -// ignorePseudoInstruction - Ignore bundling of pseudo instructions. -bool HexagonPacketizerList::ignorePseudoInstruction(MachineInstr *MI, - MachineBasicBlock *MBB) { - if (MI->isDebugValue()) - return true; - - // We must print out inline assembly - if (MI->isInlineAsm()) - return false; - - // We check if MI has any functional units mapped to it. - // If it doesn't, we ignore the instruction. - const MCInstrDesc& TID = MI->getDesc(); - unsigned SchedClass = TID.getSchedClass(); - const InstrStage* IS = ResourceTracker->getInstrItins()->beginStage(SchedClass); - unsigned FuncUnits = IS->getUnits(); - return !FuncUnits; -} - -// isSoloInstruction: - Returns true for instructions that must be -// scheduled in their own packet. -bool HexagonPacketizerList::isSoloInstruction(MachineInstr *MI) { - - if (MI->isInlineAsm()) - return true; - - if (MI->isEHLabel()) - return true; - - // From Hexagon V4 Programmer's Reference Manual 3.4.4 Grouping constraints: - // trap, pause, barrier, icinva, isync, and syncht are solo instructions. - // They must not be grouped with other instructions in a packet. - if (IsSchedBarrier(MI)) - return true; - - return false; -} - -// isLegalToPacketizeTogether: -// SUI is the current instruction that is out side of the current packet. -// SUJ is the current instruction inside the current packet against which that -// SUI will be packetized. 
-bool HexagonPacketizerList::isLegalToPacketizeTogether(SUnit *SUI, SUnit *SUJ) { - MachineInstr *I = SUI->getInstr(); - MachineInstr *J = SUJ->getInstr(); - assert(I && J && "Unable to packetize null instruction!"); - - const MCInstrDesc &MCIDI = I->getDesc(); - const MCInstrDesc &MCIDJ = J->getDesc(); - - MachineBasicBlock::iterator II = I; - - const unsigned FrameSize = MF.getFrameInfo()->getStackSize(); - const HexagonRegisterInfo* QRI = (const HexagonRegisterInfo *) TM.getRegisterInfo(); - const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII; - - // Inline asm cannot go in the packet. - if (I->getOpcode() == Hexagon::INLINEASM) - llvm_unreachable("Should not meet inline asm here!"); - - if (isSoloInstruction(I)) - llvm_unreachable("Should not meet solo instr here!"); - - // A save callee-save register function call can only be in a packet - // with instructions that don't write to the callee-save registers. - if ((QII->isSaveCalleeSavedRegsCall(I) && - DoesModifyCalleeSavedReg(J, QRI)) || - (QII->isSaveCalleeSavedRegsCall(J) && - DoesModifyCalleeSavedReg(I, QRI))) { - Dependence = true; - return false; - } - - // Two control flow instructions cannot go in the same packet. - if (IsControlFlow(I) && IsControlFlow(J)) { - Dependence = true; - return false; - } - - // A LoopN instruction cannot appear in the same packet as a jump or call. - if (IsLoopN(I) && ( IsDirectJump(J) - || MCIDJ.isCall() - || QII->isDeallocRet(J))) { - Dependence = true; - return false; - } - if (IsLoopN(J) && ( IsDirectJump(I) - || MCIDI.isCall() - || QII->isDeallocRet(I))) { - Dependence = true; - return false; - } - - // dealloc_return cannot appear in the same packet as a conditional or - // unconditional jump. - if (QII->isDeallocRet(I) && ( MCIDJ.isBranch() - || MCIDJ.isCall() - || MCIDJ.isBarrier())) { - Dependence = true; - return false; - } - - - // V4 allows dual store. But does not allow second store, if the - // first store is not in SLOT0. New value store, new value jump, - // dealloc_return and memop always take SLOT0. - // Arch spec 3.4.4.2 - if (QRI->Subtarget.hasV4TOps()) { - - if (MCIDI.mayStore() && MCIDJ.mayStore() && isNewValueInst(J)) { - Dependence = true; - return false; - } - - if ( (QII->isMemOp(J) && MCIDI.mayStore()) - || (MCIDJ.mayStore() && QII->isMemOp(I)) - || (QII->isMemOp(J) && QII->isMemOp(I))) { - Dependence = true; - return false; - } - - //if dealloc_return - if (MCIDJ.mayStore() && QII->isDeallocRet(I)){ - Dependence = true; - return false; - } - - // If an instruction feeds new value jump, glue it. - MachineBasicBlock::iterator NextMII = I; - ++NextMII; - MachineInstr *NextMI = NextMII; - - if (QII->isNewValueJump(NextMI)) { - - bool secondRegMatch = false; - bool maintainNewValueJump = false; - - if (NextMI->getOperand(1).isReg() && - I->getOperand(0).getReg() == NextMI->getOperand(1).getReg()) { - secondRegMatch = true; - maintainNewValueJump = true; - } - - if (!secondRegMatch && - I->getOperand(0).getReg() == NextMI->getOperand(0).getReg()) { - maintainNewValueJump = true; - } - - for (std::vector<MachineInstr*>::iterator - VI = CurrentPacketMIs.begin(), - VE = CurrentPacketMIs.end(); - (VI != VE && maintainNewValueJump); ++VI) { - SUnit* PacketSU = MIToSUnit[*VI]; - - // NVJ can not be part of the dual jump - Arch Spec: section 7.8 - if (PacketSU->getInstr()->getDesc().isCall()) { - Dependence = true; - break; - } - // Validate - // 1. Packet does not have a store in it. - // 2. 
If the first operand of the nvj is newified, and the second - // operand is also a reg, it (second reg) is not defined in - // the same packet. - // 3. If the second operand of the nvj is newified, (which means - // first operand is also a reg), first reg is not defined in - // the same packet. - if (PacketSU->getInstr()->getDesc().mayStore() || - PacketSU->getInstr()->getOpcode() == Hexagon::ALLOCFRAME || - // Check #2. - (!secondRegMatch && NextMI->getOperand(1).isReg() && - PacketSU->getInstr()->modifiesRegister( - NextMI->getOperand(1).getReg(), QRI)) || - // Check #3. - (secondRegMatch && - PacketSU->getInstr()->modifiesRegister( - NextMI->getOperand(0).getReg(), QRI))) { - Dependence = true; - break; - } - } - if (!Dependence) - GlueToNewValueJump = true; - else - return false; - } - } - - if (SUJ->isSucc(SUI)) { - for (unsigned i = 0; - (i < SUJ->Succs.size()) && !FoundSequentialDependence; - ++i) { - - if (SUJ->Succs[i].getSUnit() != SUI) { - continue; - } - - SDep::Kind DepType = SUJ->Succs[i].getKind(); - - // For direct calls: - // Ignore register dependences for call instructions for - // packetization purposes except for those due to r31 and - // predicate registers. - // - // For indirect calls: - // Same as direct calls + check for true dependences to the register - // used in the indirect call. - // - // We completely ignore Order dependences for call instructions - // - // For returns: - // Ignore register dependences for return instructions like jumpr, - // dealloc return unless we have dependencies on the explicit uses - // of the registers used by jumpr (like r31) or dealloc return - // (like r29 or r30). - // - // TODO: Currently, jumpr is handling only return of r31. So, the - // following logic (specificaly IsCallDependent) is working fine. - // We need to enable jumpr for register other than r31 and then, - // we need to rework the last part, where it handles indirect call - // of that (IsCallDependent) function. Bug 6216 is opened for this. - // - unsigned DepReg; - const TargetRegisterClass* RC; - if (DepType == SDep::Data) { - DepReg = SUJ->Succs[i].getReg(); - RC = QRI->getMinimalPhysRegClass(DepReg); - } - if ((MCIDI.isCall() || MCIDI.isReturn()) && - (!IsRegDependence(DepType) || - !IsCallDependent(I, DepType, SUJ->Succs[i].getReg()))) { - /* do nothing */ - } - - // For instructions that can be promoted to dot-new, try to promote. - else if ((DepType == SDep::Data) && - CanPromoteToDotNew(I, SUJ, DepReg, MIToSUnit, II, RC) && - PromoteToDotNew(I, DepType, II, RC)) { - PromotedToDotNew = true; - /* do nothing */ - } - - else if ((DepType == SDep::Data) && - (QII->isNewValueJump(I))) { - /* do nothing */ - } - - // For predicated instructions, if the predicates are complements - // then there can be no dependence. - else if (QII->isPredicated(I) && - QII->isPredicated(J) && - ArePredicatesComplements(I, J, MIToSUnit)) { - /* do nothing */ - - } - else if (IsDirectJump(I) && - !MCIDJ.isBranch() && - !MCIDJ.isCall() && - (DepType == SDep::Order)) { - // Ignore Order dependences between unconditional direct branches - // and non-control-flow instructions - /* do nothing */ - } - else if (MCIDI.isConditionalBranch() && (DepType != SDep::Data) && - (DepType != SDep::Output)) { - // Ignore all dependences for jumps except for true and output - // dependences - /* do nothing */ - } - - // Ignore output dependences due to superregs. 
We can - // write to two different subregisters of R1:0 for instance - // in the same cycle - // - - // - // Let the - // If neither I nor J defines DepReg, then this is a - // superfluous output dependence. The dependence must be of the - // form: - // R0 = ... - // R1 = ... - // and there is an output dependence between the two instructions - // with - // DepReg = D0 - // We want to ignore these dependences. - // Ideally, the dependence constructor should annotate such - // dependences. We can then avoid this relatively expensive check. - // - else if (DepType == SDep::Output) { - // DepReg is the register that's responsible for the dependence. - unsigned DepReg = SUJ->Succs[i].getReg(); - - // Check if I and J really defines DepReg. - if (I->definesRegister(DepReg) || - J->definesRegister(DepReg)) { - FoundSequentialDependence = true; - break; - } - } - - // We ignore Order dependences for - // 1. Two loads unless they are volatile. - // 2. Two stores in V4 unless they are volatile. - else if ((DepType == SDep::Order) && - !I->hasVolatileMemoryRef() && - !J->hasVolatileMemoryRef()) { - if (QRI->Subtarget.hasV4TOps() && - // hexagonv4 allows dual store. - MCIDI.mayStore() && MCIDJ.mayStore()) { - /* do nothing */ - } - // store followed by store-- not OK on V2 - // store followed by load -- not OK on all (OK if addresses - // are not aliased) - // load followed by store -- OK on all - // load followed by load -- OK on all - else if ( !MCIDJ.mayStore()) { - /* do nothing */ - } - else { - FoundSequentialDependence = true; - break; - } - } - - // For V4, special case ALLOCFRAME. Even though there is dependency - // between ALLOCAFRAME and subsequent store, allow it to be - // packetized in a same packet. This implies that the store is using - // caller's SP. Hense, offset needs to be updated accordingly. - else if (DepType == SDep::Data - && QRI->Subtarget.hasV4TOps() - && J->getOpcode() == Hexagon::ALLOCFRAME - && (I->getOpcode() == Hexagon::STrid - || I->getOpcode() == Hexagon::STriw - || I->getOpcode() == Hexagon::STrib) - && I->getOperand(0).getReg() == QRI->getStackRegister() - && QII->isValidOffset(I->getOpcode(), - I->getOperand(1).getImm() - - (FrameSize + HEXAGON_LRFP_SIZE))) - { - GlueAllocframeStore = true; - // Since this store is to be glued with allocframe in the same - // packet, it will use SP of the previous stack frame, i.e - // caller's SP. Therefore, we need to recalculate offset according - // to this change. - I->getOperand(1).setImm(I->getOperand(1).getImm() - - (FrameSize + HEXAGON_LRFP_SIZE)); - } - - // - // Skip over anti-dependences. Two instructions that are - // anti-dependent can share a packet - // - else if (DepType != SDep::Anti) { - FoundSequentialDependence = true; - break; - } - } - - if (FoundSequentialDependence) { - Dependence = true; - return false; - } - } - - return true; -} - -// isLegalToPruneDependencies -bool HexagonPacketizerList::isLegalToPruneDependencies(SUnit *SUI, SUnit *SUJ) { - MachineInstr *I = SUI->getInstr(); - assert(I && SUJ->getInstr() && "Unable to packetize null instruction!"); - - const unsigned FrameSize = MF.getFrameInfo()->getStackSize(); - - if (Dependence) { - - // Check if the instruction was promoted to a dot-new. If so, demote it - // back into a dot-old. - if (PromotedToDotNew) { - DemoteToDotOld(I); - } - - // Check if the instruction (must be a store) was glued with an Allocframe - // instruction. If so, restore its offset to its original value, i.e. use - // curent SP instead of caller's SP. 
- if (GlueAllocframeStore) { - I->getOperand(1).setImm(I->getOperand(1).getImm() + - FrameSize + HEXAGON_LRFP_SIZE); - } - - return false; - } - return true; -} - -MachineBasicBlock::iterator HexagonPacketizerList::addToPacket(MachineInstr *MI) { - - MachineBasicBlock::iterator MII = MI; - MachineBasicBlock *MBB = MI->getParent(); - - const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII; - - if (GlueToNewValueJump) { - - ++MII; - MachineInstr *nvjMI = MII; - assert(ResourceTracker->canReserveResources(MI)); - ResourceTracker->reserveResources(MI); - if (QII->isExtended(MI) && - !tryAllocateResourcesForConstExt(MI)) { - endPacket(MBB, MI); - ResourceTracker->reserveResources(MI); - assert(canReserveResourcesForConstExt(MI) && - "Ensure that there is a slot"); - reserveResourcesForConstExt(MI); - // Reserve resources for new value jump constant extender. - assert(canReserveResourcesForConstExt(MI) && - "Ensure that there is a slot"); - reserveResourcesForConstExt(nvjMI); - assert(ResourceTracker->canReserveResources(nvjMI) && - "Ensure that there is a slot"); - - } else if ( // Extended instruction takes two slots in the packet. - // Try reserve and allocate 4-byte in the current packet first. - (QII->isExtended(nvjMI) - && (!tryAllocateResourcesForConstExt(nvjMI) - || !ResourceTracker->canReserveResources(nvjMI))) - || // For non-extended instruction, no need to allocate extra 4 bytes. - (!QII->isExtended(nvjMI) && !ResourceTracker->canReserveResources(nvjMI))) - { - endPacket(MBB, MI); - // A new and empty packet starts. - // We are sure that the resources requirements can be satisfied. - // Therefore, do not need to call "canReserveResources" anymore. - ResourceTracker->reserveResources(MI); - if (QII->isExtended(nvjMI)) - reserveResourcesForConstExt(nvjMI); - } - // Here, we are sure that "reserveResources" would succeed. - ResourceTracker->reserveResources(nvjMI); - CurrentPacketMIs.push_back(MI); - CurrentPacketMIs.push_back(nvjMI); - } else { - if ( QII->isExtended(MI) - && ( !tryAllocateResourcesForConstExt(MI) - || !ResourceTracker->canReserveResources(MI))) - { - endPacket(MBB, MI); - // Check if the instruction was promoted to a dot-new. If so, demote it - // back into a dot-old - if (PromotedToDotNew) { - DemoteToDotOld(MI); - } - reserveResourcesForConstExt(MI); - } - // In case that "MI" is not an extended insn, - // the resource availability has already been checked. 
- ResourceTracker->reserveResources(MI);
- CurrentPacketMIs.push_back(MI);
- }
- return MII;
-}
-
-//===----------------------------------------------------------------------===//
-// Public Constructor Functions
-//===----------------------------------------------------------------------===//
-
-FunctionPass *llvm::createHexagonPacketizer() {
- return new HexagonPacketizer();
-}
-
diff --git a/lib/Target/Hexagon/InstPrinter/HexagonInstPrinter.cpp b/lib/Target/Hexagon/InstPrinter/HexagonInstPrinter.cpp
index 75d6bfb0813a..47384cd533fa 100644
--- a/lib/Target/Hexagon/InstPrinter/HexagonInstPrinter.cpp
+++ b/lib/Target/Hexagon/InstPrinter/HexagonInstPrinter.cpp
@@ -15,7 +15,6 @@
 #include "Hexagon.h"
 #include "HexagonAsmPrinter.h"
 #include "HexagonInstPrinter.h"
-#include "HexagonMCInst.h"
 #include "llvm/MC/MCInst.h"
 #include "llvm/MC/MCAsmInfo.h"
 #include "llvm/MC/MCExpr.h"
@@ -38,50 +37,20 @@ StringRef HexagonInstPrinter::getRegName(unsigned RegNo) const {
 void HexagonInstPrinter::printInst(const MCInst *MI, raw_ostream &O,
 StringRef Annot) {
- printInst((const HexagonMCInst*)(MI), O, Annot);
-}
-
-void HexagonInstPrinter::printInst(const HexagonMCInst *MI, raw_ostream &O,
- StringRef Annot) {
 const char packetPadding[] = " ";
 const char startPacket = '{', endPacket = '}';
 // TODO: add outer HW loop when it's supported too.
 if (MI->getOpcode() == Hexagon::ENDLOOP0) {
- // Ending a hardware loop is different from ending a regular packet.
- assert(MI->isEndPacket() && "Loop end must also end the packet");
-
- if (MI->isStartPacket()) {
- // There must be a packet to end a loop.
- // FIXME: when shuffling is always run, this shouldn't be needed.
- HexagonMCInst Nop;
- StringRef NoAnnot;
-
- Nop.setOpcode (Hexagon::NOP);
- Nop.setStartPacket (MI->isStartPacket());
- printInst (&Nop, O, NoAnnot);
- }
-
- // Close the packet.
- if (MI->isEndPacket())
- O << packetPadding << endPacket;
+ MCInst Nop;

- printInstruction(MI, O);
- }
- else {
- // Prefix the insn opening the packet.
- if (MI->isStartPacket())
- O << packetPadding << startPacket << '\n';
-
- printInstruction(MI, O);
-
- // Suffix the insn closing the packet.
- if (MI->isEndPacket())
- // Always suffix the packet on a new line, since the GNU assembler has
- // issues with a closing brace on the same line as CONST{32,64}.
- O << '\n' << packetPadding << endPacket; + O << packetPadding << startPacket << '\n'; + Nop.setOpcode(Hexagon::NOP); + printInstruction(&Nop, O); + O << packetPadding << endPacket; } + printInstruction(MI, O); printAnnotation(O, Annot); } @@ -100,18 +69,18 @@ void HexagonInstPrinter::printOperand(const MCInst *MI, unsigned OpNo, } } -void HexagonInstPrinter::printImmOperand(const MCInst *MI, unsigned OpNo, - raw_ostream &O) const { +void HexagonInstPrinter::printImmOperand + (const MCInst *MI, unsigned OpNo, raw_ostream &O) const { O << MI->getOperand(OpNo).getImm(); } void HexagonInstPrinter::printExtOperand(const MCInst *MI, unsigned OpNo, - raw_ostream &O) const { + raw_ostream &O) const { O << MI->getOperand(OpNo).getImm(); } -void HexagonInstPrinter::printUnsignedImmOperand(const MCInst *MI, unsigned OpNo, - raw_ostream &O) const { +void HexagonInstPrinter::printUnsignedImmOperand + (const MCInst *MI, unsigned OpNo, raw_ostream &O) const { O << MI->getOperand(OpNo).getImm(); } @@ -120,13 +89,13 @@ void HexagonInstPrinter::printNegImmOperand(const MCInst *MI, unsigned OpNo, O << -MI->getOperand(OpNo).getImm(); } -void HexagonInstPrinter::printNOneImmOperand(const MCInst *MI, unsigned OpNo, - raw_ostream &O) const { +void HexagonInstPrinter::printNOneImmOperand + (const MCInst *MI, unsigned OpNo, raw_ostream &O) const { O << -1; } -void HexagonInstPrinter::printMEMriOperand(const MCInst *MI, unsigned OpNo, - raw_ostream &O) const { +void HexagonInstPrinter::printMEMriOperand + (const MCInst *MI, unsigned OpNo, raw_ostream &O) const { const MCOperand& MO0 = MI->getOperand(OpNo); const MCOperand& MO1 = MI->getOperand(OpNo + 1); @@ -134,8 +103,8 @@ void HexagonInstPrinter::printMEMriOperand(const MCInst *MI, unsigned OpNo, O << " + #" << MO1.getImm(); } -void HexagonInstPrinter::printFrameIndexOperand(const MCInst *MI, unsigned OpNo, - raw_ostream &O) const { +void HexagonInstPrinter::printFrameIndexOperand + (const MCInst *MI, unsigned OpNo, raw_ostream &O) const { const MCOperand& MO0 = MI->getOperand(OpNo); const MCOperand& MO1 = MI->getOperand(OpNo + 1); diff --git a/lib/Target/Hexagon/InstPrinter/HexagonInstPrinter.h b/lib/Target/Hexagon/InstPrinter/HexagonInstPrinter.h index 3ce7dfcbdbe2..dad4334c3eb4 100644 --- a/lib/Target/Hexagon/InstPrinter/HexagonInstPrinter.h +++ b/lib/Target/Hexagon/InstPrinter/HexagonInstPrinter.h @@ -14,7 +14,6 @@ #ifndef HEXAGONINSTPRINTER_H #define HEXAGONINSTPRINTER_H -#include "HexagonMCInst.h" #include "llvm/MC/MCInstPrinter.h" namespace llvm { @@ -26,7 +25,6 @@ namespace llvm { : MCInstPrinter(MAI, MII, MRI) {} virtual void printInst(const MCInst *MI, raw_ostream &O, StringRef Annot); - void printInst(const HexagonMCInst *MI, raw_ostream &O, StringRef Annot); virtual StringRef getOpcodeName(unsigned Opcode) const; void printInstruction(const MCInst *MI, raw_ostream &O); StringRef getRegName(unsigned RegNo) const; diff --git a/lib/Target/Hexagon/MCTargetDesc/CMakeLists.txt b/lib/Target/Hexagon/MCTargetDesc/CMakeLists.txt index c9c5a6eadf88..8e3da99404ee 100644 --- a/lib/Target/Hexagon/MCTargetDesc/CMakeLists.txt +++ b/lib/Target/Hexagon/MCTargetDesc/CMakeLists.txt @@ -1,6 +1,6 @@ add_llvm_library(LLVMHexagonDesc - HexagonMCAsmInfo.cpp HexagonMCTargetDesc.cpp + HexagonMCAsmInfo.cpp ) add_dependencies(LLVMHexagonDesc HexagonCommonTableGen) diff --git a/lib/Target/Hexagon/MCTargetDesc/HexagonBaseInfo.h b/lib/Target/Hexagon/MCTargetDesc/HexagonBaseInfo.h index 7221e906342e..ed55c3c1c15c 100644 --- a/lib/Target/Hexagon/MCTargetDesc/HexagonBaseInfo.h +++ 
b/lib/Target/Hexagon/MCTargetDesc/HexagonBaseInfo.h @@ -23,41 +23,14 @@ namespace llvm { /// instruction info tracks. /// namespace HexagonII { - // *** The code below must match HexagonInstrFormat*.td *** // - - // Insn types. - // *** Must match HexagonInstrFormat*.td *** - enum Type { - TypePSEUDO = 0, - TypeALU32 = 1, - TypeCR = 2, - TypeJR = 3, - TypeJ = 4, - TypeLD = 5, - TypeST = 6, - TypeSYSTEM = 7, - TypeXTYPE = 8, - TypeMEMOP = 9, - TypeNV = 10, - TypePREFIX = 30, // Such as extenders. - TypeMARKER = 31 // Such as end of a HW loop. - }; - + // *** The code below must match HexagonInstrFormat*.td *** // // MCInstrDesc TSFlags - // *** Must match HexagonInstrFormat*.td *** enum { - // This 5-bit field describes the insn type. - TypePos = 0, - TypeMask = 0x1f, - - // Solo instructions. - SoloPos = 5, - SoloMask = 0x1, // Predicated instructions. - PredicatedPos = 6, + PredicatedPos = 1, PredicatedMask = 0x1 }; diff --git a/lib/Target/MBlaze/MBlazeCallingConv.td b/lib/Target/MBlaze/MBlazeCallingConv.td index 4962573f96ab..00a4219d047b 100644 --- a/lib/Target/MBlaze/MBlazeCallingConv.td +++ b/lib/Target/MBlaze/MBlazeCallingConv.td @@ -9,10 +9,6 @@ // This describes the calling conventions for MBlaze architecture. //===----------------------------------------------------------------------===// -/// CCIfSubtarget - Match if the current subtarget has a feature F. -class CCIfSubtarget<string F, CCAction A>: - CCIf<!strconcat("State.getTarget().getSubtarget<MBlazeSubtarget>().", F), A>; - //===----------------------------------------------------------------------===// // MBlaze ABI Calling Convention //===----------------------------------------------------------------------===// diff --git a/lib/Target/MBlaze/MBlazeTargetMachine.cpp b/lib/Target/MBlaze/MBlazeTargetMachine.cpp index dd7de9bff36b..62393d0920bb 100644 --- a/lib/Target/MBlaze/MBlazeTargetMachine.cpp +++ b/lib/Target/MBlaze/MBlazeTargetMachine.cpp @@ -68,7 +68,7 @@ TargetPassConfig *MBlazeTargetMachine::createPassConfig(PassManagerBase &PM) { // Install an instruction selector pass using // the ISelDag to gen MBlaze code. bool MBlazePassConfig::addInstSelector() { - PM.add(createMBlazeISelDag(getMBlazeTargetMachine())); + PM->add(createMBlazeISelDag(getMBlazeTargetMachine())); return false; } @@ -76,6 +76,6 @@ bool MBlazePassConfig::addInstSelector() { // machine code is emitted. return true if -print-machineinstrs should // print out the code after the passes. bool MBlazePassConfig::addPreEmitPass() { - PM.add(createMBlazeDelaySlotFillerPass(getMBlazeTargetMachine())); + PM->add(createMBlazeDelaySlotFillerPass(getMBlazeTargetMachine())); return true; } diff --git a/lib/Target/MSP430/MSP430TargetMachine.cpp b/lib/Target/MSP430/MSP430TargetMachine.cpp index 9f2eda13d7fd..3acf96bb7d27 100644 --- a/lib/Target/MSP430/MSP430TargetMachine.cpp +++ b/lib/Target/MSP430/MSP430TargetMachine.cpp @@ -60,12 +60,12 @@ TargetPassConfig *MSP430TargetMachine::createPassConfig(PassManagerBase &PM) { bool MSP430PassConfig::addInstSelector() { // Install an instruction selector. - PM.add(createMSP430ISelDag(getMSP430TargetMachine(), getOptLevel())); + PM->add(createMSP430ISelDag(getMSP430TargetMachine(), getOptLevel())); return false; } bool MSP430PassConfig::addPreEmitPass() { // Must run branch selection immediately preceding the asm printer. 
- PM.add(createMSP430BranchSelectionPass()); + PM->add(createMSP430BranchSelectionPass()); return false; } diff --git a/lib/Target/Mips/CMakeLists.txt b/lib/Target/Mips/CMakeLists.txt index 13d17e4e5293..0500c5dc3832 100644 --- a/lib/Target/Mips/CMakeLists.txt +++ b/lib/Target/Mips/CMakeLists.txt @@ -2,12 +2,14 @@ set(LLVM_TARGET_DEFINITIONS Mips.td) tablegen(LLVM MipsGenRegisterInfo.inc -gen-register-info) tablegen(LLVM MipsGenInstrInfo.inc -gen-instr-info) +tablegen(LLVM MipsGenDisassemblerTables.inc -gen-disassembler) tablegen(LLVM MipsGenCodeEmitter.inc -gen-emitter) tablegen(LLVM MipsGenMCCodeEmitter.inc -gen-emitter -mc-emitter) tablegen(LLVM MipsGenAsmWriter.inc -gen-asm-writer) tablegen(LLVM MipsGenDAGISel.inc -gen-dag-isel) tablegen(LLVM MipsGenCallingConv.inc -gen-callingconv) tablegen(LLVM MipsGenSubtargetInfo.inc -gen-subtarget) +tablegen(LLVM MipsGenEDInfo.inc -gen-enhanced-disassembly-info) add_public_tablegen_target(MipsCommonTableGen) add_llvm_target(MipsCodeGen @@ -32,6 +34,7 @@ add_llvm_target(MipsCodeGen ) add_subdirectory(InstPrinter) +add_subdirectory(Disassembler) add_subdirectory(TargetInfo) add_subdirectory(MCTargetDesc) add_subdirectory(AsmParser) diff --git a/lib/Target/Mips/Disassembler/CMakeLists.txt b/lib/Target/Mips/Disassembler/CMakeLists.txt new file mode 100644 index 000000000000..fe1dc75776f1 --- /dev/null +++ b/lib/Target/Mips/Disassembler/CMakeLists.txt @@ -0,0 +1,15 @@ +include_directories( ${CMAKE_CURRENT_BINARY_DIR}/.. ${CMAKE_CURRENT_SOURCE_DIR}/.. ) + +add_llvm_library(LLVMMipsDisassembler + MipsDisassembler.cpp + ) + +# workaround for hanging compilation on MSVC9 and 10 +if( MSVC_VERSION EQUAL 1400 OR MSVC_VERSION EQUAL 1500 OR MSVC_VERSION EQUAL 1600 ) +set_property( + SOURCE MipsDisassembler.cpp + PROPERTY COMPILE_FLAGS "/Od" + ) +endif() + +add_dependencies(LLVMMipsDisassembler MipsCommonTableGen) diff --git a/lib/Target/Mips/Disassembler/LLVMBuild.txt b/lib/Target/Mips/Disassembler/LLVMBuild.txt new file mode 100644 index 000000000000..048ad0ddac5b --- /dev/null +++ b/lib/Target/Mips/Disassembler/LLVMBuild.txt @@ -0,0 +1,23 @@ +;===- ./lib/Target/Mips/Disassembler/LLVMBuild.txt --------------*- Conf -*--===; +; +; The LLVM Compiler Infrastructure +; +; This file is distributed under the University of Illinois Open Source +; License. See LICENSE.TXT for details. +; +;===------------------------------------------------------------------------===; +; +; This is an LLVMBuild description file for the components in this subdirectory. +; +; For more information on the LLVMBuild system, please see: +; +; http://llvm.org/docs/LLVMBuild.html +; +;===------------------------------------------------------------------------===; + +[component_0] +type = Library +name = MipsDisassembler +parent = Mips +required_libraries = MC Support MipsInfo +add_to_library_groups = Mips diff --git a/lib/Target/Mips/Disassembler/Makefile b/lib/Target/Mips/Disassembler/Makefile new file mode 100644 index 000000000000..a78feba1f8df --- /dev/null +++ b/lib/Target/Mips/Disassembler/Makefile @@ -0,0 +1,16 @@ +##===- lib/Target/Mips/Disassembler/Makefile ----------------*- Makefile -*-===## +# +# The LLVM Compiler Infrastructure +# +# This file is distributed under the University of Illinois Open Source +# License. See LICENSE.TXT for details. +# +##===----------------------------------------------------------------------===## + +LEVEL = ../../../.. 
+LIBRARYNAME = LLVMMipsDisassembler
+
+# Hack: we need to include 'main' Mips target directory to grab private headers
+CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
+
+include $(LEVEL)/Makefile.common
diff --git a/lib/Target/Mips/Disassembler/MipsDisassembler.cpp b/lib/Target/Mips/Disassembler/MipsDisassembler.cpp
new file mode 100644
index 000000000000..78dbc0694902
--- /dev/null
+++ b/lib/Target/Mips/Disassembler/MipsDisassembler.cpp
@@ -0,0 +1,552 @@
+//===- MipsDisassembler.cpp - Disassembler for Mips -------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is part of the Mips Disassembler.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Mips.h"
+#include "MipsSubtarget.h"
+#include "llvm/MC/EDInstInfo.h"
+#include "llvm/MC/MCDisassembler.h"
+#include "llvm/Support/MemoryObject.h"
+#include "llvm/Support/TargetRegistry.h"
+#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCRegisterInfo.h"
+#include "llvm/Support/MathExtras.h"
+
+
+#include "MipsGenEDInfo.inc"
+
+using namespace llvm;
+
+typedef MCDisassembler::DecodeStatus DecodeStatus;
+
+/// MipsDisassembler - a disassembler class for Mips32.
+class MipsDisassembler : public MCDisassembler {
+public:
+ /// Constructor - Initializes the disassembler.
+ ///
+ MipsDisassembler(const MCSubtargetInfo &STI, bool bigEndian) :
+ MCDisassembler(STI), isBigEndian(bigEndian) {
+ }
+
+ ~MipsDisassembler() {
+ }
+
+ /// getInstruction - See MCDisassembler.
+ DecodeStatus getInstruction(MCInst &instr,
+ uint64_t &size,
+ const MemoryObject &region,
+ uint64_t address,
+ raw_ostream &vStream,
+ raw_ostream &cStream) const;
+
+ /// getEDInfo - See MCDisassembler.
+ const EDInstInfo *getEDInfo() const;
+
+private:
+ bool isBigEndian;
+};
+
+
+/// Mips64Disassembler - a disassembler class for Mips64.
+class Mips64Disassembler : public MCDisassembler {
+public:
+ /// Constructor - Initializes the disassembler.
+ ///
+ Mips64Disassembler(const MCSubtargetInfo &STI, bool bigEndian) :
+ MCDisassembler(STI), isBigEndian(bigEndian) {
+ }
+
+ ~Mips64Disassembler() {
+ }
+
+ /// getInstruction - See MCDisassembler.
+ DecodeStatus getInstruction(MCInst &instr,
+ uint64_t &size,
+ const MemoryObject &region,
+ uint64_t address,
+ raw_ostream &vStream,
+ raw_ostream &cStream) const;
+
+ /// getEDInfo - See MCDisassembler.
+ const EDInstInfo *getEDInfo() const;
+
+private:
+ bool isBigEndian;
+};
+
+const EDInstInfo *MipsDisassembler::getEDInfo() const {
+ return instInfoMips;
+}
+
+const EDInstInfo *Mips64Disassembler::getEDInfo() const {
+ return instInfoMips;
+}
+
+// Decoder tables for the Mips registers
+static const unsigned CPURegsTable[] = {
+ Mips::ZERO, Mips::AT, Mips::V0, Mips::V1,
+ Mips::A0, Mips::A1, Mips::A2, Mips::A3,
+ Mips::T0, Mips::T1, Mips::T2, Mips::T3,
+ Mips::T4, Mips::T5, Mips::T6, Mips::T7,
+ Mips::S0, Mips::S1, Mips::S2, Mips::S3,
+ Mips::S4, Mips::S5, Mips::S6, Mips::S7,
+ Mips::T8, Mips::T9, Mips::K0, Mips::K1,
+ Mips::GP, Mips::SP, Mips::FP, Mips::RA
+};
+
+static const unsigned FGR32RegsTable[] = {
+ Mips::F0, Mips::F1, Mips::F2, Mips::F3,
+ Mips::F4, Mips::F5, Mips::F6, Mips::F7,
+ Mips::F8, Mips::F9, Mips::F10, Mips::F11,
+ Mips::F12, Mips::F13, Mips::F14, Mips::F15,
+ Mips::F16, Mips::F17, Mips::F18, Mips::F19,
+ Mips::F20, Mips::F21, Mips::F22, Mips::F23,
+ Mips::F24, Mips::F25, Mips::F26, Mips::F27,
+ Mips::F28, Mips::F29, Mips::F30, Mips::F31
+};
+
+static const unsigned CPU64RegsTable[] = {
+ Mips::ZERO_64, Mips::AT_64, Mips::V0_64, Mips::V1_64,
+ Mips::A0_64, Mips::A1_64, Mips::A2_64, Mips::A3_64,
+ Mips::T0_64, Mips::T1_64, Mips::T2_64, Mips::T3_64,
+ Mips::T4_64, Mips::T5_64, Mips::T6_64, Mips::T7_64,
+ Mips::S0_64, Mips::S1_64, Mips::S2_64, Mips::S3_64,
+ Mips::S4_64, Mips::S5_64, Mips::S6_64, Mips::S7_64,
+ Mips::T8_64, Mips::T9_64, Mips::K0_64, Mips::K1_64,
+ Mips::GP_64, Mips::SP_64, Mips::FP_64, Mips::RA_64
+};
+
+static const unsigned FGR64RegsTable[] = {
+ Mips::D0_64, Mips::D1_64, Mips::D2_64, Mips::D3_64,
+ Mips::D4_64, Mips::D5_64, Mips::D6_64, Mips::D7_64,
+ Mips::D8_64, Mips::D9_64, Mips::D10_64, Mips::D11_64,
+ Mips::D12_64, Mips::D13_64, Mips::D14_64, Mips::D15_64,
+ Mips::D16_64, Mips::D17_64, Mips::D18_64, Mips::D19_64,
+ Mips::D20_64, Mips::D21_64, Mips::D22_64, Mips::D23_64,
+ Mips::D24_64, Mips::D25_64, Mips::D26_64, Mips::D27_64,
+ Mips::D28_64, Mips::D29_64, Mips::D30_64, Mips::D31_64
+};
+
+static const unsigned AFGR64RegsTable[] = {
+ Mips::D0, Mips::D1, Mips::D2, Mips::D3,
+ Mips::D4, Mips::D5, Mips::D6, Mips::D7,
+ Mips::D8, Mips::D9, Mips::D10, Mips::D11,
+ Mips::D12, Mips::D13, Mips::D14, Mips::D15
+};
+
+// Forward declare these because the autogenerated code will reference them.
+// Definitions are further down.
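// (Hedged editorial illustration.) MipsGenDisassemblerTables.inc,
// emitted by -gen-disassembler, invokes these callbacks roughly like
// this (shape only, not the actual generated code):
//   if (DecodeCPURegsRegisterClass(MI, fieldFromInstruction32(Insn, 21, 5),
//                                  Address, Decoder) == MCDisassembler::Fail)
//     return MCDisassembler::Fail;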
+static DecodeStatus DecodeCPU64RegsRegisterClass(MCInst &Inst,
+ unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder);
+
+static DecodeStatus DecodeCPURegsRegisterClass(MCInst &Inst,
+ unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder);
+
+static DecodeStatus DecodeFGR64RegisterClass(MCInst &Inst,
+ unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder);
+
+static DecodeStatus DecodeFGR32RegisterClass(MCInst &Inst,
+ unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder);
+
+static DecodeStatus DecodeCCRRegisterClass(MCInst &Inst,
+ unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder);
+
+static DecodeStatus DecodeHWRegsRegisterClass(MCInst &Inst,
+ unsigned Insn,
+ uint64_t Address,
+ const void *Decoder);
+
+static DecodeStatus DecodeAFGR64RegisterClass(MCInst &Inst,
+ unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder);
+
+static DecodeStatus DecodeHWRegs64RegisterClass(MCInst &Inst,
+ unsigned Insn,
+ uint64_t Address,
+ const void *Decoder);
+
+static DecodeStatus DecodeBranchTarget(MCInst &Inst,
+ unsigned Offset,
+ uint64_t Address,
+ const void *Decoder);
+
+static DecodeStatus DecodeBC1(MCInst &Inst,
+ unsigned Insn,
+ uint64_t Address,
+ const void *Decoder);
+
+
+static DecodeStatus DecodeJumpTarget(MCInst &Inst,
+ unsigned Insn,
+ uint64_t Address,
+ const void *Decoder);
+
+static DecodeStatus DecodeMem(MCInst &Inst,
+ unsigned Insn,
+ uint64_t Address,
+ const void *Decoder);
+
+static DecodeStatus DecodeFMem(MCInst &Inst, unsigned Insn,
+ uint64_t Address,
+ const void *Decoder);
+
+static DecodeStatus DecodeSimm16(MCInst &Inst,
+ unsigned Insn,
+ uint64_t Address,
+ const void *Decoder);
+
+static DecodeStatus DecodeCondCode(MCInst &Inst,
+ unsigned Insn,
+ uint64_t Address,
+ const void *Decoder);
+
+static DecodeStatus DecodeInsSize(MCInst &Inst,
+ unsigned Insn,
+ uint64_t Address,
+ const void *Decoder);
+
+static DecodeStatus DecodeExtSize(MCInst &Inst,
+ unsigned Insn,
+ uint64_t Address,
+ const void *Decoder);
+
+namespace llvm {
+extern Target TheMipselTarget, TheMipsTarget, TheMips64Target,
+ TheMips64elTarget;
+}
+
+static MCDisassembler *createMipsDisassembler(
+ const Target &T,
+ const MCSubtargetInfo &STI) {
+ return new MipsDisassembler(STI,true);
+}
+
+static MCDisassembler *createMipselDisassembler(
+ const Target &T,
+ const MCSubtargetInfo &STI) {
+ return new MipsDisassembler(STI,false);
+}
+
+static MCDisassembler *createMips64Disassembler(
+ const Target &T,
+ const MCSubtargetInfo &STI) {
+ return new Mips64Disassembler(STI,true);
+}
+
+static MCDisassembler *createMips64elDisassembler(
+ const Target &T,
+ const MCSubtargetInfo &STI) {
+ return new Mips64Disassembler(STI, false);
+}
+
+extern "C" void LLVMInitializeMipsDisassembler() {
+ // Register the disassembler.
+ TargetRegistry::RegisterMCDisassembler(TheMipsTarget,
+ createMipsDisassembler);
+ TargetRegistry::RegisterMCDisassembler(TheMipselTarget,
+ createMipselDisassembler);
+ TargetRegistry::RegisterMCDisassembler(TheMips64Target,
+ createMips64Disassembler);
+ TargetRegistry::RegisterMCDisassembler(TheMips64elTarget,
+ createMips64elDisassembler);
+}
+
+
+#include "MipsGenDisassemblerTables.inc"
+
+ /// readInstruction - read four bytes from the MemoryObject
+ /// and return the 32-bit word ordered according to the given endianness
+static DecodeStatus readInstruction32(const MemoryObject &region,
+ uint64_t address,
+ uint64_t &size,
+ uint32_t &insn,
+ bool isBigEndian) {
+ uint8_t Bytes[4];
+
+ // We want to read exactly 4 bytes of data.
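// (Hedged editorial example.) For the byte stream { 0x24, 0x42, 0x00,
// 0x08 }, the big-endian path below assembles insn = 0x24420008
// (addiu $v0, $v0, 8), while the little-endian path yields 0x08004224.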
+ if (region.readBytes(address, 4, (uint8_t*)Bytes, NULL) == -1) {
+ size = 0;
+ return MCDisassembler::Fail;
+ }
+
+ if (isBigEndian) {
+ // Encoded as a big-endian 32-bit word in the stream.
+ insn = (Bytes[3] << 0) |
+ (Bytes[2] << 8) |
+ (Bytes[1] << 16) |
+ (Bytes[0] << 24);
+ }
+ else {
+ // Encoded as a little-endian 32-bit word in the stream.
+ insn = (Bytes[0] << 0) |
+ (Bytes[1] << 8) |
+ (Bytes[2] << 16) |
+ (Bytes[3] << 24);
+ }
+
+ return MCDisassembler::Success;
+}
+
+DecodeStatus
+MipsDisassembler::getInstruction(MCInst &instr,
+ uint64_t &Size,
+ const MemoryObject &Region,
+ uint64_t Address,
+ raw_ostream &vStream,
+ raw_ostream &cStream) const {
+ uint32_t Insn;
+
+ DecodeStatus Result = readInstruction32(Region, Address, Size,
+ Insn, isBigEndian);
+ if (Result == MCDisassembler::Fail)
+ return MCDisassembler::Fail;
+
+ // Call the auto-generated decoder function.
+ Result = decodeMipsInstruction32(instr, Insn, Address, this, STI);
+ if (Result != MCDisassembler::Fail) {
+ Size = 4;
+ return Result;
+ }
+
+ return MCDisassembler::Fail;
+}
+
+DecodeStatus
+Mips64Disassembler::getInstruction(MCInst &instr,
+ uint64_t &Size,
+ const MemoryObject &Region,
+ uint64_t Address,
+ raw_ostream &vStream,
+ raw_ostream &cStream) const {
+ uint32_t Insn;
+
+ DecodeStatus Result = readInstruction32(Region, Address, Size,
+ Insn, isBigEndian);
+ if (Result == MCDisassembler::Fail)
+ return MCDisassembler::Fail;
+
+ // Call the auto-generated decoder function.
+ Result = decodeMips64Instruction32(instr, Insn, Address, this, STI);
+ if (Result != MCDisassembler::Fail) {
+ Size = 4;
+ return Result;
+ }
+ // If we fail to decode in the Mips64 decoder space, we can try Mips32.
+ Result = decodeMipsInstruction32(instr, Insn, Address, this, STI);
+ if (Result != MCDisassembler::Fail) {
+ Size = 4;
+ return Result;
+ }
+
+ return MCDisassembler::Fail;
+}
+
+static DecodeStatus DecodeCPU64RegsRegisterClass(MCInst &Inst,
+ unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder) {
+
+ if (RegNo > 31)
+ return MCDisassembler::Fail;
+
+ Inst.addOperand(MCOperand::CreateReg(CPU64RegsTable[RegNo]));
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus DecodeCPURegsRegisterClass(MCInst &Inst,
+ unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder) {
+ if (RegNo > 31)
+ return MCDisassembler::Fail;
+
+ Inst.addOperand(MCOperand::CreateReg(CPURegsTable[RegNo]));
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus DecodeFGR64RegisterClass(MCInst &Inst,
+ unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder) {
+ if (RegNo > 31)
+ return MCDisassembler::Fail;
+
+ Inst.addOperand(MCOperand::CreateReg(FGR64RegsTable[RegNo]));
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus DecodeFGR32RegisterClass(MCInst &Inst,
+ unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder) {
+ if (RegNo > 31)
+ return MCDisassembler::Fail;
+
+ Inst.addOperand(MCOperand::CreateReg(FGR32RegsTable[RegNo]));
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus DecodeCCRRegisterClass(MCInst &Inst,
+ unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder) {
+ Inst.addOperand(MCOperand::CreateReg(RegNo));
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus DecodeMem(MCInst &Inst,
+ unsigned Insn,
+ uint64_t Address,
+ const void *Decoder) {
+ int Offset = SignExtend32<16>(Insn & 0xffff);
+ int Reg = (int)fieldFromInstruction32(Insn, 16, 5);
+ int Base = (int)fieldFromInstruction32(Insn, 21, 5);
+
+ if(Inst.getOpcode() == Mips::SC){
+ Inst.addOperand(MCOperand::CreateReg(CPURegsTable[Reg]));
+ }
+
+ Inst.addOperand(MCOperand::CreateReg(CPURegsTable[Reg]));
+ Inst.addOperand(MCOperand::CreateReg(CPURegsTable[Base]));
+ Inst.addOperand(MCOperand::CreateImm(Offset));
+
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus DecodeFMem(MCInst &Inst,
+ unsigned Insn,
+ uint64_t Address,
+ const void *Decoder) {
+ int Offset = SignExtend32<16>(Insn & 0xffff);
+ int Reg = (int)fieldFromInstruction32(Insn, 16, 5);
+ int Base = (int)fieldFromInstruction32(Insn, 21, 5);
+
+ Inst.addOperand(MCOperand::CreateReg(FGR64RegsTable[Reg]));
+ Inst.addOperand(MCOperand::CreateReg(CPURegsTable[Base]));
+ Inst.addOperand(MCOperand::CreateImm(Offset));
+
+ return MCDisassembler::Success;
+}
+
+
+static DecodeStatus DecodeHWRegsRegisterClass(MCInst &Inst,
+ unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder) {
+ // Currently only hardware register 29 is supported.
+ if (RegNo != 29)
+ return MCDisassembler::Fail;
+ Inst.addOperand(MCOperand::CreateReg(Mips::HWR29));
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus DecodeCondCode(MCInst &Inst,
+ unsigned Insn,
+ uint64_t Address,
+ const void *Decoder) {
+ int CondCode = Insn & 0xf;
+ Inst.addOperand(MCOperand::CreateImm(CondCode));
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus DecodeAFGR64RegisterClass(MCInst &Inst,
+ unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder) {
+ if (RegNo > 31)
+ return MCDisassembler::Fail;
+
+ Inst.addOperand(MCOperand::CreateReg(AFGR64RegsTable[RegNo]));
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus DecodeHWRegs64RegisterClass(MCInst &Inst,
+ unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder) {
+ // Currently only hardware register 29 is supported.
+ if (RegNo != 29)
+ return MCDisassembler::Fail;
+ Inst.addOperand(MCOperand::CreateReg(Mips::HWR29));
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus DecodeBranchTarget(MCInst &Inst,
+ unsigned Offset,
+ uint64_t Address,
+ const void *Decoder) {
+ unsigned BranchOffset = Offset & 0xffff;
+ BranchOffset = SignExtend32<18>(BranchOffset << 2) + 4;
+ Inst.addOperand(MCOperand::CreateImm(BranchOffset));
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus DecodeBC1(MCInst &Inst,
+ unsigned Insn,
+ uint64_t Address,
+ const void *Decoder) {
+ unsigned BranchOffset = Insn & 0xffff;
+ BranchOffset = SignExtend32<18>(BranchOffset << 2) + 4;
+ Inst.addOperand(MCOperand::CreateImm(BranchOffset));
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus DecodeJumpTarget(MCInst &Inst,
+ unsigned Insn,
+ uint64_t Address,
+ const void *Decoder) {
+
+ unsigned JumpOffset = fieldFromInstruction32(Insn, 0, 26) << 2;
+ Inst.addOperand(MCOperand::CreateImm(JumpOffset));
+ return MCDisassembler::Success;
+}
+
+
+static DecodeStatus DecodeSimm16(MCInst &Inst,
+ unsigned Insn,
+ uint64_t Address,
+ const void *Decoder) {
+ Inst.addOperand(MCOperand::CreateImm(SignExtend32<16>(Insn)));
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus DecodeInsSize(MCInst &Inst,
+ unsigned Insn,
+ uint64_t Address,
+ const void *Decoder) {
+ // First we need to grab the pos (lsb) from the MCInst.
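// (Hedged editorial example.) MIPS "ins" encodes msb = pos + size - 1,
// and the decoder receives that msb field as Insn; for pos = 3 and
// size = 5 the field holds 7, so the code below recovers 7 - 3 + 1 = 5.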
+ int Pos = Inst.getOperand(2).getImm();
+ int Size = (int) Insn - Pos + 1;
+ Inst.addOperand(MCOperand::CreateImm(SignExtend32<16>(Size)));
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus DecodeExtSize(MCInst &Inst,
+ unsigned Insn,
+ uint64_t Address,
+ const void *Decoder) {
+ int Size = (int) Insn + 1;
+ Inst.addOperand(MCOperand::CreateImm(SignExtend32<16>(Size)));
+ return MCDisassembler::Success;
+}
diff --git a/lib/Target/Mips/LLVMBuild.txt b/lib/Target/Mips/LLVMBuild.txt
index abbed8c90fc8..a95d6bc1352a 100644
--- a/lib/Target/Mips/LLVMBuild.txt
+++ b/lib/Target/Mips/LLVMBuild.txt
@@ -16,7 +16,7 @@
;===------------------------------------------------------------------------===;

[common]
-subdirectories = AsmParser InstPrinter MCTargetDesc TargetInfo
+subdirectories = AsmParser Disassembler InstPrinter MCTargetDesc TargetInfo

[component_0]
type = TargetGroup
@@ -24,6 +24,7 @@ name = Mips
parent = Target
has_asmparser = 1
has_asmprinter = 1
+has_disassembler = 1
has_jit = 1

[component_1]
diff --git a/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp b/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp
index e79be3363623..9b4caf65cbff 100644
--- a/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp
+++ b/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp
@@ -12,7 +12,6 @@
//===----------------------------------------------------------------------===//
//
-#include "MipsBaseInfo.h"
#include "MipsFixupKinds.h"
#include "MCTargetDesc/MipsMCTargetDesc.h"
#include "llvm/MC/MCAsmBackend.h"
@@ -85,9 +84,8 @@ public:
uint64_t Value) const {
MCFixupKind Kind = Fixup.getKind();
Value = adjustFixupValue((unsigned)Kind, Value);
- int64_t SymOffset = MipsGetSymAndOffset(Fixup).second;
- if (!Value && !SymOffset)
+ if (!Value)
return; // Doesn't change encoding.

// Where do we start in the object
@@ -118,7 +116,7 @@ public:
}
uint64_t Mask = ((uint64_t)(-1) >>
(64 - getFixupKindInfo(Kind).TargetSize));
- CurVal |= (Value + SymOffset) & Mask;
+ CurVal |= Value & Mask;

// Write out the fixed up bytes back to the code/data bits.
for (unsigned i = 0; i != NumBytes; ++i) {
diff --git a/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp b/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp
index 27954b174ed9..4ed2be0f430e 100644
--- a/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp
+++ b/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp
@@ -194,7 +194,7 @@ getMachineOpValue(const MCInst &MI, const MCOperand &MO,

assert (Kind == MCExpr::SymbolRef);

- Mips::Fixups FixupKind;
+ Mips::Fixups FixupKind = Mips::Fixups(0);

switch(cast<MCSymbolRefExpr>(Expr)->getKind()) {
case MCSymbolRefExpr::VK_Mips_GPREL:
diff --git a/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.cpp b/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.cpp
index 3c544f6aec90..f634f082be5a 100644
--- a/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.cpp
+++ b/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.cpp
@@ -34,6 +34,38 @@

using namespace llvm;

+static std::string ParseMipsTriple(StringRef TT, StringRef CPU) {
+ std::string MipsArchFeature;
+ size_t DashPosition = 0;
+ StringRef TheTriple;
+
+ // Let's see if there is a dash, like mips-unknown-linux.
+ DashPosition = TT.find('-');
+
+ if (DashPosition == StringRef::npos) {
+ // No dash, so use the whole string.
+ TheTriple = TT.substr(0);
+ } else {
+ // We are only interested in the substring before the dash.
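// (Hedged editorial example.) For TT = "mips64el-unknown-linux" the
// substring is "mips64el", which is neither "mips" nor "mipsel", so an
// empty CPU yields "+mips64"; createMipsMCSubtargetInfo below then
// appends any explicit FS string to this architecture feature.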
+ TheTriple = TT.substr(0,DashPosition); + } + + if (TheTriple == "mips" || TheTriple == "mipsel") { + if (CPU.empty() || CPU == "mips32") { + MipsArchFeature = "+mips32"; + } else if (CPU == "mips32r2") { + MipsArchFeature = "+mips32r2"; + } + } else { + if (CPU.empty() || CPU == "mips64") { + MipsArchFeature = "+mips64"; + } else if (CPU == "mips64r2") { + MipsArchFeature = "+mips64r2"; + } + } + return MipsArchFeature; +} + static MCInstrInfo *createMipsMCInstrInfo() { MCInstrInfo *X = new MCInstrInfo(); InitMipsMCInstrInfo(X); @@ -48,8 +80,15 @@ static MCRegisterInfo *createMipsMCRegisterInfo(StringRef TT) { static MCSubtargetInfo *createMipsMCSubtargetInfo(StringRef TT, StringRef CPU, StringRef FS) { + std::string ArchFS = ParseMipsTriple(TT,CPU); + if (!FS.empty()) { + if (!ArchFS.empty()) + ArchFS = ArchFS + "," + FS.str(); + else + ArchFS = FS; + } MCSubtargetInfo *X = new MCSubtargetInfo(); - InitMipsMCSubtargetInfo(X, TT, CPU, FS); + InitMipsMCSubtargetInfo(X, TT, CPU, ArchFS); return X; } diff --git a/lib/Target/Mips/Makefile b/lib/Target/Mips/Makefile index 168635c96beb..596f07145a27 100644 --- a/lib/Target/Mips/Makefile +++ b/lib/Target/Mips/Makefile @@ -15,9 +15,9 @@ TARGET = Mips BUILT_SOURCES = MipsGenRegisterInfo.inc MipsGenInstrInfo.inc \ MipsGenAsmWriter.inc MipsGenCodeEmitter.inc \ MipsGenDAGISel.inc MipsGenCallingConv.inc \ - MipsGenSubtargetInfo.inc MipsGenMCCodeEmitter.inc - -DIRS = InstPrinter AsmParser TargetInfo MCTargetDesc + MipsGenSubtargetInfo.inc MipsGenMCCodeEmitter.inc \ + MipsGenEDInfo.inc MipsGenDisassemblerTables.inc +DIRS = InstPrinter Disassembler AsmParser TargetInfo MCTargetDesc include $(LEVEL)/Makefile.common diff --git a/lib/Target/Mips/Mips64InstrInfo.td b/lib/Target/Mips/Mips64InstrInfo.td index 427e8d97ad9c..0382869255f4 100644 --- a/lib/Target/Mips/Mips64InstrInfo.td +++ b/lib/Target/Mips/Mips64InstrInfo.td @@ -36,6 +36,7 @@ def immZExt6 : ImmLeaf<i32, [{return Imm == (Imm & 0x3f);}]>; //===----------------------------------------------------------------------===// // Shifts // 64-bit shift instructions. 
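// (Hedged editorial note.) The DecoderNamespace = "Mips64" wrapper added
// below routes these encodings into a separate generated table,
// decodeMips64Instruction32, which the Mips64 disassembler tries first
// before falling back to the shared Mips32 table (see getInstruction
// earlier in this patch).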
+let DecoderNamespace = "Mips64" in { class shift_rotate_imm64<bits<6> func, bits<5> isRotate, string instr_asm, SDNode OpNode>: shift_rotate_imm<func, isRotate, instr_asm, OpNode, immZExt6, shamt, @@ -49,16 +50,21 @@ class Div64<SDNode op, bits<6> func, string instr_asm, InstrItinClass itin>: multiclass Atomic2Ops64<PatFrag Op, string Opstr> { def #NAME# : Atomic2Ops<Op, Opstr, CPU64Regs, CPURegs>, Requires<[NotN64]>; - def _P8 : Atomic2Ops<Op, Opstr, CPU64Regs, CPU64Regs>, Requires<[IsN64]>; + def _P8 : Atomic2Ops<Op, Opstr, CPU64Regs, CPU64Regs>, Requires<[IsN64]> { + let isCodeGenOnly = 1; + } } multiclass AtomicCmpSwap64<PatFrag Op, string Width> { def #NAME# : AtomicCmpSwap<Op, Width, CPU64Regs, CPURegs>, Requires<[NotN64]>; def _P8 : AtomicCmpSwap<Op, Width, CPU64Regs, CPU64Regs>, - Requires<[IsN64]>; + Requires<[IsN64]> { + let isCodeGenOnly = 1; + } } - -let usesCustomInserter = 1, Predicates = [HasMips64] in { +} +let usesCustomInserter = 1, Predicates = [HasMips64], + DecoderNamespace = "Mips64" in { defm ATOMIC_LOAD_ADD_I64 : Atomic2Ops64<atomic_load_add_64, "load_add_64">; defm ATOMIC_LOAD_SUB_I64 : Atomic2Ops64<atomic_load_sub_64, "load_sub_64">; defm ATOMIC_LOAD_AND_I64 : Atomic2Ops64<atomic_load_and_64, "load_and_64">; @@ -72,7 +78,7 @@ let usesCustomInserter = 1, Predicates = [HasMips64] in { //===----------------------------------------------------------------------===// // Instruction definition //===----------------------------------------------------------------------===// - +let DecoderNamespace = "Mips64" in { /// Arithmetic Instructions (ALU Immediate) def DADDiu : ArithLogicI<0x19, "daddiu", add, simm16_64, immSExt16, CPU64Regs>; @@ -97,16 +103,17 @@ def NOR64 : LogicNOR<0x00, 0x27, "nor", CPU64Regs>; def DSLL : shift_rotate_imm64<0x38, 0x00, "dsll", shl>; def DSRL : shift_rotate_imm64<0x3a, 0x00, "dsrl", srl>; def DSRA : shift_rotate_imm64<0x3b, 0x00, "dsra", sra>; -def DSLLV : shift_rotate_reg<0x24, 0x00, "dsllv", shl, CPU64Regs>; -def DSRLV : shift_rotate_reg<0x26, 0x00, "dsrlv", srl, CPU64Regs>; -def DSRAV : shift_rotate_reg<0x27, 0x00, "dsrav", sra, CPU64Regs>; - +def DSLLV : shift_rotate_reg<0x14, 0x00, "dsllv", shl, CPU64Regs>; +def DSRLV : shift_rotate_reg<0x16, 0x00, "dsrlv", srl, CPU64Regs>; +def DSRAV : shift_rotate_reg<0x17, 0x00, "dsrav", sra, CPU64Regs>; +} // Rotate Instructions -let Predicates = [HasMips64r2] in { +let Predicates = [HasMips64r2], DecoderNamespace = "Mips64" in { def DROTR : shift_rotate_imm64<0x3a, 0x01, "drotr", rotr>; def DROTRV : shift_rotate_reg<0x16, 0x01, "drotrv", rotr, CPU64Regs>; } +let DecoderNamespace = "Mips64" in { /// Load and Store Instructions /// aligned defm LB64 : LoadM64<0x20, "lb", sextloadi8>; @@ -132,9 +139,13 @@ defm USD : StoreM64<0x3f, "usd", store_u, 1>; /// Load-linked, Store-conditional def LLD : LLBase<0x34, "lld", CPU64Regs, mem>, Requires<[NotN64]>; -def LLD_P8 : LLBase<0x34, "lld", CPU64Regs, mem64>, Requires<[IsN64]>; +def LLD_P8 : LLBase<0x34, "lld", CPU64Regs, mem64>, Requires<[IsN64]> { + let isCodeGenOnly = 1; +} def SCD : SCBase<0x3c, "scd", CPU64Regs, mem>, Requires<[NotN64]>; -def SCD_P8 : SCBase<0x3c, "scd", CPU64Regs, mem64>, Requires<[IsN64]>; +def SCD_P8 : SCBase<0x3c, "scd", CPU64Regs, mem64>, Requires<[IsN64]> { + let isCodeGenOnly = 1; +} /// Jump and Branch Instructions def JR64 : JumpFR<0x00, 0x08, "jr", CPU64Regs>; @@ -142,11 +153,13 @@ def BEQ64 : CBranch<0x04, "beq", seteq, CPU64Regs>; def BNE64 : CBranch<0x05, "bne", setne, CPU64Regs>; def BGEZ64 : CBranchZero<0x01, 1, "bgez", 
setge, CPU64Regs>; def BGTZ64 : CBranchZero<0x07, 0, "bgtz", setgt, CPU64Regs>; -def BLEZ64 : CBranchZero<0x07, 0, "blez", setle, CPU64Regs>; +def BLEZ64 : CBranchZero<0x06, 0, "blez", setle, CPU64Regs>; def BLTZ64 : CBranchZero<0x01, 0, "bltz", setlt, CPU64Regs>; - +} +let DecoderNamespace = "Mips64" in def JALR64 : JumpLinkReg<0x00, 0x09, "jalr", CPU64Regs>; +let DecoderNamespace = "Mips64" in { /// Multiply and Divide Instructions. def DMULT : Mult64<0x1c, "dmult", IIImul>; def DMULTu : Mult64<0x1d, "dmultu", IIImul>; @@ -171,11 +184,13 @@ def DSBH : SubwordSwap<0x24, 0x2, "dsbh", CPU64Regs>; def DSHD : SubwordSwap<0x24, 0x5, "dshd", CPU64Regs>; def LEA_ADDiu64 : EffectiveAddress<"daddiu\t$rt, $addr", CPU64Regs, mem_ea_64>; - -let Uses = [SP_64] in +} +let Uses = [SP_64], DecoderNamespace = "Mips64" in def DynAlloc64 : EffectiveAddress<"daddiu\t$rt, $addr", CPU64Regs, mem_ea_64>, - Requires<[IsN64]>; - + Requires<[IsN64]> { + let isCodeGenOnly = 1; +} +let DecoderNamespace = "Mips64" in { def RDHWR64 : ReadHardware<CPU64Regs, HWRegs64>; def DEXT : ExtBase<3, "dext", CPU64Regs>; @@ -183,12 +198,12 @@ def DINS : InsBase<7, "dins", CPU64Regs>; def DSLL64_32 : FR<0x3c, 0x00, (outs CPU64Regs:$rd), (ins CPURegs:$rt), "dsll\t$rd, $rt, 32", [], IIAlu>; - def SLL64_32 : FR<0x0, 0x00, (outs CPU64Regs:$rd), (ins CPURegs:$rt), "sll\t$rd, $rt, 0", [], IIAlu>; +let isCodeGenOnly = 1 in def SLL64_64 : FR<0x0, 0x00, (outs CPU64Regs:$rd), (ins CPU64Regs:$rt), "sll\t$rd, $rt, 0", [], IIAlu>; - +} //===----------------------------------------------------------------------===// // Arbitrary patterns that map to one or more instructions //===----------------------------------------------------------------------===// diff --git a/lib/Target/Mips/MipsCondMov.td b/lib/Target/Mips/MipsCondMov.td index 075a3e807b1f..da336804e510 100644 --- a/lib/Target/Mips/MipsCondMov.td +++ b/lib/Target/Mips/MipsCondMov.td @@ -95,45 +95,65 @@ multiclass MovnPats<RegisterClass CRC, RegisterClass DRC, Instruction MOVNInst, // Instantiation of instructions. 
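// (Hedged editorial note.) The isCodeGenOnly overrides introduced below
// exist because the _P8 and 64-bit-register variants share bit-for-bit
// encodings with their base definitions; marking the duplicates
// isCodeGenOnly keeps them out of the generated decoder tables and so
// avoids conflicting table entries.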
def MOVZ_I_I : CondMovIntInt<CPURegs, CPURegs, 0x0a, "movz">; -let Predicates = [HasMips64] in { +let Predicates = [HasMips64],DecoderNamespace = "Mips64" in { def MOVZ_I_I64 : CondMovIntInt<CPURegs, CPU64Regs, 0x0a, "movz">; - def MOVZ_I64_I : CondMovIntInt<CPU64Regs, CPURegs, 0x0a, "movz">; - def MOVZ_I64_I64 : CondMovIntInt<CPU64Regs, CPU64Regs, 0x0a, "movz">; + def MOVZ_I64_I : CondMovIntInt<CPU64Regs, CPURegs, 0x0a, "movz"> { + let isCodeGenOnly = 1; + } + def MOVZ_I64_I64 : CondMovIntInt<CPU64Regs, CPU64Regs, 0x0a, "movz"> { + let isCodeGenOnly = 1; + } } def MOVN_I_I : CondMovIntInt<CPURegs, CPURegs, 0x0b, "movn">; -let Predicates = [HasMips64] in { +let Predicates = [HasMips64],DecoderNamespace = "Mips64" in { def MOVN_I_I64 : CondMovIntInt<CPURegs, CPU64Regs, 0x0b, "movn">; - def MOVN_I64_I : CondMovIntInt<CPU64Regs, CPURegs, 0x0b, "movn">; - def MOVN_I64_I64 : CondMovIntInt<CPU64Regs, CPU64Regs, 0x0b, "movn">; + def MOVN_I64_I : CondMovIntInt<CPU64Regs, CPURegs, 0x0b, "movn"> { + let isCodeGenOnly = 1; + } + def MOVN_I64_I64 : CondMovIntInt<CPU64Regs, CPU64Regs, 0x0b, "movn"> { + let isCodeGenOnly = 1; + } } def MOVZ_I_S : CondMovIntFP<CPURegs, FGR32, 16, 18, "movz.s">; def MOVZ_I64_S : CondMovIntFP<CPU64Regs, FGR32, 16, 18, "movz.s">, - Requires<[HasMips64]>; + Requires<[HasMips64]> { + let DecoderNamespace = "Mips64"; +} def MOVN_I_S : CondMovIntFP<CPURegs, FGR32, 16, 19, "movn.s">; def MOVN_I64_S : CondMovIntFP<CPU64Regs, FGR32, 16, 19, "movn.s">, - Requires<[HasMips64]>; + Requires<[HasMips64]> { + let DecoderNamespace = "Mips64"; +} let Predicates = [NotFP64bit] in { def MOVZ_I_D32 : CondMovIntFP<CPURegs, AFGR64, 17, 18, "movz.d">; def MOVN_I_D32 : CondMovIntFP<CPURegs, AFGR64, 17, 19, "movn.d">; } -let Predicates = [IsFP64bit] in { +let Predicates = [IsFP64bit],DecoderNamespace = "Mips64" in { def MOVZ_I_D64 : CondMovIntFP<CPURegs, FGR64, 17, 18, "movz.d">; - def MOVZ_I64_D64 : CondMovIntFP<CPU64Regs, FGR64, 17, 18, "movz.d">; + def MOVZ_I64_D64 : CondMovIntFP<CPU64Regs, FGR64, 17, 18, "movz.d"> { + let isCodeGenOnly = 1; + } def MOVN_I_D64 : CondMovIntFP<CPURegs, FGR64, 17, 19, "movn.d">; - def MOVN_I64_D64 : CondMovIntFP<CPU64Regs, FGR64, 17, 19, "movn.d">; + def MOVN_I64_D64 : CondMovIntFP<CPU64Regs, FGR64, 17, 19, "movn.d"> { + let isCodeGenOnly = 1; + } } def MOVT_I : CondMovFPInt<CPURegs, MipsCMovFP_T, 1, "movt">; def MOVT_I64 : CondMovFPInt<CPU64Regs, MipsCMovFP_T, 1, "movt">, - Requires<[HasMips64]>; + Requires<[HasMips64]> { + let DecoderNamespace = "Mips64"; +} def MOVF_I : CondMovFPInt<CPURegs, MipsCMovFP_F, 0, "movf">; def MOVF_I64 : CondMovFPInt<CPU64Regs, MipsCMovFP_F, 0, "movf">, - Requires<[HasMips64]>; + Requires<[HasMips64]> { + let DecoderNamespace = "Mips64"; +} def MOVT_S : CondMovFPFP<FGR32, MipsCMovFP_T, 16, 1, "movt.s">; def MOVF_S : CondMovFPFP<FGR32, MipsCMovFP_F, 16, 0, "movf.s">; @@ -142,7 +162,7 @@ let Predicates = [NotFP64bit] in { def MOVT_D32 : CondMovFPFP<AFGR64, MipsCMovFP_T, 17, 1, "movt.d">; def MOVF_D32 : CondMovFPFP<AFGR64, MipsCMovFP_F, 17, 0, "movf.d">; } -let Predicates = [IsFP64bit] in { +let Predicates = [IsFP64bit], DecoderNamespace = "Mips64" in { def MOVT_D64 : CondMovFPFP<FGR64, MipsCMovFP_T, 17, 1, "movt.d">; def MOVF_D64 : CondMovFPFP<FGR64, MipsCMovFP_F, 17, 0, "movf.d">; } diff --git a/lib/Target/Mips/MipsISelLowering.cpp b/lib/Target/Mips/MipsISelLowering.cpp index 6a23bc3d1d7c..ace47ab07910 100644 --- a/lib/Target/Mips/MipsISelLowering.cpp +++ b/lib/Target/Mips/MipsISelLowering.cpp @@ -34,6 +34,8 @@ #include 
"llvm/CodeGen/ValueTypes.h" #include "llvm/Support/Debug.h" #include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/raw_ostream.h" + using namespace llvm; // If I is a shifted mask, set the size (Size) and the first bit of the diff --git a/lib/Target/Mips/MipsInstrFPU.td b/lib/Target/Mips/MipsInstrFPU.td index b6559452fecf..14d8f1e7ba94 100644 --- a/lib/Target/Mips/MipsInstrFPU.td +++ b/lib/Target/Mips/MipsInstrFPU.td @@ -47,17 +47,17 @@ def MipsExtractElementF64 : SDNode<"MipsISD::ExtractElementF64", SDT_MipsExtractElementF64>; // Operand for printing out a condition code. -let PrintMethod = "printFCCOperand" in +let PrintMethod = "printFCCOperand", DecoderMethod = "DecodeCondCode" in def condcode : Operand<i32>; //===----------------------------------------------------------------------===// // Feature predicates. //===----------------------------------------------------------------------===// -def IsFP64bit : Predicate<"Subtarget.isFP64bit()">; -def NotFP64bit : Predicate<"!Subtarget.isFP64bit()">; -def IsSingleFloat : Predicate<"Subtarget.isSingleFloat()">; -def IsNotSingleFloat : Predicate<"!Subtarget.isSingleFloat()">; +def IsFP64bit : Predicate<"Subtarget.isFP64bit()">, AssemblerPredicate<"FeatureFP64Bit">; +def NotFP64bit : Predicate<"!Subtarget.isFP64bit()">, AssemblerPredicate<"!FeatureFP64Bit">; +def IsSingleFloat : Predicate<"Subtarget.isSingleFloat()">, AssemblerPredicate<"FeatureSingleFloat">; +def IsNotSingleFloat : Predicate<"!Subtarget.isSingleFloat()">, AssemblerPredicate<"!FeatureSingleFloat">; // FP immediate patterns. def fpimm0 : PatLeaf<(fpimm), [{ @@ -83,6 +83,7 @@ def fpimm0neg : PatLeaf<(fpimm), [{ //===----------------------------------------------------------------------===// // FP load. +let DecoderMethod = "DecodeFMem" in { class FPLoad<bits<6> op, string opstr, RegisterClass RC, Operand MemOpnd>: FMem<op, (outs RC:$ft), (ins MemOpnd:$addr), !strconcat(opstr, "\t$ft, $addr"), [(set RC:$ft, (load_a addr:$addr))], @@ -93,7 +94,7 @@ class FPStore<bits<6> op, string opstr, RegisterClass RC, Operand MemOpnd>: FMem<op, (outs), (ins RC:$ft, MemOpnd:$addr), !strconcat(opstr, "\t$ft, $addr"), [(store_a RC:$ft, addr:$addr)], IIStore>; - +} // FP indexed load. class FPIdxLoad<bits<6> funct, string opstr, RegisterClass DRC, RegisterClass PRC, PatFrag FOp>: @@ -118,11 +119,13 @@ multiclass FFR1_W_M<bits<6> funct, string opstr> { def _D32 : FFR1<funct, 17, opstr, "w.d", FGR32, AFGR64>, Requires<[NotFP64bit]>; def _D64 : FFR1<funct, 17, opstr, "w.d", FGR32, FGR64>, - Requires<[IsFP64bit]>; + Requires<[IsFP64bit]> { + let DecoderNamespace = "Mips64"; + } } // Instructions that convert an FP value to 64-bit fixed point. 
-let Predicates = [IsFP64bit] in +let Predicates = [IsFP64bit], DecoderNamespace = "Mips64" in multiclass FFR1_L_M<bits<6> funct, string opstr> { def _S : FFR1<funct, 16, opstr, "l.s", FGR64, FGR32>; def _D64 : FFR1<funct, 17, opstr, "l.d", FGR64, FGR64>; @@ -134,7 +137,9 @@ multiclass FFR1P_M<bits<6> funct, string opstr, SDNode OpNode> { def _D32 : FFR1P<funct, 17, opstr, "d", AFGR64, AFGR64, OpNode>, Requires<[NotFP64bit]>; def _D64 : FFR1P<funct, 17, opstr, "d", FGR64, FGR64, OpNode>, - Requires<[IsFP64bit]>; + Requires<[IsFP64bit]> { + let DecoderNamespace = "Mips64"; + } } multiclass FFR2P_M<bits<6> funct, string opstr, SDNode OpNode, bit isComm = 0> { @@ -143,9 +148,11 @@ multiclass FFR2P_M<bits<6> funct, string opstr, SDNode OpNode, bit isComm = 0> { def _D32 : FFR2P<funct, 17, opstr, "d", AFGR64, OpNode>, Requires<[NotFP64bit]>; def _D64 : FFR2P<funct, 17, opstr, "d", FGR64, OpNode>, - Requires<[IsFP64bit]>; + Requires<[IsFP64bit]> { + let DecoderNamespace = "Mips64"; } } +} // FP madd/msub/nmadd/nmsub instruction classes. class FMADDSUB<bits<3> funct, bits<3> fmt, string opstr, string fmtstr, @@ -172,9 +179,11 @@ defm CEIL_L : FFR1_L_M<0xa, "ceil">; defm FLOOR_W : FFR1_W_M<0xf, "floor">; defm FLOOR_L : FFR1_L_M<0xb, "floor">; defm CVT_W : FFR1_W_M<0x24, "cvt">; -defm CVT_L : FFR1_L_M<0x25, "cvt">; +//defm CVT_L : FFR1_L_M<0x25, "cvt">; def CVT_S_W : FFR1<0x20, 20, "cvt", "s.w", FGR32, FGR32>; +def CVT_L_S : FFR1<0x25, 16, "cvt", "l.s", FGR64, FGR32>; +def CVT_L_D64: FFR1<0x25, 17, "cvt", "l.d", FGR64, FGR64>; let Predicates = [NotFP64bit] in { def CVT_S_D32 : FFR1<0x20, 17, "cvt", "s.d", FGR32, AFGR64>; @@ -182,7 +191,7 @@ let Predicates = [NotFP64bit] in { def CVT_D32_S : FFR1<0x21, 16, "cvt", "d.s", AFGR64, FGR32>; } -let Predicates = [IsFP64bit] in { +let Predicates = [IsFP64bit], DecoderNamespace = "Mips64" in { def CVT_S_D64 : FFR1<0x20, 17, "cvt", "s.d", FGR32, FGR64>; def CVT_S_L : FFR1<0x20, 21, "cvt", "s.l", FGR32, FGR64>; def CVT_D64_W : FFR1<0x21, 20, "cvt", "d.w", FGR64, FGR32>; @@ -235,14 +244,20 @@ def FMOV_S : FFR1<0x6, 16, "mov", "s", FGR32, FGR32>; def FMOV_D32 : FFR1<0x6, 17, "mov", "d", AFGR64, AFGR64>, Requires<[NotFP64bit]>; def FMOV_D64 : FFR1<0x6, 17, "mov", "d", FGR64, FGR64>, - Requires<[IsFP64bit]>; + Requires<[IsFP64bit]> { + let DecoderNamespace = "Mips64"; +} /// Floating Point Memory Instructions -let Predicates = [IsN64] in { +let Predicates = [IsN64], DecoderNamespace = "Mips64" in { def LWC1_P8 : FPLoad<0x31, "lwc1", FGR32, mem64>; def SWC1_P8 : FPStore<0x39, "swc1", FGR32, mem64>; - def LDC164_P8 : FPLoad<0x35, "ldc1", FGR64, mem64>; - def SDC164_P8 : FPStore<0x3d, "sdc1", FGR64, mem64>; + def LDC164_P8 : FPLoad<0x35, "ldc1", FGR64, mem64> { + let isCodeGenOnly =1; + } + def SDC164_P8 : FPStore<0x3d, "sdc1", FGR64, mem64> { + let isCodeGenOnly =1; + } } let Predicates = [NotN64] in { @@ -250,7 +265,7 @@ let Predicates = [NotN64] in { def SWC1 : FPStore<0x39, "swc1", FGR32, mem>; } -let Predicates = [NotN64, HasMips64] in { +let Predicates = [NotN64, HasMips64], DecoderNamespace = "Mips64" in { def LDC164 : FPLoad<0x35, "ldc1", FGR64, mem>; def SDC164 : FPStore<0x3d, "sdc1", FGR64, mem>; } @@ -273,13 +288,13 @@ let Predicates = [HasMips32r2, NotMips64] in { def SDXC1 : FPIdxStore<0x9, "sdxc1", AFGR64, CPURegs, store_a>; } -let Predicates = [HasMips64, NotN64] in { +let Predicates = [HasMips64, NotN64], DecoderNamespace="Mips64" in { def LDXC164 : FPIdxLoad<0x1, "ldxc1", FGR64, CPURegs, load_a>; def SDXC164 : FPIdxStore<0x9, "sdxc1", FGR64, CPURegs, 
store_a>; } // n64 -let Predicates = [IsN64] in { +let Predicates = [IsN64], isCodeGenOnly=1 in { def LWXC1_P8 : FPIdxLoad<0x0, "lwxc1", FGR32, CPU64Regs, load_a>; def LUXC1_P8 : FPIdxLoad<0x5, "luxc1", FGR32, CPU64Regs, load_u>; def LDXC164_P8 : FPIdxLoad<0x1, "ldxc1", FGR64, CPU64Regs, load_a>; @@ -314,12 +329,12 @@ let Predicates = [HasMips32r2, NotFP64bit, NoNaNsFPMath] in { def NMSUB_D32 : FNMADDSUB<0x7, 1, "nmsub", "d", fsub, AFGR64>; } -let Predicates = [HasMips32r2, IsFP64bit] in { +let Predicates = [HasMips32r2, IsFP64bit], isCodeGenOnly=1 in { def MADD_D64 : FMADDSUB<0x4, 1, "madd", "d", fadd, FGR64>; def MSUB_D64 : FMADDSUB<0x5, 1, "msub", "d", fsub, FGR64>; } -let Predicates = [HasMips32r2, IsFP64bit, NoNaNsFPMath] in { +let Predicates = [HasMips32r2, IsFP64bit, NoNaNsFPMath], isCodeGenOnly=1 in { def NMADD_D64 : FNMADDSUB<0x6, 1, "nmadd", "d", fadd, FGR64>; def NMSUB_D64 : FNMADDSUB<0x7, 1, "nmsub", "d", fsub, FGR64>; } @@ -342,9 +357,10 @@ let isBranch=1, isTerminator=1, hasDelaySlot=1, base=0x8, Uses=[FCR31] in let Inst{16} = tf; } +let DecoderMethod = "DecodeBC1" in { def BC1F : FBRANCH<0, 0, MIPS_BRANCH_F, "bc1f">; def BC1T : FBRANCH<0, 1, MIPS_BRANCH_T, "bc1t">; - +} //===----------------------------------------------------------------------===// // Floating Point Flag Conditions //===----------------------------------------------------------------------===// @@ -376,7 +392,9 @@ class FCMP<bits<5> fmt, RegisterClass RC, string typestr> : let Defs=[FCR31] in { def FCMP_S32 : FCMP<0x10, FGR32, "s">; def FCMP_D32 : FCMP<0x11, AFGR64, "d">, Requires<[NotFP64bit]>; - def FCMP_D64 : FCMP<0x11, FGR64, "d">, Requires<[IsFP64bit]>; + def FCMP_D64 : FCMP<0x11, FGR64, "d">, Requires<[IsFP64bit]> { + let DecoderNamespace = "Mips64"; + } } //===----------------------------------------------------------------------===// @@ -438,13 +456,13 @@ let Predicates = [IsFP64bit] in { // Patterns for unaligned floating point loads and stores. let Predicates = [HasMips32r2Or64, NotN64] in { - def : Pat<(f32 (load_u CPURegs:$addr)), (LUXC1 CPURegs:$addr, ZERO)>; + def : Pat<(f32 (load_u CPURegs:$addr)), (LUXC1 CPURegs:$addr, ZERO)>; def : Pat<(store_u FGR32:$src, CPURegs:$addr), (SUXC1 FGR32:$src, CPURegs:$addr, ZERO)>; } let Predicates = [IsN64] in { - def : Pat<(f32 (load_u CPU64Regs:$addr)), (LUXC1_P8 CPU64Regs:$addr, ZERO_64)>; + def : Pat<(f32 (load_u CPU64Regs:$addr)), (LUXC1_P8 CPU64Regs:$addr, ZERO_64)>; def : Pat<(store_u FGR32:$src, CPU64Regs:$addr), (SUXC1_P8 FGR32:$src, CPU64Regs:$addr, ZERO_64)>; } diff --git a/lib/Target/Mips/MipsInstrFormats.td b/lib/Target/Mips/MipsInstrFormats.td index 455530389eba..841eba0ec0a2 100644 --- a/lib/Target/Mips/MipsInstrFormats.td +++ b/lib/Target/Mips/MipsInstrFormats.td @@ -45,6 +45,8 @@ class MipsInst<dag outs, dag ins, string asmstr, list<dag> pattern, let Namespace = "Mips"; + let Size = 4; + bits<6> Opcode = 0; // Top 6 bits are the 'opcode' field @@ -64,6 +66,10 @@ class MipsInst<dag outs, dag ins, string asmstr, list<dag> pattern, // TSFlags layout should be kept in sync with MipsInstrInfo.h. 
let TSFlags{3-0} = FormBits; + + let DecoderNamespace = "Mips"; + + field bits<32> SoftFail = 0; } // Mips Pseudo Instructions Format diff --git a/lib/Target/Mips/MipsInstrInfo.td b/lib/Target/Mips/MipsInstrInfo.td index be74f8e5230c..873d2bd99aed 100644 --- a/lib/Target/Mips/MipsInstrInfo.td +++ b/lib/Target/Mips/MipsInstrInfo.td @@ -121,21 +121,36 @@ def MipsIns : SDNode<"MipsISD::Ins", SDT_Ins>; //===----------------------------------------------------------------------===// // Mips Instruction Predicate Definitions. //===----------------------------------------------------------------------===// -def HasSEInReg : Predicate<"Subtarget.hasSEInReg()">; -def HasBitCount : Predicate<"Subtarget.hasBitCount()">; -def HasSwap : Predicate<"Subtarget.hasSwap()">; -def HasCondMov : Predicate<"Subtarget.hasCondMov()">; -def HasMips32 : Predicate<"Subtarget.hasMips32()">; -def HasMips32r2 : Predicate<"Subtarget.hasMips32r2()">; -def HasMips64 : Predicate<"Subtarget.hasMips64()">; -def HasMips32r2Or64 : Predicate<"Subtarget.hasMips32r2Or64()">; -def NotMips64 : Predicate<"!Subtarget.hasMips64()">; -def HasMips64r2 : Predicate<"Subtarget.hasMips64r2()">; -def IsN64 : Predicate<"Subtarget.isABI_N64()">; -def NotN64 : Predicate<"!Subtarget.isABI_N64()">; -def RelocStatic : Predicate<"TM.getRelocationModel() == Reloc::Static">; -def RelocPIC : Predicate<"TM.getRelocationModel() == Reloc::PIC_">; -def NoNaNsFPMath : Predicate<"TM.Options.NoNaNsFPMath">; +def HasSEInReg : Predicate<"Subtarget.hasSEInReg()">, + AssemblerPredicate<"FeatureSEInReg">; +def HasBitCount : Predicate<"Subtarget.hasBitCount()">, + AssemblerPredicate<"FeatureBitCount">; +def HasSwap : Predicate<"Subtarget.hasSwap()">, + AssemblerPredicate<"FeatureSwap">; +def HasCondMov : Predicate<"Subtarget.hasCondMov()">, + AssemblerPredicate<"FeatureCondMov">; +def HasMips32 : Predicate<"Subtarget.hasMips32()">, + AssemblerPredicate<"FeatureMips32">; +def HasMips32r2 : Predicate<"Subtarget.hasMips32r2()">, + AssemblerPredicate<"FeatureMips32r2">; +def HasMips64 : Predicate<"Subtarget.hasMips64()">, + AssemblerPredicate<"FeatureMips64">; +def HasMips32r2Or64 : Predicate<"Subtarget.hasMips32r2Or64()">, + AssemblerPredicate<"FeatureMips32r2,FeatureMips64">; +def NotMips64 : Predicate<"!Subtarget.hasMips64()">, + AssemblerPredicate<"!FeatureMips64">; +def HasMips64r2 : Predicate<"Subtarget.hasMips64r2()">, + AssemblerPredicate<"FeatureMips64r2">; +def IsN64 : Predicate<"Subtarget.isABI_N64()">, + AssemblerPredicate<"FeatureN64">; +def NotN64 : Predicate<"!Subtarget.isABI_N64()">, + AssemblerPredicate<"!FeatureN64">; +def RelocStatic : Predicate<"TM.getRelocationModel() == Reloc::Static">, + AssemblerPredicate<"FeatureMips32">; +def RelocPIC : Predicate<"TM.getRelocationModel() == Reloc::PIC_">, + AssemblerPredicate<"FeatureMips32">; +def NoNaNsFPMath : Predicate<"TM.Options.NoNaNsFPMath">, + AssemblerPredicate<"FeatureMips32">; //===----------------------------------------------------------------------===// // Mips Operand, Complex Patterns and Transformations Definitions. 
@@ -148,12 +163,15 @@ def jmptarget : Operand<OtherVT> { def brtarget : Operand<OtherVT> { let EncoderMethod = "getBranchTargetOpValue"; let OperandType = "OPERAND_PCREL"; + let DecoderMethod = "DecodeBranchTarget"; } def calltarget : Operand<iPTR> { let EncoderMethod = "getJumpTargetOpValue"; } def calltarget64: Operand<i64>; -def simm16 : Operand<i32>; +def simm16 : Operand<i32> { + let DecoderMethod= "DecodeSimm16"; +} def simm16_64 : Operand<i64>; def shamt : Operand<i32>; @@ -189,11 +207,13 @@ def mem_ea_64 : Operand<i64> { // size operand of ext instruction def size_ext : Operand<i32> { let EncoderMethod = "getSizeExtEncoding"; + let DecoderMethod = "DecodeExtSize"; } // size operand of ins instruction def size_ins : Operand<i32> { let EncoderMethod = "getSizeInsEncoding"; + let DecoderMethod = "DecodeInsSize"; } // Transformation Function - get the lower 16 bits. @@ -295,6 +315,7 @@ class ArithLogicR<bits<6> op, bits<6> func, string instr_asm, SDNode OpNode, [(set RC:$rd, (OpNode RC:$rs, RC:$rt))], itin> { let shamt = 0; let isCommutable = isComm; + let isReMaterializable = 1; } class ArithOverflowR<bits<6> op, bits<6> func, string instr_asm, @@ -310,7 +331,9 @@ class ArithLogicI<bits<6> op, string instr_asm, SDNode OpNode, Operand Od, PatLeaf imm_type, RegisterClass RC> : FI<op, (outs RC:$rt), (ins RC:$rs, Od:$imm16), !strconcat(instr_asm, "\t$rt, $rs, $imm16"), - [(set RC:$rt, (OpNode RC:$rs, imm_type:$imm16))], IIAlu>; + [(set RC:$rt, (OpNode RC:$rs, imm_type:$imm16))], IIAlu> { + let isReMaterializable = 1; +} class ArithOverflowI<bits<6> op, string instr_asm, SDNode OpNode, Operand Od, PatLeaf imm_type, RegisterClass RC> : @@ -366,6 +389,7 @@ class LoadUpper<bits<6> op, string instr_asm, RegisterClass RC, Operand Imm>: !strconcat(instr_asm, "\t$rt, $imm16"), [], IIAlu> { let rs = 0; let neverHasSideEffects = 1; + let isReMaterializable = 1; } class FMem<bits<6> op, dag outs, dag ins, string asmstr, list<dag> pattern, @@ -373,6 +397,7 @@ class FMem<bits<6> op, dag outs, dag ins, string asmstr, list<dag> pattern, bits<21> addr; let Inst{25-21} = addr{20-16}; let Inst{15-0} = addr{15-0}; + let DecoderMethod = "DecodeMem"; } // Memory Load/Store @@ -407,7 +432,10 @@ multiclass LoadM32<bits<6> op, string instr_asm, PatFrag OpNode, def #NAME# : LoadM<op, instr_asm, OpNode, CPURegs, mem, Pseudo>, Requires<[NotN64]>; def _P8 : LoadM<op, instr_asm, OpNode, CPURegs, mem64, Pseudo>, - Requires<[IsN64]>; + Requires<[IsN64]> { + let DecoderNamespace = "Mips64"; + let isCodeGenOnly = 1; + } } // 64-bit load. @@ -416,7 +444,10 @@ multiclass LoadM64<bits<6> op, string instr_asm, PatFrag OpNode, def #NAME# : LoadM<op, instr_asm, OpNode, CPU64Regs, mem, Pseudo>, Requires<[NotN64]>; def _P8 : LoadM<op, instr_asm, OpNode, CPU64Regs, mem64, Pseudo>, - Requires<[IsN64]>; + Requires<[IsN64]> { + let DecoderNamespace = "Mips64"; + let isCodeGenOnly = 1; + } } // 32-bit load. @@ -424,7 +455,10 @@ multiclass LoadUnAlign32<bits<6> op> { def #NAME# : LoadUnAlign<op, CPURegs, mem>, Requires<[NotN64]>; def _P8 : LoadUnAlign<op, CPURegs, mem64>, - Requires<[IsN64]>; + Requires<[IsN64]> { + let DecoderNamespace = "Mips64"; + let isCodeGenOnly = 1; + } } // 32-bit store. 
multiclass StoreM32<bits<6> op, string instr_asm, PatFrag OpNode, @@ -432,7 +466,10 @@ multiclass StoreM32<bits<6> op, string instr_asm, PatFrag OpNode, def #NAME# : StoreM<op, instr_asm, OpNode, CPURegs, mem, Pseudo>, Requires<[NotN64]>; def _P8 : StoreM<op, instr_asm, OpNode, CPURegs, mem64, Pseudo>, - Requires<[IsN64]>; + Requires<[IsN64]> { + let DecoderNamespace = "Mips64"; + let isCodeGenOnly = 1; + } } // 64-bit store. @@ -441,7 +478,10 @@ multiclass StoreM64<bits<6> op, string instr_asm, PatFrag OpNode, def #NAME# : StoreM<op, instr_asm, OpNode, CPU64Regs, mem, Pseudo>, Requires<[NotN64]>; def _P8 : StoreM<op, instr_asm, OpNode, CPU64Regs, mem64, Pseudo>, - Requires<[IsN64]>; + Requires<[IsN64]> { + let DecoderNamespace = "Mips64"; + let isCodeGenOnly = 1; + } } // 32-bit store. @@ -449,7 +489,10 @@ multiclass StoreUnAlign32<bits<6> op> { def #NAME# : StoreUnAlign<op, CPURegs, mem>, Requires<[NotN64]>; def _P8 : StoreUnAlign<op, CPURegs, mem64>, - Requires<[IsN64]>; + Requires<[IsN64]> { + let DecoderNamespace = "Mips64"; + let isCodeGenOnly = 1; + } } // Conditional Branch @@ -499,6 +542,7 @@ class JumpFJ<bits<6> op, string instr_asm>: let isBarrier=1; let hasDelaySlot = 1; let Predicates = [RelocStatic]; + let DecoderMethod = "DecodeJumpTarget"; } // Unconditional branch @@ -529,7 +573,9 @@ let isCall=1, hasDelaySlot=1 in { class JumpLink<bits<6> op, string instr_asm>: FJ<op, (outs), (ins calltarget:$target, variable_ops), !strconcat(instr_asm, "\t$target"), [(MipsJmpLink imm:$target)], - IIBranch>; + IIBranch> { + let DecoderMethod = "DecodeJumpTarget"; + } class JumpLinkReg<bits<6> op, bits<6> func, string instr_asm, RegisterClass RC>: @@ -685,7 +731,9 @@ class Atomic2Ops<PatFrag Op, string Opstr, RegisterClass DRC, multiclass Atomic2Ops32<PatFrag Op, string Opstr> { def #NAME# : Atomic2Ops<Op, Opstr, CPURegs, CPURegs>, Requires<[NotN64]>; - def _P8 : Atomic2Ops<Op, Opstr, CPURegs, CPU64Regs>, Requires<[IsN64]>; + def _P8 : Atomic2Ops<Op, Opstr, CPURegs, CPU64Regs>, Requires<[IsN64]> { + let DecoderNamespace = "Mips64"; + } } // Atomic Compare & Swap. 
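The FMem class in the previous hunk fixes the I-type memory format (a 21-bit addr operand split into a 5-bit base register and a 16-bit signed offset) and names DecodeMem as its disassembler hook; the LoadM/StoreM multiclasses here then stamp out NotN64/IsN64 pairs over it. As a worked illustration of that bit layout, a hypothetical stand-alone encoder (FMem itself only routes the addr bits; the opcode and rt fields come from the base instruction classes):

#include <cstdint>

// Assemble one I-type load/store word: op | base | rt | offset.
static uint32_t encodeFMem(uint32_t Op, uint32_t Rt, uint32_t Addr21) {
  uint32_t Inst = (Op & 0x3F) << 26;      // Inst{31-26}: opcode
  Inst |= ((Addr21 >> 16) & 0x1F) << 21;  // Inst{25-21} = addr{20-16}: base
  Inst |= (Rt & 0x1F) << 16;              // Inst{20-16}: target register
  Inst |= Addr21 & 0xFFFF;                // Inst{15-0}  = addr{15-0}: offset
  return Inst;
}

For example, encodeFMem(0x23, 4, (29u << 16) | 0x0010) yields 0x8FA40010, which is lw $4, 16($29).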
@@ -697,7 +745,9 @@ class AtomicCmpSwap<PatFrag Op, string Width, RegisterClass DRC, multiclass AtomicCmpSwap32<PatFrag Op, string Width> { def #NAME# : AtomicCmpSwap<Op, Width, CPURegs, CPURegs>, Requires<[NotN64]>; - def _P8 : AtomicCmpSwap<Op, Width, CPURegs, CPU64Regs>, Requires<[IsN64]>; + def _P8 : AtomicCmpSwap<Op, Width, CPURegs, CPU64Regs>, Requires<[IsN64]> { + let DecoderNamespace = "Mips64"; + } } class LLBase<bits<6> Opc, string opstring, RegisterClass RC, Operand Mem> : @@ -868,9 +918,14 @@ def SYNC : MipsInst<(outs), (ins i32imm:$stype), "sync $stype", /// Load-linked, Store-conditional def LL : LLBase<0x30, "ll", CPURegs, mem>, Requires<[NotN64]>; -def LL_P8 : LLBase<0x30, "ll", CPURegs, mem64>, Requires<[IsN64]>; +def LL_P8 : LLBase<0x30, "ll", CPURegs, mem64>, Requires<[IsN64]> { + let DecoderNamespace = "Mips64"; +} + def SC : SCBase<0x38, "sc", CPURegs, mem>, Requires<[NotN64]>; -def SC_P8 : SCBase<0x38, "sc", CPURegs, mem64>, Requires<[IsN64]>; +def SC_P8 : SCBase<0x38, "sc", CPURegs, mem64>, Requires<[IsN64]> { + let DecoderNamespace = "Mips64"; +} /// Jump and Branch Instructions def J : JumpFJ<0x02, "j">; @@ -888,7 +943,7 @@ def JALR : JumpLinkReg<0x00, 0x09, "jalr", CPURegs>; def BGEZAL : BranchLink<"bgezal", 0x11, CPURegs>; def BLTZAL : BranchLink<"bltzal", 0x10, CPURegs>; -let isReturn=1, isTerminator=1, hasDelaySlot=1, +let isReturn=1, isTerminator=1, hasDelaySlot=1, isCodeGenOnly=1, isBarrier=1, hasCtrlDep=1, rd=0, rt=0, shamt=0 in def RET : FR <0x00, 0x08, (outs), (ins CPURegs:$target), "jr\t$target", [(MipsRet CPURegs:$target)], IIBranch>; @@ -923,13 +978,17 @@ let addr=0 in // instructions. The same not happens for stack address copies, so an // add op with mem ComplexPattern is used and the stack address copy // can be matched. It's similar to Sparc LEA_ADDRi -def LEA_ADDiu : EffectiveAddress<"addiu\t$rt, $addr", CPURegs, mem_ea>; +def LEA_ADDiu : EffectiveAddress<"addiu\t$rt, $addr", CPURegs, mem_ea> { + let isCodeGenOnly = 1; +} // DynAlloc node points to dynamically allocated stack space. // $sp is added to the list of implicitly used registers to prevent dead code // elimination from removing instructions that modify $sp. let Uses = [SP] in -def DynAlloc : EffectiveAddress<"addiu\t$rt, $addr", CPURegs, mem_ea>; +def DynAlloc : EffectiveAddress<"addiu\t$rt, $addr", CPURegs, mem_ea> { + let isCodeGenOnly = 1; +} // MADD*/MSUB* def MADD : MArithR<0, "madd", MipsMAdd, 1>; diff --git a/lib/Target/Mips/MipsTargetMachine.cpp b/lib/Target/Mips/MipsTargetMachine.cpp index ad022311ed7d..858723bad9d7 100644 --- a/lib/Target/Mips/MipsTargetMachine.cpp +++ b/lib/Target/Mips/MipsTargetMachine.cpp @@ -117,18 +117,16 @@ TargetPassConfig *MipsTargetMachine::createPassConfig(PassManagerBase &PM) { // Install an instruction selector pass using // the ISelDag to gen Mips code. -bool MipsPassConfig::addInstSelector() -{ - PM.add(createMipsISelDag(getMipsTargetMachine())); +bool MipsPassConfig::addInstSelector() { + PM->add(createMipsISelDag(getMipsTargetMachine())); return false; } // Implemented by targets that want to run passes immediately before // machine code is emitted. return true if -print-machineinstrs should // print out the code after the passes. 
-bool MipsPassConfig::addPreEmitPass() -{ - PM.add(createMipsDelaySlotFillerPass(getMipsTargetMachine())); +bool MipsPassConfig::addPreEmitPass() { + PM->add(createMipsDelaySlotFillerPass(getMipsTargetMachine())); return true; } @@ -136,12 +134,12 @@ bool MipsPassConfig::addPreRegAlloc() { // Do not restore $gp if target is Mips64. // In N32/64, $gp is a callee-saved register. if (!getMipsSubtarget().hasMips64()) - PM.add(createMipsEmitGPRestorePass(getMipsTargetMachine())); + PM->add(createMipsEmitGPRestorePass(getMipsTargetMachine())); return true; } bool MipsPassConfig::addPreSched2() { - PM.add(createMipsExpandPseudoPass(getMipsTargetMachine())); + PM->add(createMipsExpandPseudoPass(getMipsTargetMachine())); return true; } diff --git a/lib/Target/PTX/PTXTargetMachine.cpp b/lib/Target/PTX/PTXTargetMachine.cpp index c55a658dc375..97b8de1a0b44 100644 --- a/lib/Target/PTX/PTXTargetMachine.cpp +++ b/lib/Target/PTX/PTXTargetMachine.cpp @@ -130,7 +130,7 @@ TargetPassConfig *PTXTargetMachine::createPassConfig(PassManagerBase &PM) { } bool PTXPassConfig::addInstSelector() { - PM.add(createPTXISelDag(getPTXTargetMachine(), getOptLevel())); + PM->add(createPTXISelDag(getPTXTargetMachine(), getOptLevel())); return false; } @@ -145,7 +145,7 @@ void PTXPassConfig::addOptimizedRegAlloc(FunctionPass *RegAllocPass) { bool PTXPassConfig::addPostRegAlloc() { // PTXMFInfoExtract must after register allocation! - //PM.add(createPTXMFInfoExtract(getPTXTargetMachine())); + //PM->add(createPTXMFInfoExtract(getPTXTargetMachine())); return false; } @@ -159,7 +159,7 @@ void PTXPassConfig::addMachineLateOptimization() { } bool PTXPassConfig::addPreEmitPass() { - PM.add(createPTXMFInfoExtract(getPTXTargetMachine(), getOptLevel())); - PM.add(createPTXFPRoundingModePass(getPTXTargetMachine(), getOptLevel())); + PM->add(createPTXMFInfoExtract(getPTXTargetMachine(), getOptLevel())); + PM->add(createPTXFPRoundingModePass(getPTXTargetMachine(), getOptLevel())); return true; } diff --git a/lib/Target/PowerPC/PPCCallingConv.td b/lib/Target/PowerPC/PPCCallingConv.td index 9883c2e42995..b2b53648561f 100644 --- a/lib/Target/PowerPC/PPCCallingConv.td +++ b/lib/Target/PowerPC/PPCCallingConv.td @@ -12,10 +12,6 @@ // //===----------------------------------------------------------------------===// -/// CCIfSubtarget - Match if the current subtarget has a feature F. -class CCIfSubtarget<string F, CCAction A> - : CCIf<!strconcat("State.getTarget().getSubtarget<PPCSubtarget>().", F), A>; - //===----------------------------------------------------------------------===// // Return Value Calling Convention //===----------------------------------------------------------------------===// diff --git a/lib/Target/PowerPC/PPCTargetMachine.cpp b/lib/Target/PowerPC/PPCTargetMachine.cpp index d11397669912..50f3db8b27fd 100644 --- a/lib/Target/PowerPC/PPCTargetMachine.cpp +++ b/lib/Target/PowerPC/PPCTargetMachine.cpp @@ -98,13 +98,13 @@ TargetPassConfig *PPCTargetMachine::createPassConfig(PassManagerBase &PM) { bool PPCPassConfig::addInstSelector() { // Install an instruction selector. - PM.add(createPPCISelDag(getPPCTargetMachine())); + PM->add(createPPCISelDag(getPPCTargetMachine())); return false; } bool PPCPassConfig::addPreEmitPass() { // Must run branch selection immediately preceding the asm printer. 
- PM.add(createPPCBranchSelectionPass()); + PM->add(createPPCBranchSelectionPass()); return false; } diff --git a/lib/Target/Sparc/SparcTargetMachine.cpp b/lib/Target/Sparc/SparcTargetMachine.cpp index 6f313562c101..cc253077a980 100644 --- a/lib/Target/Sparc/SparcTargetMachine.cpp +++ b/lib/Target/Sparc/SparcTargetMachine.cpp @@ -59,7 +59,7 @@ TargetPassConfig *SparcTargetMachine::createPassConfig(PassManagerBase &PM) { } bool SparcPassConfig::addInstSelector() { - PM.add(createSparcISelDag(getSparcTargetMachine())); + PM->add(createSparcISelDag(getSparcTargetMachine())); return false; } @@ -67,8 +67,8 @@ bool SparcPassConfig::addInstSelector() { /// passes immediately before machine code is emitted. This should return /// true if -print-machineinstrs should print out the code after the passes. bool SparcPassConfig::addPreEmitPass(){ - PM.add(createSparcFPMoverPass(getSparcTargetMachine())); - PM.add(createSparcDelaySlotFillerPass(getSparcTargetMachine())); + PM->add(createSparcFPMoverPass(getSparcTargetMachine())); + PM->add(createSparcDelaySlotFillerPass(getSparcTargetMachine())); return true; } diff --git a/lib/Target/TargetLibraryInfo.cpp b/lib/Target/TargetLibraryInfo.cpp index 269958fd7f17..ec95ad4deeab 100644 --- a/lib/Target/TargetLibraryInfo.cpp +++ b/lib/Target/TargetLibraryInfo.cpp @@ -56,7 +56,7 @@ const char* TargetLibraryInfo::StandardNames[LibFunc::NumLibFuncs] = "exp2f", "expm1", "expm1l", - "expl1f", + "expm1f", "fabs", "fabsl", "fabsf", @@ -95,6 +95,9 @@ const char* TargetLibraryInfo::StandardNames[LibFunc::NumLibFuncs] = "rint", "rintf", "rintl", + "round", + "roundf", + "roundl", "sin", "sinl", "sinf", @@ -155,6 +158,81 @@ static void initialize(TargetLibraryInfo &TLI, const Triple &T) { TLI.setUnavailable(LibFunc::siprintf); TLI.setUnavailable(LibFunc::fiprintf); } + + if (T.getOS() == Triple::Win32) { + // Win32 does not support long double + TLI.setUnavailable(LibFunc::acosl); + TLI.setUnavailable(LibFunc::asinl); + TLI.setUnavailable(LibFunc::atanl); + TLI.setUnavailable(LibFunc::atan2l); + TLI.setUnavailable(LibFunc::ceill); + TLI.setUnavailable(LibFunc::copysignl); + TLI.setUnavailable(LibFunc::cosl); + TLI.setUnavailable(LibFunc::coshl); + TLI.setUnavailable(LibFunc::expl); + TLI.setUnavailable(LibFunc::fabsf); // Win32 and Win64 both lack fabsf + TLI.setUnavailable(LibFunc::fabsl); + TLI.setUnavailable(LibFunc::floorl); + TLI.setUnavailable(LibFunc::fmodl); + TLI.setUnavailable(LibFunc::logl); + TLI.setUnavailable(LibFunc::powl); + TLI.setUnavailable(LibFunc::sinl); + TLI.setUnavailable(LibFunc::sinhl); + TLI.setUnavailable(LibFunc::sqrtl); + TLI.setUnavailable(LibFunc::tanl); + TLI.setUnavailable(LibFunc::tanhl); + + // Win32 only has C89 math + TLI.setUnavailable(LibFunc::exp2); + TLI.setUnavailable(LibFunc::exp2f); + TLI.setUnavailable(LibFunc::exp2l); + TLI.setUnavailable(LibFunc::expm1); + TLI.setUnavailable(LibFunc::expm1f); + TLI.setUnavailable(LibFunc::expm1l); + TLI.setUnavailable(LibFunc::log2); + TLI.setUnavailable(LibFunc::log2f); + TLI.setUnavailable(LibFunc::log2l); + TLI.setUnavailable(LibFunc::log1p); + TLI.setUnavailable(LibFunc::log1pf); + TLI.setUnavailable(LibFunc::log1pl); + TLI.setUnavailable(LibFunc::nearbyint); + TLI.setUnavailable(LibFunc::nearbyintf); + TLI.setUnavailable(LibFunc::nearbyintl); + TLI.setUnavailable(LibFunc::rint); + TLI.setUnavailable(LibFunc::rintf); + TLI.setUnavailable(LibFunc::rintl); + TLI.setUnavailable(LibFunc::round); + TLI.setUnavailable(LibFunc::roundf); + TLI.setUnavailable(LibFunc::roundl); + 
TLI.setUnavailable(LibFunc::trunc); + TLI.setUnavailable(LibFunc::truncf); + TLI.setUnavailable(LibFunc::truncl); + + // Win32 provides some C99 math with mangled names + TLI.setAvailableWithName(LibFunc::copysign, "_copysign"); + + if (T.getArch() == Triple::x86) { + // Win32 on x86 implements single-precision math functions as macros + TLI.setUnavailable(LibFunc::acosf); + TLI.setUnavailable(LibFunc::asinf); + TLI.setUnavailable(LibFunc::atanf); + TLI.setUnavailable(LibFunc::atan2f); + TLI.setUnavailable(LibFunc::ceilf); + TLI.setUnavailable(LibFunc::copysignf); + TLI.setUnavailable(LibFunc::cosf); + TLI.setUnavailable(LibFunc::coshf); + TLI.setUnavailable(LibFunc::expf); + TLI.setUnavailable(LibFunc::floorf); + TLI.setUnavailable(LibFunc::fmodf); + TLI.setUnavailable(LibFunc::logf); + TLI.setUnavailable(LibFunc::powf); + TLI.setUnavailable(LibFunc::sinf); + TLI.setUnavailable(LibFunc::sinhf); + TLI.setUnavailable(LibFunc::sqrtf); + TLI.setUnavailable(LibFunc::tanf); + TLI.setUnavailable(LibFunc::tanhf); + } + } } diff --git a/lib/Target/X86/Utils/X86ShuffleDecode.cpp b/lib/Target/X86/Utils/X86ShuffleDecode.cpp index 32c722acc437..a802333002d8 100644 --- a/lib/Target/X86/Utils/X86ShuffleDecode.cpp +++ b/lib/Target/X86/Utils/X86ShuffleDecode.cpp @@ -169,6 +169,9 @@ void DecodeUNPCKLMask(EVT VT, SmallVectorImpl<int> &ShuffleMask) { void DecodeVPERM2X128Mask(EVT VT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask) { + if (Imm & 0x88) + return; // Not a shuffle + unsigned HalfSize = VT.getVectorNumElements()/2; unsigned FstHalfBegin = (Imm & 0x3) * HalfSize; unsigned SndHalfBegin = ((Imm >> 4) & 0x3) * HalfSize; diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index 9b83aade3381..04299f300801 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -2935,6 +2935,7 @@ static SDValue getTargetShuffleNode(unsigned Opc, DebugLoc dl, EVT VT, case X86ISD::PSHUFHW: case X86ISD::PSHUFLW: case X86ISD::VPERMILP: + case X86ISD::VPERMI: return DAG.getNode(Opc, dl, VT, V1, DAG.getConstant(TargetMask, MVT::i8)); } } @@ -3976,6 +3977,27 @@ unsigned X86::getInsertVINSERTF128Immediate(SDNode *N) { return Index / NumElemsPerChunk; } +/// getShuffleCLImmediate - Return the appropriate immediate to shuffle +/// the specified VECTOR_SHUFFLE mask with VPERMQ and VPERMPD instructions. +/// Handles 256-bit. +static unsigned getShuffleCLImmediate(ShuffleVectorSDNode *N) { + EVT VT = N->getValueType(0); + + unsigned NumElts = VT.getVectorNumElements(); + + assert((VT.is256BitVector() && NumElts == 4) && + "Unsupported vector type for VPERMQ/VPERMPD"); + + unsigned Mask = 0; + for (unsigned i = 0; i != NumElts; ++i) { + int Elt = N->getMaskElt(i); + if (Elt < 0) + continue; + Mask |= Elt << (i*2); + } + + return Mask; +} /// isZeroNode - Returns true if Elt is a constant zero or a floating point /// constant +0.0. 
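Two of the X86 changes above turn on the layout of VPERM immediates. DecodeVPERM2X128Mask now refuses immediates with Imm & 0x88 set because bits 3 and 7 of the vperm2f128/vperm2i128 immediate zero the low and high 128-bit halves respectively, so the result is no longer expressible as a plain two-source shuffle mask (getTargetShuffleMask, below, is taught to cope with the resulting empty mask). The new getShuffleCLImmediate does the reverse packing for VPERMQ/VPERMPD: two bits per lane, lane 0 in the low bits. A stand-alone sketch of that packing, with a hypothetical helper name:

// Pack a 4-element shuffle mask into the VPERMQ/VPERMPD immediate;
// undef lanes (negative entries) simply contribute zero bits, as in
// getShuffleCLImmediate above.
static unsigned packVPermImmediate(const int (&Mask)[4]) {
  unsigned Imm = 0;
  for (unsigned i = 0; i != 4; ++i)
    if (Mask[i] >= 0)
      Imm |= unsigned(Mask[i]) << (i * 2);
  return Imm;
}

The mask <3,0,1,2>, for instance, packs to binary 10 01 00 11, i.e. vpermq $0x93.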
bool X86::isZeroNode(SDValue Elt) { @@ -4408,6 +4430,7 @@ static bool getTargetShuffleMask(SDNode *N, EVT VT, case X86ISD::VPERM2X128: ImmN = N->getOperand(N->getNumOperands()-1); DecodeVPERM2X128Mask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask); + if (Mask.empty()) return false; break; case X86ISD::MOVDDUP: case X86ISD::MOVLHPD: @@ -6628,6 +6651,23 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const { if (BlendOp.getNode()) return BlendOp; + if (V2IsUndef && HasAVX2 && (VT == MVT::v8i32 || VT == MVT::v8f32)) { + SmallVector<SDValue, 8> permclMask; + for (unsigned i = 0; i != 8; ++i) { + permclMask.push_back(DAG.getConstant((M[i]>=0) ? M[i] : 0, MVT::i32)); + } + SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i32, + &permclMask[0], 8); + // Bitcast is for VPERMPS since mask is v8i32 but node takes v8f32 + return DAG.getNode(X86ISD::VPERMV, dl, VT, + DAG.getNode(ISD::BITCAST, dl, VT, Mask), V1); + } + + if (V2IsUndef && HasAVX2 && (VT == MVT::v4i64 || VT == MVT::v4f64)) + return getTargetShuffleNode(X86ISD::VPERMI, dl, VT, V1, + getShuffleCLImmediate(SVOp), DAG); + + //===--------------------------------------------------------------------===// // Since no target specific shuffle was selected for this generic one, // lower it into other known shuffles. FIXME: this isn't true yet, but @@ -9552,12 +9592,12 @@ X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const case Intrinsic::x86_avx2_vperm2i128: return DAG.getNode(X86ISD::VPERM2X128, dl, Op.getValueType(), Op.getOperand(1), Op.getOperand(2), Op.getOperand(3)); - case Intrinsic::x86_avx_vpermil_ps: - case Intrinsic::x86_avx_vpermil_pd: - case Intrinsic::x86_avx_vpermil_ps_256: - case Intrinsic::x86_avx_vpermil_pd_256: - return DAG.getNode(X86ISD::VPERMILP, dl, Op.getValueType(), - Op.getOperand(1), Op.getOperand(2)); + case Intrinsic::x86_avx2_permd: + case Intrinsic::x86_avx2_permps: + // Operands intentionally swapped. Mask is last operand to intrinsic, + // but second operand for node/instruction. + return DAG.getNode(X86ISD::VPERMV, dl, Op.getValueType(), + Op.getOperand(2), Op.getOperand(1)); // ptest and testp intrinsics.
The intrinsic these come from are designed to // return an integer value, not just an instruction so lower it to the ptest @@ -11141,6 +11181,8 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const { case X86ISD::VBROADCAST: return "X86ISD::VBROADCAST"; case X86ISD::VPERMILP: return "X86ISD::VPERMILP"; case X86ISD::VPERM2X128: return "X86ISD::VPERM2X128"; + case X86ISD::VPERMV: return "X86ISD::VPERMV"; + case X86ISD::VPERMI: return "X86ISD::VPERMI"; case X86ISD::PMULUDQ: return "X86ISD::PMULUDQ"; case X86ISD::VASTART_SAVE_XMM_REGS: return "X86ISD::VASTART_SAVE_XMM_REGS"; case X86ISD::VAARG_64: return "X86ISD::VAARG_64"; @@ -11298,14 +11340,15 @@ X86TargetLowering::EmitAtomicBitwiseWithCustomInserter(MachineInstr *bInstr, unsigned notOpc, unsigned EAXreg, const TargetRegisterClass *RC, - bool invSrc) const { + bool Invert) const { // For the atomic bitwise operator, we generate // thisMBB: // newMBB: // ld t1 = [bitinstr.addr] // op t2 = t1, [bitinstr.val] + // not t3 = t2 (if Invert) // mov EAX = t1 - // lcs dest = [bitinstr.addr], t2 [EAX is implicit] + // lcs dest = [bitinstr.addr], t3 [EAX is implicit] // bz newMBB // fallthrough -->nextMBB const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); @@ -11353,13 +11396,6 @@ X86TargetLowering::EmitAtomicBitwiseWithCustomInserter(MachineInstr *bInstr, for (int i=0; i <= lastAddrIndx; ++i) (*MIB).addOperand(*argOpers[i]); - unsigned tt = F->getRegInfo().createVirtualRegister(RC); - if (invSrc) { - MIB = BuildMI(newMBB, dl, TII->get(notOpc), tt).addReg(t1); - } - else - tt = t1; - unsigned t2 = F->getRegInfo().createVirtualRegister(RC); assert((argOpers[valArgIndx]->isReg() || argOpers[valArgIndx]->isImm()) && @@ -11368,16 +11404,23 @@ X86TargetLowering::EmitAtomicBitwiseWithCustomInserter(MachineInstr *bInstr, MIB = BuildMI(newMBB, dl, TII->get(regOpc), t2); else MIB = BuildMI(newMBB, dl, TII->get(immOpc), t2); - MIB.addReg(tt); + MIB.addReg(t1); (*MIB).addOperand(*argOpers[valArgIndx]); + unsigned t3 = F->getRegInfo().createVirtualRegister(RC); + if (Invert) { + MIB = BuildMI(newMBB, dl, TII->get(notOpc), t3).addReg(t2); + } + else + t3 = t2; + MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), EAXreg); MIB.addReg(t1); MIB = BuildMI(newMBB, dl, TII->get(CXchgOpc)); for (int i=0; i <= lastAddrIndx; ++i) (*MIB).addOperand(*argOpers[i]); - MIB.addReg(t2); + MIB.addReg(t3); assert(bInstr->hasOneMemOperand() && "Unexpected number of memoperand"); (*MIB).setMemRefs(bInstr->memoperands_begin(), bInstr->memoperands_end()); @@ -11400,7 +11443,7 @@ X86TargetLowering::EmitAtomicBit6432WithCustomInserter(MachineInstr *bInstr, unsigned regOpcH, unsigned immOpcL, unsigned immOpcH, - bool invSrc) const { + bool Invert) const { // For the atomic bitwise operator, we generate // thisMBB (instructions are in pairs, except cmpxchg8b) // ld t1,t2 = [bitinstr.addr] @@ -11408,6 +11451,7 @@ X86TargetLowering::EmitAtomicBit6432WithCustomInserter(MachineInstr *bInstr, // out1, out2 = phi (thisMBB, t1/t2) (newMBB, t3/t4) // op t5, t6 <- out1, out2, [bitinstr.val] // (for SWAP, substitute: mov t5, t6 <- [bitinstr.val]) + // neg t7, t8 < t5, t6 (if Invert) // mov ECX, EBX <- t5, t6 // mov EAX, EDX <- t1, t2 // cmpxchg8b [bitinstr.addr] [EAX, EDX, EBX, ECX implicit] @@ -11491,16 +11535,9 @@ X86TargetLowering::EmitAtomicBit6432WithCustomInserter(MachineInstr *bInstr, .addReg(t2).addMBB(thisMBB).addReg(t4).addMBB(newMBB); // The subsequent operations should be using the destination registers of - //the PHI instructions. 
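The invSrc to Invert change above is a behavioral fix, not just a rename: the removed code applied the NOT to the loaded value before the bitwise op, so atomic NAND came out as (~x) & v, whereas LLVM defines atomicrmw nand as ~(x & v). The new t3 applies the NOT after the op, and the 64-bit cmpxchg8b path in the hunk that continues below gets the same reordering via t7/t8. Modeled as plain C++ rather than MachineInstr building, the loop the custom inserter now emits behaves like:

#include <atomic>
#include <cstdint>

// Behavioral model of the emitted load/op/not/cmpxchg loop for
// 'atomicrmw nand'; returns the previous value, like the instruction.
static uint32_t atomicNand(std::atomic<uint32_t> &Addr, uint32_t Val) {
  uint32_t Old = Addr.load();
  uint32_t New;
  do {
    New = ~(Old & Val);  // the op (t2) first, then the Invert step (t3)
  } while (!Addr.compare_exchange_weak(Old, New));
  return Old;
}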
- if (invSrc) { - t1 = F->getRegInfo().createVirtualRegister(RC); - t2 = F->getRegInfo().createVirtualRegister(RC); - MIB = BuildMI(newMBB, dl, TII->get(NotOpc), t1).addReg(dest1Oper.getReg()); - MIB = BuildMI(newMBB, dl, TII->get(NotOpc), t2).addReg(dest2Oper.getReg()); - } else { - t1 = dest1Oper.getReg(); - t2 = dest2Oper.getReg(); - } + // the PHI instructions. + t1 = dest1Oper.getReg(); + t2 = dest2Oper.getReg(); int valArgIndx = lastAddrIndx + 1; assert((argOpers[valArgIndx]->isReg() || @@ -11527,15 +11564,26 @@ X86TargetLowering::EmitAtomicBit6432WithCustomInserter(MachineInstr *bInstr, MIB.addReg(t2); (*MIB).addOperand(*argOpers[valArgIndx + 1]); + unsigned t7, t8; + if (Invert) { + t7 = F->getRegInfo().createVirtualRegister(RC); + t8 = F->getRegInfo().createVirtualRegister(RC); + MIB = BuildMI(newMBB, dl, TII->get(NotOpc), t7).addReg(t5); + MIB = BuildMI(newMBB, dl, TII->get(NotOpc), t8).addReg(t6); + } else { + t7 = t5; + t8 = t6; + } + MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), X86::EAX); MIB.addReg(t1); MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), X86::EDX); MIB.addReg(t2); MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), X86::EBX); - MIB.addReg(t5); + MIB.addReg(t7); MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), X86::ECX); - MIB.addReg(t6); + MIB.addReg(t8); MIB = BuildMI(newMBB, dl, TII->get(X86::LCMPXCHG8B)); for (int i=0; i <= lastAddrIndx; ++i) diff --git a/lib/Target/X86/X86ISelLowering.h b/lib/Target/X86/X86ISelLowering.h index 4e0073365a73..09116e88af57 100644 --- a/lib/Target/X86/X86ISelLowering.h +++ b/lib/Target/X86/X86ISelLowering.h @@ -285,6 +285,8 @@ namespace llvm { UNPCKL, UNPCKH, VPERMILP, + VPERMV, + VPERMI, VPERM2X128, VBROADCAST, @@ -855,7 +857,7 @@ namespace llvm { unsigned notOpc, unsigned EAXreg, const TargetRegisterClass *RC, - bool invSrc = false) const; + bool Invert = false) const; MachineBasicBlock *EmitAtomicBit6432WithCustomInserter( MachineInstr *BInstr, @@ -864,7 +866,7 @@ namespace llvm { unsigned regOpcH, unsigned immOpcL, unsigned immOpcH, - bool invSrc = false) const; + bool Invert = false) const; /// Utility function to emit atomic min and max. 
It takes the min/max /// instruction to expand, the associated basic block, and the associated diff --git a/lib/Target/X86/X86InstrFragmentsSIMD.td b/lib/Target/X86/X86InstrFragmentsSIMD.td index 041a64f336f8..35801e43229b 100644 --- a/lib/Target/X86/X86InstrFragmentsSIMD.td +++ b/lib/Target/X86/X86InstrFragmentsSIMD.td @@ -155,6 +155,8 @@ def X86Unpckl : SDNode<"X86ISD::UNPCKL", SDTShuff2Op>; def X86Unpckh : SDNode<"X86ISD::UNPCKH", SDTShuff2Op>; def X86VPermilp : SDNode<"X86ISD::VPERMILP", SDTShuff2OpI>; +def X86VPermv : SDNode<"X86ISD::VPERMV", SDTShuff2Op>; +def X86VPermi : SDNode<"X86ISD::VPERMI", SDTShuff2OpI>; def X86VPerm2x128 : SDNode<"X86ISD::VPERM2X128", SDTShuff3OpI>; diff --git a/lib/Target/X86/X86InstrInfo.cpp b/lib/Target/X86/X86InstrInfo.cpp index 307c96b8c43f..b12c1db0fe1d 100644 --- a/lib/Target/X86/X86InstrInfo.cpp +++ b/lib/Target/X86/X86InstrInfo.cpp @@ -1049,9 +1049,9 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm) { X86::VPCMPGTWYrr, X86::VPCMPGTWYrm, TB_ALIGN_32 }, { X86::VPERM2I128rr, X86::VPERM2I128rm, TB_ALIGN_32 }, { X86::VPERMDYrr, X86::VPERMDYrm, TB_ALIGN_32 }, - { X86::VPERMPDYrr, X86::VPERMPDYrm, TB_ALIGN_32 }, + { X86::VPERMPDYri, X86::VPERMPDYmi, TB_ALIGN_32 }, { X86::VPERMPSYrr, X86::VPERMPSYrm, TB_ALIGN_32 }, - { X86::VPERMQYrr, X86::VPERMQYrm, TB_ALIGN_32 }, + { X86::VPERMQYri, X86::VPERMQYmi, TB_ALIGN_32 }, { X86::VPHADDDYrr, X86::VPHADDDYrm, TB_ALIGN_32 }, { X86::VPHADDSWrr256, X86::VPHADDSWrm256, TB_ALIGN_32 }, { X86::VPHADDWYrr, X86::VPHADDWYrm, TB_ALIGN_32 }, diff --git a/lib/Target/X86/X86InstrSSE.td b/lib/Target/X86/X86InstrSSE.td index 408ab16778d1..65e3c1e19fa9 100644 --- a/lib/Target/X86/X86InstrSSE.td +++ b/lib/Target/X86/X86InstrSSE.td @@ -6742,6 +6742,16 @@ let Predicates = [HasAVX] in { def : Pat<(v4f64 (X86Blendpd (v4f64 VR256:$src1), (v4f64 VR256:$src2), (imm:$mask))), (VBLENDPDYrri VR256:$src2, VR256:$src1, imm:$mask)>; + + def : Pat<(v8i16 (X86Blendpw (v8i16 VR128:$src1), (v8i16 VR128:$src2), + (imm:$mask))), + (VPBLENDWrri VR128:$src2, VR128:$src1, imm:$mask)>; + def : Pat<(v4f32 (X86Blendps (v4f32 VR128:$src1), (v4f32 VR128:$src2), + (imm:$mask))), + (VBLENDPSrri VR128:$src2, VR128:$src1, imm:$mask)>; + def : Pat<(v2f64 (X86Blendpd (v2f64 VR128:$src1), (v2f64 VR128:$src2), + (imm:$mask))), + (VBLENDPDrri VR128:$src2, VR128:$src1, imm:$mask)>; } let Predicates = [HasAVX2] in { @@ -6802,13 +6812,13 @@ let Predicates = [HasSSE41] in { def : Pat<(v8i16 (X86Blendpw (v8i16 VR128:$src1), (v8i16 VR128:$src2), (imm:$mask))), - (VPBLENDWrri VR128:$src2, VR128:$src1, imm:$mask)>; + (PBLENDWrri VR128:$src2, VR128:$src1, imm:$mask)>; def : Pat<(v4f32 (X86Blendps (v4f32 VR128:$src1), (v4f32 VR128:$src2), (imm:$mask))), - (VBLENDPSrri VR128:$src2, VR128:$src1, imm:$mask)>; + (BLENDPSrri VR128:$src2, VR128:$src1, imm:$mask)>; def : Pat<(v2f64 (X86Blendpd (v2f64 VR128:$src1), (v2f64 VR128:$src2), (imm:$mask))), - (VBLENDPDrri VR128:$src2, VR128:$src1, imm:$mask)>; + (BLENDPDrri VR128:$src2, VR128:$src1, imm:$mask)>; } @@ -7725,45 +7735,47 @@ def : Pat<(v4i32 (X86VBroadcast (loadi32 addr:$src))), // multiclass avx2_perm<bits<8> opc, string OpcodeStr, PatFrag mem_frag, - Intrinsic Int> { + ValueType OpVT> { def Yrr : AVX28I<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src1, VR256:$src2), !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"), - [(set VR256:$dst, (Int VR256:$src1, VR256:$src2))]>, VEX_4V; + [(set VR256:$dst, + (OpVT (X86VPermv VR256:$src1, VR256:$src2)))]>, VEX_4V; def Yrm : AVX28I<opc, MRMSrcMem, (outs VR256:$dst), 
(ins VR256:$src1, i256mem:$src2), !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"), - [(set VR256:$dst, (Int VR256:$src1, - (bitconvert (mem_frag addr:$src2))))]>, + [(set VR256:$dst, + (OpVT (X86VPermv VR256:$src1, + (bitconvert (mem_frag addr:$src2)))))]>, VEX_4V; } -defm VPERMD : avx2_perm<0x36, "vpermd", memopv4i64, int_x86_avx2_permd>; +defm VPERMD : avx2_perm<0x36, "vpermd", memopv4i64, v8i32>; let ExeDomain = SSEPackedSingle in -defm VPERMPS : avx2_perm<0x16, "vpermps", memopv8f32, int_x86_avx2_permps>; +defm VPERMPS : avx2_perm<0x16, "vpermps", memopv8f32, v8f32>; multiclass avx2_perm_imm<bits<8> opc, string OpcodeStr, PatFrag mem_frag, - Intrinsic Int> { - def Yrr : AVX2AIi8<opc, MRMSrcReg, (outs VR256:$dst), + ValueType OpVT> { + def Yri : AVX2AIi8<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src1, i8imm:$src2), !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"), - [(set VR256:$dst, (Int VR256:$src1, imm:$src2))]>, VEX; - def Yrm : AVX2AIi8<opc, MRMSrcMem, (outs VR256:$dst), + [(set VR256:$dst, + (OpVT (X86VPermi VR256:$src1, (i8 imm:$src2))))]>, VEX; + def Ymi : AVX2AIi8<opc, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src1, i8imm:$src2), !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"), - [(set VR256:$dst, (Int (mem_frag addr:$src1), imm:$src2))]>, - VEX; + [(set VR256:$dst, + (OpVT (X86VPermi (mem_frag addr:$src1), + (i8 imm:$src2))))]>, VEX; } -defm VPERMQ : avx2_perm_imm<0x00, "vpermq", memopv4i64, int_x86_avx2_permq>, - VEX_W; +defm VPERMQ : avx2_perm_imm<0x00, "vpermq", memopv4i64, v4i64>, VEX_W; let ExeDomain = SSEPackedDouble in -defm VPERMPD : avx2_perm_imm<0x01, "vpermpd", memopv4f64, int_x86_avx2_permpd>, - VEX_W; +defm VPERMPD : avx2_perm_imm<0x01, "vpermpd", memopv4f64, v4f64>, VEX_W; //===----------------------------------------------------------------------===// // VPERM2I128 - Permute Floating-Point Values in 128-bit chunks diff --git a/lib/Target/X86/X86Subtarget.cpp b/lib/Target/X86/X86Subtarget.cpp index 452dd7eba326..ed1a40965aa9 100644 --- a/lib/Target/X86/X86Subtarget.cpp +++ b/lib/Target/X86/X86Subtarget.cpp @@ -424,7 +424,9 @@ bool X86Subtarget::enablePostRAScheduler( CodeGenOpt::Level OptLevel, TargetSubtargetInfo::AntiDepBreakMode& Mode, RegClassVector& CriticalPathRCs) const { - Mode = TargetSubtargetInfo::ANTIDEP_CRITICAL; + //TODO: change back to ANTIDEP_CRITICAL when the + // X86 subtarget properly sets up post RA liveness. + Mode = TargetSubtargetInfo::ANTIDEP_NONE; CriticalPathRCs.clear(); return PostRAScheduler && OptLevel >= CodeGenOpt::Default; } diff --git a/lib/Target/X86/X86TargetMachine.cpp b/lib/Target/X86/X86TargetMachine.cpp index f4b7a6277ade..89c388415bdc 100644 --- a/lib/Target/X86/X86TargetMachine.cpp +++ b/lib/Target/X86/X86TargetMachine.cpp @@ -145,34 +145,34 @@ TargetPassConfig *X86TargetMachine::createPassConfig(PassManagerBase &PM) { bool X86PassConfig::addInstSelector() { // Install an instruction selector. - PM.add(createX86ISelDag(getX86TargetMachine(), getOptLevel())); + PM->add(createX86ISelDag(getX86TargetMachine(), getOptLevel())); // For 32-bit, prepend instructions to set the "global base reg" for PIC. if (!getX86Subtarget().is64Bit()) - PM.add(createGlobalBaseRegPass()); + PM->add(createGlobalBaseRegPass()); return false; } bool X86PassConfig::addPreRegAlloc() { - PM.add(createX86MaxStackAlignmentHeuristicPass()); + PM->add(createX86MaxStackAlignmentHeuristicPass()); return false; // -print-machineinstr shouldn't print after this. 
} bool X86PassConfig::addPostRegAlloc() { - PM.add(createX86FloatingPointStackifierPass()); + PM->add(createX86FloatingPointStackifierPass()); return true; // -print-machineinstr should print after this. } bool X86PassConfig::addPreEmitPass() { bool ShouldPrint = false; if (getOptLevel() != CodeGenOpt::None && getX86Subtarget().hasSSE2()) { - PM.add(createExecutionDependencyFixPass(&X86::VR128RegClass)); + PM->add(createExecutionDependencyFixPass(&X86::VR128RegClass)); ShouldPrint = true; } if (getX86Subtarget().hasAVX() && UseVZeroUpper) { - PM.add(createX86IssueVZeroUpperPass()); + PM->add(createX86IssueVZeroUpperPass()); ShouldPrint = true; } diff --git a/lib/Target/XCore/XCoreTargetMachine.cpp b/lib/Target/XCore/XCoreTargetMachine.cpp index f65297e54a79..5afd5a1affcc 100644 --- a/lib/Target/XCore/XCoreTargetMachine.cpp +++ b/lib/Target/XCore/XCoreTargetMachine.cpp @@ -55,7 +55,7 @@ TargetPassConfig *XCoreTargetMachine::createPassConfig(PassManagerBase &PM) { } bool XCorePassConfig::addInstSelector() { - PM.add(createXCoreISelDag(getXCoreTargetMachine(), getOptLevel())); + PM->add(createXCoreISelDag(getXCoreTargetMachine(), getOptLevel())); return false; } diff --git a/lib/Transforms/IPO/Internalize.cpp b/lib/Transforms/IPO/Internalize.cpp index cd29e7a7a7da..fb5869ede2bb 100644 --- a/lib/Transforms/IPO/Internalize.cpp +++ b/lib/Transforms/IPO/Internalize.cpp @@ -123,6 +123,8 @@ bool InternalizePass::runOnModule(Module &M) { bool Changed = false; // Never internalize functions which code-gen might insert. + // FIXME: We should probably add this (and the __stack_chk_guard) via some + // type of call-back in CodeGen. ExternalNames.insert("__stack_chk_fail"); // Mark all functions not in the api as internal. diff --git a/lib/Transforms/IPO/PassManagerBuilder.cpp b/lib/Transforms/IPO/PassManagerBuilder.cpp index a1b0a4580bf5..43b4ab5efa4d 100644 --- a/lib/Transforms/IPO/PassManagerBuilder.cpp +++ b/lib/Transforms/IPO/PassManagerBuilder.cpp @@ -35,6 +35,11 @@ using namespace llvm; static cl::opt<bool> RunVectorization("vectorize", cl::desc("Run vectorization passes")); +static cl::opt<bool> +UseGVNAfterVectorization("use-gvn-after-vectorization", + cl::init(false), cl::Hidden, + cl::desc("Run GVN instead of Early CSE after vectorization passes")); + PassManagerBuilder::PassManagerBuilder() { OptLevel = 2; SizeLevel = 0; @@ -182,8 +187,10 @@ void PassManagerBuilder::populateModulePassManager(PassManagerBase &MPM) { if (Vectorize) { MPM.add(createBBVectorizePass()); MPM.add(createInstructionCombiningPass()); - if (OptLevel > 1) - MPM.add(createGVNPass()); // Remove redundancies + if (OptLevel > 1 && UseGVNAfterVectorization) + MPM.add(createGVNPass()); // Remove redundancies + else + MPM.add(createEarlyCSEPass()); // Catch trivial redundancies } MPM.add(createAggressiveDCEPass()); // Delete dead instructions diff --git a/lib/Transforms/Scalar/LoopStrengthReduce.cpp b/lib/Transforms/Scalar/LoopStrengthReduce.cpp index d57ec22f44ab..b085b006e4dc 100644 --- a/lib/Transforms/Scalar/LoopStrengthReduce.cpp +++ b/lib/Transforms/Scalar/LoopStrengthReduce.cpp @@ -77,6 +77,12 @@ #include <algorithm> using namespace llvm; +/// MaxIVUsers is an arbitrary threshold that provides an early opportunity to +/// bail out. This threshold is far beyond the number of users that LSR can +/// conceivably solve, so it should not affect generated code, but catches the +/// worst cases before LSR burns too much compile time and stack space.
+static const unsigned MaxIVUsers = 200; + // Temporary flag to cleanup congruent phis after LSR phi expansion. // It's currently disabled until we can determine whether it's truly useful or // not. The flag should be removed after the v3.0 release. @@ -4102,7 +4108,7 @@ LSRInstance::HoistInsertPosition(BasicBlock::iterator IP, // Attempt to find an insert position in the middle of the block, // instead of at the end, so that it can be used for other expansions. if (IDom == Inst->getParent() && - (!BetterPos || DT.dominates(BetterPos, Inst))) + (!BetterPos || !DT.dominates(Inst, BetterPos))) BetterPos = llvm::next(BasicBlock::iterator(Inst)); } if (!AllDominate) @@ -4519,6 +4525,17 @@ LSRInstance::LSRInstance(const TargetLowering *tli, Loop *l, Pass *P) // If there's no interesting work to be done, bail early. if (IU.empty()) return; + // If there's too much analysis to be done, bail early. We won't be able to + // model the problem anyway. + unsigned NumUsers = 0; + for (IVUsers::const_iterator UI = IU.begin(), E = IU.end(); UI != E; ++UI) { + if (++NumUsers > MaxIVUsers) { + DEBUG(dbgs() << "LSR skipping loop, too many IV Users in " << *L + << "\n"); + return; + } + } + #ifndef NDEBUG // All dominating loops must have preheaders, or SCEVExpander may not be able // to materialize an AddRecExpr whose Start is an outer AddRecExpr. diff --git a/lib/Transforms/Scalar/LoopUnswitch.cpp b/lib/Transforms/Scalar/LoopUnswitch.cpp index ee232687ffde..930980f528ae 100644 --- a/lib/Transforms/Scalar/LoopUnswitch.cpp +++ b/lib/Transforms/Scalar/LoopUnswitch.cpp @@ -624,11 +624,10 @@ bool LoopUnswitch::IsTrivialUnswitchCondition(Value *Cond, Constant **Val, /// LoopCond == Val to simplify the loop. If we decide that this is profitable, /// unswitch the loop, reprocess the pieces, then return true. bool LoopUnswitch::UnswitchIfProfitable(Value *LoopCond, Constant *Val) { - Function *F = loopHeader->getParent(); - Constant *CondVal = 0; BasicBlock *ExitBlock = 0; + if (IsTrivialUnswitchCondition(LoopCond, &CondVal, &ExitBlock)) { // If the condition is trivial, always unswitch. There is no code growth // for this case. @@ -688,8 +687,8 @@ void LoopUnswitch::EmitPreheaderBranchOnCondition(Value *LIC, Constant *Val, // If either edge is critical, split it. This helps preserve LoopSimplify // form for enclosing loops. - SplitCriticalEdge(BI, 0, this); - SplitCriticalEdge(BI, 1, this); + SplitCriticalEdge(BI, 0, this, false, false, true); + SplitCriticalEdge(BI, 1, this, false, false, true); } /// UnswitchTrivialCondition - Given a loop that has a trivial unswitchable diff --git a/lib/Transforms/Scalar/ObjCARC.cpp b/lib/Transforms/Scalar/ObjCARC.cpp index 40b0b2061689..7e3e69b78d30 100644 --- a/lib/Transforms/Scalar/ObjCARC.cpp +++ b/lib/Transforms/Scalar/ObjCARC.cpp @@ -162,6 +162,7 @@ namespace { IC_MoveWeak, ///< objc_moveWeak (derived) IC_CopyWeak, ///< objc_copyWeak (derived) IC_DestroyWeak, ///< objc_destroyWeak (derived) + IC_StoreStrong, ///< objc_storeStrong (derived) IC_CallOrUser, ///< could call objc_release and/or "use" pointers IC_Call, ///< could call objc_release IC_User, ///< could "use" a pointer @@ -262,6 +263,7 @@ static InstructionClass GetFunctionClass(const Function *F) { return StringSwitch<InstructionClass>(F->getName()) .Case("objc_storeWeak", IC_StoreWeak) .Case("objc_initWeak", IC_InitWeak) + .Case("objc_storeStrong", IC_StoreStrong) .Default(IC_CallOrUser); // Second argument is i8**. 
if (PointerType *Pte1 = dyn_cast<PointerType>(ETy1)) @@ -618,22 +620,35 @@ static bool DoesObjCBlockEscape(const Value *BlockPtr) { const User *UUser = *UI; // Special - Use by a call (callee or argument) is not considered // to be an escape. - if (isa<CallInst>(UUser) || isa<InvokeInst>(UUser)) - continue; - // Use by an instruction which copies the value is an escape if the - // result is an escape. - if (isa<BitCastInst>(UUser) || isa<GetElementPtrInst>(UUser) || - isa<PHINode>(UUser) || isa<SelectInst>(UUser)) { - Worklist.push_back(UUser); + switch (GetBasicInstructionClass(UUser)) { + case IC_StoreWeak: + case IC_InitWeak: + case IC_StoreStrong: + case IC_Autorelease: + case IC_AutoreleaseRV: + // These special functions make copies of their pointer arguments. + return true; + case IC_User: + case IC_None: + // Use by an instruction which copies the value is an escape if the + // result is an escape. + if (isa<BitCastInst>(UUser) || isa<GetElementPtrInst>(UUser) || + isa<PHINode>(UUser) || isa<SelectInst>(UUser)) { + Worklist.push_back(UUser); + continue; + } + // Use by a load is not an escape. + if (isa<LoadInst>(UUser)) + continue; + // Use by a store is not an escape if the use is the address. + if (const StoreInst *SI = dyn_cast<StoreInst>(UUser)) + if (V != SI->getValueOperand()) + continue; + break; + default: + // Regular calls and other stuff are not considered escapes. continue; } - // Use by a load is not an escape. - if (isa<LoadInst>(UUser)) - continue; - // Use by a store is not an escape if the use is the address. - if (const StoreInst *SI = dyn_cast<StoreInst>(UUser)) - if (V != SI->getValueOperand()) - continue; // Otherwise, conservatively assume an escape. return true; } @@ -883,7 +898,7 @@ bool ObjCARCExpand::runOnFunction(Function &F) { // These calls return their argument verbatim, as a low-level // optimization. However, this makes high-level optimizations // harder. Undo any uses of this optimization that the front-end - // emitted here. We'll redo them in a later pass. + // emitted here. We'll redo them in the contract pass. Changed = true; Inst->replaceAllUsesWith(cast<CallInst>(Inst)->getArgOperand(0)); break; @@ -997,7 +1012,11 @@ bool ObjCARCAPElim::runOnModule(Module &M) { return false; // Find the llvm.global_ctors variable, as the first step in - // identifying the global constructors. + // identifying the global constructors. In theory, unnecessary autorelease + // pools could occur anywhere, but in practice it's pretty rare. Global + // ctors are a place where autorelease pools get inserted automatically, + // so it's pretty common for them to be unnecessary, and it's pretty + // profitable to eliminate them. 
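The comment added to ObjCARCAPElim above explains where it looks; what it actually deletes, once it can prove that nothing between the two calls may autorelease, is a matched pool push/pop pair. In terms of the runtime entry points, the shape of the pattern looks like this (a sketch of the pass's input, not of the pass):

extern "C" void *objc_autoreleasePoolPush(void);
extern "C" void objc_autoreleasePoolPop(void *);

static void staticInitializer() {
  void *Pool = objc_autoreleasePoolPush();
  // ... initializer code proven not to autorelease anything ...
  objc_autoreleasePoolPop(Pool);  // the pair is dead; both calls go away
}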
GlobalVariable *GV = M.getGlobalVariable("llvm.global_ctors"); if (!GV) return false; @@ -2263,6 +2282,7 @@ void ObjCARCOpt::OptimizeIndividualCalls(Function &F) { case IC_DestroyWeak: { CallInst *CI = cast<CallInst>(Inst); if (isNullOrUndef(CI->getArgOperand(0))) { + Changed = true; Type *Ty = CI->getArgOperand(0)->getType(); new StoreInst(UndefValue::get(cast<PointerType>(Ty)->getElementType()), Constant::getNullValue(Ty), @@ -2278,6 +2298,7 @@ void ObjCARCOpt::OptimizeIndividualCalls(Function &F) { CallInst *CI = cast<CallInst>(Inst); if (isNullOrUndef(CI->getArgOperand(0)) || isNullOrUndef(CI->getArgOperand(1))) { + Changed = true; Type *Ty = CI->getArgOperand(0)->getType(); new StoreInst(UndefValue::get(cast<PointerType>(Ty)->getElementType()), Constant::getNullValue(Ty), @@ -3165,6 +3186,8 @@ void ObjCARCOpt::MoveCalls(Value *Arg, } } +/// PerformCodePlacement - Identify pairings between the retains and releases, +/// and delete and/or move them. bool ObjCARCOpt::PerformCodePlacement(DenseMap<const BasicBlock *, BBState> &BBStates, @@ -3178,6 +3201,7 @@ ObjCARCOpt::PerformCodePlacement(DenseMap<const BasicBlock *, BBState> SmallVector<Instruction *, 4> NewReleases; SmallVector<Instruction *, 8> DeadInsts; + // Visit each retain. for (MapVector<Value *, RRInfo>::const_iterator I = Retains.begin(), E = Retains.end(); I != E; ++I) { Value *V = I->first; @@ -3651,6 +3675,7 @@ bool ObjCARCOpt::doInitialization(Module &M) { if (!EnableARCOpts) return false; + // If nothing in the Module uses ARC, don't do anything. Run = ModuleHasARC(M); if (!Run) return false; @@ -3985,6 +4010,7 @@ void ObjCARCContract::ContractRelease(Instruction *Release, } bool ObjCARCContract::doInitialization(Module &M) { + // If nothing in the Module uses ARC, don't do anything. Run = ModuleHasARC(M); if (!Run) return false; @@ -4060,6 +4086,7 @@ bool ObjCARCContract::runOnFunction(Function &F) { --BBI; while (isNoopInstruction(BBI)) --BBI; if (&*BBI == GetObjCArg(Inst)) { + Changed = true; InlineAsm *IA = InlineAsm::get(FunctionType::get(Type::getVoidTy(Inst->getContext()), /*isVarArg=*/false), @@ -4109,6 +4136,13 @@ bool ObjCARCContract::runOnFunction(Function &F) { Use &U = UI.getUse(); unsigned OperandNo = UI.getOperandNo(); ++UI; // Increment UI now, because we may unlink its element. + + // If the call's return value dominates a use of the call's argument + // value, rewrite the use to use the return value. We check for + // reachability here because an unreachable call is considered to + // trivially dominate itself, which would lead us to rewriting its + // argument in terms of its return value, which would lead to + // infinite loops in GetObjCArg. if (DT->isReachableFromEntry(U) && DT->dominates(Inst, U)) { Changed = true; @@ -4123,6 +4157,9 @@ bool ObjCARCContract::runOnFunction(Function &F) { if (Replacement->getType() != UseTy) Replacement = new BitCastInst(Replacement, UseTy, "", &BB->back()); + // While we're here, rewrite all edges for this PHI, rather + // than just one use at a time, to minimize the number of + // bitcasts we emit. 
for (unsigned i = 0, e = PHI->getNumIncomingValues(); i != e; ++i) if (PHI->getIncomingBlock(i) == BB) { diff --git a/lib/Transforms/Scalar/Reassociate.cpp b/lib/Transforms/Scalar/Reassociate.cpp index cb408a137eab..5de00d1ab5eb 100644 --- a/lib/Transforms/Scalar/Reassociate.cpp +++ b/lib/Transforms/Scalar/Reassociate.cpp @@ -559,7 +559,8 @@ static unsigned FindInOperandList(SmallVectorImpl<ValueEntry> &Ops, unsigned i, /// EmitAddTreeOfValues - Emit a tree of add instructions, summing Ops together /// and returning the result. Insert the tree before I. -static Value *EmitAddTreeOfValues(Instruction *I, SmallVectorImpl<Value*> &Ops){ +static Value *EmitAddTreeOfValues(Instruction *I, + SmallVectorImpl<WeakVH> &Ops){ if (Ops.size() == 1) return Ops.back(); Value *V1 = Ops.back(); @@ -833,7 +834,7 @@ Value *Reassociate::OptimizeAdd(Instruction *I, // from an expression will drop a use of maxocc, and this can cause // RemoveFactorFromExpression on successive values to behave differently. Instruction *DummyInst = BinaryOperator::CreateAdd(MaxOccVal, MaxOccVal); - SmallVector<Value*, 4> NewMulOps; + SmallVector<WeakVH, 4> NewMulOps; for (unsigned i = 0; i != Ops.size(); ++i) { // Only try to remove factors from expressions we're allowed to. BinaryOperator *BOp = dyn_cast<BinaryOperator>(Ops[i].Op); diff --git a/lib/Transforms/Scalar/SimplifyLibCalls.cpp b/lib/Transforms/Scalar/SimplifyLibCalls.cpp index 9c49ec1c84d2..f7b69411b1d3 100644 --- a/lib/Transforms/Scalar/SimplifyLibCalls.cpp +++ b/lib/Transforms/Scalar/SimplifyLibCalls.cpp @@ -1583,21 +1583,16 @@ void SimplifyLibCalls::InitOptimizations() { Optimizations["llvm.exp2.f64"] = &Exp2; Optimizations["llvm.exp2.f32"] = &Exp2; -#ifdef HAVE_FLOORF - Optimizations["floor"] = &UnaryDoubleFP; -#endif -#ifdef HAVE_CEILF - Optimizations["ceil"] = &UnaryDoubleFP; -#endif -#ifdef HAVE_ROUNDF - Optimizations["round"] = &UnaryDoubleFP; -#endif -#ifdef HAVE_RINTF - Optimizations["rint"] = &UnaryDoubleFP; -#endif -#ifdef HAVE_NEARBYINTF - Optimizations["nearbyint"] = &UnaryDoubleFP; -#endif + if (TLI->has(LibFunc::floor) && TLI->has(LibFunc::floorf)) + Optimizations["floor"] = &UnaryDoubleFP; + if (TLI->has(LibFunc::ceil) && TLI->has(LibFunc::ceilf)) + Optimizations["ceil"] = &UnaryDoubleFP; + if (TLI->has(LibFunc::round) && TLI->has(LibFunc::roundf)) + Optimizations["round"] = &UnaryDoubleFP; + if (TLI->has(LibFunc::rint) && TLI->has(LibFunc::rintf)) + Optimizations["rint"] = &UnaryDoubleFP; + if (TLI->has(LibFunc::nearbyint) && TLI->has(LibFunc::nearbyintf)) + Optimizations["nearbyint"] = &UnaryDoubleFP; // Integer Optimizations Optimizations["ffs"] = &FFS; diff --git a/lib/Transforms/Utils/BreakCriticalEdges.cpp b/lib/Transforms/Utils/BreakCriticalEdges.cpp index f752d7981c5f..2a8e9b834e31 100644 --- a/lib/Transforms/Utils/BreakCriticalEdges.cpp +++ b/lib/Transforms/Utils/BreakCriticalEdges.cpp @@ -117,33 +117,38 @@ bool llvm::isCriticalEdge(const TerminatorInst *TI, unsigned SuccNum, return false; } -/// CreatePHIsForSplitLoopExit - When a loop exit edge is split, LCSSA form +/// createPHIsForSplitLoopExit - When a loop exit edge is split, LCSSA form /// may require new PHIs in the new exit block. This function inserts the -/// new PHIs, as needed. Preds is a list of preds inside the loop, SplitBB +/// new PHIs, as needed. Preds is a list of preds inside the loop, SplitBB /// is the new loop exit block, and DestBB is the old loop exit, now the /// successor of SplitBB. 
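The SimplifyLibCalls hunk above is a host-versus-target fix: the HAVE_FLOORF-style configure macros describe the machine LLVM was built on, while the TargetLibraryInfo queries describe the machine being compiled for, which is what matters when cross-compiling. The gated optimization, UnaryDoubleFP, shrinks a double-precision call to its float variant when the value round-trips through float; in source terms (illustrative only), it targets code like:

#include <cmath>

// Shrinking candidate: the double floor is observable only at float
// precision, so it can become floorf(F) once TargetLibraryInfo confirms
// both floor and floorf exist on the target.
static float roundDownToFloat(float F) {
  return static_cast<float>(std::floor(static_cast<double>(F)));
}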
-static void CreatePHIsForSplitLoopExit(SmallVectorImpl<BasicBlock *> &Preds, +static void createPHIsForSplitLoopExit(SmallVectorImpl<BasicBlock *> &Preds, BasicBlock *SplitBB, BasicBlock *DestBB) { // SplitBB shouldn't have anything non-trivial in it yet. - assert(SplitBB->getFirstNonPHI() == SplitBB->getTerminator() && - "SplitBB has non-PHI nodes!"); + assert((SplitBB->getFirstNonPHI() == SplitBB->getTerminator() || + SplitBB->isLandingPad()) && "SplitBB has non-PHI nodes!"); - // For each PHI in the destination block... + // For each PHI in the destination block. for (BasicBlock::iterator I = DestBB->begin(); PHINode *PN = dyn_cast<PHINode>(I); ++I) { unsigned Idx = PN->getBasicBlockIndex(SplitBB); Value *V = PN->getIncomingValue(Idx); + // If the input is a PHI which already satisfies LCSSA, don't create // a new one. if (const PHINode *VP = dyn_cast<PHINode>(V)) if (VP->getParent() == SplitBB) continue; + // Otherwise a new PHI is needed. Create one and populate it. - PHINode *NewPN = PHINode::Create(PN->getType(), Preds.size(), "split", - SplitBB->getTerminator()); + PHINode *NewPN = + PHINode::Create(PN->getType(), Preds.size(), "split", + SplitBB->isLandingPad() ? + SplitBB->begin() : SplitBB->getTerminator()); for (unsigned i = 0, e = Preds.size(); i != e; ++i) NewPN->addIncoming(V, Preds[i]); + // Update the original PHI. PN->setIncomingValue(Idx, NewPN); } @@ -168,7 +173,8 @@ static void CreatePHIsForSplitLoopExit(SmallVectorImpl<BasicBlock *> &Preds, /// BasicBlock *llvm::SplitCriticalEdge(TerminatorInst *TI, unsigned SuccNum, Pass *P, bool MergeIdenticalEdges, - bool DontDeleteUselessPhis) { + bool DontDeleteUselessPhis, + bool SplitLandingPads) { if (!isCriticalEdge(TI, SuccNum, MergeIdenticalEdges)) return 0; assert(!isa<IndirectBrInst>(TI) && @@ -338,7 +344,7 @@ BasicBlock *llvm::SplitCriticalEdge(TerminatorInst *TI, unsigned SuccNum, if (P->mustPreserveAnalysisID(LCSSAID)) { SmallVector<BasicBlock *, 1> OrigPred; OrigPred.push_back(TIBB); - CreatePHIsForSplitLoopExit(OrigPred, NewBB, DestBB); + createPHIsForSplitLoopExit(OrigPred, NewBB, DestBB); } // For each unique exit block... @@ -371,10 +377,19 @@ BasicBlock *llvm::SplitCriticalEdge(TerminatorInst *TI, unsigned SuccNum, // getUniqueExitBlocks above because that depends on LoopSimplify // form, which we're in the process of restoring! 
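SplitCriticalEdge gains a sixth parameter here, and the LoopUnswitch hunk earlier in this patch (SplitCriticalEdge(BI, 0, this, false, false, true)) is its first opted-in caller. Spelled out with the parameter names from the declaration above, such a call site reads:

// Split the successor-0 edge of branch BI under pass P; the final flag
// permits splitting exits that are landing pads, which previously had
// to be left alone.
SplitCriticalEdge(BI, 0, P,
                  /*MergeIdenticalEdges=*/false,
                  /*DontDeleteUselessPhis=*/false,
                  /*SplitLandingPads=*/true);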
if (!Preds.empty() && HasPredOutsideOfLoop) { - BasicBlock *NewExitBB = - SplitBlockPredecessors(Exit, Preds, "split", P); - if (P->mustPreserveAnalysisID(LCSSAID)) - CreatePHIsForSplitLoopExit(Preds, NewExitBB, Exit); + if (!Exit->isLandingPad()) { + BasicBlock *NewExitBB = + SplitBlockPredecessors(Exit, Preds, "split", P); + if (P->mustPreserveAnalysisID(LCSSAID)) + createPHIsForSplitLoopExit(Preds, NewExitBB, Exit); + } else if (SplitLandingPads) { + SmallVector<BasicBlock*, 8> NewBBs; + SplitLandingPadPredecessors(Exit, Preds, + ".split1", ".split2", + P, NewBBs); + if (P->mustPreserveAnalysisID(LCSSAID)) + createPHIsForSplitLoopExit(Preds, NewBBs[0], Exit); + } } } } diff --git a/lib/Transforms/Vectorize/BBVectorize.cpp b/lib/Transforms/Vectorize/BBVectorize.cpp index 286b54f2f067..9d62306dce5a 100644 --- a/lib/Transforms/Vectorize/BBVectorize.cpp +++ b/lib/Transforms/Vectorize/BBVectorize.cpp @@ -84,6 +84,10 @@ NoFloats("bb-vectorize-no-floats", cl::init(false), cl::Hidden, cl::desc("Don't try to vectorize floating-point values")); static cl::opt<bool> +NoPointers("bb-vectorize-no-pointers", cl::init(false), cl::Hidden, + cl::desc("Don't try to vectorize pointer values")); + +static cl::opt<bool> NoCasts("bb-vectorize-no-casts", cl::init(false), cl::Hidden, cl::desc("Don't try to vectorize casting (conversion) operations")); @@ -96,6 +100,14 @@ NoFMA("bb-vectorize-no-fma", cl::init(false), cl::Hidden, cl::desc("Don't try to vectorize the fused-multiply-add intrinsic")); static cl::opt<bool> +NoSelect("bb-vectorize-no-select", cl::init(false), cl::Hidden, + cl::desc("Don't try to vectorize select instructions")); + +static cl::opt<bool> +NoGEP("bb-vectorize-no-gep", cl::init(false), cl::Hidden, + cl::desc("Don't try to vectorize getelementptr instructions")); + +static cl::opt<bool> NoMemOps("bb-vectorize-no-mem-ops", cl::init(false), cl::Hidden, cl::desc("Don't try to vectorize loads and stores")); @@ -546,11 +558,21 @@ namespace { return false; Type *SrcTy = C->getSrcTy(); - if (!SrcTy->isSingleValueType() || SrcTy->isPointerTy()) + if (!SrcTy->isSingleValueType()) return false; Type *DestTy = C->getDestTy(); - if (!DestTy->isSingleValueType() || DestTy->isPointerTy()) + if (!DestTy->isSingleValueType()) + return false; + } else if (isa<SelectInst>(I)) { + if (!Config.VectorizeSelect) + return false; + } else if (GetElementPtrInst *G = dyn_cast<GetElementPtrInst>(I)) { + if (!Config.VectorizeGEP) + return false; + + // Currently, vector GEPs exist only with one index. + if (G->getNumIndices() != 1) return false; } else if (!(I->isBinaryOp() || isa<ShuffleVectorInst>(I) || isa<ExtractElementInst>(I) || isa<InsertElementInst>(I))) { @@ -590,6 +612,11 @@ namespace { && (T1->isFPOrFPVectorTy() || T2->isFPOrFPVectorTy())) return false; + if ((!Config.VectorizePointers || TD == 0) && + (T1->getScalarType()->isPointerTy() || + T2->getScalarType()->isPointerTy())) + return false; + if (T1->getPrimitiveSizeInBits() > Config.VectorBits/2 || T2->getPrimitiveSizeInBits() > Config.VectorBits/2) return false; @@ -828,16 +855,33 @@ namespace { std::vector<Value *> &PairableInsts, std::multimap<ValuePair, ValuePair> &ConnectedPairs, ValuePair P) { + StoreInst *SI, *SJ; + // For each possible pairing for this variable, look at the uses of // the first value... 
for (Value::use_iterator I = P.first->use_begin(), E = P.first->use_end(); I != E; ++I) { + if (isa<LoadInst>(*I)) { + // A pair cannot be connected to a load because the load only takes one + // operand (the address) and it is a scalar even after vectorization. + continue; + } else if ((SI = dyn_cast<StoreInst>(*I)) && + P.first == SI->getPointerOperand()) { + // Similarly, a pair cannot be connected to a store through its + // pointer operand. + continue; + } + VPIteratorPair IPairRange = CandidatePairs.equal_range(*I); // For each use of the first variable, look for uses of the second // variable... for (Value::use_iterator J = P.second->use_begin(), E2 = P.second->use_end(); J != E2; ++J) { + if ((SJ = dyn_cast<StoreInst>(*J)) && + P.second == SJ->getPointerOperand()) + continue; + VPIteratorPair JPairRange = CandidatePairs.equal_range(*J); // Look for <I, J>: @@ -853,6 +897,10 @@ namespace { // Look for cases where just the first value in the pair is used by // both members of another pair (splatting). for (Value::use_iterator J = P.first->use_begin(); J != E; ++J) { + if ((SJ = dyn_cast<StoreInst>(*J)) && + P.first == SJ->getPointerOperand()) + continue; + if (isSecondInIteratorPair<Value*>(*J, IPairRange)) ConnectedPairs.insert(VPPair(P, ValuePair(*I, *J))); } @@ -863,9 +911,19 @@ namespace { // both members of another pair (splatting). for (Value::use_iterator I = P.second->use_begin(), E = P.second->use_end(); I != E; ++I) { + if (isa<LoadInst>(*I)) + continue; + else if ((SI = dyn_cast<StoreInst>(*I)) && + P.second == SI->getPointerOperand()) + continue; + VPIteratorPair IPairRange = CandidatePairs.equal_range(*I); for (Value::use_iterator J = P.second->use_begin(); J != E; ++J) { + if ((SJ = dyn_cast<StoreInst>(*J)) && + P.second == SJ->getPointerOperand()) + continue; + if (isSecondInIteratorPair<Value*>(*J, IPairRange)) ConnectedPairs.insert(VPPair(P, ValuePair(*I, *J))); } @@ -1891,9 +1949,12 @@ VectorizeConfig::VectorizeConfig() { VectorBits = ::VectorBits; VectorizeInts = !::NoInts; VectorizeFloats = !::NoFloats; + VectorizePointers = !::NoPointers; VectorizeCasts = !::NoCasts; VectorizeMath = !::NoMath; VectorizeFMA = !::NoFMA; + VectorizeSelect = !::NoSelect; + VectorizeGEP = !::NoGEP; VectorizeMemOps = !::NoMemOps; AlignedOnly = ::AlignedOnly; ReqChainDepth= ::ReqChainDepth; diff --git a/lib/VMCore/AutoUpgrade.cpp b/lib/VMCore/AutoUpgrade.cpp index ea3d4ba22436..2e16372fb1c9 100644 --- a/lib/VMCore/AutoUpgrade.cpp +++ b/lib/VMCore/AutoUpgrade.cpp @@ -18,9 +18,6 @@ #include "llvm/LLVMContext.h" #include "llvm/Module.h" #include "llvm/IntrinsicInst.h" -#include "llvm/ADT/DenseMap.h" -#include "llvm/ADT/SmallPtrSet.h" -#include "llvm/ADT/SmallVector.h" #include "llvm/Support/CallSite.h" #include "llvm/Support/CFG.h" #include "llvm/Support/ErrorHandling.h" @@ -59,7 +56,8 @@ static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) { if (Name.startswith("x86.sse2.pcmpeq.") || Name.startswith("x86.sse2.pcmpgt.") || Name.startswith("x86.avx2.pcmpeq.") || - Name.startswith("x86.avx2.pcmpgt.")) { + Name.startswith("x86.avx2.pcmpgt.") || + Name.startswith("x86.avx.vpermil.")) { NewFn = 0; return true; } @@ -121,7 +119,42 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) { // need to sign extend since icmp returns vector of i1 Rep = Builder.CreateSExt(Rep, CI->getType(), ""); } else { - llvm_unreachable("Unknown function for CallInst upgrade."); + bool PD128 = false, PD256 = false, PS128 = false, PS256 = false; + if 
(Name.startswith("llvm.x86.avx.vpermil.pd.256")) + PD256 = true; + else if (Name.startswith("llvm.x86.avx.vpermil.pd")) + PD128 = true; + else if (Name.startswith("llvm.x86.avx.vpermil.ps.256")) + PS256 = true; + else if (Name.startswith("llvm.x86.avx.vpermil.ps")) + PS128 = true; + + if (PD256 || PD128 || PS256 || PS128) { + Value *Op0 = CI->getArgOperand(0); + unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue(); + SmallVector<Constant*, 8> Idxs; + + if (PD128) + for (unsigned i = 0; i != 2; ++i) + Idxs.push_back(Builder.getInt32((Imm >> i) & 0x1)); + else if (PD256) + for (unsigned l = 0; l != 4; l+=2) + for (unsigned i = 0; i != 2; ++i) + Idxs.push_back(Builder.getInt32(((Imm >> (l+i)) & 0x1) + l)); + else if (PS128) + for (unsigned i = 0; i != 4; ++i) + Idxs.push_back(Builder.getInt32((Imm >> (2 * i)) & 0x3)); + else if (PS256) + for (unsigned l = 0; l != 8; l+=4) + for (unsigned i = 0; i != 4; ++i) + Idxs.push_back(Builder.getInt32(((Imm >> (2 * i)) & 0x3) + l)); + else + llvm_unreachable("Unexpected function"); + + Rep = Builder.CreateShuffleVector(Op0, Op0, ConstantVector::get(Idxs)); + } else { + llvm_unreachable("Unknown function for CallInst upgrade."); + } } CI->replaceAllUsesWith(Rep); diff --git a/lib/VMCore/Instructions.cpp b/lib/VMCore/Instructions.cpp index 8db6ac9a33f4..6c5db3287641 100644 --- a/lib/VMCore/Instructions.cpp +++ b/lib/VMCore/Instructions.cpp @@ -2003,6 +2003,23 @@ bool BinaryOperator::isExact() const { } //===----------------------------------------------------------------------===// +// FPMathOperator Class +//===----------------------------------------------------------------------===// + +/// getFPAccuracy - Get the maximum error permitted by this operation in ULPs. +/// An accuracy of 0.0 means that the operation should be performed with the +/// default precision. +float FPMathOperator::getFPAccuracy() const { + const MDNode *MD = + cast<Instruction>(this)->getMetadata(LLVMContext::MD_fpmath); + if (!MD) + return 0.0; + ConstantFP *Accuracy = cast<ConstantFP>(MD->getOperand(0)); + return Accuracy->getValueAPF().convertToFloat(); +} + + +//===----------------------------------------------------------------------===// // CastInst Class //===----------------------------------------------------------------------===// diff --git a/lib/VMCore/LLVMContext.cpp b/lib/VMCore/LLVMContext.cpp index 68c56212bc6c..f07f0b393926 100644 --- a/lib/VMCore/LLVMContext.cpp +++ b/lib/VMCore/LLVMContext.cpp @@ -44,9 +44,9 @@ LLVMContext::LLVMContext() : pImpl(new LLVMContextImpl(*this)) { unsigned ProfID = getMDKindID("prof"); assert(ProfID == MD_prof && "prof kind id drifted"); (void)ProfID; - // Create the 'fpaccuracy' metadata kind. - unsigned FPAccuracyID = getMDKindID("fpaccuracy"); - assert(FPAccuracyID == MD_fpaccuracy && "fpaccuracy kind id drifted"); + // Create the 'fpmath' metadata kind. + unsigned FPAccuracyID = getMDKindID("fpmath"); + assert(FPAccuracyID == MD_fpmath && "fpmath kind id drifted"); (void)FPAccuracyID; // Create the 'range' metadata kind. diff --git a/lib/VMCore/Module.cpp b/lib/VMCore/Module.cpp index e8bc6dbe9706..3c67191e09c2 100644 --- a/lib/VMCore/Module.cpp +++ b/lib/VMCore/Module.cpp @@ -434,7 +434,7 @@ bool Module::MaterializeAllPermanently(std::string *ErrInfo) { // -// dropAllReferences() - This function causes all the subelementss to "let go" +// dropAllReferences() - This function causes all the subelements to "let go" // of all references that they are maintaining. 
This allows one to 'delete' a // whole module at a time, even though there may be circular references... first // all references are dropped, and all use counts go to zero. Then everything diff --git a/lib/VMCore/Verifier.cpp b/lib/VMCore/Verifier.cpp index 96492e44d56f..47baef3e29df 100644 --- a/lib/VMCore/Verifier.cpp +++ b/lib/VMCore/Verifier.cpp @@ -1653,16 +1653,18 @@ void Verifier::visitInstruction(Instruction &I) { } } - if (MDNode *MD = I.getMetadata(LLVMContext::MD_fpaccuracy)) { + if (MDNode *MD = I.getMetadata(LLVMContext::MD_fpmath)) { Assert1(I.getType()->isFPOrFPVectorTy(), - "fpaccuracy requires a floating point result!", &I); - Assert1(MD->getNumOperands() == 1, "fpaccuracy takes one operand!", &I); - ConstantFP *Op = dyn_cast_or_null<ConstantFP>(MD->getOperand(0)); - Assert1(Op, "fpaccuracy ULPs not a floating point number!", &I); - APFloat ULPs = Op->getValueAPF(); - Assert1(ULPs.isNormal() || ULPs.isZero(), - "fpaccuracy ULPs not a normal number!", &I); - Assert1(!ULPs.isNegative(), "fpaccuracy ULPs is negative!", &I); + "fpmath requires a floating point result!", &I); + Assert1(MD->getNumOperands() == 1, "fpmath takes one operand!", &I); + Value *Op0 = MD->getOperand(0); + if (ConstantFP *CFP0 = dyn_cast_or_null<ConstantFP>(Op0)) { + APFloat Accuracy = CFP0->getValueAPF(); + Assert1(Accuracy.isNormal() && !Accuracy.isNegative(), + "fpmath accuracy not a positive number!", &I); + } else { + Assert1(false, "invalid fpmath accuracy!", &I); + } } MDNode *MD = I.getMetadata(LLVMContext::MD_range);
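The verifier hunk above completes the rename of !fpaccuracy to !fpmath (matching the LLVMContext change earlier) and pairs with the new FPMathOperator::getFPAccuracy accessor from Instructions.cpp. A minimal sketch of producing and consuming the metadata, assuming the 3.1-era C++ API (in particular that MDNode::get takes an ArrayRef<Value*>, to which a single Value* converts):

#include "llvm/Constants.h"
#include "llvm/Instruction.h"
#include "llvm/LLVMContext.h"
#include "llvm/Metadata.h"
#include "llvm/Operator.h"
using namespace llvm;

// Attach a 2.5 ULP accuracy bound to a floating-point instruction, then
// read it back; getFPAccuracy() returns 0.0 when no !fpmath is attached,
// meaning default precision.
static void tagFPMath(Instruction *I, LLVMContext &Ctx) {
  Value *Acc = ConstantFP::get(Type::getFloatTy(Ctx), 2.5);
  I->setMetadata(LLVMContext::MD_fpmath, MDNode::get(Ctx, Acc));
  if (FPMathOperator *FPO = dyn_cast<FPMathOperator>(I))
    (void)FPO->getFPAccuracy();  // 2.5f here
}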