diff options
Diffstat (limited to 'lib/Target/Hexagon')
21 files changed, 6684 insertions, 62 deletions
diff --git a/lib/Target/Hexagon/BitTracker.cpp b/lib/Target/Hexagon/BitTracker.cpp new file mode 100644 index 000000000000..cb7e633fb82f --- /dev/null +++ b/lib/Target/Hexagon/BitTracker.cpp @@ -0,0 +1,1127 @@ +//===--- BitTracker.cpp ---------------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + +// SSA-based bit propagation. +// +// The purpose of this code is, for a given virtual register, to provide +// information about the value of each bit in the register. The values +// of bits are represented by the class BitValue, and take one of four +// cases: 0, 1, "ref" and "bottom". The 0 and 1 are rather clear, the +// "ref" value means that the bit is a copy of another bit (which itself +// cannot be a copy of yet another bit---such chains are not allowed). +// A "ref" value is associated with a BitRef structure, which indicates +// which virtual register, and which bit in that register is the origin +// of the value. For example, given an instruction +// vreg2 = ASL vreg1, 1 +// assuming that nothing is known about bits of vreg1, bit 1 of vreg2 +// will be a "ref" to (vreg1, 0). If there is a subsequent instruction +// vreg3 = ASL vreg2, 2 +// then bit 3 of vreg3 will be a "ref" to (vreg1, 0) as well. +// The "bottom" case means that the bit's value cannot be determined, +// and that this virtual register actually defines it. The "bottom" case +// is discussed in detail in BitTracker.h. In fact, "bottom" is a "ref +// to self", so for the vreg1 above, the bit 0 of it will be a "ref" to +// (vreg1, 0), bit 1 will be a "ref" to (vreg1, 1), etc. +// +// The tracker implements the Wegman-Zadeck algorithm, originally developed +// for SSA-based constant propagation. 
Each register is represented as +// a sequence of bits, with the convention that bit 0 is the least signi- +// ficant bit. Each bit is propagated individually. The class RegisterCell +// implements the register's representation, and is also the subject of +// the lattice operations in the tracker. +// +// The intended usage of the bit tracker is to create a target-specific +// machine instruction evaluator, pass the evaluator to the BitTracker +// object, and run the tracker. The tracker will then collect the bit +// value information for a given machine function. After that, it can be +// queried for the cells for each virtual register. +// Sample code: +// const TargetSpecificEvaluator TSE(TRI, MRI); +// BitTracker BT(TSE, MF); +// BT.run(); +// ... +// unsigned Reg = interestingRegister(); +// RegisterCell RC = BT.get(Reg); +// if (RC[3].is(1)) +// Reg0bit3 = 1; +// +// The code below is intended to be fully target-independent. + +#include "llvm/CodeGen/MachineBasicBlock.h" +#include "llvm/CodeGen/MachineFunction.h" +#include "llvm/CodeGen/MachineInstr.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/IR/Constants.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/raw_ostream.h" +#include "llvm/Target/TargetRegisterInfo.h" + +#include "BitTracker.h" + +using namespace llvm; + +typedef BitTracker BT; + +namespace { + // Local trickery to pretty print a register (without the whole "%vreg" + // business). 
+ struct printv { + printv(unsigned r) : R(r) {} + unsigned R; + }; + raw_ostream &operator<< (raw_ostream &OS, const printv &PV) { + if (PV.R) + OS << 'v' << TargetRegisterInfo::virtReg2Index(PV.R); + else + OS << 's'; + return OS; + } +} + +raw_ostream &llvm::operator<<(raw_ostream &OS, const BT::BitValue &BV) { + switch (BV.Type) { + case BT::BitValue::Top: + OS << 'T'; + break; + case BT::BitValue::Zero: + OS << '0'; + break; + case BT::BitValue::One: + OS << '1'; + break; + case BT::BitValue::Ref: + OS << printv(BV.RefI.Reg) << '[' << BV.RefI.Pos << ']'; + break; + } + return OS; +} + +raw_ostream &llvm::operator<<(raw_ostream &OS, const BT::RegisterCell &RC) { + unsigned n = RC.Bits.size(); + OS << "{ w:" << n; + // Instead of printing each bit value individually, try to group them + // into logical segments, such as sequences of 0 or 1 bits or references + // to consecutive bits (e.g. "bits 3-5 are same as bits 7-9 of reg xyz"). + // "Start" will be the index of the beginning of the most recent segment. + unsigned Start = 0; + bool SeqRef = false; // A sequence of refs to consecutive bits. + bool ConstRef = false; // A sequence of refs to the same bit. + + for (unsigned i = 1, n = RC.Bits.size(); i < n; ++i) { + const BT::BitValue &V = RC[i]; + const BT::BitValue &SV = RC[Start]; + bool IsRef = (V.Type == BT::BitValue::Ref); + // If the current value is the same as Start, skip to the next one. + if (!IsRef && V == SV) + continue; + if (IsRef && SV.Type == BT::BitValue::Ref && V.RefI.Reg == SV.RefI.Reg) { + if (Start+1 == i) { + SeqRef = (V.RefI.Pos == SV.RefI.Pos+1); + ConstRef = (V.RefI.Pos == SV.RefI.Pos); + } + if (SeqRef && V.RefI.Pos == SV.RefI.Pos+(i-Start)) + continue; + if (ConstRef && V.RefI.Pos == SV.RefI.Pos) + continue; + } + + // The current value is different. Print the previous one and reset + // the Start. 
+ OS << " [" << Start; + unsigned Count = i - Start; + if (Count == 1) { + OS << "]:" << SV; + } else { + OS << '-' << i-1 << "]:"; + if (SV.Type == BT::BitValue::Ref && SeqRef) + OS << printv(SV.RefI.Reg) << '[' << SV.RefI.Pos << '-' + << SV.RefI.Pos+(Count-1) << ']'; + else + OS << SV; + } + Start = i; + SeqRef = ConstRef = false; + } + + OS << " [" << Start; + unsigned Count = n - Start; + if (n-Start == 1) { + OS << "]:" << RC[Start]; + } else { + OS << '-' << n-1 << "]:"; + const BT::BitValue &SV = RC[Start]; + if (SV.Type == BT::BitValue::Ref && SeqRef) + OS << printv(SV.RefI.Reg) << '[' << SV.RefI.Pos << '-' + << SV.RefI.Pos+(Count-1) << ']'; + else + OS << SV; + } + OS << " }"; + + return OS; +} + +BitTracker::BitTracker(const MachineEvaluator &E, MachineFunction &F) + : Trace(false), ME(E), MF(F), MRI(F.getRegInfo()), Map(*new CellMapType) {} + +BitTracker::~BitTracker() { + delete &Map; +} + + +// If we were allowed to update a cell for a part of a register, the meet +// operation would need to be parametrized by the register number and the +// exact part of the register, so that the computed BitRefs correspond to +// the actual bits of the "self" register. +// While this cannot happen in the current implementation, I'm not sure +// if this should be ruled out in the future. +bool BT::RegisterCell::meet(const RegisterCell &RC, unsigned SelfR) { + // An example when "meet" can be invoked with SelfR == 0 is a phi node + // with a physical register as an operand. + assert(SelfR == 0 || TargetRegisterInfo::isVirtualRegister(SelfR)); + bool Changed = false; + for (uint16_t i = 0, n = Bits.size(); i < n; ++i) { + const BitValue &RCV = RC[i]; + Changed |= Bits[i].meet(RCV, BitRef(SelfR, i)); + } + return Changed; +} + + +// Insert the entire cell RC into the current cell at position given by M. 
+BT::RegisterCell &BT::RegisterCell::insert(const BT::RegisterCell &RC, + const BitMask &M) { + uint16_t B = M.first(), E = M.last(), W = width(); + // Sanity: M must be a valid mask for *this. + assert(B < W && E < W); + // Sanity: the masked part of *this must have the same number of bits + // as the source. + assert(B > E || E-B+1 == RC.width()); // B <= E => E-B+1 = |RC|. + assert(B <= E || E+(W-B)+1 == RC.width()); // E < B => E+(W-B)+1 = |RC|. + if (B <= E) { + for (uint16_t i = 0; i <= E-B; ++i) + Bits[i+B] = RC[i]; + } else { + for (uint16_t i = 0; i < W-B; ++i) + Bits[i+B] = RC[i]; + for (uint16_t i = 0; i <= E; ++i) + Bits[i] = RC[i+(W-B)]; + } + return *this; +} + + +BT::RegisterCell BT::RegisterCell::extract(const BitMask &M) const { + uint16_t B = M.first(), E = M.last(), W = width(); + assert(B < W && E < W); + if (B <= E) { + RegisterCell RC(E-B+1); + for (uint16_t i = B; i <= E; ++i) + RC.Bits[i-B] = Bits[i]; + return RC; + } + + RegisterCell RC(E+(W-B)+1); + for (uint16_t i = 0; i < W-B; ++i) + RC.Bits[i] = Bits[i+B]; + for (uint16_t i = 0; i <= E; ++i) + RC.Bits[i+(W-B)] = Bits[i]; + return RC; +} + + +BT::RegisterCell &BT::RegisterCell::rol(uint16_t Sh) { + // Rotate left (i.e. towards increasing bit indices). + // Swap the two parts: [0..W-Sh-1] [W-Sh..W-1] + uint16_t W = width(); + Sh = Sh % W; + if (Sh == 0) + return *this; + + RegisterCell Tmp(W-Sh); + // Tmp = [0..W-Sh-1]. + for (uint16_t i = 0; i < W-Sh; ++i) + Tmp[i] = Bits[i]; + // Shift [W-Sh..W-1] to [0..Sh-1]. + for (uint16_t i = 0; i < Sh; ++i) + Bits[i] = Bits[W-Sh+i]; + // Copy Tmp to [Sh..W-1]. + for (uint16_t i = 0; i < W-Sh; ++i) + Bits[i+Sh] = Tmp.Bits[i]; + return *this; +} + + +BT::RegisterCell &BT::RegisterCell::fill(uint16_t B, uint16_t E, + const BitValue &V) { + assert(B <= E); + while (B < E) + Bits[B++] = V; + return *this; +} + + +BT::RegisterCell &BT::RegisterCell::cat(const RegisterCell &RC) { + // Append the cell given as the argument to the "this" cell. 
+ // Bit 0 of RC becomes bit W of the result, where W is this->width(). + uint16_t W = width(), WRC = RC.width(); + Bits.resize(W+WRC); + for (uint16_t i = 0; i < WRC; ++i) + Bits[i+W] = RC.Bits[i]; + return *this; +} + + +uint16_t BT::RegisterCell::ct(bool B) const { + uint16_t W = width(); + uint16_t C = 0; + BitValue V = B; + while (C < W && Bits[C] == V) + C++; + return C; +} + + +uint16_t BT::RegisterCell::cl(bool B) const { + uint16_t W = width(); + uint16_t C = 0; + BitValue V = B; + while (C < W && Bits[W-(C+1)] == V) + C++; + return C; +} + + +bool BT::RegisterCell::operator== (const RegisterCell &RC) const { + uint16_t W = Bits.size(); + if (RC.Bits.size() != W) + return false; + for (uint16_t i = 0; i < W; ++i) + if (Bits[i] != RC[i]) + return false; + return true; +} + + +uint16_t BT::MachineEvaluator::getRegBitWidth(const RegisterRef &RR) const { + // The general problem is with finding a register class that corresponds + // to a given reference reg:sub. There can be several such classes, and + // since we only care about the register size, it does not matter which + // such class we would find. + // The easiest way to accomplish what we want is to + // 1. find a physical register PhysR from the same class as RR.Reg, + // 2. find a physical register PhysS that corresponds to PhysR:RR.Sub, + // 3. find a register class that contains PhysS. + unsigned PhysR; + if (TargetRegisterInfo::isVirtualRegister(RR.Reg)) { + const TargetRegisterClass *VC = MRI.getRegClass(RR.Reg); + assert(VC->begin() != VC->end() && "Empty register class"); + PhysR = *VC->begin(); + } else { + assert(TargetRegisterInfo::isPhysicalRegister(RR.Reg)); + PhysR = RR.Reg; + } + + unsigned PhysS = (RR.Sub == 0) ? 
PhysR : TRI.getSubReg(PhysR, RR.Sub); + const TargetRegisterClass *RC = TRI.getMinimalPhysRegClass(PhysS); + uint16_t BW = RC->getSize()*8; + return BW; +} + + +BT::RegisterCell BT::MachineEvaluator::getCell(const RegisterRef &RR, + const CellMapType &M) const { + uint16_t BW = getRegBitWidth(RR); + + // Physical registers are assumed to be present in the map with an unknown + // value. Don't actually insert anything in the map, just return the cell. + if (TargetRegisterInfo::isPhysicalRegister(RR.Reg)) + return RegisterCell::self(0, BW); + + assert(TargetRegisterInfo::isVirtualRegister(RR.Reg)); + // For virtual registers that belong to a class that is not tracked, + // generate an "unknown" value as well. + const TargetRegisterClass *C = MRI.getRegClass(RR.Reg); + if (!track(C)) + return RegisterCell::self(0, BW); + + CellMapType::const_iterator F = M.find(RR.Reg); + if (F != M.end()) { + if (!RR.Sub) + return F->second; + BitMask M = mask(RR.Reg, RR.Sub); + return F->second.extract(M); + } + // If not found, create a "top" entry, but do not insert it in the map. + return RegisterCell::top(BW); +} + + +void BT::MachineEvaluator::putCell(const RegisterRef &RR, RegisterCell RC, + CellMapType &M) const { + // While updating the cell map can be done in a meaningful way for + // a part of a register, it makes little sense to implement it as the + // SSA representation would never contain such "partial definitions". + if (!TargetRegisterInfo::isVirtualRegister(RR.Reg)) + return; + assert(RR.Sub == 0 && "Unexpected sub-register in definition"); + // Eliminate all ref-to-reg-0 bit values: replace them with "self". + for (unsigned i = 0, n = RC.width(); i < n; ++i) { + const BitValue &V = RC[i]; + if (V.Type == BitValue::Ref && V.RefI.Reg == 0) + RC[i].RefI = BitRef(RR.Reg, i); + } + M[RR.Reg] = RC; +} + + +// Check if the cell represents a compile-time integer value. 
+bool BT::MachineEvaluator::isInt(const RegisterCell &A) const { + uint16_t W = A.width(); + for (uint16_t i = 0; i < W; ++i) + if (!A[i].is(0) && !A[i].is(1)) + return false; + return true; +} + + +// Convert a cell to the integer value. The result must fit in uint64_t. +uint64_t BT::MachineEvaluator::toInt(const RegisterCell &A) const { + assert(isInt(A)); + uint64_t Val = 0; + uint16_t W = A.width(); + for (uint16_t i = 0; i < W; ++i) { + Val <<= 1; + Val |= A[i].is(1); + } + return Val; +} + + +// Evaluator helper functions. These implement some common operation on +// register cells that can be used to implement target-specific instructions +// in a target-specific evaluator. + +BT::RegisterCell BT::MachineEvaluator::eIMM(int64_t V, uint16_t W) const { + RegisterCell Res(W); + // For bits beyond the 63rd, this will generate the sign bit of V. + for (uint16_t i = 0; i < W; ++i) { + Res[i] = BitValue(V & 1); + V >>= 1; + } + return Res; +} + + +BT::RegisterCell BT::MachineEvaluator::eIMM(const ConstantInt *CI) const { + APInt A = CI->getValue(); + uint16_t BW = A.getBitWidth(); + assert((unsigned)BW == A.getBitWidth() && "BitWidth overflow"); + RegisterCell Res(BW); + for (uint16_t i = 0; i < BW; ++i) + Res[i] = A[i]; + return Res; +} + + +BT::RegisterCell BT::MachineEvaluator::eADD(const RegisterCell &A1, + const RegisterCell &A2) const { + uint16_t W = A1.width(); + assert(W == A2.width()); + RegisterCell Res(W); + bool Carry = false; + uint16_t I; + for (I = 0; I < W; ++I) { + const BitValue &V1 = A1[I]; + const BitValue &V2 = A2[I]; + if (!V1.num() || !V2.num()) + break; + unsigned S = bool(V1) + bool(V2) + Carry; + Res[I] = BitValue(S & 1); + Carry = (S > 1); + } + for (; I < W; ++I) { + const BitValue &V1 = A1[I]; + const BitValue &V2 = A2[I]; + // If the next bit is same as Carry, the result will be 0 plus the + // other bit. The Carry bit will remain unchanged. 
+ if (V1.is(Carry)) + Res[I] = BitValue::ref(V2); + else if (V2.is(Carry)) + Res[I] = BitValue::ref(V1); + else + break; + } + for (; I < W; ++I) + Res[I] = BitValue::self(); + return Res; +} + + +BT::RegisterCell BT::MachineEvaluator::eSUB(const RegisterCell &A1, + const RegisterCell &A2) const { + uint16_t W = A1.width(); + assert(W == A2.width()); + RegisterCell Res(W); + bool Borrow = false; + uint16_t I; + for (I = 0; I < W; ++I) { + const BitValue &V1 = A1[I]; + const BitValue &V2 = A2[I]; + if (!V1.num() || !V2.num()) + break; + unsigned S = bool(V1) - bool(V2) - Borrow; + Res[I] = BitValue(S & 1); + Borrow = (S > 1); + } + for (; I < W; ++I) { + const BitValue &V1 = A1[I]; + const BitValue &V2 = A2[I]; + if (V1.is(Borrow)) { + Res[I] = BitValue::ref(V2); + break; + } + if (V2.is(Borrow)) + Res[I] = BitValue::ref(V1); + else + break; + } + for (; I < W; ++I) + Res[I] = BitValue::self(); + return Res; +} + + +BT::RegisterCell BT::MachineEvaluator::eMLS(const RegisterCell &A1, + const RegisterCell &A2) const { + uint16_t W = A1.width() + A2.width(); + uint16_t Z = A1.ct(0) + A2.ct(0); + RegisterCell Res(W); + Res.fill(0, Z, BitValue::Zero); + Res.fill(Z, W, BitValue::self()); + return Res; +} + + +BT::RegisterCell BT::MachineEvaluator::eMLU(const RegisterCell &A1, + const RegisterCell &A2) const { + uint16_t W = A1.width() + A2.width(); + uint16_t Z = A1.ct(0) + A2.ct(0); + RegisterCell Res(W); + Res.fill(0, Z, BitValue::Zero); + Res.fill(Z, W, BitValue::self()); + return Res; +} + + +BT::RegisterCell BT::MachineEvaluator::eASL(const RegisterCell &A1, + uint16_t Sh) const { + assert(Sh <= A1.width()); + RegisterCell Res = RegisterCell::ref(A1); + Res.rol(Sh); + Res.fill(0, Sh, BitValue::Zero); + return Res; +} + + +BT::RegisterCell BT::MachineEvaluator::eLSR(const RegisterCell &A1, + uint16_t Sh) const { + uint16_t W = A1.width(); + assert(Sh <= W); + RegisterCell Res = RegisterCell::ref(A1); + Res.rol(W-Sh); + Res.fill(W-Sh, W, BitValue::Zero); + return Res; 
+} + + +BT::RegisterCell BT::MachineEvaluator::eASR(const RegisterCell &A1, + uint16_t Sh) const { + uint16_t W = A1.width(); + assert(Sh <= W); + RegisterCell Res = RegisterCell::ref(A1); + BitValue Sign = Res[W-1]; + Res.rol(W-Sh); + Res.fill(W-Sh, W, Sign); + return Res; +} + + +BT::RegisterCell BT::MachineEvaluator::eAND(const RegisterCell &A1, + const RegisterCell &A2) const { + uint16_t W = A1.width(); + assert(W == A2.width()); + RegisterCell Res(W); + for (uint16_t i = 0; i < W; ++i) { + const BitValue &V1 = A1[i]; + const BitValue &V2 = A2[i]; + if (V1.is(1)) + Res[i] = BitValue::ref(V2); + else if (V2.is(1)) + Res[i] = BitValue::ref(V1); + else if (V1.is(0) || V2.is(0)) + Res[i] = BitValue::Zero; + else if (V1 == V2) + Res[i] = V1; + else + Res[i] = BitValue::self(); + } + return Res; +} + + +BT::RegisterCell BT::MachineEvaluator::eORL(const RegisterCell &A1, + const RegisterCell &A2) const { + uint16_t W = A1.width(); + assert(W == A2.width()); + RegisterCell Res(W); + for (uint16_t i = 0; i < W; ++i) { + const BitValue &V1 = A1[i]; + const BitValue &V2 = A2[i]; + if (V1.is(1) || V2.is(1)) + Res[i] = BitValue::One; + else if (V1.is(0)) + Res[i] = BitValue::ref(V2); + else if (V2.is(0)) + Res[i] = BitValue::ref(V1); + else if (V1 == V2) + Res[i] = V1; + else + Res[i] = BitValue::self(); + } + return Res; +} + + +BT::RegisterCell BT::MachineEvaluator::eXOR(const RegisterCell &A1, + const RegisterCell &A2) const { + uint16_t W = A1.width(); + assert(W == A2.width()); + RegisterCell Res(W); + for (uint16_t i = 0; i < W; ++i) { + const BitValue &V1 = A1[i]; + const BitValue &V2 = A2[i]; + if (V1.is(0)) + Res[i] = BitValue::ref(V2); + else if (V2.is(0)) + Res[i] = BitValue::ref(V1); + else if (V1 == V2) + Res[i] = BitValue::Zero; + else + Res[i] = BitValue::self(); + } + return Res; +} + + +BT::RegisterCell BT::MachineEvaluator::eNOT(const RegisterCell &A1) const { + uint16_t W = A1.width(); + RegisterCell Res(W); + for (uint16_t i = 0; i < W; ++i) { + const 
BitValue &V = A1[i]; + if (V.is(0)) + Res[i] = BitValue::One; + else if (V.is(1)) + Res[i] = BitValue::Zero; + else + Res[i] = BitValue::self(); + } + return Res; +} + + +BT::RegisterCell BT::MachineEvaluator::eSET(const RegisterCell &A1, + uint16_t BitN) const { + assert(BitN < A1.width()); + RegisterCell Res = RegisterCell::ref(A1); + Res[BitN] = BitValue::One; + return Res; +} + + +BT::RegisterCell BT::MachineEvaluator::eCLR(const RegisterCell &A1, + uint16_t BitN) const { + assert(BitN < A1.width()); + RegisterCell Res = RegisterCell::ref(A1); + Res[BitN] = BitValue::Zero; + return Res; +} + + +BT::RegisterCell BT::MachineEvaluator::eCLB(const RegisterCell &A1, bool B, + uint16_t W) const { + uint16_t C = A1.cl(B), AW = A1.width(); + // If the last leading non-B bit is not a constant, then we don't know + // the real count. + if ((C < AW && A1[AW-1-C].num()) || C == AW) + return eIMM(C, W); + return RegisterCell::self(0, W); +} + + +BT::RegisterCell BT::MachineEvaluator::eCTB(const RegisterCell &A1, bool B, + uint16_t W) const { + uint16_t C = A1.ct(B), AW = A1.width(); + // If the last trailing non-B bit is not a constant, then we don't know + // the real count. + if ((C < AW && A1[C].num()) || C == AW) + return eIMM(C, W); + return RegisterCell::self(0, W); +} + + +BT::RegisterCell BT::MachineEvaluator::eSXT(const RegisterCell &A1, + uint16_t FromN) const { + uint16_t W = A1.width(); + assert(FromN <= W); + RegisterCell Res = RegisterCell::ref(A1); + BitValue Sign = Res[FromN-1]; + // Sign-extend "inreg". 
+ Res.fill(FromN, W, Sign); + return Res; +} + + +BT::RegisterCell BT::MachineEvaluator::eZXT(const RegisterCell &A1, + uint16_t FromN) const { + uint16_t W = A1.width(); + assert(FromN <= W); + RegisterCell Res = RegisterCell::ref(A1); + Res.fill(FromN, W, BitValue::Zero); + return Res; +} + + +BT::RegisterCell BT::MachineEvaluator::eXTR(const RegisterCell &A1, + uint16_t B, uint16_t E) const { + uint16_t W = A1.width(); + assert(B < W && E <= W); + if (B == E) + return RegisterCell(0); + uint16_t Last = (E > 0) ? E-1 : W-1; + RegisterCell Res = RegisterCell::ref(A1).extract(BT::BitMask(B, Last)); + // Return shorter cell. + return Res; +} + + +BT::RegisterCell BT::MachineEvaluator::eINS(const RegisterCell &A1, + const RegisterCell &A2, uint16_t AtN) const { + uint16_t W1 = A1.width(), W2 = A2.width(); + (void)W1; + assert(AtN < W1 && AtN+W2 <= W1); + // Copy bits from A1, insert A2 at position AtN. + RegisterCell Res = RegisterCell::ref(A1); + if (W2 > 0) + Res.insert(RegisterCell::ref(A2), BT::BitMask(AtN, AtN+W2-1)); + return Res; +} + + +BT::BitMask BT::MachineEvaluator::mask(unsigned Reg, unsigned Sub) const { + assert(Sub == 0 && "Generic BitTracker::mask called for Sub != 0"); + uint16_t W = getRegBitWidth(Reg); + assert(W > 0 && "Cannot generate mask for empty register"); + return BitMask(0, W-1); +} + + +bool BT::MachineEvaluator::evaluate(const MachineInstr *MI, + const CellMapType &Inputs, CellMapType &Outputs) const { + unsigned Opc = MI->getOpcode(); + switch (Opc) { + case TargetOpcode::REG_SEQUENCE: { + RegisterRef RD = MI->getOperand(0); + assert(RD.Sub == 0); + RegisterRef RS = MI->getOperand(1); + unsigned SS = MI->getOperand(2).getImm(); + RegisterRef RT = MI->getOperand(3); + unsigned ST = MI->getOperand(4).getImm(); + assert(SS != ST); + + uint16_t W = getRegBitWidth(RD); + RegisterCell Res(W); + Res.insert(RegisterCell::ref(getCell(RS, Inputs)), mask(RD.Reg, SS)); + Res.insert(RegisterCell::ref(getCell(RT, Inputs)), mask(RD.Reg, ST)); + 
putCell(RD, Res, Outputs); + break; + } + + case TargetOpcode::COPY: { + // COPY can transfer a smaller register into a wider one. + // If that is the case, fill the remaining high bits with 0. + RegisterRef RD = MI->getOperand(0); + RegisterRef RS = MI->getOperand(1); + assert(RD.Sub == 0); + uint16_t WD = getRegBitWidth(RD); + uint16_t WS = getRegBitWidth(RS); + assert(WD >= WS); + RegisterCell Src = getCell(RS, Inputs); + RegisterCell Res(WD); + Res.insert(Src, BitMask(0, WS-1)); + Res.fill(WS, WD, BitValue::Zero); + putCell(RD, Res, Outputs); + break; + } + + default: + return false; + } + + return true; +} + + +// Main W-Z implementation. + +void BT::visitPHI(const MachineInstr *PI) { + int ThisN = PI->getParent()->getNumber(); + if (Trace) + dbgs() << "Visit FI(BB#" << ThisN << "): " << *PI; + + const MachineOperand &MD = PI->getOperand(0); + assert(MD.getSubReg() == 0 && "Unexpected sub-register in definition"); + RegisterRef DefRR(MD); + uint16_t DefBW = ME.getRegBitWidth(DefRR); + + RegisterCell DefC = ME.getCell(DefRR, Map); + if (DefC == RegisterCell::self(DefRR.Reg, DefBW)) // XXX slow + return; + + bool Changed = false; + + for (unsigned i = 1, n = PI->getNumOperands(); i < n; i += 2) { + const MachineBasicBlock *PB = PI->getOperand(i+1).getMBB(); + int PredN = PB->getNumber(); + if (Trace) + dbgs() << " edge BB#" << PredN << "->BB#" << ThisN; + if (!EdgeExec.count(CFGEdge(PredN, ThisN))) { + if (Trace) + dbgs() << " not executable\n"; + continue; + } + + RegisterRef RU = PI->getOperand(i); + RegisterCell ResC = ME.getCell(RU, Map); + if (Trace) + dbgs() << " input reg: " << PrintReg(RU.Reg, &ME.TRI, RU.Sub) + << " cell: " << ResC << "\n"; + Changed |= DefC.meet(ResC, DefRR.Reg); + } + + if (Changed) { + if (Trace) + dbgs() << "Output: " << PrintReg(DefRR.Reg, &ME.TRI, DefRR.Sub) + << " cell: " << DefC << "\n"; + ME.putCell(DefRR, DefC, Map); + visitUsesOf(DefRR.Reg); + } +} + + +void BT::visitNonBranch(const MachineInstr *MI) { + if (Trace) { + int 
ThisN = MI->getParent()->getNumber(); + dbgs() << "Visit MI(BB#" << ThisN << "): " << *MI; + } + if (MI->isDebugValue()) + return; + assert(!MI->isBranch() && "Unexpected branch instruction"); + + CellMapType ResMap; + bool Eval = ME.evaluate(MI, Map, ResMap); + + if (Trace && Eval) { + for (unsigned i = 0, n = MI->getNumOperands(); i < n; ++i) { + const MachineOperand &MO = MI->getOperand(i); + if (!MO.isReg() || !MO.isUse()) + continue; + RegisterRef RU(MO); + dbgs() << " input reg: " << PrintReg(RU.Reg, &ME.TRI, RU.Sub) + << " cell: " << ME.getCell(RU, Map) << "\n"; + } + dbgs() << "Outputs:\n"; + for (CellMapType::iterator I = ResMap.begin(), E = ResMap.end(); + I != E; ++I) { + RegisterRef RD(I->first); + dbgs() << " " << PrintReg(I->first, &ME.TRI) << " cell: " + << ME.getCell(RD, ResMap) << "\n"; + } + } + + // Iterate over all definitions of the instruction, and update the + // cells accordingly. + for (unsigned i = 0, n = MI->getNumOperands(); i < n; ++i) { + const MachineOperand &MO = MI->getOperand(i); + // Visit register defs only. + if (!MO.isReg() || !MO.isDef()) + continue; + RegisterRef RD(MO); + assert(RD.Sub == 0 && "Unexpected sub-register in definition"); + if (!TargetRegisterInfo::isVirtualRegister(RD.Reg)) + continue; + + bool Changed = false; + if (!Eval || !ResMap.has(RD.Reg)) { + // Set to "ref" (aka "bottom"). + uint16_t DefBW = ME.getRegBitWidth(RD); + RegisterCell RefC = RegisterCell::self(RD.Reg, DefBW); + if (RefC != ME.getCell(RD, Map)) { + ME.putCell(RD, RefC, Map); + Changed = true; + } + } else { + RegisterCell DefC = ME.getCell(RD, Map); + RegisterCell ResC = ME.getCell(RD, ResMap); + // This is a non-phi instruction, so the values of the inputs come + // from the same registers each time this instruction is evaluated. + // During the propagation, the values of the inputs can become lowered + // in the sense of the lattice operation, which may cause different + // results to be calculated in subsequent evaluations. 
This should + // not cause the bottoming of the result in the map, since the new + // result is already reflecting the lowered inputs. + for (uint16_t i = 0, w = DefC.width(); i < w; ++i) { + BitValue &V = DefC[i]; + // Bits that are already "bottom" should not be updated. + if (V.Type == BitValue::Ref && V.RefI.Reg == RD.Reg) + continue; + // Same for those that are identical in DefC and ResC. + if (V == ResC[i]) + continue; + V = ResC[i]; + Changed = true; + } + if (Changed) + ME.putCell(RD, DefC, Map); + } + if (Changed) + visitUsesOf(RD.Reg); + } +} + + +void BT::visitBranchesFrom(const MachineInstr *BI) { + const MachineBasicBlock &B = *BI->getParent(); + MachineBasicBlock::const_iterator It = BI, End = B.end(); + BranchTargetList Targets, BTs; + bool FallsThrough = true, DefaultToAll = false; + int ThisN = B.getNumber(); + + do { + BTs.clear(); + const MachineInstr *MI = &*It; + if (Trace) + dbgs() << "Visit BR(BB#" << ThisN << "): " << *MI; + assert(MI->isBranch() && "Expecting branch instruction"); + InstrExec.insert(MI); + bool Eval = ME.evaluate(MI, Map, BTs, FallsThrough); + if (!Eval) { + // If the evaluation failed, we will add all targets. Keep going in + // the loop to mark all executable branches as such. + DefaultToAll = true; + FallsThrough = true; + if (Trace) + dbgs() << " failed to evaluate: will add all CFG successors\n"; + } else if (!DefaultToAll) { + // If evaluated successfully add the targets to the cumulative list. + if (Trace) { + dbgs() << " adding targets:"; + for (unsigned i = 0, n = BTs.size(); i < n; ++i) + dbgs() << " BB#" << BTs[i]->getNumber(); + if (FallsThrough) + dbgs() << "\n falls through\n"; + else + dbgs() << "\n does not fall through\n"; + } + Targets.insert(BTs.begin(), BTs.end()); + } + ++It; + } while (FallsThrough && It != End); + + typedef MachineBasicBlock::const_succ_iterator succ_iterator; + if (!DefaultToAll) { + // Need to add all CFG successors that lead to EH landing pads. 
+ // There won't be explicit branches to these blocks, but they must + // be processed. + for (succ_iterator I = B.succ_begin(), E = B.succ_end(); I != E; ++I) { + const MachineBasicBlock *SB = *I; + if (SB->isLandingPad()) + Targets.insert(SB); + } + if (FallsThrough) { + MachineFunction::const_iterator BIt = &B; + MachineFunction::const_iterator Next = std::next(BIt); + if (Next != MF.end()) + Targets.insert(&*Next); + } + } else { + for (succ_iterator I = B.succ_begin(), E = B.succ_end(); I != E; ++I) + Targets.insert(*I); + } + + for (unsigned i = 0, n = Targets.size(); i < n; ++i) { + int TargetN = Targets[i]->getNumber(); + FlowQ.push(CFGEdge(ThisN, TargetN)); + } +} + + +void BT::visitUsesOf(unsigned Reg) { + if (Trace) + dbgs() << "visiting uses of " << PrintReg(Reg, &ME.TRI) << "\n"; + + typedef MachineRegisterInfo::use_nodbg_iterator use_iterator; + use_iterator End = MRI.use_nodbg_end(); + for (use_iterator I = MRI.use_nodbg_begin(Reg); I != End; ++I) { + MachineInstr *UseI = I->getParent(); + if (!InstrExec.count(UseI)) + continue; + if (UseI->isPHI()) + visitPHI(UseI); + else if (!UseI->isBranch()) + visitNonBranch(UseI); + else + visitBranchesFrom(UseI); + } +} + + +BT::RegisterCell BT::get(RegisterRef RR) const { + return ME.getCell(RR, Map); +} + + +void BT::put(RegisterRef RR, const RegisterCell &RC) { + ME.putCell(RR, RC, Map); +} + + +// Replace all references to bits from OldRR with the corresponding bits +// in NewRR. 
+void BT::subst(RegisterRef OldRR, RegisterRef NewRR) { + assert(Map.has(OldRR.Reg) && "OldRR not present in map"); + BitMask OM = ME.mask(OldRR.Reg, OldRR.Sub); + BitMask NM = ME.mask(NewRR.Reg, NewRR.Sub); + uint16_t OMB = OM.first(), OME = OM.last(); + uint16_t NMB = NM.first(), NME = NM.last(); + (void)NME; + assert((OME-OMB == NME-NMB) && + "Substituting registers of different lengths"); + for (CellMapType::iterator I = Map.begin(), E = Map.end(); I != E; ++I) { + RegisterCell &RC = I->second; + for (uint16_t i = 0, w = RC.width(); i < w; ++i) { + BitValue &V = RC[i]; + if (V.Type != BitValue::Ref || V.RefI.Reg != OldRR.Reg) + continue; + if (V.RefI.Pos < OMB || V.RefI.Pos > OME) + continue; + V.RefI.Reg = NewRR.Reg; + V.RefI.Pos += NMB-OMB; + } + } +} + + +// Check if the block has been "executed" during propagation. (If not, the +// block is dead, but it may still appear to be reachable.) +bool BT::reached(const MachineBasicBlock *B) const { + int BN = B->getNumber(); + assert(BN >= 0); + for (EdgeSetType::iterator I = EdgeExec.begin(), E = EdgeExec.end(); + I != E; ++I) { + if (I->second == BN) + return true; + } + return false; +} + + +void BT::reset() { + EdgeExec.clear(); + InstrExec.clear(); + Map.clear(); +} + + +void BT::run() { + reset(); + assert(FlowQ.empty()); + + typedef GraphTraits<const MachineFunction*> MachineFlowGraphTraits; + const MachineBasicBlock *Entry = MachineFlowGraphTraits::getEntryNode(&MF); + + unsigned MaxBN = 0; + for (MachineFunction::const_iterator I = MF.begin(), E = MF.end(); + I != E; ++I) { + assert(I->getNumber() >= 0 && "Disconnected block"); + unsigned BN = I->getNumber(); + if (BN > MaxBN) + MaxBN = BN; + } + + // Keep track of visited blocks. + BitVector BlockScanned(MaxBN+1); + + int EntryN = Entry->getNumber(); + // Generate a fake edge to get something to start with. 
+ FlowQ.push(CFGEdge(-1, EntryN)); + + while (!FlowQ.empty()) { + CFGEdge Edge = FlowQ.front(); + FlowQ.pop(); + + if (EdgeExec.count(Edge)) + continue; + EdgeExec.insert(Edge); + + const MachineBasicBlock &B = *MF.getBlockNumbered(Edge.second); + MachineBasicBlock::const_iterator It = B.begin(), End = B.end(); + // Visit PHI nodes first. + while (It != End && It->isPHI()) { + const MachineInstr *PI = &*It++; + InstrExec.insert(PI); + visitPHI(PI); + } + + // If this block has already been visited through a flow graph edge, + // then the instructions have already been processed. Any updates to + // the cells would now only happen through visitUsesOf... + if (BlockScanned[Edge.second]) + continue; + BlockScanned[Edge.second] = true; + + // Visit non-branch instructions. + while (It != End && !It->isBranch()) { + const MachineInstr *MI = &*It++; + InstrExec.insert(MI); + visitNonBranch(MI); + } + // If block end has been reached, add the fall-through edge to the queue. + if (It == End) { + MachineFunction::const_iterator BIt = &B; + MachineFunction::const_iterator Next = std::next(BIt); + if (Next != MF.end()) { + int ThisN = B.getNumber(); + int NextN = Next->getNumber(); + FlowQ.push(CFGEdge(ThisN, NextN)); + } + } else { + // Handle the remaining sequence of branches. This function will update + // the work queue. 
+      visitBranchesFrom(It);
+    }
+  } // while (!FlowQ.empty())
+
+  if (Trace) {
+    dbgs() << "Cells after propagation:\n";
+    for (CellMapType::iterator I = Map.begin(), E = Map.end(); I != E; ++I)
+      dbgs() << PrintReg(I->first, &ME.TRI) << " -> " << I->second << "\n";
+  }
+}
+
diff --git a/lib/Target/Hexagon/BitTracker.h b/lib/Target/Hexagon/BitTracker.h
new file mode 100644
index 000000000000..ed002a794d66
--- /dev/null
+++ b/lib/Target/Hexagon/BitTracker.h
@@ -0,0 +1,449 @@
+//===--- BitTracker.h -----------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef BITTRACKER_H
+#define BITTRACKER_H
+
+#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/MachineFunction.h"
+
+#include <map>
+#include <queue>
+#include <set>
+
+namespace llvm {
+  class ConstantInt;
+  class MachineRegisterInfo;
+  class MachineBasicBlock;
+  class MachineInstr;
+  class MachineOperand;
+  class raw_ostream;
+
+struct BitTracker {
+  struct BitRef;
+  struct RegisterRef;
+  struct BitValue;
+  struct BitMask;
+  struct RegisterCell;
+  struct MachineEvaluator;
+
+  typedef SetVector<const MachineBasicBlock *> BranchTargetList;
+
+  struct CellMapType : public std::map<unsigned,RegisterCell> {
+    bool has(unsigned Reg) const;
+  };
+
+  BitTracker(const MachineEvaluator &E, MachineFunction &F);
+  ~BitTracker();
+
+  void run();
+  void trace(bool On = false) { Trace = On; }
+  bool has(unsigned Reg) const;
+  const RegisterCell &lookup(unsigned Reg) const;
+  RegisterCell get(RegisterRef RR) const;
+  void put(RegisterRef RR, const RegisterCell &RC);
+  void subst(RegisterRef OldRR, RegisterRef NewRR);
+  bool reached(const MachineBasicBlock *B) const;
+
+private:
+  void visitPHI(const MachineInstr *PI);
+  void 
visitNonBranch(const MachineInstr *MI);
+  void visitBranchesFrom(const MachineInstr *BI);
+  void visitUsesOf(unsigned Reg);
+  void reset();
+
+  typedef std::pair<int,int> CFGEdge;
+  typedef std::set<CFGEdge> EdgeSetType;
+  typedef std::set<const MachineInstr *> InstrSetType;
+  typedef std::queue<CFGEdge> EdgeQueueType;
+
+  EdgeSetType EdgeExec;         // Executable flow graph edges.
+  InstrSetType InstrExec;       // Executable instructions.
+  EdgeQueueType FlowQ;          // Work queue of CFG edges.
+  bool Trace;                   // Enable tracing for debugging.
+
+  const MachineEvaluator &ME;
+  MachineFunction &MF;
+  MachineRegisterInfo &MRI;
+  CellMapType &Map;
+};
+
+
+// Abstraction of a reference to bit at position Pos from a register Reg.
+struct BitTracker::BitRef {
+  BitRef(unsigned R = 0, uint16_t P = 0) : Reg(R), Pos(P) {}
+  BitRef(const BitRef &BR) : Reg(BR.Reg), Pos(BR.Pos) {}
+  bool operator== (const BitRef &BR) const {
+    // If Reg is 0, disregard Pos.
+    return Reg == BR.Reg && (Reg == 0 || Pos == BR.Pos);
+  }
+  unsigned Reg;
+  uint16_t Pos;
+};
+
+
+// Abstraction of a register reference in MachineOperand. It contains the
+// register number and the subregister index.
+struct BitTracker::RegisterRef {
+  RegisterRef(unsigned R = 0, unsigned S = 0)
+      : Reg(R), Sub(S) {}
+  RegisterRef(const MachineOperand &MO)
+      : Reg(MO.getReg()), Sub(MO.getSubReg()) {}
+  unsigned Reg, Sub;
+};
+
+
+// Value that a single bit can take. This is outside of the context of
+// any register, it is more of an abstraction of the two-element set of
+// possible bit values. One extension here is the "Ref" type, which
+// indicates that this bit takes the same value as the bit described by
+// RefInfo.
+struct BitTracker::BitValue {
+  enum ValueType {
+    Top,    // Bit not yet defined.
+    Zero,   // Bit = 0.
+    One,    // Bit = 1.
+    Ref     // Bit value same as the one described in RefI.
+ // Conceptually, there is no explicit "bottom" value: the lattice's + // bottom will be expressed as a "ref to itself", which, in the context + // of registers, could be read as "this value of this bit is defined by + // this bit". + // The ordering is: + // x <= Top, + // Self <= x, where "Self" is "ref to itself". + // This makes the value lattice different for each virtual register + // (even for each bit in the same virtual register), since the "bottom" + // for one register will be a simple "ref" for another register. + // Since we do not store the "Self" bit and register number, the meet + // operation will need to take it as a parameter. + // + // In practice there is a special case for values that are not associa- + // ted with any specific virtual register. An example would be a value + // corresponding to a bit of a physical register, or an intermediate + // value obtained in some computation (such as instruction evaluation). + // Such cases are identical to the usual Ref type, but the register + // number is 0. In such case the Pos field of the reference is ignored. + // + // What is worthy of notice is that in value V (that is a "ref"), as long + // as the RefI.Reg is not 0, it may actually be the same register as the + // one in which V will be contained. If the RefI.Pos refers to the posi- + // tion of V, then V is assumed to be "bottom" (as a "ref to itself"), + // otherwise V is taken to be identical to the referenced bit of the + // same register. + // If RefI.Reg is 0, however, such a reference to the same register is + // not possible. Any value V that is a "ref", and whose RefI.Reg is 0 + // is treated as "bottom". + }; + ValueType Type; + BitRef RefI; + + BitValue(ValueType T = Top) : Type(T) {} + BitValue(bool B) : Type(B ? 
One : Zero) {} + BitValue(const BitValue &V) : Type(V.Type), RefI(V.RefI) {} + BitValue(unsigned Reg, uint16_t Pos) : Type(Ref), RefI(Reg, Pos) {} + + bool operator== (const BitValue &V) const { + if (Type != V.Type) + return false; + if (Type == Ref && !(RefI == V.RefI)) + return false; + return true; + } + bool operator!= (const BitValue &V) const { + return !operator==(V); + } + bool is(unsigned T) const { + assert(T == 0 || T == 1); + return T == 0 ? Type == Zero + : (T == 1 ? Type == One : false); + } + + // The "meet" operation is the "." operation in a semilattice (L, ., T, B): + // (1) x.x = x + // (2) x.y = y.x + // (3) x.(y.z) = (x.y).z + // (4) x.T = x (i.e. T = "top") + // (5) x.B = B (i.e. B = "bottom") + // + // This "meet" function will update the value of the "*this" object with + // the newly calculated one, and return "true" if the value of *this has + // changed, and "false" otherwise. + // To prove that it satisfies the conditions (1)-(5), it is sufficient + // to show that a relation + // x <= y <=> x.y = x + // defines a partial order (i.e. that "meet" is same as "infimum"). + bool meet(const BitValue &V, const BitRef &Self) { + // First, check the cases where there is nothing to be done. + if (Type == Ref && RefI == Self) // Bottom.meet(V) = Bottom (i.e. This) + return false; + if (V.Type == Top) // This.meet(Top) = This + return false; + if (*this == V) // This.meet(This) = This + return false; + + // At this point, we know that the value of "this" will change. + // If it is Top, it will become the same as V, otherwise it will + // become "bottom" (i.e. Self). + if (Type == Top) { + Type = V.Type; + RefI = V.RefI; // This may be irrelevant, but copy anyway. + return true; + } + // Become "bottom". + Type = Ref; + RefI = Self; + return true; + } + + // Create a reference to the bit value V. + static BitValue ref(const BitValue &V); + // Create a "self". 
+ static BitValue self(const BitRef &Self = BitRef()); + + bool num() const { + return Type == Zero || Type == One; + } + operator bool() const { + assert(Type == Zero || Type == One); + return Type == One; + } + + friend raw_ostream &operator<<(raw_ostream &OS, const BitValue &BV); +}; + + +// This operation must be idempotent, i.e. ref(ref(V)) == ref(V). +inline BitTracker::BitValue +BitTracker::BitValue::ref(const BitValue &V) { + if (V.Type != Ref) + return BitValue(V.Type); + if (V.RefI.Reg != 0) + return BitValue(V.RefI.Reg, V.RefI.Pos); + return self(); +} + + +inline BitTracker::BitValue +BitTracker::BitValue::self(const BitRef &Self) { + return BitValue(Self.Reg, Self.Pos); +} + + +// A sequence of bits starting from index B up to and including index E. +// If E < B, the mask represents two sections: [0..E] and [B..W) where +// W is the width of the register. +struct BitTracker::BitMask { + BitMask() : B(0), E(0) {} + BitMask(uint16_t b, uint16_t e) : B(b), E(e) {} + uint16_t first() const { return B; } + uint16_t last() const { return E; } +private: + uint16_t B, E; +}; + + +// Representation of a register: a list of BitValues. +struct BitTracker::RegisterCell { + RegisterCell(uint16_t Width = DefaultBitN) : Bits(Width) {} + + uint16_t width() const { + return Bits.size(); + } + const BitValue &operator[](uint16_t BitN) const { + assert(BitN < Bits.size()); + return Bits[BitN]; + } + BitValue &operator[](uint16_t BitN) { + assert(BitN < Bits.size()); + return Bits[BitN]; + } + + bool meet(const RegisterCell &RC, unsigned SelfR); + RegisterCell &insert(const RegisterCell &RC, const BitMask &M); + RegisterCell extract(const BitMask &M) const; // Returns a new cell. + RegisterCell &rol(uint16_t Sh); // Rotate left. + RegisterCell &fill(uint16_t B, uint16_t E, const BitValue &V); + RegisterCell &cat(const RegisterCell &RC); // Concatenate. 
+ uint16_t cl(bool B) const; + uint16_t ct(bool B) const; + + bool operator== (const RegisterCell &RC) const; + bool operator!= (const RegisterCell &RC) const { + return !operator==(RC); + } + + const RegisterCell &operator=(const RegisterCell &RC) { + Bits = RC.Bits; + return *this; + } + + // Generate a "ref" cell for the corresponding register. In the resulting + // cell each bit will be described as being the same as the corresponding + // bit in register Reg (i.e. the cell is "defined" by register Reg). + static RegisterCell self(unsigned Reg, uint16_t Width); + // Generate a "top" cell of given size. + static RegisterCell top(uint16_t Width); + // Generate a cell that is a "ref" to another cell. + static RegisterCell ref(const RegisterCell &C); + +private: + // The DefaultBitN is here only to avoid frequent reallocation of the + // memory in the vector. + static const unsigned DefaultBitN = 32; + typedef SmallVector<BitValue, DefaultBitN> BitValueList; + BitValueList Bits; + + friend raw_ostream &operator<<(raw_ostream &OS, const RegisterCell &RC); +}; + + +inline bool BitTracker::has(unsigned Reg) const { + return Map.find(Reg) != Map.end(); +} + + +inline const BitTracker::RegisterCell& +BitTracker::lookup(unsigned Reg) const { + CellMapType::const_iterator F = Map.find(Reg); + assert(F != Map.end()); + return F->second; +} + + +inline BitTracker::RegisterCell +BitTracker::RegisterCell::self(unsigned Reg, uint16_t Width) { + RegisterCell RC(Width); + for (uint16_t i = 0; i < Width; ++i) + RC.Bits[i] = BitValue::self(BitRef(Reg, i)); + return RC; +} + + +inline BitTracker::RegisterCell +BitTracker::RegisterCell::top(uint16_t Width) { + RegisterCell RC(Width); + for (uint16_t i = 0; i < Width; ++i) + RC.Bits[i] = BitValue(BitValue::Top); + return RC; +} + + +inline BitTracker::RegisterCell +BitTracker::RegisterCell::ref(const RegisterCell &C) { + uint16_t W = C.width(); + RegisterCell RC(W); + for (unsigned i = 0; i < W; ++i) + RC[i] = BitValue::ref(C[i]); + 
return RC; +} + + +inline bool BitTracker::CellMapType::has(unsigned Reg) const { + return find(Reg) != end(); +} + +// A class to evaluate target's instructions and update the cell maps. +// This is used internally by the bit tracker. A target that wants to +// utilize this should implement the evaluation functions (noted below) +// in a subclass of this class. +struct BitTracker::MachineEvaluator { + MachineEvaluator(const TargetRegisterInfo &T, MachineRegisterInfo &M) + : TRI(T), MRI(M) {} + virtual ~MachineEvaluator() {} + + uint16_t getRegBitWidth(const RegisterRef &RR) const; + + RegisterCell getCell(const RegisterRef &RR, const CellMapType &M) const; + void putCell(const RegisterRef &RR, RegisterCell RC, CellMapType &M) const; + // A result of any operation should use refs to the source cells, not + // the cells directly. This function is a convenience wrapper to quickly + // generate a ref for a cell corresponding to a register reference. + RegisterCell getRef(const RegisterRef &RR, const CellMapType &M) const { + RegisterCell RC = getCell(RR, M); + return RegisterCell::ref(RC); + } + + // Helper functions. + // Check if a cell is an immediate value (i.e. all bits are either 0 or 1). + bool isInt(const RegisterCell &A) const; + // Convert cell to an immediate value. + uint64_t toInt(const RegisterCell &A) const; + + // Generate cell from an immediate value. + RegisterCell eIMM(int64_t V, uint16_t W) const; + RegisterCell eIMM(const ConstantInt *CI) const; + + // Arithmetic. + RegisterCell eADD(const RegisterCell &A1, const RegisterCell &A2) const; + RegisterCell eSUB(const RegisterCell &A1, const RegisterCell &A2) const; + RegisterCell eMLS(const RegisterCell &A1, const RegisterCell &A2) const; + RegisterCell eMLU(const RegisterCell &A1, const RegisterCell &A2) const; + + // Shifts. 
+ RegisterCell eASL(const RegisterCell &A1, uint16_t Sh) const; + RegisterCell eLSR(const RegisterCell &A1, uint16_t Sh) const; + RegisterCell eASR(const RegisterCell &A1, uint16_t Sh) const; + + // Logical. + RegisterCell eAND(const RegisterCell &A1, const RegisterCell &A2) const; + RegisterCell eORL(const RegisterCell &A1, const RegisterCell &A2) const; + RegisterCell eXOR(const RegisterCell &A1, const RegisterCell &A2) const; + RegisterCell eNOT(const RegisterCell &A1) const; + + // Set bit, clear bit. + RegisterCell eSET(const RegisterCell &A1, uint16_t BitN) const; + RegisterCell eCLR(const RegisterCell &A1, uint16_t BitN) const; + + // Count leading/trailing bits (zeros/ones). + RegisterCell eCLB(const RegisterCell &A1, bool B, uint16_t W) const; + RegisterCell eCTB(const RegisterCell &A1, bool B, uint16_t W) const; + + // Sign/zero extension. + RegisterCell eSXT(const RegisterCell &A1, uint16_t FromN) const; + RegisterCell eZXT(const RegisterCell &A1, uint16_t FromN) const; + + // Extract/insert + // XTR R,b,e: extract bits from A1 starting at bit b, ending at e-1. + // INS R,S,b: take R and replace bits starting from b with S. + RegisterCell eXTR(const RegisterCell &A1, uint16_t B, uint16_t E) const; + RegisterCell eINS(const RegisterCell &A1, const RegisterCell &A2, + uint16_t AtN) const; + + // User-provided functions for individual targets: + + // Return a sub-register mask that indicates which bits in Reg belong + // to the subregister Sub. These bits are assumed to be contiguous in + // the super-register, and have the same ordering in the sub-register + // as in the super-register. It is valid to call this function with + // Sub == 0, in this case, the function should return a mask that spans + // the entire register Reg (which is what the default implementation + // does). + virtual BitMask mask(unsigned Reg, unsigned Sub) const; + // Indicate whether a given register class should be tracked. 
+ virtual bool track(const TargetRegisterClass *RC) const { return true; } + // Evaluate a non-branching machine instruction, given the cell map with + // the input values. Place the results in the Outputs map. Return "true" + // if evaluation succeeded, "false" otherwise. + virtual bool evaluate(const MachineInstr *MI, const CellMapType &Inputs, + CellMapType &Outputs) const; + // Evaluate a branch, given the cell map with the input values. Fill out + // a list of all possible branch targets and indicate (through a flag) + // whether the branch could fall-through. Return "true" if this information + // has been successfully computed, "false" otherwise. + virtual bool evaluate(const MachineInstr *BI, const CellMapType &Inputs, + BranchTargetList &Targets, bool &FallsThru) const = 0; + + const TargetRegisterInfo &TRI; + MachineRegisterInfo &MRI; +}; + +} // end namespace llvm + +#endif diff --git a/lib/Target/Hexagon/CMakeLists.txt b/lib/Target/Hexagon/CMakeLists.txt index 758ccc741007..7ab2f0ba01df 100644 --- a/lib/Target/Hexagon/CMakeLists.txt +++ b/lib/Target/Hexagon/CMakeLists.txt @@ -12,13 +12,19 @@ tablegen(LLVM HexagonGenSubtargetInfo.inc -gen-subtarget) add_public_tablegen_target(HexagonCommonTableGen) add_llvm_target(HexagonCodeGen + BitTracker.cpp HexagonAsmPrinter.cpp + HexagonBitTracker.cpp HexagonCFGOptimizer.cpp + HexagonCommonGEP.cpp HexagonCopyToCombine.cpp HexagonExpandCondsets.cpp HexagonExpandPredSpillCode.cpp HexagonFixupHwLoops.cpp HexagonFrameLowering.cpp + HexagonGenExtract.cpp + HexagonGenInsert.cpp + HexagonGenPredicate.cpp HexagonHardwareLoops.cpp HexagonInstrInfo.cpp HexagonISelDAGToDAG.cpp diff --git a/lib/Target/Hexagon/HexagonBitTracker.cpp b/lib/Target/Hexagon/HexagonBitTracker.cpp new file mode 100644 index 000000000000..021e58a1d08a --- /dev/null +++ b/lib/Target/Hexagon/HexagonBitTracker.cpp @@ -0,0 +1,1174 @@ +//===--- HexagonBitTracker.cpp --------------------------------------------===// +// +// The LLVM Compiler Infrastructure 
+// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + +#include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/IR/Module.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/raw_ostream.h" + +#include "Hexagon.h" +#include "HexagonInstrInfo.h" +#include "HexagonRegisterInfo.h" +#include "HexagonTargetMachine.h" +#include "HexagonBitTracker.h" + +using namespace llvm; + +typedef BitTracker BT; + +HexagonEvaluator::HexagonEvaluator(const HexagonRegisterInfo &tri, + MachineRegisterInfo &mri, + const HexagonInstrInfo &tii, + MachineFunction &mf) + : MachineEvaluator(tri, mri), MF(mf), MFI(*mf.getFrameInfo()), TII(tii) { + // Populate the VRX map (VR to extension-type). + // Go over all the formal parameters of the function. If a given parameter + // P is sign- or zero-extended, locate the virtual register holding that + // parameter and create an entry in the VRX map indicating the type of ex- + // tension (and the source type). + // This is a bit complicated to do accurately, since the memory layout in- + // formation is necessary to precisely determine whether an aggregate para- + // meter will be passed in a register or in memory. What is given in MRI + // is the association between the physical register that is live-in (i.e. + // holds an argument), and the virtual register that this value will be + // copied into. This, by itself, is not sufficient to map back the virtual + // register to a formal parameter from Function (since consecutive live-ins + // from MRI may not correspond to consecutive formal parameters from Func- + // tion). To avoid the complications with in-memory arguments, only consi- + // der the initial sequence of formal parameters that are known to be + // passed via registers. 
+ unsigned AttrIdx = 0; + unsigned InVirtReg, InPhysReg = 0; + const Function &F = *MF.getFunction(); + typedef Function::const_arg_iterator arg_iterator; + for (arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I) { + AttrIdx++; + const Argument &Arg = *I; + Type *ATy = Arg.getType(); + unsigned Width = 0; + if (ATy->isIntegerTy()) + Width = ATy->getIntegerBitWidth(); + else if (ATy->isPointerTy()) + Width = 32; + // If pointer size is not set through target data, it will default to + // Module::AnyPointerSize. + if (Width == 0 || Width > 64) + break; + InPhysReg = getNextPhysReg(InPhysReg, Width); + if (!InPhysReg) + break; + InVirtReg = getVirtRegFor(InPhysReg); + if (!InVirtReg) + continue; + AttributeSet Attrs = F.getAttributes(); + if (Attrs.hasAttribute(AttrIdx, Attribute::SExt)) + VRX.insert(std::make_pair(InVirtReg, ExtType(ExtType::SExt, Width))); + else if (Attrs.hasAttribute(AttrIdx, Attribute::ZExt)) + VRX.insert(std::make_pair(InVirtReg, ExtType(ExtType::ZExt, Width))); + } +} + + +BT::BitMask HexagonEvaluator::mask(unsigned Reg, unsigned Sub) const { + if (Sub == 0) + return MachineEvaluator::mask(Reg, 0); + using namespace Hexagon; + const TargetRegisterClass *RC = MRI.getRegClass(Reg); + unsigned ID = RC->getID(); + uint16_t RW = getRegBitWidth(RegisterRef(Reg, Sub)); + switch (ID) { + case DoubleRegsRegClassID: + return (Sub == subreg_loreg) ? BT::BitMask(0, RW-1) + : BT::BitMask(RW, 2*RW-1); + default: + break; + } +#ifndef NDEBUG + dbgs() << PrintReg(Reg, &TRI, Sub) << '\n'; +#endif + llvm_unreachable("Unexpected register/subregister"); +} + + +namespace { + struct RegisterRefs : public std::vector<BT::RegisterRef> { + typedef std::vector<BT::RegisterRef> Base; + RegisterRefs(const MachineInstr *MI); + const BT::RegisterRef &operator[](unsigned n) const { + // The main purpose of this operator is to assert with bad argument. 
+ assert(n < size()); + return Base::operator[](n); + } + }; + + RegisterRefs::RegisterRefs(const MachineInstr *MI) + : Base(MI->getNumOperands()) { + for (unsigned i = 0, n = size(); i < n; ++i) { + const MachineOperand &MO = MI->getOperand(i); + if (MO.isReg()) + at(i) = BT::RegisterRef(MO); + // For indices that don't correspond to registers, the entry will + // remain constructed via the default constructor. + } + } +} + + +bool HexagonEvaluator::evaluate(const MachineInstr *MI, + const CellMapType &Inputs, CellMapType &Outputs) const { + unsigned NumDefs = 0; + + // Sanity verification: there should not be any defs with subregisters. + for (unsigned i = 0, n = MI->getNumOperands(); i < n; ++i) { + const MachineOperand &MO = MI->getOperand(i); + if (!MO.isReg() || !MO.isDef()) + continue; + NumDefs++; + assert(MO.getSubReg() == 0); + } + + if (NumDefs == 0) + return false; + + if (MI->mayLoad()) + return evaluateLoad(MI, Inputs, Outputs); + + // Check COPY instructions that copy formal parameters into virtual + // registers. Such parameters can be sign- or zero-extended at the + // call site, and we should take advantage of this knowledge. The MRI + // keeps a list of pairs of live-in physical and virtual registers, + // which provides information about which virtual registers will hold + // the argument values. The function will still contain instructions + // defining those virtual registers, and in practice those are COPY + // instructions from a physical to a virtual register. In such cases, + // applying the argument extension to the virtual register can be seen + // as simply mirroring the extension that had already been applied to + // the physical register at the call site. If the defining instruction + // was not a COPY, it would not be clear how to mirror that extension + // on the callee's side. For that reason, only check COPY instructions + // for potential extensions. 
+ if (MI->isCopy()) { + if (evaluateFormalCopy(MI, Inputs, Outputs)) + return true; + } + + // Beyond this point, if any operand is a global, skip that instruction. + // The reason is that certain instructions that can take an immediate + // operand can also have a global symbol in that operand. To avoid + // checking what kind of operand a given instruction has individually + // for each instruction, do it here. Global symbols as operands gene- + // rally do not provide any useful information. + for (unsigned i = 0, n = MI->getNumOperands(); i < n; ++i) { + const MachineOperand &MO = MI->getOperand(i); + if (MO.isGlobal() || MO.isBlockAddress() || MO.isSymbol() || MO.isJTI() || + MO.isCPI()) + return false; + } + + RegisterRefs Reg(MI); + unsigned Opc = MI->getOpcode(); + using namespace Hexagon; + #define op(i) MI->getOperand(i) + #define rc(i) RegisterCell::ref(getCell(Reg[i],Inputs)) + #define im(i) MI->getOperand(i).getImm() + + // If the instruction has no register operands, skip it. + if (Reg.size() == 0) + return false; + + // Record result for register in operand 0. + auto rr0 = [this,Reg] (const BT::RegisterCell &Val, CellMapType &Outputs) + -> bool { + putCell(Reg[0], Val, Outputs); + return true; + }; + // Get the cell corresponding to the N-th operand. + auto cop = [this,Reg,MI,Inputs] (unsigned N, uint16_t W) + -> BT::RegisterCell { + const MachineOperand &Op = MI->getOperand(N); + if (Op.isImm()) + return eIMM(Op.getImm(), W); + if (!Op.isReg()) + return RegisterCell::self(0, W); + assert(getRegBitWidth(Reg[N]) == W && "Register width mismatch"); + return rc(N); + }; + // Extract RW low bits of the cell. + auto lo = [this] (const BT::RegisterCell &RC, uint16_t RW) + -> BT::RegisterCell { + assert(RW <= RC.width()); + return eXTR(RC, 0, RW); + }; + // Extract RW high bits of the cell. 
+  auto hi = [this] (const BT::RegisterCell &RC, uint16_t RW)
+        -> BT::RegisterCell {
+    uint16_t W = RC.width();
+    assert(RW <= W);
+    return eXTR(RC, W-RW, W);
+  };
+  // Extract N-th halfword (counting from the least significant position).
+  auto half = [this] (const BT::RegisterCell &RC, unsigned N)
+        -> BT::RegisterCell {
+    assert(N*16+16 <= RC.width());
+    return eXTR(RC, N*16, N*16+16);
+  };
+  // Shuffle bits (pick even/odd from cells and merge into result).
+  auto shuffle = [this] (const BT::RegisterCell &Rs, const BT::RegisterCell &Rt,
+                         uint16_t BW, bool Odd) -> BT::RegisterCell {
+    uint16_t I = Odd, Ws = Rs.width();
+    assert(Ws == Rt.width());
+    RegisterCell RC = eXTR(Rt, I*BW, I*BW+BW).cat(eXTR(Rs, I*BW, I*BW+BW));
+    I += 2;
+    while (I*BW < Ws) {
+      RC.cat(eXTR(Rt, I*BW, I*BW+BW)).cat(eXTR(Rs, I*BW, I*BW+BW));
+      I += 2;
+    }
+    return RC;
+  };
+
+  // The bitwidth of the 0th operand. In most (if not all) of the
+  // instructions below, the 0th operand is the defined register.
+  // Pre-compute the bitwidth here, because it is needed in many
+  // cases below.
+  uint16_t W0 = (Reg[0].Reg != 0) ? 
getRegBitWidth(Reg[0]) : 0; + + switch (Opc) { + // Transfer immediate: + + case A2_tfrsi: + case A2_tfrpi: + case CONST32: + case CONST32_Float_Real: + case CONST32_Int_Real: + case CONST64_Float_Real: + case CONST64_Int_Real: + return rr0(eIMM(im(1), W0), Outputs); + case TFR_PdFalse: + return rr0(RegisterCell(W0).fill(0, W0, BT::BitValue::Zero), Outputs); + case TFR_PdTrue: + return rr0(RegisterCell(W0).fill(0, W0, BT::BitValue::One), Outputs); + case TFR_FI: { + int FI = op(1).getIndex(); + int Off = op(2).getImm(); + unsigned A = MFI.getObjectAlignment(FI) + std::abs(Off); + unsigned L = Log2_32(A); + RegisterCell RC = RegisterCell::self(Reg[0].Reg, W0); + RC.fill(0, L, BT::BitValue::Zero); + return rr0(RC, Outputs); + } + + // Transfer register: + + case A2_tfr: + case A2_tfrp: + case C2_pxfer_map: + return rr0(rc(1), Outputs); + case C2_tfrpr: { + uint16_t RW = W0; + uint16_t PW = 8; // XXX Pred size: getRegBitWidth(Reg[1]); + assert(PW <= RW); + RegisterCell PC = eXTR(rc(1), 0, PW); + RegisterCell RC = RegisterCell(RW).insert(PC, BT::BitMask(0, PW-1)); + RC.fill(PW, RW, BT::BitValue::Zero); + return rr0(RC, Outputs); + } + case C2_tfrrp: { + RegisterCell RC = RegisterCell::self(Reg[0].Reg, W0); + W0 = 8; // XXX Pred size + return rr0(eINS(RC, eXTR(rc(1), 0, W0), 0), Outputs); + } + + // Arithmetic: + + case A2_abs: + case A2_absp: + // TODO + break; + + case A2_addsp: { + uint16_t W1 = getRegBitWidth(Reg[1]); + assert(W0 == 64 && W1 == 32); + RegisterCell CW = RegisterCell(W0).insert(rc(1), BT::BitMask(0, W1-1)); + RegisterCell RC = eADD(eSXT(CW, W1), rc(2)); + return rr0(RC, Outputs); + } + case A2_add: + case A2_addp: + return rr0(eADD(rc(1), rc(2)), Outputs); + case A2_addi: + return rr0(eADD(rc(1), eIMM(im(2), W0)), Outputs); + case S4_addi_asl_ri: { + RegisterCell RC = eADD(eIMM(im(1), W0), eASL(rc(2), im(3))); + return rr0(RC, Outputs); + } + case S4_addi_lsr_ri: { + RegisterCell RC = eADD(eIMM(im(1), W0), eLSR(rc(2), im(3))); + return rr0(RC, 
Outputs); + } + case S4_addaddi: { + RegisterCell RC = eADD(rc(1), eADD(rc(2), eIMM(im(3), W0))); + return rr0(RC, Outputs); + } + case M4_mpyri_addi: { + RegisterCell M = eMLS(rc(2), eIMM(im(3), W0)); + RegisterCell RC = eADD(eIMM(im(1), W0), lo(M, W0)); + return rr0(RC, Outputs); + } + case M4_mpyrr_addi: { + RegisterCell M = eMLS(rc(2), rc(3)); + RegisterCell RC = eADD(eIMM(im(1), W0), lo(M, W0)); + return rr0(RC, Outputs); + } + case M4_mpyri_addr_u2: { + RegisterCell M = eMLS(eIMM(im(2), W0), rc(3)); + RegisterCell RC = eADD(rc(1), lo(M, W0)); + return rr0(RC, Outputs); + } + case M4_mpyri_addr: { + RegisterCell M = eMLS(rc(2), eIMM(im(3), W0)); + RegisterCell RC = eADD(rc(1), lo(M, W0)); + return rr0(RC, Outputs); + } + case M4_mpyrr_addr: { + RegisterCell M = eMLS(rc(2), rc(3)); + RegisterCell RC = eADD(rc(1), lo(M, W0)); + return rr0(RC, Outputs); + } + case S4_subaddi: { + RegisterCell RC = eADD(rc(1), eSUB(eIMM(im(2), W0), rc(3))); + return rr0(RC, Outputs); + } + case M2_accii: { + RegisterCell RC = eADD(rc(1), eADD(rc(2), eIMM(im(3), W0))); + return rr0(RC, Outputs); + } + case M2_acci: { + RegisterCell RC = eADD(rc(1), eADD(rc(2), rc(3))); + return rr0(RC, Outputs); + } + case M2_subacc: { + RegisterCell RC = eADD(rc(1), eSUB(rc(2), rc(3))); + return rr0(RC, Outputs); + } + case S2_addasl_rrri: { + RegisterCell RC = eADD(rc(1), eASL(rc(2), im(3))); + return rr0(RC, Outputs); + } + case C4_addipc: { + RegisterCell RPC = RegisterCell::self(Reg[0].Reg, W0); + RPC.fill(0, 2, BT::BitValue::Zero); + return rr0(eADD(RPC, eIMM(im(2), W0)), Outputs); + } + case A2_sub: + case A2_subp: + return rr0(eSUB(rc(1), rc(2)), Outputs); + case A2_subri: + return rr0(eSUB(eIMM(im(1), W0), rc(2)), Outputs); + case S4_subi_asl_ri: { + RegisterCell RC = eSUB(eIMM(im(1), W0), eASL(rc(2), im(3))); + return rr0(RC, Outputs); + } + case S4_subi_lsr_ri: { + RegisterCell RC = eSUB(eIMM(im(1), W0), eLSR(rc(2), im(3))); + return rr0(RC, Outputs); + } + case M2_naccii: { + 
RegisterCell RC = eSUB(rc(1), eADD(rc(2), eIMM(im(3), W0))); + return rr0(RC, Outputs); + } + case M2_nacci: { + RegisterCell RC = eSUB(rc(1), eADD(rc(2), rc(3))); + return rr0(RC, Outputs); + } + // 32-bit negation is done by "Rd = A2_subri 0, Rs" + case A2_negp: + return rr0(eSUB(eIMM(0, W0), rc(1)), Outputs); + + case M2_mpy_up: { + RegisterCell M = eMLS(rc(1), rc(2)); + return rr0(hi(M, W0), Outputs); + } + case M2_dpmpyss_s0: + return rr0(eMLS(rc(1), rc(2)), Outputs); + case M2_dpmpyss_acc_s0: + return rr0(eADD(rc(1), eMLS(rc(2), rc(3))), Outputs); + case M2_dpmpyss_nac_s0: + return rr0(eSUB(rc(1), eMLS(rc(2), rc(3))), Outputs); + case M2_mpyi: { + RegisterCell M = eMLS(rc(1), rc(2)); + return rr0(lo(M, W0), Outputs); + } + case M2_macsip: { + RegisterCell M = eMLS(rc(2), eIMM(im(3), W0)); + RegisterCell RC = eADD(rc(1), lo(M, W0)); + return rr0(RC, Outputs); + } + case M2_macsin: { + RegisterCell M = eMLS(rc(2), eIMM(im(3), W0)); + RegisterCell RC = eSUB(rc(1), lo(M, W0)); + return rr0(RC, Outputs); + } + case M2_maci: { + RegisterCell M = eMLS(rc(2), rc(3)); + RegisterCell RC = eADD(rc(1), lo(M, W0)); + return rr0(RC, Outputs); + } + case M2_mpysmi: { + RegisterCell M = eMLS(rc(1), eIMM(im(2), W0)); + return rr0(lo(M, 32), Outputs); + } + case M2_mpysin: { + RegisterCell M = eMLS(rc(1), eIMM(-im(2), W0)); + return rr0(lo(M, 32), Outputs); + } + case M2_mpysip: { + RegisterCell M = eMLS(rc(1), eIMM(im(2), W0)); + return rr0(lo(M, 32), Outputs); + } + case M2_mpyu_up: { + RegisterCell M = eMLU(rc(1), rc(2)); + return rr0(hi(M, W0), Outputs); + } + case M2_dpmpyuu_s0: + return rr0(eMLU(rc(1), rc(2)), Outputs); + case M2_dpmpyuu_acc_s0: + return rr0(eADD(rc(1), eMLU(rc(2), rc(3))), Outputs); + case M2_dpmpyuu_nac_s0: + return rr0(eSUB(rc(1), eMLU(rc(2), rc(3))), Outputs); + //case M2_mpysu_up: + + // Logical/bitwise: + + case A2_andir: + return rr0(eAND(rc(1), eIMM(im(2), W0)), Outputs); + case A2_and: + case A2_andp: + return rr0(eAND(rc(1), rc(2)), Outputs); + 
case A4_andn: + case A4_andnp: + return rr0(eAND(rc(1), eNOT(rc(2))), Outputs); + case S4_andi_asl_ri: { + RegisterCell RC = eAND(eIMM(im(1), W0), eASL(rc(2), im(3))); + return rr0(RC, Outputs); + } + case S4_andi_lsr_ri: { + RegisterCell RC = eAND(eIMM(im(1), W0), eLSR(rc(2), im(3))); + return rr0(RC, Outputs); + } + case M4_and_and: + return rr0(eAND(rc(1), eAND(rc(2), rc(3))), Outputs); + case M4_and_andn: + return rr0(eAND(rc(1), eAND(rc(2), eNOT(rc(3)))), Outputs); + case M4_and_or: + return rr0(eAND(rc(1), eORL(rc(2), rc(3))), Outputs); + case M4_and_xor: + return rr0(eAND(rc(1), eXOR(rc(2), rc(3))), Outputs); + case A2_orir: + return rr0(eORL(rc(1), eIMM(im(2), W0)), Outputs); + case A2_or: + case A2_orp: + return rr0(eORL(rc(1), rc(2)), Outputs); + case A4_orn: + case A4_ornp: + return rr0(eORL(rc(1), eNOT(rc(2))), Outputs); + case S4_ori_asl_ri: { + RegisterCell RC = eORL(eIMM(im(1), W0), eASL(rc(2), im(3))); + return rr0(RC, Outputs); + } + case S4_ori_lsr_ri: { + RegisterCell RC = eORL(eIMM(im(1), W0), eLSR(rc(2), im(3))); + return rr0(RC, Outputs); + } + case M4_or_and: + return rr0(eORL(rc(1), eAND(rc(2), rc(3))), Outputs); + case M4_or_andn: + return rr0(eORL(rc(1), eAND(rc(2), eNOT(rc(3)))), Outputs); + case S4_or_andi: + case S4_or_andix: { + RegisterCell RC = eORL(rc(1), eAND(rc(2), eIMM(im(3), W0))); + return rr0(RC, Outputs); + } + case S4_or_ori: { + RegisterCell RC = eORL(rc(1), eORL(rc(2), eIMM(im(3), W0))); + return rr0(RC, Outputs); + } + case M4_or_or: + return rr0(eORL(rc(1), eORL(rc(2), rc(3))), Outputs); + case M4_or_xor: + return rr0(eORL(rc(1), eXOR(rc(2), rc(3))), Outputs); + case A2_xor: + case A2_xorp: + return rr0(eXOR(rc(1), rc(2)), Outputs); + case M4_xor_and: + return rr0(eXOR(rc(1), eAND(rc(2), rc(3))), Outputs); + case M4_xor_andn: + return rr0(eXOR(rc(1), eAND(rc(2), eNOT(rc(3)))), Outputs); + case M4_xor_or: + return rr0(eXOR(rc(1), eORL(rc(2), rc(3))), Outputs); + case M4_xor_xacc: + return rr0(eXOR(rc(1), eXOR(rc(2), 
rc(3))), Outputs); + case A2_not: + case A2_notp: + return rr0(eNOT(rc(1)), Outputs); + + case S2_asl_i_r: + case S2_asl_i_p: + return rr0(eASL(rc(1), im(2)), Outputs); + case A2_aslh: + return rr0(eASL(rc(1), 16), Outputs); + case S2_asl_i_r_acc: + case S2_asl_i_p_acc: + return rr0(eADD(rc(1), eASL(rc(2), im(3))), Outputs); + case S2_asl_i_r_nac: + case S2_asl_i_p_nac: + return rr0(eSUB(rc(1), eASL(rc(2), im(3))), Outputs); + case S2_asl_i_r_and: + case S2_asl_i_p_and: + return rr0(eAND(rc(1), eASL(rc(2), im(3))), Outputs); + case S2_asl_i_r_or: + case S2_asl_i_p_or: + return rr0(eORL(rc(1), eASL(rc(2), im(3))), Outputs); + case S2_asl_i_r_xacc: + case S2_asl_i_p_xacc: + return rr0(eXOR(rc(1), eASL(rc(2), im(3))), Outputs); + case S2_asl_i_vh: + case S2_asl_i_vw: + // TODO + break; + + case S2_asr_i_r: + case S2_asr_i_p: + return rr0(eASR(rc(1), im(2)), Outputs); + case A2_asrh: + return rr0(eASR(rc(1), 16), Outputs); + case S2_asr_i_r_acc: + case S2_asr_i_p_acc: + return rr0(eADD(rc(1), eASR(rc(2), im(3))), Outputs); + case S2_asr_i_r_nac: + case S2_asr_i_p_nac: + return rr0(eSUB(rc(1), eASR(rc(2), im(3))), Outputs); + case S2_asr_i_r_and: + case S2_asr_i_p_and: + return rr0(eAND(rc(1), eASR(rc(2), im(3))), Outputs); + case S2_asr_i_r_or: + case S2_asr_i_p_or: + return rr0(eORL(rc(1), eASR(rc(2), im(3))), Outputs); + case S2_asr_i_r_rnd: { + // The input is first sign-extended to 64 bits, then the output + // is truncated back to 32 bits. 
+ assert(W0 == 32); + RegisterCell XC = eSXT(rc(1).cat(eIMM(0, W0)), W0); + RegisterCell RC = eASR(eADD(eASR(XC, im(2)), eIMM(1, 2*W0)), 1); + return rr0(eXTR(RC, 0, W0), Outputs); + } + case S2_asr_i_r_rnd_goodsyntax: { + int64_t S = im(2); + if (S == 0) + return rr0(rc(1), Outputs); + // Result: S2_asr_i_r_rnd Rs, u5-1 + RegisterCell XC = eSXT(rc(1).cat(eIMM(0, W0)), W0); + RegisterCell RC = eLSR(eADD(eASR(XC, S-1), eIMM(1, 2*W0)), 1); + return rr0(eXTR(RC, 0, W0), Outputs); + } + case S2_asr_r_vh: + case S2_asr_i_vw: + case S2_asr_i_svw_trun: + // TODO + break; + + case S2_lsr_i_r: + case S2_lsr_i_p: + return rr0(eLSR(rc(1), im(2)), Outputs); + case S2_lsr_i_r_acc: + case S2_lsr_i_p_acc: + return rr0(eADD(rc(1), eLSR(rc(2), im(3))), Outputs); + case S2_lsr_i_r_nac: + case S2_lsr_i_p_nac: + return rr0(eSUB(rc(1), eLSR(rc(2), im(3))), Outputs); + case S2_lsr_i_r_and: + case S2_lsr_i_p_and: + return rr0(eAND(rc(1), eLSR(rc(2), im(3))), Outputs); + case S2_lsr_i_r_or: + case S2_lsr_i_p_or: + return rr0(eORL(rc(1), eLSR(rc(2), im(3))), Outputs); + case S2_lsr_i_r_xacc: + case S2_lsr_i_p_xacc: + return rr0(eXOR(rc(1), eLSR(rc(2), im(3))), Outputs); + + case S2_clrbit_i: { + RegisterCell RC = rc(1); + RC[im(2)] = BT::BitValue::Zero; + return rr0(RC, Outputs); + } + case S2_setbit_i: { + RegisterCell RC = rc(1); + RC[im(2)] = BT::BitValue::One; + return rr0(RC, Outputs); + } + case S2_togglebit_i: { + RegisterCell RC = rc(1); + uint16_t BX = im(2); + RC[BX] = RC[BX].is(0) ? BT::BitValue::One + : RC[BX].is(1) ? 
BT::BitValue::Zero + : BT::BitValue::self(); + return rr0(RC, Outputs); + } + + case A4_bitspliti: { + uint16_t W1 = getRegBitWidth(Reg[1]); + uint16_t BX = im(2); + // Res.uw[1] = Rs[bx+1:], Res.uw[0] = Rs[0:bx] + const BT::BitValue Zero = BT::BitValue::Zero; + RegisterCell RZ = RegisterCell(W0).fill(BX, W1, Zero) + .fill(W1+(W1-BX), W0, Zero); + RegisterCell BF1 = eXTR(rc(1), 0, BX), BF2 = eXTR(rc(1), BX, W1); + RegisterCell RC = eINS(eINS(RZ, BF1, 0), BF2, W1); + return rr0(RC, Outputs); + } + case S4_extract: + case S4_extractp: + case S2_extractu: + case S2_extractup: { + uint16_t Wd = im(2), Of = im(3); + assert(Wd <= W0); + if (Wd == 0) + return rr0(eIMM(0, W0), Outputs); + // If the width extends beyond the register size, pad the register + // with 0 bits. + RegisterCell Pad = (Wd+Of > W0) ? rc(1).cat(eIMM(0, Wd+Of-W0)) : rc(1); + RegisterCell Ext = eXTR(Pad, Of, Wd+Of); + // Ext is short, need to extend it with 0s or sign bit. + RegisterCell RC = RegisterCell(W0).insert(Ext, BT::BitMask(0, Wd-1)); + if (Opc == S2_extractu || Opc == S2_extractup) + return rr0(eZXT(RC, Wd), Outputs); + return rr0(eSXT(RC, Wd), Outputs); + } + case S2_insert: + case S2_insertp: { + uint16_t Wd = im(3), Of = im(4); + assert(Wd < W0 && Of < W0); + // If Wd+Of exceeds W0, the inserted bits are truncated. 
+ if (Wd+Of > W0) + Wd = W0-Of; + if (Wd == 0) + return rr0(rc(1), Outputs); + return rr0(eINS(rc(1), eXTR(rc(2), 0, Wd), Of), Outputs); + } + + // Bit permutations: + + case A2_combineii: + case A4_combineii: + case A4_combineir: + case A4_combineri: + case A2_combinew: + assert(W0 % 2 == 0); + return rr0(cop(2, W0/2).cat(cop(1, W0/2)), Outputs); + case A2_combine_ll: + case A2_combine_lh: + case A2_combine_hl: + case A2_combine_hh: { + assert(W0 == 32); + assert(getRegBitWidth(Reg[1]) == 32 && getRegBitWidth(Reg[2]) == 32); + // Low half in the output is 0 for _ll and _hl, 1 otherwise: + unsigned LoH = !(Opc == A2_combine_ll || Opc == A2_combine_hl); + // High half in the output is 0 for _ll and _lh, 1 otherwise: + unsigned HiH = !(Opc == A2_combine_ll || Opc == A2_combine_lh); + RegisterCell R1 = rc(1); + RegisterCell R2 = rc(2); + RegisterCell RC = half(R2, LoH).cat(half(R1, HiH)); + return rr0(RC, Outputs); + } + case S2_packhl: { + assert(W0 == 64); + assert(getRegBitWidth(Reg[1]) == 32 && getRegBitWidth(Reg[2]) == 32); + RegisterCell R1 = rc(1); + RegisterCell R2 = rc(2); + RegisterCell RC = half(R2, 0).cat(half(R1, 0)).cat(half(R2, 1)) + .cat(half(R1, 1)); + return rr0(RC, Outputs); + } + case S2_shuffeb: { + RegisterCell RC = shuffle(rc(1), rc(2), 8, false); + return rr0(RC, Outputs); + } + case S2_shuffeh: { + RegisterCell RC = shuffle(rc(1), rc(2), 16, false); + return rr0(RC, Outputs); + } + case S2_shuffob: { + RegisterCell RC = shuffle(rc(1), rc(2), 8, true); + return rr0(RC, Outputs); + } + case S2_shuffoh: { + RegisterCell RC = shuffle(rc(1), rc(2), 16, true); + return rr0(RC, Outputs); + } + case C2_mask: { + uint16_t WR = W0; + uint16_t WP = 8; // XXX Pred size: getRegBitWidth(Reg[1]); + assert(WR == 64 && WP == 8); + RegisterCell R1 = rc(1); + RegisterCell RC(WR); + for (uint16_t i = 0; i < WP; ++i) { + const BT::BitValue &V = R1[i]; + BT::BitValue F = (V.is(0) || V.is(1)) ? 
V : BT::BitValue::self(); + RC.fill(i*8, i*8+8, F); + } + return rr0(RC, Outputs); + } + + // Mux: + + case C2_muxii: + case C2_muxir: + case C2_muxri: + case C2_mux: { + BT::BitValue PC0 = rc(1)[0]; + RegisterCell R2 = cop(2, W0); + RegisterCell R3 = cop(3, W0); + if (PC0.is(0) || PC0.is(1)) + return rr0(RegisterCell::ref(PC0 ? R2 : R3), Outputs); + R2.meet(R3, Reg[0].Reg); + return rr0(R2, Outputs); + } + case C2_vmux: + // TODO + break; + + // Sign- and zero-extension: + + case A2_sxtb: + return rr0(eSXT(rc(1), 8), Outputs); + case A2_sxth: + return rr0(eSXT(rc(1), 16), Outputs); + case A2_sxtw: { + uint16_t W1 = getRegBitWidth(Reg[1]); + assert(W0 == 64 && W1 == 32); + RegisterCell RC = eSXT(rc(1).cat(eIMM(0, W1)), W1); + return rr0(RC, Outputs); + } + case A2_zxtb: + return rr0(eZXT(rc(1), 8), Outputs); + case A2_zxth: + return rr0(eZXT(rc(1), 16), Outputs); + + // Bit count: + + case S2_cl0: + case S2_cl0p: + // Always produce a 32-bit result. + return rr0(eCLB(rc(1), 0/*bit*/, 32), Outputs); + case S2_cl1: + case S2_cl1p: + return rr0(eCLB(rc(1), 1/*bit*/, 32), Outputs); + case S2_clb: + case S2_clbp: { + uint16_t W1 = getRegBitWidth(Reg[1]); + RegisterCell R1 = rc(1); + BT::BitValue TV = R1[W1-1]; + if (TV.is(0) || TV.is(1)) + return rr0(eCLB(R1, TV, 32), Outputs); + break; + } + case S2_ct0: + case S2_ct0p: + return rr0(eCTB(rc(1), 0/*bit*/, 32), Outputs); + case S2_ct1: + case S2_ct1p: + return rr0(eCTB(rc(1), 1/*bit*/, 32), Outputs); + case S5_popcountp: + // TODO + break; + + case C2_all8: { + RegisterCell P1 = rc(1); + bool Has0 = false, All1 = true; + for (uint16_t i = 0; i < 8/*XXX*/; ++i) { + if (!P1[i].is(1)) + All1 = false; + if (!P1[i].is(0)) + continue; + Has0 = true; + break; + } + if (!Has0 && !All1) + break; + RegisterCell RC(W0); + RC.fill(0, W0, (All1 ? 
BT::BitValue::One : BT::BitValue::Zero)); + return rr0(RC, Outputs); + } + case C2_any8: { + RegisterCell P1 = rc(1); + bool Has1 = false, All0 = true; + for (uint16_t i = 0; i < 8/*XXX*/; ++i) { + if (!P1[i].is(0)) + All0 = false; + if (!P1[i].is(1)) + continue; + Has1 = true; + break; + } + if (!Has1 && !All0) + break; + RegisterCell RC(W0); + RC.fill(0, W0, (Has1 ? BT::BitValue::One : BT::BitValue::Zero)); + return rr0(RC, Outputs); + } + case C2_and: + return rr0(eAND(rc(1), rc(2)), Outputs); + case C2_andn: + return rr0(eAND(rc(1), eNOT(rc(2))), Outputs); + case C2_not: + return rr0(eNOT(rc(1)), Outputs); + case C2_or: + return rr0(eORL(rc(1), rc(2)), Outputs); + case C2_orn: + return rr0(eORL(rc(1), eNOT(rc(2))), Outputs); + case C2_xor: + return rr0(eXOR(rc(1), rc(2)), Outputs); + case C4_and_and: + return rr0(eAND(rc(1), eAND(rc(2), rc(3))), Outputs); + case C4_and_andn: + return rr0(eAND(rc(1), eAND(rc(2), eNOT(rc(3)))), Outputs); + case C4_and_or: + return rr0(eAND(rc(1), eORL(rc(2), rc(3))), Outputs); + case C4_and_orn: + return rr0(eAND(rc(1), eORL(rc(2), eNOT(rc(3)))), Outputs); + case C4_or_and: + return rr0(eORL(rc(1), eAND(rc(2), rc(3))), Outputs); + case C4_or_andn: + return rr0(eORL(rc(1), eAND(rc(2), eNOT(rc(3)))), Outputs); + case C4_or_or: + return rr0(eORL(rc(1), eORL(rc(2), rc(3))), Outputs); + case C4_or_orn: + return rr0(eORL(rc(1), eORL(rc(2), eNOT(rc(3)))), Outputs); + case C2_bitsclr: + case C2_bitsclri: + case C2_bitsset: + case C4_nbitsclr: + case C4_nbitsclri: + case C4_nbitsset: + // TODO + break; + case S2_tstbit_i: + case S4_ntstbit_i: { + BT::BitValue V = rc(1)[im(2)]; + if (V.is(0) || V.is(1)) { + // If instruction is S2_tstbit_i, test for 1, otherwise test for 0. + bool TV = (Opc == S2_tstbit_i); + BT::BitValue F = V.is(TV) ? 
BT::BitValue::One : BT::BitValue::Zero; + return rr0(RegisterCell(W0).fill(0, W0, F), Outputs); + } + break; + } + + default: + return MachineEvaluator::evaluate(MI, Inputs, Outputs); + } + #undef im + #undef rc + #undef op + return false; +} + + +bool HexagonEvaluator::evaluate(const MachineInstr *BI, + const CellMapType &Inputs, BranchTargetList &Targets, + bool &FallsThru) const { + // We need to evaluate one branch at a time. TII::AnalyzeBranch checks + // all the branches in a basic block at once, so we cannot use it. + unsigned Opc = BI->getOpcode(); + bool SimpleBranch = false; + bool Negated = false; + switch (Opc) { + case Hexagon::J2_jumpf: + case Hexagon::J2_jumpfnew: + case Hexagon::J2_jumpfnewpt: + Negated = true; + case Hexagon::J2_jumpt: + case Hexagon::J2_jumptnew: + case Hexagon::J2_jumptnewpt: + // Simple branch: if([!]Pn) jump ... + // i.e. Op0 = predicate, Op1 = branch target. + SimpleBranch = true; + break; + case Hexagon::J2_jump: + Targets.insert(BI->getOperand(0).getMBB()); + FallsThru = false; + return true; + default: + // If the branch is of unknown type, assume that all successors are + // executable. + return false; + } + + if (!SimpleBranch) + return false; + + // BI is a conditional branch if we got here. + RegisterRef PR = BI->getOperand(0); + RegisterCell PC = getCell(PR, Inputs); + const BT::BitValue &Test = PC[0]; + + // If the condition is neither true nor false, then it's unknown. + if (!Test.is(0) && !Test.is(1)) + return false; + + // "Test.is(!Negated)" means "branch condition is true". + if (!Test.is(!Negated)) { + // Condition known to be false. 
+ FallsThru = true; + return true; + } + + Targets.insert(BI->getOperand(1).getMBB()); + FallsThru = false; + return true; +} + + +bool HexagonEvaluator::evaluateLoad(const MachineInstr *MI, + const CellMapType &Inputs, CellMapType &Outputs) const { + if (TII.isPredicated(MI)) + return false; + assert(MI->mayLoad() && "A load that mayn't?"); + unsigned Opc = MI->getOpcode(); + + uint16_t BitNum; + bool SignEx; + using namespace Hexagon; + + switch (Opc) { + default: + return false; + +#if 0 + // memb_fifo + case L2_loadalignb_pbr: + case L2_loadalignb_pcr: + case L2_loadalignb_pi: + // memh_fifo + case L2_loadalignh_pbr: + case L2_loadalignh_pcr: + case L2_loadalignh_pi: + // membh + case L2_loadbsw2_pbr: + case L2_loadbsw2_pci: + case L2_loadbsw2_pcr: + case L2_loadbsw2_pi: + case L2_loadbsw4_pbr: + case L2_loadbsw4_pci: + case L2_loadbsw4_pcr: + case L2_loadbsw4_pi: + // memubh + case L2_loadbzw2_pbr: + case L2_loadbzw2_pci: + case L2_loadbzw2_pcr: + case L2_loadbzw2_pi: + case L2_loadbzw4_pbr: + case L2_loadbzw4_pci: + case L2_loadbzw4_pcr: + case L2_loadbzw4_pi: +#endif + + case L2_loadrbgp: + case L2_loadrb_io: + case L2_loadrb_pbr: + case L2_loadrb_pci: + case L2_loadrb_pcr: + case L2_loadrb_pi: + case L4_loadrb_abs: + case L4_loadrb_ap: + case L4_loadrb_rr: + case L4_loadrb_ur: + BitNum = 8; + SignEx = true; + break; + + case L2_loadrubgp: + case L2_loadrub_io: + case L2_loadrub_pbr: + case L2_loadrub_pci: + case L2_loadrub_pcr: + case L2_loadrub_pi: + case L4_loadrub_abs: + case L4_loadrub_ap: + case L4_loadrub_rr: + case L4_loadrub_ur: + BitNum = 8; + SignEx = false; + break; + + case L2_loadrhgp: + case L2_loadrh_io: + case L2_loadrh_pbr: + case L2_loadrh_pci: + case L2_loadrh_pcr: + case L2_loadrh_pi: + case L4_loadrh_abs: + case L4_loadrh_ap: + case L4_loadrh_rr: + case L4_loadrh_ur: + BitNum = 16; + SignEx = true; + break; + + case L2_loadruhgp: + case L2_loadruh_io: + case L2_loadruh_pbr: + case L2_loadruh_pci: + case L2_loadruh_pcr: + case 
L2_loadruh_pi: + case L4_loadruh_rr: + case L4_loadruh_abs: + case L4_loadruh_ap: + case L4_loadruh_ur: + BitNum = 16; + SignEx = false; + break; + + case L2_loadrigp: + case L2_loadri_io: + case L2_loadri_pbr: + case L2_loadri_pci: + case L2_loadri_pcr: + case L2_loadri_pi: + case L2_loadw_locked: + case L4_loadri_abs: + case L4_loadri_ap: + case L4_loadri_rr: + case L4_loadri_ur: + case LDriw_pred: + BitNum = 32; + SignEx = true; + break; + + case L2_loadrdgp: + case L2_loadrd_io: + case L2_loadrd_pbr: + case L2_loadrd_pci: + case L2_loadrd_pcr: + case L2_loadrd_pi: + case L4_loadd_locked: + case L4_loadrd_abs: + case L4_loadrd_ap: + case L4_loadrd_rr: + case L4_loadrd_ur: + BitNum = 64; + SignEx = true; + break; + } + + const MachineOperand &MD = MI->getOperand(0); + assert(MD.isReg() && MD.isDef()); + RegisterRef RD = MD; + + uint16_t W = getRegBitWidth(RD); + assert(W >= BitNum && BitNum > 0); + RegisterCell Res(W); + + for (uint16_t i = 0; i < BitNum; ++i) + Res[i] = BT::BitValue::self(BT::BitRef(RD.Reg, i)); + + if (SignEx) { + const BT::BitValue &Sign = Res[BitNum-1]; + for (uint16_t i = BitNum; i < W; ++i) + Res[i] = BT::BitValue::ref(Sign); + } else { + for (uint16_t i = BitNum; i < W; ++i) + Res[i] = BT::BitValue::Zero; + } + + putCell(RD, Res, Outputs); + return true; +} + + +bool HexagonEvaluator::evaluateFormalCopy(const MachineInstr *MI, + const CellMapType &Inputs, CellMapType &Outputs) const { + // If MI defines a formal parameter, but is not a copy (loads are handled + // in evaluateLoad), then it's not clear what to do. + assert(MI->isCopy()); + + RegisterRef RD = MI->getOperand(0); + RegisterRef RS = MI->getOperand(1); + assert(RD.Sub == 0); + if (!TargetRegisterInfo::isPhysicalRegister(RS.Reg)) + return false; + RegExtMap::const_iterator F = VRX.find(RD.Reg); + if (F == VRX.end()) + return false; + + uint16_t EW = F->second.Width; + // Store RD's cell into the map. 
This will associate the cell with a virtual + // register, and make zero-/sign-extends possible (otherwise we would be ex- + // tending "self" bit values, which will have no effect, since "self" values + // cannot be references to anything). + putCell(RD, getCell(RS, Inputs), Outputs); + + RegisterCell Res; + // Read RD's cell from the outputs instead of RS's cell from the inputs: + if (F->second.Type == ExtType::SExt) + Res = eSXT(getCell(RD, Outputs), EW); + else if (F->second.Type == ExtType::ZExt) + Res = eZXT(getCell(RD, Outputs), EW); + + putCell(RD, Res, Outputs); + return true; +} + + +unsigned HexagonEvaluator::getNextPhysReg(unsigned PReg, unsigned Width) const { + using namespace Hexagon; + bool Is64 = DoubleRegsRegClass.contains(PReg); + assert(PReg == 0 || Is64 || IntRegsRegClass.contains(PReg)); + + static const unsigned Phys32[] = { R0, R1, R2, R3, R4, R5 }; + static const unsigned Phys64[] = { D0, D1, D2 }; + const unsigned Num32 = sizeof(Phys32)/sizeof(unsigned); + const unsigned Num64 = sizeof(Phys64)/sizeof(unsigned); + + // Return the first parameter register of the required width. + if (PReg == 0) + return (Width <= 32) ? Phys32[0] : Phys64[0]; + + // Set Idx32, Idx64 in such a way that Idx+1 would give the index of the + // next register. + unsigned Idx32 = 0, Idx64 = 0; + if (!Is64) { + while (Idx32 < Num32) { + if (Phys32[Idx32] == PReg) + break; + Idx32++; + } + Idx64 = Idx32/2; + } else { + while (Idx64 < Num64) { + if (Phys64[Idx64] == PReg) + break; + Idx64++; + } + Idx32 = Idx64*2+1; + } + + if (Width <= 32) + return (Idx32+1 < Num32) ? Phys32[Idx32+1] : 0; + return (Idx64+1 < Num64) ? 
Phys64[Idx64+1] : 0; +} + + +unsigned HexagonEvaluator::getVirtRegFor(unsigned PReg) const { + typedef MachineRegisterInfo::livein_iterator iterator; + for (iterator I = MRI.livein_begin(), E = MRI.livein_end(); I != E; ++I) { + if (I->first == PReg) + return I->second; + } + return 0; +} diff --git a/lib/Target/Hexagon/HexagonBitTracker.h b/lib/Target/Hexagon/HexagonBitTracker.h new file mode 100644 index 000000000000..897af2d71870 --- /dev/null +++ b/lib/Target/Hexagon/HexagonBitTracker.h @@ -0,0 +1,64 @@ +//===--- HexagonBitTracker.h ----------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + +#ifndef HEXAGONBITTRACKER_H +#define HEXAGONBITTRACKER_H + +#include "BitTracker.h" +#include "llvm/ADT/DenseMap.h" + +namespace llvm { + class HexagonInstrInfo; + class HexagonRegisterInfo; + +struct HexagonEvaluator : public BitTracker::MachineEvaluator { + typedef BitTracker::CellMapType CellMapType; + typedef BitTracker::RegisterRef RegisterRef; + typedef BitTracker::RegisterCell RegisterCell; + typedef BitTracker::BranchTargetList BranchTargetList; + + HexagonEvaluator(const HexagonRegisterInfo &tri, MachineRegisterInfo &mri, + const HexagonInstrInfo &tii, MachineFunction &mf); + + bool evaluate(const MachineInstr *MI, const CellMapType &Inputs, + CellMapType &Outputs) const override; + bool evaluate(const MachineInstr *BI, const CellMapType &Inputs, + BranchTargetList &Targets, bool &FallsThru) const override; + + BitTracker::BitMask mask(unsigned Reg, unsigned Sub) const override; + + MachineFunction &MF; + MachineFrameInfo &MFI; + const HexagonInstrInfo &TII; + +private: + bool evaluateLoad(const MachineInstr *MI, const CellMapType &Inputs, + CellMapType &Outputs) const; + bool evaluateFormalCopy(const MachineInstr *MI, const 
CellMapType &Inputs, + CellMapType &Outputs) const; + + unsigned getNextPhysReg(unsigned PReg, unsigned Width) const; + unsigned getVirtRegFor(unsigned PReg) const; + + // Type of formal parameter extension. + struct ExtType { + enum { SExt, ZExt }; + char Type; + uint16_t Width; + ExtType() : Type(0), Width(0) {} + ExtType(char t, uint16_t w) : Type(t), Width(w) {} + }; + // Map VR -> extension type. + typedef DenseMap<unsigned, ExtType> RegExtMap; + RegExtMap VRX; +}; + +} // end namespace llvm + +#endif diff --git a/lib/Target/Hexagon/HexagonCommonGEP.cpp b/lib/Target/Hexagon/HexagonCommonGEP.cpp new file mode 100644 index 000000000000..9f5fac156527 --- /dev/null +++ b/lib/Target/Hexagon/HexagonCommonGEP.cpp @@ -0,0 +1,1325 @@ +//===--- HexagonCommonGEP.cpp ---------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + +#define DEBUG_TYPE "commgep" + +#include "llvm/Pass.h" +#include "llvm/ADT/FoldingSet.h" +#include "llvm/ADT/STLExtras.h" +#include "llvm/Analysis/LoopInfo.h" +#include "llvm/Analysis/PostDominators.h" +#include "llvm/CodeGen/MachineFunctionAnalysis.h" +#include "llvm/IR/Constants.h" +#include "llvm/IR/Dominators.h" +#include "llvm/IR/Function.h" +#include "llvm/IR/Instructions.h" +#include "llvm/IR/Verifier.h" +#include "llvm/Support/Allocator.h" +#include "llvm/Support/CommandLine.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/raw_ostream.h" +#include "llvm/Transforms/Scalar.h" +#include "llvm/Transforms/Utils/Local.h" + +#include <map> +#include <set> +#include <vector> + +#include "HexagonTargetMachine.h" + +using namespace llvm; + +static cl::opt<bool> OptSpeculate("commgep-speculate", cl::init(true), + cl::Hidden, cl::ZeroOrMore); + +static cl::opt<bool> OptEnableInv("commgep-inv", 
cl::init(true), cl::Hidden, + cl::ZeroOrMore); + +static cl::opt<bool> OptEnableConst("commgep-const", cl::init(true), + cl::Hidden, cl::ZeroOrMore); + +namespace llvm { + void initializeHexagonCommonGEPPass(PassRegistry&); +} + +namespace { + struct GepNode; + typedef std::set<GepNode*> NodeSet; + typedef std::map<GepNode*,Value*> NodeToValueMap; + typedef std::vector<GepNode*> NodeVect; + typedef std::map<GepNode*,NodeVect> NodeChildrenMap; + typedef std::set<Use*> UseSet; + typedef std::map<GepNode*,UseSet> NodeToUsesMap; + + // Numbering map for gep nodes. Used to keep track of ordering for + // gep nodes. + struct NodeNumbering : public std::map<const GepNode*,unsigned> { + }; + + struct NodeOrdering : public NodeNumbering { + NodeOrdering() : LastNum(0) {} +#ifdef _MSC_VER + void special_insert_for_special_msvc(const GepNode *N) +#else + using NodeNumbering::insert; + void insert(const GepNode* N) +#endif + { + insert(std::make_pair(N, ++LastNum)); + } + bool operator() (const GepNode* N1, const GepNode *N2) const { + const_iterator F1 = find(N1), F2 = find(N2); + assert(F1 != end() && F2 != end()); + return F1->second < F2->second; + } + private: + unsigned LastNum; + }; + + + class HexagonCommonGEP : public FunctionPass { + public: + static char ID; + HexagonCommonGEP() : FunctionPass(ID) { + initializeHexagonCommonGEPPass(*PassRegistry::getPassRegistry()); + } + virtual bool runOnFunction(Function &F); + virtual const char *getPassName() const { + return "Hexagon Common GEP"; + } + + virtual void getAnalysisUsage(AnalysisUsage &AU) const { + AU.addRequired<DominatorTreeWrapperPass>(); + AU.addPreserved<DominatorTreeWrapperPass>(); + AU.addRequired<PostDominatorTree>(); + AU.addPreserved<PostDominatorTree>(); + AU.addRequired<LoopInfoWrapperPass>(); + AU.addPreserved<LoopInfoWrapperPass>(); + FunctionPass::getAnalysisUsage(AU); + } + + private: + typedef std::map<Value*,GepNode*> ValueToNodeMap; + typedef std::vector<Value*> ValueVect; + typedef 
std::map<GepNode*,ValueVect> NodeToValuesMap; + + void getBlockTraversalOrder(BasicBlock *Root, ValueVect &Order); + bool isHandledGepForm(GetElementPtrInst *GepI); + void processGepInst(GetElementPtrInst *GepI, ValueToNodeMap &NM); + void collect(); + void common(); + + BasicBlock *recalculatePlacement(GepNode *Node, NodeChildrenMap &NCM, + NodeToValueMap &Loc); + BasicBlock *recalculatePlacementRec(GepNode *Node, NodeChildrenMap &NCM, + NodeToValueMap &Loc); + bool isInvariantIn(Value *Val, Loop *L); + bool isInvariantIn(GepNode *Node, Loop *L); + bool isInMainPath(BasicBlock *B, Loop *L); + BasicBlock *adjustForInvariance(GepNode *Node, NodeChildrenMap &NCM, + NodeToValueMap &Loc); + void separateChainForNode(GepNode *Node, Use *U, NodeToValueMap &Loc); + void separateConstantChains(GepNode *Node, NodeChildrenMap &NCM, + NodeToValueMap &Loc); + void computeNodePlacement(NodeToValueMap &Loc); + + Value *fabricateGEP(NodeVect &NA, BasicBlock::iterator At, + BasicBlock *LocB); + void getAllUsersForNode(GepNode *Node, ValueVect &Values, + NodeChildrenMap &NCM); + void materialize(NodeToValueMap &Loc); + + void removeDeadCode(); + + NodeVect Nodes; + NodeToUsesMap Uses; + NodeOrdering NodeOrder; // Node ordering, for deterministic behavior. 
+ SpecificBumpPtrAllocator<GepNode> *Mem; + LLVMContext *Ctx; + LoopInfo *LI; + DominatorTree *DT; + PostDominatorTree *PDT; + Function *Fn; + }; +} + + +char HexagonCommonGEP::ID = 0; +INITIALIZE_PASS_BEGIN(HexagonCommonGEP, "hcommgep", "Hexagon Common GEP", + false, false) +INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) +INITIALIZE_PASS_DEPENDENCY(PostDominatorTree) +INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) +INITIALIZE_PASS_END(HexagonCommonGEP, "hcommgep", "Hexagon Common GEP", + false, false) + +namespace { + struct GepNode { + enum { + None = 0, + Root = 0x01, + Internal = 0x02, + Used = 0x04 + }; + + uint32_t Flags; + union { + GepNode *Parent; + Value *BaseVal; + }; + Value *Idx; + Type *PTy; // Type of the pointer operand. + + GepNode() : Flags(0), Parent(0), Idx(0), PTy(0) {} + GepNode(const GepNode *N) : Flags(N->Flags), Idx(N->Idx), PTy(N->PTy) { + if (Flags & Root) + BaseVal = N->BaseVal; + else + Parent = N->Parent; + } + friend raw_ostream &operator<< (raw_ostream &OS, const GepNode &GN); + }; + + + Type *next_type(Type *Ty, Value *Idx) { + // Advance the type. + if (!Ty->isStructTy()) { + Type *NexTy = cast<SequentialType>(Ty)->getElementType(); + return NexTy; + } + // Otherwise it is a struct type. 
+ ConstantInt *CI = dyn_cast<ConstantInt>(Idx); + assert(CI && "Struct type with non-constant index"); + int64_t i = CI->getValue().getSExtValue(); + Type *NextTy = cast<StructType>(Ty)->getElementType(i); + return NextTy; + } + + + raw_ostream &operator<< (raw_ostream &OS, const GepNode &GN) { + OS << "{ {"; + bool Comma = false; + if (GN.Flags & GepNode::Root) { + OS << "root"; + Comma = true; + } + if (GN.Flags & GepNode::Internal) { + if (Comma) + OS << ','; + OS << "internal"; + Comma = true; + } + if (GN.Flags & GepNode::Used) { + if (Comma) + OS << ','; + OS << "used"; + Comma = true; + } + OS << "} "; + if (GN.Flags & GepNode::Root) + OS << "BaseVal:" << GN.BaseVal->getName() << '(' << GN.BaseVal << ')'; + else + OS << "Parent:" << GN.Parent; + + OS << " Idx:"; + if (ConstantInt *CI = dyn_cast<ConstantInt>(GN.Idx)) + OS << CI->getValue().getSExtValue(); + else if (GN.Idx->hasName()) + OS << GN.Idx->getName(); + else + OS << "<anon> =" << *GN.Idx; + + OS << " PTy:"; + if (GN.PTy->isStructTy()) { + StructType *STy = cast<StructType>(GN.PTy); + if (!STy->isLiteral()) + OS << GN.PTy->getStructName(); + else + OS << "<anon-struct>:" << *STy; + } + else + OS << *GN.PTy; + OS << " }"; + return OS; + } + + + template <typename NodeContainer> + void dump_node_container(raw_ostream &OS, const NodeContainer &S) { + typedef typename NodeContainer::const_iterator const_iterator; + for (const_iterator I = S.begin(), E = S.end(); I != E; ++I) + OS << *I << ' ' << **I << '\n'; + } + + raw_ostream &operator<< (raw_ostream &OS, + const NodeVect &S) LLVM_ATTRIBUTE_UNUSED; + raw_ostream &operator<< (raw_ostream &OS, const NodeVect &S) { + dump_node_container(OS, S); + return OS; + } + + + raw_ostream &operator<< (raw_ostream &OS, + const NodeToUsesMap &M) LLVM_ATTRIBUTE_UNUSED; + raw_ostream &operator<< (raw_ostream &OS, const NodeToUsesMap &M){ + typedef NodeToUsesMap::const_iterator const_iterator; + for (const_iterator I = M.begin(), E = M.end(); I != E; ++I) { + const 
UseSet &Us = I->second; + OS << I->first << " -> #" << Us.size() << '{'; + for (UseSet::const_iterator J = Us.begin(), F = Us.end(); J != F; ++J) { + User *R = (*J)->getUser(); + if (R->hasName()) + OS << ' ' << R->getName(); + else + OS << " <?>(" << *R << ')'; + } + OS << " }\n"; + } + return OS; + } + + + struct in_set { + in_set(const NodeSet &S) : NS(S) {} + bool operator() (GepNode *N) const { + return NS.find(N) != NS.end(); + } + private: + const NodeSet &NS; + }; +} + + +inline void *operator new(size_t, SpecificBumpPtrAllocator<GepNode> &A) { + return A.Allocate(); +} + + +void HexagonCommonGEP::getBlockTraversalOrder(BasicBlock *Root, + ValueVect &Order) { + // Compute block ordering for a typical DT-based traversal of the flow + // graph: "before visiting a block, all of its dominators must have been + // visited". + + Order.push_back(Root); + DomTreeNode *DTN = DT->getNode(Root); + typedef GraphTraits<DomTreeNode*> GTN; + typedef GTN::ChildIteratorType Iter; + for (Iter I = GTN::child_begin(DTN), E = GTN::child_end(DTN); I != E; ++I) + getBlockTraversalOrder((*I)->getBlock(), Order); +} + + +bool HexagonCommonGEP::isHandledGepForm(GetElementPtrInst *GepI) { + // No vector GEPs. + if (!GepI->getType()->isPointerTy()) + return false; + // No GEPs without any indices. (Is this possible?) + if (GepI->idx_begin() == GepI->idx_end()) + return false; + return true; +} + + +void HexagonCommonGEP::processGepInst(GetElementPtrInst *GepI, + ValueToNodeMap &NM) { + DEBUG(dbgs() << "Visiting GEP: " << *GepI << '\n'); + GepNode *N = new (*Mem) GepNode; + Value *PtrOp = GepI->getPointerOperand(); + ValueToNodeMap::iterator F = NM.find(PtrOp); + if (F == NM.end()) { + N->BaseVal = PtrOp; + N->Flags |= GepNode::Root; + } else { + // If PtrOp was a GEP instruction, it must have already been processed. + // The ValueToNodeMap entry for it is the last gep node in the generated + // chain. Link to it here. 
+ N->Parent = F->second; + } + N->PTy = PtrOp->getType(); + N->Idx = *GepI->idx_begin(); + + // Collect the list of users of this GEP instruction. Will add it to the + // last node created for it. + UseSet Us; + for (Value::user_iterator UI = GepI->user_begin(), UE = GepI->user_end(); + UI != UE; ++UI) { + // Check if this gep is used by anything other than other geps that + // we will process. + if (isa<GetElementPtrInst>(*UI)) { + GetElementPtrInst *UserG = cast<GetElementPtrInst>(*UI); + if (isHandledGepForm(UserG)) + continue; + } + Us.insert(&UI.getUse()); + } + Nodes.push_back(N); +#ifdef _MSC_VER + NodeOrder.special_insert_for_special_msvc(N); +#else + NodeOrder.insert(N); +#endif + + // Skip the first index operand, since we only handle 0. This dereferences + // the pointer operand. + GepNode *PN = N; + Type *PtrTy = cast<PointerType>(PtrOp->getType())->getElementType(); + for (User::op_iterator OI = GepI->idx_begin()+1, OE = GepI->idx_end(); + OI != OE; ++OI) { + Value *Op = *OI; + GepNode *Nx = new (*Mem) GepNode; + Nx->Parent = PN; // Link Nx to the previous node. + Nx->Flags |= GepNode::Internal; + Nx->PTy = PtrTy; + Nx->Idx = Op; + Nodes.push_back(Nx); +#ifdef _MSC_VER + NodeOrder.special_insert_for_special_msvc(Nx); +#else + NodeOrder.insert(Nx); +#endif + PN = Nx; + + PtrTy = next_type(PtrTy, Op); + } + + // After last node has been created, update the use information. + if (!Us.empty()) { + PN->Flags |= GepNode::Used; + Uses[PN].insert(Us.begin(), Us.end()); + } + + // Link the last node with the originating GEP instruction. This is to + // help with linking chained GEP instructions. + NM.insert(std::make_pair(GepI, PN)); +} + + +void HexagonCommonGEP::collect() { + // Establish depth-first traversal order of the dominator tree. + ValueVect BO; + getBlockTraversalOrder(Fn->begin(), BO); + + // The creation of gep nodes requires DT-traversal. 
When processing a GEP + // instruction that uses another GEP instruction as the base pointer, the + // gep node for the base pointer should already exist. + ValueToNodeMap NM; + for (ValueVect::iterator I = BO.begin(), E = BO.end(); I != E; ++I) { + BasicBlock *B = cast<BasicBlock>(*I); + for (BasicBlock::iterator J = B->begin(), F = B->end(); J != F; ++J) { + if (!isa<GetElementPtrInst>(J)) + continue; + GetElementPtrInst *GepI = cast<GetElementPtrInst>(J); + if (isHandledGepForm(GepI)) + processGepInst(GepI, NM); + } + } + + DEBUG(dbgs() << "Gep nodes after initial collection:\n" << Nodes); +} + + +namespace { + void invert_find_roots(const NodeVect &Nodes, NodeChildrenMap &NCM, + NodeVect &Roots) { + typedef NodeVect::const_iterator const_iterator; + for (const_iterator I = Nodes.begin(), E = Nodes.end(); I != E; ++I) { + GepNode *N = *I; + if (N->Flags & GepNode::Root) { + Roots.push_back(N); + continue; + } + GepNode *PN = N->Parent; + NCM[PN].push_back(N); + } + } + + void nodes_for_root(GepNode *Root, NodeChildrenMap &NCM, NodeSet &Nodes) { + NodeVect Work; + Work.push_back(Root); + Nodes.insert(Root); + + while (!Work.empty()) { + NodeVect::iterator First = Work.begin(); + GepNode *N = *First; + Work.erase(First); + NodeChildrenMap::iterator CF = NCM.find(N); + if (CF != NCM.end()) { + Work.insert(Work.end(), CF->second.begin(), CF->second.end()); + Nodes.insert(CF->second.begin(), CF->second.end()); + } + } + } +} + + +namespace { + typedef std::set<NodeSet> NodeSymRel; + typedef std::pair<GepNode*,GepNode*> NodePair; + typedef std::set<NodePair> NodePairSet; + + const NodeSet *node_class(GepNode *N, NodeSymRel &Rel) { + for (NodeSymRel::iterator I = Rel.begin(), E = Rel.end(); I != E; ++I) + if (I->count(N)) + return &*I; + return 0; + } + + // Create an ordered pair of GepNode pointers. The pair will be used in + // determining equality. The only purpose of the ordering is to eliminate + // duplication due to the commutativity of equality/non-equality. 
+ NodePair node_pair(GepNode *N1, GepNode *N2) { + uintptr_t P1 = uintptr_t(N1), P2 = uintptr_t(N2); + if (P1 <= P2) + return std::make_pair(N1, N2); + return std::make_pair(N2, N1); + } + + unsigned node_hash(GepNode *N) { + // Include everything except flags and parent. + FoldingSetNodeID ID; + ID.AddPointer(N->Idx); + ID.AddPointer(N->PTy); + return ID.ComputeHash(); + } + + bool node_eq(GepNode *N1, GepNode *N2, NodePairSet &Eq, NodePairSet &Ne) { + // Don't cache the result for nodes with different hashes. The hash + // comparison is fast enough. + if (node_hash(N1) != node_hash(N2)) + return false; + + NodePair NP = node_pair(N1, N2); + NodePairSet::iterator FEq = Eq.find(NP); + if (FEq != Eq.end()) + return true; + NodePairSet::iterator FNe = Ne.find(NP); + if (FNe != Ne.end()) + return false; + // Not previously compared. + bool Root1 = N1->Flags & GepNode::Root; + bool Root2 = N2->Flags & GepNode::Root; + NodePair P = node_pair(N1, N2); + // If the Root flag has different values, the nodes are different. + // If both nodes are root nodes, but their base pointers differ, + // they are different. + if (Root1 != Root2 || (Root1 && N1->BaseVal != N2->BaseVal)) { + Ne.insert(P); + return false; + } + // Here the root flags are identical, and for root nodes the + // base pointers are equal, so the root nodes are equal. + // For non-root nodes, compare their parent nodes. + if (Root1 || node_eq(N1->Parent, N2->Parent, Eq, Ne)) { + Eq.insert(P); + return true; + } + return false; + } +} + + +void HexagonCommonGEP::common() { + // The essence of this commoning is finding gep nodes that are equal. + // To do this we need to compare all pairs of nodes. To save time, + // first, partition the set of all nodes into sets of potentially equal + // nodes, and then compare pairs from within each partition. 
+ typedef std::map<unsigned,NodeSet> NodeSetMap; + NodeSetMap MaybeEq; + + for (NodeVect::iterator I = Nodes.begin(), E = Nodes.end(); I != E; ++I) { + GepNode *N = *I; + unsigned H = node_hash(N); + MaybeEq[H].insert(N); + } + + // Compute the equivalence relation for the gep nodes. Use two caches, + // one for equality and the other for non-equality. + NodeSymRel EqRel; // Equality relation (as set of equivalence classes). + NodePairSet Eq, Ne; // Caches. + for (NodeSetMap::iterator I = MaybeEq.begin(), E = MaybeEq.end(); + I != E; ++I) { + NodeSet &S = I->second; + for (NodeSet::iterator NI = S.begin(), NE = S.end(); NI != NE; ++NI) { + GepNode *N = *NI; + // If node already has a class, then the class must have been created + // in a prior iteration of this loop. Since equality is transitive, + // nothing more will be added to that class, so skip it. + if (node_class(N, EqRel)) + continue; + + // Create a new class candidate now. + NodeSet C; + for (NodeSet::iterator NJ = std::next(NI); NJ != NE; ++NJ) + if (node_eq(N, *NJ, Eq, Ne)) + C.insert(*NJ); + // If Tmp is empty, N would be the only element in it. Don't bother + // creating a class for it then. + if (!C.empty()) { + C.insert(N); // Finalize the set before adding it to the relation. + std::pair<NodeSymRel::iterator, bool> Ins = EqRel.insert(C); + (void)Ins; + assert(Ins.second && "Cannot add a class"); + } + } + } + + DEBUG({ + dbgs() << "Gep node equality:\n"; + for (NodePairSet::iterator I = Eq.begin(), E = Eq.end(); I != E; ++I) + dbgs() << "{ " << I->first << ", " << I->second << " }\n"; + + dbgs() << "Gep equivalence classes:\n"; + for (NodeSymRel::iterator I = EqRel.begin(), E = EqRel.end(); I != E; ++I) { + dbgs() << '{'; + const NodeSet &S = *I; + for (NodeSet::const_iterator J = S.begin(), F = S.end(); J != F; ++J) { + if (J != S.begin()) + dbgs() << ','; + dbgs() << ' ' << *J; + } + dbgs() << " }\n"; + } + }); + + + // Create a projection from a NodeSet to the minimal element in it. 
+ typedef std::map<const NodeSet*,GepNode*> ProjMap; + ProjMap PM; + for (NodeSymRel::iterator I = EqRel.begin(), E = EqRel.end(); I != E; ++I) { + const NodeSet &S = *I; + GepNode *Min = *std::min_element(S.begin(), S.end(), NodeOrder); + std::pair<ProjMap::iterator,bool> Ins = PM.insert(std::make_pair(&S, Min)); + (void)Ins; + assert(Ins.second && "Cannot add minimal element"); + + // Update the min element's flags, and user list. + uint32_t Flags = 0; + UseSet &MinUs = Uses[Min]; + for (NodeSet::iterator J = S.begin(), F = S.end(); J != F; ++J) { + GepNode *N = *J; + uint32_t NF = N->Flags; + // If N is used, append all original values of N to the list of + // original values of Min. + if (NF & GepNode::Used) + MinUs.insert(Uses[N].begin(), Uses[N].end()); + Flags |= NF; + } + if (MinUs.empty()) + Uses.erase(Min); + + // The collected flags should include all the flags from the min element. + assert((Min->Flags & Flags) == Min->Flags); + Min->Flags = Flags; + } + + // Commoning: for each non-root gep node, replace "Parent" with the + // selected (minimum) node from the corresponding equivalence class. + // If a given parent does not have an equivalence class, leave it + // unchanged (it means that it's the only element in its class). + for (NodeVect::iterator I = Nodes.begin(), E = Nodes.end(); I != E; ++I) { + GepNode *N = *I; + if (N->Flags & GepNode::Root) + continue; + const NodeSet *PC = node_class(N->Parent, EqRel); + if (!PC) + continue; + ProjMap::iterator F = PM.find(PC); + if (F == PM.end()) + continue; + // Found a replacement, use it. + GepNode *Rep = F->second; + N->Parent = Rep; + } + + DEBUG(dbgs() << "Gep nodes after commoning:\n" << Nodes); + + // Finally, erase the nodes that are no longer used. 
+ NodeSet Erase; + for (NodeVect::iterator I = Nodes.begin(), E = Nodes.end(); I != E; ++I) { + GepNode *N = *I; + const NodeSet *PC = node_class(N, EqRel); + if (!PC) + continue; + ProjMap::iterator F = PM.find(PC); + if (F == PM.end()) + continue; + if (N == F->second) + continue; + // Node for removal. + Erase.insert(*I); + } + NodeVect::iterator NewE = std::remove_if(Nodes.begin(), Nodes.end(), + in_set(Erase)); + Nodes.resize(std::distance(Nodes.begin(), NewE)); + + DEBUG(dbgs() << "Gep nodes after post-commoning cleanup:\n" << Nodes); +} + + +namespace { + template <typename T> + BasicBlock *nearest_common_dominator(DominatorTree *DT, T &Blocks) { + DEBUG({ + dbgs() << "NCD of {"; + for (typename T::iterator I = Blocks.begin(), E = Blocks.end(); + I != E; ++I) { + if (!*I) + continue; + BasicBlock *B = cast<BasicBlock>(*I); + dbgs() << ' ' << B->getName(); + } + dbgs() << " }\n"; + }); + + // Allow null basic blocks in Blocks. In such cases, return 0. + typename T::iterator I = Blocks.begin(), E = Blocks.end(); + if (I == E || !*I) + return 0; + BasicBlock *Dom = cast<BasicBlock>(*I); + while (++I != E) { + BasicBlock *B = cast_or_null<BasicBlock>(*I); + Dom = B ? DT->findNearestCommonDominator(Dom, B) : 0; + if (!Dom) + return 0; + } + DEBUG(dbgs() << "computed:" << Dom->getName() << '\n'); + return Dom; + } + + template <typename T> + BasicBlock *nearest_common_dominatee(DominatorTree *DT, T &Blocks) { + // If two blocks, A and B, dominate a block C, then A dominates B, + // or B dominates A. + typename T::iterator I = Blocks.begin(), E = Blocks.end(); + // Find the first non-null block. + while (I != E && !*I) + ++I; + if (I == E) + return DT->getRoot(); + BasicBlock *DomB = cast<BasicBlock>(*I); + while (++I != E) { + if (!*I) + continue; + BasicBlock *B = cast<BasicBlock>(*I); + if (DT->dominates(B, DomB)) + continue; + if (!DT->dominates(DomB, B)) + return 0; + DomB = B; + } + return DomB; + } + + // Find the first use in B of any value from Values. 
If no such use, + // return B->end(). + template <typename T> + BasicBlock::iterator first_use_of_in_block(T &Values, BasicBlock *B) { + BasicBlock::iterator FirstUse = B->end(), BEnd = B->end(); + typedef typename T::iterator iterator; + for (iterator I = Values.begin(), E = Values.end(); I != E; ++I) { + Value *V = *I; + // If V is used in a PHI node, the use belongs to the incoming block, + // not the block with the PHI node. In the incoming block, the use + // would be considered as being at the end of it, so it cannot + // influence the position of the first use (which is assumed to be + // at the end to start with). + if (isa<PHINode>(V)) + continue; + if (!isa<Instruction>(V)) + continue; + Instruction *In = cast<Instruction>(V); + if (In->getParent() != B) + continue; + BasicBlock::iterator It = In; + if (std::distance(FirstUse, BEnd) < std::distance(It, BEnd)) + FirstUse = It; + } + return FirstUse; + } + + bool is_empty(const BasicBlock *B) { + return B->empty() || (&*B->begin() == B->getTerminator()); + } +} + + +BasicBlock *HexagonCommonGEP::recalculatePlacement(GepNode *Node, + NodeChildrenMap &NCM, NodeToValueMap &Loc) { + DEBUG(dbgs() << "Loc for node:" << Node << '\n'); + // Recalculate the placement for Node, assuming that the locations of + // its children in Loc are valid. + // Return 0 if there is no valid placement for Node (for example, it + // uses an index value that is not available at the location required + // to dominate all children, etc.). + + // Find the nearest common dominator for: + // - all users, if the node is used, and + // - all children. + ValueVect Bs; + if (Node->Flags & GepNode::Used) { + // Append all blocks with uses of the original values to the + // block vector Bs. 
+ NodeToUsesMap::iterator UF = Uses.find(Node); + assert(UF != Uses.end() && "Used node with no use information"); + UseSet &Us = UF->second; + for (UseSet::iterator I = Us.begin(), E = Us.end(); I != E; ++I) { + Use *U = *I; + User *R = U->getUser(); + if (!isa<Instruction>(R)) + continue; + BasicBlock *PB = isa<PHINode>(R) + ? cast<PHINode>(R)->getIncomingBlock(*U) + : cast<Instruction>(R)->getParent(); + Bs.push_back(PB); + } + } + // Append the location of each child. + NodeChildrenMap::iterator CF = NCM.find(Node); + if (CF != NCM.end()) { + NodeVect &Cs = CF->second; + for (NodeVect::iterator I = Cs.begin(), E = Cs.end(); I != E; ++I) { + GepNode *CN = *I; + NodeToValueMap::iterator LF = Loc.find(CN); + // If the child is only used in GEP instructions (i.e. is not used in + // non-GEP instructions), the nearest dominator computed for it may + // have been null. In such case it won't have a location available. + if (LF == Loc.end()) + continue; + Bs.push_back(LF->second); + } + } + + BasicBlock *DomB = nearest_common_dominator(DT, Bs); + if (!DomB) + return 0; + // Check if the index used by Node dominates the computed dominator. + Instruction *IdxI = dyn_cast<Instruction>(Node->Idx); + if (IdxI && !DT->dominates(IdxI->getParent(), DomB)) + return 0; + + // Avoid putting nodes into empty blocks. + while (is_empty(DomB)) { + DomTreeNode *N = (*DT)[DomB]->getIDom(); + if (!N) + break; + DomB = N->getBlock(); + } + + // Otherwise, DomB is fine. Update the location map. + Loc[Node] = DomB; + return DomB; +} + + +BasicBlock *HexagonCommonGEP::recalculatePlacementRec(GepNode *Node, + NodeChildrenMap &NCM, NodeToValueMap &Loc) { + DEBUG(dbgs() << "LocRec begin for node:" << Node << '\n'); + // Recalculate the placement of Node, after recursively recalculating the + // placements of all its children. 
+ NodeChildrenMap::iterator CF = NCM.find(Node); + if (CF != NCM.end()) { + NodeVect &Cs = CF->second; + for (NodeVect::iterator I = Cs.begin(), E = Cs.end(); I != E; ++I) + recalculatePlacementRec(*I, NCM, Loc); + } + BasicBlock *LB = recalculatePlacement(Node, NCM, Loc); + DEBUG(dbgs() << "LocRec end for node:" << Node << '\n'); + return LB; +} + + +bool HexagonCommonGEP::isInvariantIn(Value *Val, Loop *L) { + if (isa<Constant>(Val) || isa<Argument>(Val)) + return true; + Instruction *In = dyn_cast<Instruction>(Val); + if (!In) + return false; + BasicBlock *HdrB = L->getHeader(), *DefB = In->getParent(); + return DT->properlyDominates(DefB, HdrB); +} + + +bool HexagonCommonGEP::isInvariantIn(GepNode *Node, Loop *L) { + if (Node->Flags & GepNode::Root) + if (!isInvariantIn(Node->BaseVal, L)) + return false; + return isInvariantIn(Node->Idx, L); +} + + +bool HexagonCommonGEP::isInMainPath(BasicBlock *B, Loop *L) { + BasicBlock *HB = L->getHeader(); + BasicBlock *LB = L->getLoopLatch(); + // B must post-dominate the loop header or dominate the loop latch. + if (PDT->dominates(B, HB)) + return true; + if (LB && DT->dominates(B, LB)) + return true; + return false; +} + + +namespace { + BasicBlock *preheader(DominatorTree *DT, Loop *L) { + if (BasicBlock *PH = L->getLoopPreheader()) + return PH; + if (!OptSpeculate) + return 0; + DomTreeNode *DN = DT->getNode(L->getHeader()); + if (!DN) + return 0; + return DN->getIDom()->getBlock(); + } +} + + +BasicBlock *HexagonCommonGEP::adjustForInvariance(GepNode *Node, + NodeChildrenMap &NCM, NodeToValueMap &Loc) { + // Find the "topmost" location for Node: it must be dominated by both, + // its parent (or the BaseVal, if it's a root node), and by the index + // value. 
+ ValueVect Bs; + if (Node->Flags & GepNode::Root) { + if (Instruction *PIn = dyn_cast<Instruction>(Node->BaseVal)) + Bs.push_back(PIn->getParent()); + } else { + Bs.push_back(Loc[Node->Parent]); + } + if (Instruction *IIn = dyn_cast<Instruction>(Node->Idx)) + Bs.push_back(IIn->getParent()); + BasicBlock *TopB = nearest_common_dominatee(DT, Bs); + + // Traverse the loop nest upwards until we find a loop in which Node + // is no longer invariant, or until we get to the upper limit of Node's + // placement. The traversal will also stop when a suitable "preheader" + // cannot be found for a given loop. The "preheader" may actually be + // a regular block outside of the loop (i.e. not guarded), in which case + // the Node will be speculated. + // For nodes that are not in the main path of the containing loop (i.e. + // are not executed in each iteration), do not move them out of the loop. + BasicBlock *LocB = cast_or_null<BasicBlock>(Loc[Node]); + if (LocB) { + Loop *Lp = LI->getLoopFor(LocB); + while (Lp) { + if (!isInvariantIn(Node, Lp) || !isInMainPath(LocB, Lp)) + break; + BasicBlock *NewLoc = preheader(DT, Lp); + if (!NewLoc || !DT->dominates(TopB, NewLoc)) + break; + Lp = Lp->getParentLoop(); + LocB = NewLoc; + } + } + Loc[Node] = LocB; + + // Recursively compute the locations of all children nodes. 
+ NodeChildrenMap::iterator CF = NCM.find(Node); + if (CF != NCM.end()) { + NodeVect &Cs = CF->second; + for (NodeVect::iterator I = Cs.begin(), E = Cs.end(); I != E; ++I) + adjustForInvariance(*I, NCM, Loc); + } + return LocB; +} + + +namespace { + struct LocationAsBlock { + LocationAsBlock(const NodeToValueMap &L) : Map(L) {} + const NodeToValueMap ⤅ + }; + + raw_ostream &operator<< (raw_ostream &OS, + const LocationAsBlock &Loc) LLVM_ATTRIBUTE_UNUSED ; + raw_ostream &operator<< (raw_ostream &OS, const LocationAsBlock &Loc) { + for (NodeToValueMap::const_iterator I = Loc.Map.begin(), E = Loc.Map.end(); + I != E; ++I) { + OS << I->first << " -> "; + BasicBlock *B = cast<BasicBlock>(I->second); + OS << B->getName() << '(' << B << ')'; + OS << '\n'; + } + return OS; + } + + inline bool is_constant(GepNode *N) { + return isa<ConstantInt>(N->Idx); + } +} + + +void HexagonCommonGEP::separateChainForNode(GepNode *Node, Use *U, + NodeToValueMap &Loc) { + User *R = U->getUser(); + DEBUG(dbgs() << "Separating chain for node (" << Node << ") user: " + << *R << '\n'); + BasicBlock *PB = cast<Instruction>(R)->getParent(); + + GepNode *N = Node; + GepNode *C = 0, *NewNode = 0; + while (is_constant(N) && !(N->Flags & GepNode::Root)) { + // XXX if (single-use) dont-replicate; + GepNode *NewN = new (*Mem) GepNode(N); + Nodes.push_back(NewN); + Loc[NewN] = PB; + + if (N == Node) + NewNode = NewN; + NewN->Flags &= ~GepNode::Used; + if (C) + C->Parent = NewN; + C = NewN; + N = N->Parent; + } + if (!NewNode) + return; + + // Move over all uses that share the same user as U from Node to NewNode. 
+ NodeToUsesMap::iterator UF = Uses.find(Node); + assert(UF != Uses.end()); + UseSet &Us = UF->second; + UseSet NewUs; + for (UseSet::iterator I = Us.begin(); I != Us.end(); ) { + User *S = (*I)->getUser(); + UseSet::iterator Nx = std::next(I); + if (S == R) { + NewUs.insert(*I); + Us.erase(I); + } + I = Nx; + } + if (Us.empty()) { + Node->Flags &= ~GepNode::Used; + Uses.erase(UF); + } + + // Should at least have U in NewUs. + NewNode->Flags |= GepNode::Used; + DEBUG(dbgs() << "new node: " << NewNode << " " << *NewNode << '\n'); + assert(!NewUs.empty()); + Uses[NewNode] = NewUs; +} + + +void HexagonCommonGEP::separateConstantChains(GepNode *Node, + NodeChildrenMap &NCM, NodeToValueMap &Loc) { + // First approximation: extract all chains. + NodeSet Ns; + nodes_for_root(Node, NCM, Ns); + + DEBUG(dbgs() << "Separating constant chains for node: " << Node << '\n'); + // Collect all used nodes together with the uses from loads and stores, + // where the GEP node could be folded into the load/store instruction. + NodeToUsesMap FNs; // Foldable nodes. + for (NodeSet::iterator I = Ns.begin(), E = Ns.end(); I != E; ++I) { + GepNode *N = *I; + if (!(N->Flags & GepNode::Used)) + continue; + NodeToUsesMap::iterator UF = Uses.find(N); + assert(UF != Uses.end()); + UseSet &Us = UF->second; + // Loads/stores that use the node N. + UseSet LSs; + for (UseSet::iterator J = Us.begin(), F = Us.end(); J != F; ++J) { + Use *U = *J; + User *R = U->getUser(); + // We're interested in uses that provide the address. It can happen + // that the value may also be provided via GEP, but we won't handle + // those cases here for now. 
+ if (LoadInst *Ld = dyn_cast<LoadInst>(R)) { + unsigned PtrX = LoadInst::getPointerOperandIndex(); + if (&Ld->getOperandUse(PtrX) == U) + LSs.insert(U); + } else if (StoreInst *St = dyn_cast<StoreInst>(R)) { + unsigned PtrX = StoreInst::getPointerOperandIndex(); + if (&St->getOperandUse(PtrX) == U) + LSs.insert(U); + } + } + // Even if the total use count is 1, separating the chain may still be + // beneficial, since the constant chain may be longer than the GEP alone + // would be (e.g. if the parent node has a constant index and also has + // other children). + if (!LSs.empty()) + FNs.insert(std::make_pair(N, LSs)); + } + + DEBUG(dbgs() << "Nodes with foldable users:\n" << FNs); + + for (NodeToUsesMap::iterator I = FNs.begin(), E = FNs.end(); I != E; ++I) { + GepNode *N = I->first; + UseSet &Us = I->second; + for (UseSet::iterator J = Us.begin(), F = Us.end(); J != F; ++J) + separateChainForNode(N, *J, Loc); + } +} + + +void HexagonCommonGEP::computeNodePlacement(NodeToValueMap &Loc) { + // Compute the inverse of the Node.Parent links. Also, collect the set + // of root nodes. + NodeChildrenMap NCM; + NodeVect Roots; + invert_find_roots(Nodes, NCM, Roots); + + // Compute the initial placement determined by the users' locations, and + // the locations of the child nodes. + for (NodeVect::iterator I = Roots.begin(), E = Roots.end(); I != E; ++I) + recalculatePlacementRec(*I, NCM, Loc); + + DEBUG(dbgs() << "Initial node placement:\n" << LocationAsBlock(Loc)); + + if (OptEnableInv) { + for (NodeVect::iterator I = Roots.begin(), E = Roots.end(); I != E; ++I) + adjustForInvariance(*I, NCM, Loc); + + DEBUG(dbgs() << "Node placement after adjustment for invariance:\n" + << LocationAsBlock(Loc)); + } + if (OptEnableConst) { + for (NodeVect::iterator I = Roots.begin(), E = Roots.end(); I != E; ++I) + separateConstantChains(*I, NCM, Loc); + } + DEBUG(dbgs() << "Node use information:\n" << Uses); + + // At the moment, there is no further refinement of the initial placement. 
+ // Such a refinement could include splitting the nodes if they are placed + // too far from some of its users. + + DEBUG(dbgs() << "Final node placement:\n" << LocationAsBlock(Loc)); +} + + +Value *HexagonCommonGEP::fabricateGEP(NodeVect &NA, BasicBlock::iterator At, + BasicBlock *LocB) { + DEBUG(dbgs() << "Fabricating GEP in " << LocB->getName() + << " for nodes:\n" << NA); + unsigned Num = NA.size(); + GepNode *RN = NA[0]; + assert((RN->Flags & GepNode::Root) && "Creating GEP for non-root"); + + Value *NewInst = 0; + Value *Input = RN->BaseVal; + Value **IdxList = new Value*[Num+1]; + unsigned nax = 0; + do { + unsigned IdxC = 0; + // If the type of the input of the first node is not a pointer, + // we need to add an artificial i32 0 to the indices (because the + // actual input in the IR will be a pointer). + if (!NA[nax]->PTy->isPointerTy()) { + Type *Int32Ty = Type::getInt32Ty(*Ctx); + IdxList[IdxC++] = ConstantInt::get(Int32Ty, 0); + } + + // Keep adding indices from NA until we have to stop and generate + // an "intermediate" GEP. + while (++nax <= Num) { + GepNode *N = NA[nax-1]; + IdxList[IdxC++] = N->Idx; + if (nax < Num) { + // We have to stop, if the expected type of the output of this node + // is not the same as the input type of the next node. 
+ Type *NextTy = next_type(N->PTy, N->Idx); + if (NextTy != NA[nax]->PTy) + break; + } + } + ArrayRef<Value*> A(IdxList, IdxC); + Type *InpTy = Input->getType(); + Type *ElTy = cast<PointerType>(InpTy->getScalarType())->getElementType(); + NewInst = GetElementPtrInst::Create(ElTy, Input, A, "cgep", At); + DEBUG(dbgs() << "new GEP: " << *NewInst << '\n'); + Input = NewInst; + } while (nax <= Num); + + delete[] IdxList; + return NewInst; +} + + +void HexagonCommonGEP::getAllUsersForNode(GepNode *Node, ValueVect &Values, + NodeChildrenMap &NCM) { + NodeVect Work; + Work.push_back(Node); + + while (!Work.empty()) { + NodeVect::iterator First = Work.begin(); + GepNode *N = *First; + Work.erase(First); + if (N->Flags & GepNode::Used) { + NodeToUsesMap::iterator UF = Uses.find(N); + assert(UF != Uses.end() && "No use information for used node"); + UseSet &Us = UF->second; + for (UseSet::iterator I = Us.begin(), E = Us.end(); I != E; ++I) + Values.push_back((*I)->getUser()); + } + NodeChildrenMap::iterator CF = NCM.find(N); + if (CF != NCM.end()) { + NodeVect &Cs = CF->second; + Work.insert(Work.end(), Cs.begin(), Cs.end()); + } + } +} + + +void HexagonCommonGEP::materialize(NodeToValueMap &Loc) { + DEBUG(dbgs() << "Nodes before materialization:\n" << Nodes << '\n'); + NodeChildrenMap NCM; + NodeVect Roots; + // Compute the inversion again, since computing placement could alter + // "parent" relation between nodes. + invert_find_roots(Nodes, NCM, Roots); + + while (!Roots.empty()) { + NodeVect::iterator First = Roots.begin(); + GepNode *Root = *First, *Last = *First; + Roots.erase(First); + + NodeVect NA; // Nodes to assemble. + // Append to NA all child nodes up to (and including) the first child + // that: + // (1) has more than 1 child, or + // (2) is used, or + // (3) has a child located in a different block. 
+ bool LastUsed = false; + unsigned LastCN = 0; + // The location may be null if the computation failed (it can legitimately + // happen for nodes created from dead GEPs). + Value *LocV = Loc[Last]; + if (!LocV) + continue; + BasicBlock *LastB = cast<BasicBlock>(LocV); + do { + NA.push_back(Last); + LastUsed = (Last->Flags & GepNode::Used); + if (LastUsed) + break; + NodeChildrenMap::iterator CF = NCM.find(Last); + LastCN = (CF != NCM.end()) ? CF->second.size() : 0; + if (LastCN != 1) + break; + GepNode *Child = CF->second.front(); + BasicBlock *ChildB = cast_or_null<BasicBlock>(Loc[Child]); + if (ChildB != 0 && LastB != ChildB) + break; + Last = Child; + } while (true); + + BasicBlock::iterator InsertAt = LastB->getTerminator(); + if (LastUsed || LastCN > 0) { + ValueVect Urs; + getAllUsersForNode(Root, Urs, NCM); + BasicBlock::iterator FirstUse = first_use_of_in_block(Urs, LastB); + if (FirstUse != LastB->end()) + InsertAt = FirstUse; + } + + // Generate a new instruction for NA. + Value *NewInst = fabricateGEP(NA, InsertAt, LastB); + + // Convert all the children of Last node into roots, and append them + // to the Roots list. + if (LastCN > 0) { + NodeVect &Cs = NCM[Last]; + for (NodeVect::iterator I = Cs.begin(), E = Cs.end(); I != E; ++I) { + GepNode *CN = *I; + CN->Flags &= ~GepNode::Internal; + CN->Flags |= GepNode::Root; + CN->BaseVal = NewInst; + Roots.push_back(CN); + } + } + + // Lastly, if the Last node was used, replace all uses with the new GEP. + // The uses reference the original GEP values. 
+ if (LastUsed) { + NodeToUsesMap::iterator UF = Uses.find(Last); + assert(UF != Uses.end() && "No use information found"); + UseSet &Us = UF->second; + for (UseSet::iterator I = Us.begin(), E = Us.end(); I != E; ++I) { + Use *U = *I; + U->set(NewInst); + } + } + } +} + + +void HexagonCommonGEP::removeDeadCode() { + ValueVect BO; + BO.push_back(&Fn->front()); + + for (unsigned i = 0; i < BO.size(); ++i) { + BasicBlock *B = cast<BasicBlock>(BO[i]); + DomTreeNode *N = DT->getNode(B); + typedef GraphTraits<DomTreeNode*> GTN; + typedef GTN::ChildIteratorType Iter; + for (Iter I = GTN::child_begin(N), E = GTN::child_end(N); I != E; ++I) + BO.push_back((*I)->getBlock()); + } + + for (unsigned i = BO.size(); i > 0; --i) { + BasicBlock *B = cast<BasicBlock>(BO[i-1]); + BasicBlock::InstListType &IL = B->getInstList(); + typedef BasicBlock::InstListType::reverse_iterator reverse_iterator; + ValueVect Ins; + for (reverse_iterator I = IL.rbegin(), E = IL.rend(); I != E; ++I) + Ins.push_back(&*I); + for (ValueVect::iterator I = Ins.begin(), E = Ins.end(); I != E; ++I) { + Instruction *In = cast<Instruction>(*I); + if (isInstructionTriviallyDead(In)) + In->eraseFromParent(); + } + } +} + + +bool HexagonCommonGEP::runOnFunction(Function &F) { + // For now bail out on C++ exception handling. 
+ for (Function::iterator A = F.begin(), Z = F.end(); A != Z; ++A) + for (BasicBlock::iterator I = A->begin(), E = A->end(); I != E; ++I) + if (isa<InvokeInst>(I) || isa<LandingPadInst>(I)) + return false; + + Fn = &F; + DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); + PDT = &getAnalysis<PostDominatorTree>(); + LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); + Ctx = &F.getContext(); + + Nodes.clear(); + Uses.clear(); + NodeOrder.clear(); + + SpecificBumpPtrAllocator<GepNode> Allocator; + Mem = &Allocator; + + collect(); + common(); + + NodeToValueMap Loc; + computeNodePlacement(Loc); + materialize(Loc); + removeDeadCode(); + +#ifdef XDEBUG + // Run this only when expensive checks are enabled. + verifyFunction(F); +#endif + return true; +} + + +namespace llvm { + FunctionPass *createHexagonCommonGEP() { + return new HexagonCommonGEP(); + } +} diff --git a/lib/Target/Hexagon/HexagonExpandCondsets.cpp b/lib/Target/Hexagon/HexagonExpandCondsets.cpp index 37ed173a79cd..ce10aeadef94 100644 --- a/lib/Target/Hexagon/HexagonExpandCondsets.cpp +++ b/lib/Target/Hexagon/HexagonExpandCondsets.cpp @@ -1,3 +1,12 @@ +//===--- HexagonExpandCondsets.cpp ----------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + // Replace mux instructions with the corresponding legal instructions. // It is meant to work post-SSA, but still on virtual registers. 
It was // originally placed between register coalescing and machine instruction diff --git a/lib/Target/Hexagon/HexagonFrameLowering.cpp b/lib/Target/Hexagon/HexagonFrameLowering.cpp index 868f87e18413..29283c81877e 100644 --- a/lib/Target/Hexagon/HexagonFrameLowering.cpp +++ b/lib/Target/Hexagon/HexagonFrameLowering.cpp @@ -864,13 +864,13 @@ static bool needToReserveScavengingSpillSlots(MachineFunction &MF, // Check for an unused caller-saved register. for ( ; *CallerSavedRegs; ++CallerSavedRegs) { MCPhysReg FreeReg = *CallerSavedRegs; - if (MRI.isPhysRegUsed(FreeReg)) + if (!MRI.reg_nodbg_empty(FreeReg)) continue; // Check aliased register usage. bool IsCurrentRegUsed = false; for (MCRegAliasIterator AI(FreeReg, &HRI, false); AI.isValid(); ++AI) - if (MRI.isPhysRegUsed(*AI)) { + if (!MRI.reg_nodbg_empty(*AI)) { IsCurrentRegUsed = true; break; } @@ -959,8 +959,11 @@ bool HexagonFrameLowering::replacePredRegPseudoSpillCode(MachineFunction &MF) } -void HexagonFrameLowering::processFunctionBeforeCalleeSavedScan( - MachineFunction &MF, RegScavenger* RS) const { +void HexagonFrameLowering::determineCalleeSaves(MachineFunction &MF, + BitVector &SavedRegs, + RegScavenger *RS) const { + TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS); + auto &HST = static_cast<const HexagonSubtarget&>(MF.getSubtarget()); auto &HRI = *HST.getRegisterInfo(); @@ -969,11 +972,9 @@ void HexagonFrameLowering::processFunctionBeforeCalleeSavedScan( // If we have a function containing __builtin_eh_return we want to spill and // restore all callee saved registers. Pretend that they are used. 
if (HasEHReturn) { - MachineRegisterInfo &MRI = MF.getRegInfo(); for (const MCPhysReg *CSRegs = HRI.getCalleeSavedRegs(&MF); *CSRegs; ++CSRegs) - if (!MRI.isPhysRegUsed(*CSRegs)) - MRI.setPhysRegUsed(*CSRegs); + SavedRegs.set(*CSRegs); } const TargetRegisterClass &RC = Hexagon::IntRegsRegClass; diff --git a/lib/Target/Hexagon/HexagonFrameLowering.h b/lib/Target/Hexagon/HexagonFrameLowering.h index 89500cb85724..d39ee2c77195 100644 --- a/lib/Target/Hexagon/HexagonFrameLowering.h +++ b/lib/Target/Hexagon/HexagonFrameLowering.h @@ -45,7 +45,7 @@ public: MachineBasicBlock &MBB, MachineBasicBlock::iterator I) const override; void processFunctionBeforeFrameFinalized(MachineFunction &MF, RegScavenger *RS = nullptr) const override; - void processFunctionBeforeCalleeSavedScan(MachineFunction &MF, + void determineCalleeSaves(MachineFunction &MF, BitVector &SavedRegs, RegScavenger *RS) const override; bool targetHandlesStackFrameRounding() const override { diff --git a/lib/Target/Hexagon/HexagonGenExtract.cpp b/lib/Target/Hexagon/HexagonGenExtract.cpp new file mode 100644 index 000000000000..4d32208bd5aa --- /dev/null +++ b/lib/Target/Hexagon/HexagonGenExtract.cpp @@ -0,0 +1,259 @@ +//===--- HexagonGenExtract.cpp --------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// + +#include "llvm/ADT/STLExtras.h" +#include "llvm/CodeGen/MachineFunctionAnalysis.h" +#include "llvm/IR/Constants.h" +#include "llvm/IR/Dominators.h" +#include "llvm/IR/Function.h" +#include "llvm/IR/Instructions.h" +#include "llvm/IR/IntrinsicInst.h" +#include "llvm/IR/IRBuilder.h" +#include "llvm/IR/PatternMatch.h" +#include "llvm/Pass.h" +#include "llvm/Support/CommandLine.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/MathExtras.h" +#include "llvm/Support/raw_ostream.h" + +using namespace llvm; + +static cl::opt<unsigned> ExtractCutoff("extract-cutoff", cl::init(~0U), + cl::Hidden, cl::desc("Cutoff for generating \"extract\"" + " instructions")); + +// This prevents generating extract instructions that have the offset of 0. +// One of the reasons for "extract" is to put a sequence of bits in a regis- +// ter, starting at offset 0 (so that these bits can then be used by an +// "insert"). If the bits are already at offset 0, it is better not to gene- +// rate "extract", since logical bit operations can be merged into compound +// instructions (as opposed to "extract"). 
+static cl::opt<bool> NoSR0("extract-nosr0", cl::init(true), cl::Hidden, + cl::desc("No extract instruction with offset 0")); + +static cl::opt<bool> NeedAnd("extract-needand", cl::init(true), cl::Hidden, + cl::desc("Require & in extract patterns")); + +namespace llvm { + void initializeHexagonGenExtractPass(PassRegistry&); + FunctionPass *createHexagonGenExtract(); +} + + +namespace { + class HexagonGenExtract : public FunctionPass { + public: + static char ID; + HexagonGenExtract() : FunctionPass(ID), ExtractCount(0) { + initializeHexagonGenExtractPass(*PassRegistry::getPassRegistry()); + } + virtual const char *getPassName() const override { + return "Hexagon generate \"extract\" instructions"; + } + virtual bool runOnFunction(Function &F) override; + virtual void getAnalysisUsage(AnalysisUsage &AU) const override { + AU.addRequired<DominatorTreeWrapperPass>(); + AU.addPreserved<DominatorTreeWrapperPass>(); + AU.addPreserved<MachineFunctionAnalysis>(); + FunctionPass::getAnalysisUsage(AU); + } + private: + bool visitBlock(BasicBlock *B); + bool convert(Instruction *In); + + unsigned ExtractCount; + DominatorTree *DT; + }; + + char HexagonGenExtract::ID = 0; +} + +INITIALIZE_PASS_BEGIN(HexagonGenExtract, "hextract", "Hexagon generate " + "\"extract\" instructions", false, false) +INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) +INITIALIZE_PASS_END(HexagonGenExtract, "hextract", "Hexagon generate " + "\"extract\" instructions", false, false) + + +bool HexagonGenExtract::convert(Instruction *In) { + using namespace PatternMatch; + Value *BF = 0; + ConstantInt *CSL = 0, *CSR = 0, *CM = 0; + BasicBlock *BB = In->getParent(); + LLVMContext &Ctx = BB->getContext(); + bool LogicalSR; + + // (and (shl (lshr x, #sr), #sl), #m) + LogicalSR = true; + bool Match = match(In, m_And(m_Shl(m_LShr(m_Value(BF), m_ConstantInt(CSR)), + m_ConstantInt(CSL)), + m_ConstantInt(CM))); + + if (!Match) { + // (and (shl (ashr x, #sr), #sl), #m) + LogicalSR = false; + Match = match(In, 
m_And(m_Shl(m_AShr(m_Value(BF), m_ConstantInt(CSR)), + m_ConstantInt(CSL)), + m_ConstantInt(CM))); + } + if (!Match) { + // (and (shl x, #sl), #m) + LogicalSR = true; + CSR = ConstantInt::get(Type::getInt32Ty(Ctx), 0); + Match = match(In, m_And(m_Shl(m_Value(BF), m_ConstantInt(CSL)), + m_ConstantInt(CM))); + if (Match && NoSR0) + return false; + } + if (!Match) { + // (and (lshr x, #sr), #m) + LogicalSR = true; + CSL = ConstantInt::get(Type::getInt32Ty(Ctx), 0); + Match = match(In, m_And(m_LShr(m_Value(BF), m_ConstantInt(CSR)), + m_ConstantInt(CM))); + } + if (!Match) { + // (and (ashr x, #sr), #m) + LogicalSR = false; + CSL = ConstantInt::get(Type::getInt32Ty(Ctx), 0); + Match = match(In, m_And(m_AShr(m_Value(BF), m_ConstantInt(CSR)), + m_ConstantInt(CM))); + } + if (!Match) { + CM = 0; + // (shl (lshr x, #sr), #sl) + LogicalSR = true; + Match = match(In, m_Shl(m_LShr(m_Value(BF), m_ConstantInt(CSR)), + m_ConstantInt(CSL))); + } + if (!Match) { + CM = 0; + // (shl (ashr x, #sr), #sl) + LogicalSR = false; + Match = match(In, m_Shl(m_AShr(m_Value(BF), m_ConstantInt(CSR)), + m_ConstantInt(CSL))); + } + if (!Match) + return false; + + Type *Ty = BF->getType(); + if (!Ty->isIntegerTy()) + return false; + unsigned BW = Ty->getPrimitiveSizeInBits(); + if (BW != 32 && BW != 64) + return false; + + uint32_t SR = CSR->getZExtValue(); + uint32_t SL = CSL->getZExtValue(); + + if (!CM) { + // If there was no and, and the shift left did not remove all potential + // sign bits created by the shift right, then extractu cannot reproduce + // this value. + if (!LogicalSR && (SR > SL)) + return false; + APInt A = APInt(BW, ~0ULL).lshr(SR).shl(SL); + CM = ConstantInt::get(Ctx, A); + } + + // CM is the shifted-left mask. Shift it back right to remove the zero + // bits on least-significant positions. + APInt M = CM->getValue().lshr(SL); + uint32_t T = M.countTrailingOnes(); + + // During the shifts some of the bits will be lost. 
Calculate how many
+  // of the original value will remain after shift right and then left.
+  uint32_t U = BW - std::max(SL, SR);
+  // The width of the extracted field is the minimum of the original bits
+  // that remain after the shifts and the number of contiguous 1s in the mask.
+  uint32_t W = std::min(U, T);
+  if (W == 0)
+    return false;
+
+  // Check if the extracted bits are contained within the mask that it is
+  // and-ed with. The extract operation will copy these bits, and so the
+  // mask cannot have any holes in it that would clear any of the bits of the
+  // extracted field.
+  if (!LogicalSR) {
+    // If the shift right was arithmetic, it could have included some 1 bits.
+    // It is still ok to generate extract, but only if the mask eliminates
+    // those bits (i.e. M does not have any bits set beyond U).
+    APInt C = APInt::getHighBitsSet(BW, BW-U);
+    if (M.intersects(C) || !APIntOps::isMask(W, M))
+      return false;
+  } else {
+    // Check if M starts with a contiguous sequence of W times 1 bits. Get
+    // the low U bits of M (which eliminates the 0 bits shifted in on the
+    // left), and check if the result is APInt's "mask":
+    if (!APIntOps::isMask(W, M.getLoBits(U)))
+      return false;
+  }
+
+  // Emit the extractu/extractup intrinsic that copies W bits starting at
+  // offset SR, then shift the result back left if the original expression
+  // placed the field at a nonzero offset SL.
+  IRBuilder<> IRB(BB, In);
+  Intrinsic::ID IntId = (BW == 32) ? Intrinsic::hexagon_S2_extractu
+                                   : Intrinsic::hexagon_S2_extractup;
+  Module *Mod = BB->getParent()->getParent();
+  Value *ExtF = Intrinsic::getDeclaration(Mod, IntId);
+  Value *NewIn = IRB.CreateCall(ExtF, {BF, IRB.getInt32(W), IRB.getInt32(SR)});
+  if (SL != 0)
+    NewIn = IRB.CreateShl(NewIn, SL, CSL->getName());
+  In->replaceAllUsesWith(NewIn);
+  return true;
+}
+
+
+bool HexagonGenExtract::visitBlock(BasicBlock *B) {
+  // Depth-first, bottom-up traversal.
+ DomTreeNode *DTN = DT->getNode(B); + typedef GraphTraits<DomTreeNode*> GTN; + typedef GTN::ChildIteratorType Iter; + for (Iter I = GTN::child_begin(DTN), E = GTN::child_end(DTN); I != E; ++I) + visitBlock((*I)->getBlock()); + + // Allow limiting the number of generated extracts for debugging purposes. + bool HasCutoff = ExtractCutoff.getPosition(); + unsigned Cutoff = ExtractCutoff; + + bool Changed = false; + BasicBlock::iterator I = std::prev(B->end()), NextI, Begin = B->begin(); + while (true) { + if (HasCutoff && (ExtractCount >= Cutoff)) + return Changed; + bool Last = (I == Begin); + if (!Last) + NextI = std::prev(I); + Instruction *In = &*I; + bool Done = convert(In); + if (HasCutoff && Done) + ExtractCount++; + Changed |= Done; + if (Last) + break; + I = NextI; + } + return Changed; +} + + +bool HexagonGenExtract::runOnFunction(Function &F) { + DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); + bool Changed; + + // Traverse the function bottom-up, to see super-expressions before their + // sub-expressions. + BasicBlock *Entry = GraphTraits<Function*>::getEntryNode(&F); + Changed = visitBlock(Entry); + + return Changed; +} + + +FunctionPass *llvm::createHexagonGenExtract() { + return new HexagonGenExtract(); +} diff --git a/lib/Target/Hexagon/HexagonGenInsert.cpp b/lib/Target/Hexagon/HexagonGenInsert.cpp new file mode 100644 index 000000000000..096da949e77b --- /dev/null +++ b/lib/Target/Hexagon/HexagonGenInsert.cpp @@ -0,0 +1,1598 @@ +//===--- HexagonGenInsert.cpp ---------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// + +#define DEBUG_TYPE "hexinsert" + +#include "llvm/Pass.h" +#include "llvm/PassRegistry.h" +#include "llvm/ADT/BitVector.h" +#include "llvm/ADT/DenseMap.h" +#include "llvm/ADT/DenseSet.h" +#include "llvm/ADT/PostOrderIterator.h" +#include "llvm/CodeGen/MachineDominators.h" +#include "llvm/CodeGen/MachineFunction.h" +#include "llvm/CodeGen/MachineFunctionPass.h" +#include "llvm/CodeGen/MachineInstrBuilder.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/IR/Constants.h" +#include "llvm/Support/CommandLine.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/raw_ostream.h" +#include "llvm/Support/Timer.h" +#include "llvm/Target/TargetMachine.h" +#include "llvm/Target/TargetRegisterInfo.h" + +#include "Hexagon.h" +#include "HexagonRegisterInfo.h" +#include "HexagonTargetMachine.h" +#include "HexagonBitTracker.h" + +#include <map> +#include <vector> + +using namespace llvm; + +static cl::opt<unsigned> VRegIndexCutoff("insert-vreg-cutoff", cl::init(~0U), + cl::Hidden, cl::ZeroOrMore, cl::desc("Vreg# cutoff for insert generation.")); +// The distance cutoff is selected based on the precheckin-perf results: +// cutoffs 20, 25, 35, and 40 are worse than 30. 
+static cl::opt<unsigned> VRegDistCutoff("insert-dist-cutoff", cl::init(30U), + cl::Hidden, cl::ZeroOrMore, cl::desc("Vreg distance cutoff for insert " + "generation.")); + +static cl::opt<bool> OptTiming("insert-timing", cl::init(false), cl::Hidden, + cl::ZeroOrMore, cl::desc("Enable timing of insert generation")); +static cl::opt<bool> OptTimingDetail("insert-timing-detail", cl::init(false), + cl::Hidden, cl::ZeroOrMore, cl::desc("Enable detailed timing of insert " + "generation")); + +static cl::opt<bool> OptSelectAll0("insert-all0", cl::init(false), cl::Hidden, + cl::ZeroOrMore); +static cl::opt<bool> OptSelectHas0("insert-has0", cl::init(false), cl::Hidden, + cl::ZeroOrMore); +// Whether to construct constant values via "insert". Could eliminate constant +// extenders, but often not practical. +static cl::opt<bool> OptConst("insert-const", cl::init(false), cl::Hidden, + cl::ZeroOrMore); + +namespace { + // The preprocessor gets confused when the DEBUG macro is passed larger + // chunks of code. Use this function to detect debugging. + inline bool isDebug() { +#ifndef NDEBUG + return ::llvm::DebugFlag && ::llvm::isCurrentDebugType(DEBUG_TYPE); +#else + return false; +#endif + } +} + + +namespace { + // Set of virtual registers, based on BitVector. 
+ struct RegisterSet : private BitVector { + RegisterSet() : BitVector() {} + explicit RegisterSet(unsigned s, bool t = false) : BitVector(s, t) {} + RegisterSet(const RegisterSet &RS) : BitVector(RS) {} + + using BitVector::clear; + + unsigned find_first() const { + int First = BitVector::find_first(); + if (First < 0) + return 0; + return x2v(First); + } + + unsigned find_next(unsigned Prev) const { + int Next = BitVector::find_next(v2x(Prev)); + if (Next < 0) + return 0; + return x2v(Next); + } + + RegisterSet &insert(unsigned R) { + unsigned Idx = v2x(R); + ensure(Idx); + return static_cast<RegisterSet&>(BitVector::set(Idx)); + } + RegisterSet &remove(unsigned R) { + unsigned Idx = v2x(R); + if (Idx >= size()) + return *this; + return static_cast<RegisterSet&>(BitVector::reset(Idx)); + } + + RegisterSet &insert(const RegisterSet &Rs) { + return static_cast<RegisterSet&>(BitVector::operator|=(Rs)); + } + RegisterSet &remove(const RegisterSet &Rs) { + return static_cast<RegisterSet&>(BitVector::reset(Rs)); + } + + reference operator[](unsigned R) { + unsigned Idx = v2x(R); + ensure(Idx); + return BitVector::operator[](Idx); + } + bool operator[](unsigned R) const { + unsigned Idx = v2x(R); + assert(Idx < size()); + return BitVector::operator[](Idx); + } + bool has(unsigned R) const { + unsigned Idx = v2x(R); + if (Idx >= size()) + return false; + return BitVector::test(Idx); + } + + bool empty() const { + return !BitVector::any(); + } + bool includes(const RegisterSet &Rs) const { + // A.BitVector::test(B) <=> A-B != {} + return !Rs.BitVector::test(*this); + } + bool intersects(const RegisterSet &Rs) const { + return BitVector::anyCommon(Rs); + } + + private: + void ensure(unsigned Idx) { + if (size() <= Idx) + resize(std::max(Idx+1, 32U)); + } + static inline unsigned v2x(unsigned v) { + return TargetRegisterInfo::virtReg2Index(v); + } + static inline unsigned x2v(unsigned x) { + return TargetRegisterInfo::index2VirtReg(x); + } + }; + + + struct PrintRegSet { + 
PrintRegSet(const RegisterSet &S, const TargetRegisterInfo *RI)
+      : RS(S), TRI(RI) {}
+    friend raw_ostream &operator<< (raw_ostream &OS,
+          const PrintRegSet &P);
+  private:
+    const RegisterSet &RS;
+    const TargetRegisterInfo *TRI;
+  };
+
+  raw_ostream &operator<< (raw_ostream &OS, const PrintRegSet &P) {
+    OS << '{';
+    for (unsigned R = P.RS.find_first(); R; R = P.RS.find_next(R))
+      OS << ' ' << PrintReg(R, P.TRI);
+    OS << " }";
+    return OS;
+  }
+}
+
+
+namespace {
+  // A convenience class to associate unsigned numbers (such as virtual
+  // registers) with unsigned numbers.
+  struct UnsignedMap : public DenseMap<unsigned,unsigned> {
+    UnsignedMap() : BaseType() {}
+  private:
+    typedef DenseMap<unsigned,unsigned> BaseType;
+  };
+
+  // A utility to establish an ordering between virtual registers:
+  // VRegA < VRegB  <=>  RegisterOrdering[VRegA] < RegisterOrdering[VRegB]
+  // This is meant as a cache for the ordering of virtual registers defined
+  // by a potentially expensive comparison function, or obtained by a proce-
+  // dure that should not be repeated each time two registers are compared.
+  struct RegisterOrdering : public UnsignedMap {
+    RegisterOrdering() : UnsignedMap() {}
+    unsigned operator[](unsigned VR) const {
+      const_iterator F = find(VR);
+      assert(F != end());
+      return F->second;
+    }
+    // Add operator(), so that objects of this class can be used as
+    // comparators in std::sort et al.
+    bool operator() (unsigned VR1, unsigned VR2) const {
+      return operator[](VR1) < operator[](VR2);
+    }
+  };
+}
+
+
+namespace {
+  // Ordering of bit values. This class does not have operator[], but
+  // it supplies a comparison operator() for use in std:: algorithms.
+  // The order is as follows:
+  // - 0 < 1 < ref
+  // - ref1 < ref2, if ord(ref1.Reg) < ord(ref2.Reg),
+  //   or ord(ref1.Reg) == ord(ref2.Reg), and ref1.Pos < ref2.Pos.
+ struct BitValueOrdering { + BitValueOrdering(const RegisterOrdering &RB) : BaseOrd(RB) {} + bool operator() (const BitTracker::BitValue &V1, + const BitTracker::BitValue &V2) const; + const RegisterOrdering &BaseOrd; + }; +} + + +bool BitValueOrdering::operator() (const BitTracker::BitValue &V1, + const BitTracker::BitValue &V2) const { + if (V1 == V2) + return false; + // V1==0 => true, V2==0 => false + if (V1.is(0) || V2.is(0)) + return V1.is(0); + // Neither of V1,V2 is 0, and V1!=V2. + // V2==1 => false, V1==1 => true + if (V2.is(1) || V1.is(1)) + return !V2.is(1); + // Both V1,V2 are refs. + unsigned Ind1 = BaseOrd[V1.RefI.Reg], Ind2 = BaseOrd[V2.RefI.Reg]; + if (Ind1 != Ind2) + return Ind1 < Ind2; + // If V1.Pos==V2.Pos + assert(V1.RefI.Pos != V2.RefI.Pos && "Bit values should be different"); + return V1.RefI.Pos < V2.RefI.Pos; +} + + +namespace { + // Cache for the BitTracker's cell map. Map lookup has a logarithmic + // complexity, this class will memoize the lookup results to reduce + // the access time for repeated lookups of the same cell. + struct CellMapShadow { + CellMapShadow(const BitTracker &T) : BT(T) {} + const BitTracker::RegisterCell &lookup(unsigned VR) { + unsigned RInd = TargetRegisterInfo::virtReg2Index(VR); + // Grow the vector to at least 32 elements. + if (RInd >= CVect.size()) + CVect.resize(std::max(RInd+16, 32U), 0); + const BitTracker::RegisterCell *CP = CVect[RInd]; + if (CP == 0) + CP = CVect[RInd] = &BT.lookup(VR); + return *CP; + } + + const BitTracker &BT; + + private: + typedef std::vector<const BitTracker::RegisterCell*> CellVectType; + CellVectType CVect; + }; +} + + +namespace { + // Comparator class for lexicographic ordering of virtual registers + // according to the corresponding BitTracker::RegisterCell objects. 
+ struct RegisterCellLexCompare { + RegisterCellLexCompare(const BitValueOrdering &BO, CellMapShadow &M) + : BitOrd(BO), CM(M) {} + bool operator() (unsigned VR1, unsigned VR2) const; + private: + const BitValueOrdering &BitOrd; + CellMapShadow &CM; + }; + + // Comparator class for lexicographic ordering of virtual registers + // according to the specified bits of the corresponding BitTracker:: + // RegisterCell objects. + // Specifically, this class will be used to compare bit B of a register + // cell for a selected virtual register R with bit N of any register + // other than R. + struct RegisterCellBitCompareSel { + RegisterCellBitCompareSel(unsigned R, unsigned B, unsigned N, + const BitValueOrdering &BO, CellMapShadow &M) + : SelR(R), SelB(B), BitN(N), BitOrd(BO), CM(M) {} + bool operator() (unsigned VR1, unsigned VR2) const; + private: + const unsigned SelR, SelB; + const unsigned BitN; + const BitValueOrdering &BitOrd; + CellMapShadow &CM; + }; +} + + +bool RegisterCellLexCompare::operator() (unsigned VR1, unsigned VR2) const { + // Ordering of registers, made up from two given orderings: + // - the ordering of the register numbers, and + // - the ordering of register cells. + // Def. R1 < R2 if: + // - cell(R1) < cell(R2), or + // - cell(R1) == cell(R2), and index(R1) < index(R2). + // + // For register cells, the ordering is lexicographic, with index 0 being + // the most significant. + if (VR1 == VR2) + return false; + + const BitTracker::RegisterCell &RC1 = CM.lookup(VR1), &RC2 = CM.lookup(VR2); + uint16_t W1 = RC1.width(), W2 = RC2.width(); + for (uint16_t i = 0, w = std::min(W1, W2); i < w; ++i) { + const BitTracker::BitValue &V1 = RC1[i], &V2 = RC2[i]; + if (V1 != V2) + return BitOrd(V1, V2); + } + // Cells are equal up until the common length. 
+ if (W1 != W2) + return W1 < W2; + + return BitOrd.BaseOrd[VR1] < BitOrd.BaseOrd[VR2]; +} + + +bool RegisterCellBitCompareSel::operator() (unsigned VR1, unsigned VR2) const { + if (VR1 == VR2) + return false; + const BitTracker::RegisterCell &RC1 = CM.lookup(VR1); + const BitTracker::RegisterCell &RC2 = CM.lookup(VR2); + uint16_t W1 = RC1.width(), W2 = RC2.width(); + uint16_t Bit1 = (VR1 == SelR) ? SelB : BitN; + uint16_t Bit2 = (VR2 == SelR) ? SelB : BitN; + // If Bit1 exceeds the width of VR1, then: + // - return false, if at the same time Bit2 exceeds VR2, or + // - return true, otherwise. + // (I.e. "a bit value that does not exist is less than any bit value + // that does exist".) + if (W1 <= Bit1) + return Bit2 < W2; + // If Bit1 is within VR1, but Bit2 is not within VR2, return false. + if (W2 <= Bit2) + return false; + + const BitTracker::BitValue &V1 = RC1[Bit1], V2 = RC2[Bit2]; + if (V1 != V2) + return BitOrd(V1, V2); + return false; +} + + +namespace { + class OrderedRegisterList { + typedef std::vector<unsigned> ListType; + public: + OrderedRegisterList(const RegisterOrdering &RO) : Ord(RO) {} + void insert(unsigned VR); + void remove(unsigned VR); + unsigned operator[](unsigned Idx) const { + assert(Idx < Seq.size()); + return Seq[Idx]; + } + unsigned size() const { + return Seq.size(); + } + + typedef ListType::iterator iterator; + typedef ListType::const_iterator const_iterator; + iterator begin() { return Seq.begin(); } + iterator end() { return Seq.end(); } + const_iterator begin() const { return Seq.begin(); } + const_iterator end() const { return Seq.end(); } + + // Convenience function to convert an iterator to the corresponding index. 
+ unsigned idx(iterator It) const { return It-begin(); } + private: + ListType Seq; + const RegisterOrdering &Ord; + }; + + + struct PrintORL { + PrintORL(const OrderedRegisterList &L, const TargetRegisterInfo *RI) + : RL(L), TRI(RI) {} + friend raw_ostream &operator<< (raw_ostream &OS, const PrintORL &P); + private: + const OrderedRegisterList &RL; + const TargetRegisterInfo *TRI; + }; + + raw_ostream &operator<< (raw_ostream &OS, const PrintORL &P) { + OS << '('; + OrderedRegisterList::const_iterator B = P.RL.begin(), E = P.RL.end(); + for (OrderedRegisterList::const_iterator I = B; I != E; ++I) { + if (I != B) + OS << ", "; + OS << PrintReg(*I, P.TRI); + } + OS << ')'; + return OS; + } +} + + +void OrderedRegisterList::insert(unsigned VR) { + iterator L = std::lower_bound(Seq.begin(), Seq.end(), VR, Ord); + if (L == Seq.end()) + Seq.push_back(VR); + else + Seq.insert(L, VR); +} + + +void OrderedRegisterList::remove(unsigned VR) { + iterator L = std::lower_bound(Seq.begin(), Seq.end(), VR, Ord); + assert(L != Seq.end()); + Seq.erase(L); +} + + +namespace { + // A record of the insert form. The fields correspond to the operands + // of the "insert" instruction: + // ... 
= insert(SrcR, InsR, #Wdh, #Off) + struct IFRecord { + IFRecord(unsigned SR = 0, unsigned IR = 0, uint16_t W = 0, uint16_t O = 0) + : SrcR(SR), InsR(IR), Wdh(W), Off(O) {} + unsigned SrcR, InsR; + uint16_t Wdh, Off; + }; + + struct PrintIFR { + PrintIFR(const IFRecord &R, const TargetRegisterInfo *RI) + : IFR(R), TRI(RI) {} + private: + const IFRecord &IFR; + const TargetRegisterInfo *TRI; + friend raw_ostream &operator<< (raw_ostream &OS, const PrintIFR &P); + }; + + raw_ostream &operator<< (raw_ostream &OS, const PrintIFR &P) { + unsigned SrcR = P.IFR.SrcR, InsR = P.IFR.InsR; + OS << '(' << PrintReg(SrcR, P.TRI) << ',' << PrintReg(InsR, P.TRI) + << ",#" << P.IFR.Wdh << ",#" << P.IFR.Off << ')'; + return OS; + } + + typedef std::pair<IFRecord,RegisterSet> IFRecordWithRegSet; +} + + +namespace llvm { + void initializeHexagonGenInsertPass(PassRegistry&); + FunctionPass *createHexagonGenInsert(); +} + + +namespace { + class HexagonGenInsert : public MachineFunctionPass { + public: + static char ID; + HexagonGenInsert() : MachineFunctionPass(ID), HII(0), HRI(0) { + initializeHexagonGenInsertPass(*PassRegistry::getPassRegistry()); + } + virtual const char *getPassName() const { + return "Hexagon generate \"insert\" instructions"; + } + virtual void getAnalysisUsage(AnalysisUsage &AU) const { + AU.addRequired<MachineDominatorTree>(); + AU.addPreserved<MachineDominatorTree>(); + MachineFunctionPass::getAnalysisUsage(AU); + } + virtual bool runOnMachineFunction(MachineFunction &MF); + + private: + typedef DenseMap<std::pair<unsigned,unsigned>,unsigned> PairMapType; + + void buildOrderingMF(RegisterOrdering &RO) const; + void buildOrderingBT(RegisterOrdering &RB, RegisterOrdering &RO) const; + bool isIntClass(const TargetRegisterClass *RC) const; + bool isConstant(unsigned VR) const; + bool isSmallConstant(unsigned VR) const; + bool isValidInsertForm(unsigned DstR, unsigned SrcR, unsigned InsR, + uint16_t L, uint16_t S) const; + bool findSelfReference(unsigned VR) const; + 
bool findNonSelfReference(unsigned VR) const; + void getInstrDefs(const MachineInstr *MI, RegisterSet &Defs) const; + void getInstrUses(const MachineInstr *MI, RegisterSet &Uses) const; + unsigned distance(const MachineBasicBlock *FromB, + const MachineBasicBlock *ToB, const UnsignedMap &RPO, + PairMapType &M) const; + unsigned distance(MachineBasicBlock::const_iterator FromI, + MachineBasicBlock::const_iterator ToI, const UnsignedMap &RPO, + PairMapType &M) const; + bool findRecordInsertForms(unsigned VR, OrderedRegisterList &AVs); + void collectInBlock(MachineBasicBlock *B, OrderedRegisterList &AVs); + void findRemovableRegisters(unsigned VR, IFRecord IF, + RegisterSet &RMs) const; + void computeRemovableRegisters(); + + void pruneEmptyLists(); + void pruneCoveredSets(unsigned VR); + void pruneUsesTooFar(unsigned VR, const UnsignedMap &RPO, PairMapType &M); + void pruneRegCopies(unsigned VR); + void pruneCandidates(); + void selectCandidates(); + bool generateInserts(); + + bool removeDeadCode(MachineDomTreeNode *N); + + // IFRecord coupled with a set of potentially removable registers: + typedef std::vector<IFRecordWithRegSet> IFListType; + typedef DenseMap<unsigned,IFListType> IFMapType; // vreg -> IFListType + + void dump_map() const; + + const HexagonInstrInfo *HII; + const HexagonRegisterInfo *HRI; + + MachineFunction *MFN; + MachineRegisterInfo *MRI; + MachineDominatorTree *MDT; + CellMapShadow *CMS; + + RegisterOrdering BaseOrd; + RegisterOrdering CellOrd; + IFMapType IFMap; + }; + + char HexagonGenInsert::ID = 0; +} + + +void HexagonGenInsert::dump_map() const { + typedef IFMapType::const_iterator iterator; + for (iterator I = IFMap.begin(), E = IFMap.end(); I != E; ++I) { + dbgs() << " " << PrintReg(I->first, HRI) << ":\n"; + const IFListType &LL = I->second; + for (unsigned i = 0, n = LL.size(); i < n; ++i) + dbgs() << " " << PrintIFR(LL[i].first, HRI) << ", " + << PrintRegSet(LL[i].second, HRI) << '\n'; + } +} + + +void 
HexagonGenInsert::buildOrderingMF(RegisterOrdering &RO) const { + unsigned Index = 0; + typedef MachineFunction::const_iterator mf_iterator; + for (mf_iterator A = MFN->begin(), Z = MFN->end(); A != Z; ++A) { + const MachineBasicBlock &B = *A; + if (!CMS->BT.reached(&B)) + continue; + typedef MachineBasicBlock::const_iterator mb_iterator; + for (mb_iterator I = B.begin(), E = B.end(); I != E; ++I) { + const MachineInstr *MI = &*I; + for (unsigned i = 0, n = MI->getNumOperands(); i < n; ++i) { + const MachineOperand &MO = MI->getOperand(i); + if (MO.isReg() && MO.isDef()) { + unsigned R = MO.getReg(); + assert(MO.getSubReg() == 0 && "Unexpected subregister in definition"); + if (TargetRegisterInfo::isVirtualRegister(R)) + RO.insert(std::make_pair(R, Index++)); + } + } + } + } + // Since some virtual registers may have had their def and uses eliminated, + // they are no longer referenced in the code, and so they will not appear + // in the map. +} + + +void HexagonGenInsert::buildOrderingBT(RegisterOrdering &RB, + RegisterOrdering &RO) const { + // Create a vector of all virtual registers (collect them from the base + // ordering RB), and then sort it using the RegisterCell comparator. + BitValueOrdering BVO(RB); + RegisterCellLexCompare LexCmp(BVO, *CMS); + typedef std::vector<unsigned> SortableVectorType; + SortableVectorType VRs; + for (RegisterOrdering::iterator I = RB.begin(), E = RB.end(); I != E; ++I) + VRs.push_back(I->first); + std::sort(VRs.begin(), VRs.end(), LexCmp); + // Transfer the results to the outgoing register ordering. 
+  for (unsigned i = 0, n = VRs.size(); i < n; ++i)
+    RO.insert(std::make_pair(VRs[i], i));
+}
+
+
+inline bool HexagonGenInsert::isIntClass(const TargetRegisterClass *RC) const {
+  return RC == &Hexagon::IntRegsRegClass || RC == &Hexagon::DoubleRegsRegClass;
+}
+
+
+// Return true if every bit of VR's cell is a known constant (0 or 1).
+bool HexagonGenInsert::isConstant(unsigned VR) const {
+  const BitTracker::RegisterCell &RC = CMS->lookup(VR);
+  uint16_t W = RC.width();
+  for (uint16_t i = 0; i < W; ++i) {
+    const BitTracker::BitValue &BV = RC[i];
+    if (BV.is(0) || BV.is(1))
+      continue;
+    return false;
+  }
+  return true;
+}
+
+
+// Return true if VR is a compile-time constant whose value fits in the
+// immediate range of a single transfer instruction (no constant extender).
+bool HexagonGenInsert::isSmallConstant(unsigned VR) const {
+  const BitTracker::RegisterCell &RC = CMS->lookup(VR);
+  uint16_t W = RC.width();
+  if (W > 64)
+    return false;
+  // Reconstruct the constant value bit by bit; bail out if any bit is
+  // not a known 0 or 1.
+  uint64_t V = 0, B = 1;
+  for (uint16_t i = 0; i < W; ++i) {
+    const BitTracker::BitValue &BV = RC[i];
+    if (BV.is(1))
+      V |= B;
+    else if (!BV.is(0))
+      return false;
+    B <<= 1;
+  }
+
+  // For 32-bit registers, consider: Rd = #s16.
+  if (W == 32)
+    return isInt<16>(V);
+
+  // For 64-bit registers, it's Rdd = #s8 or Rdd = combine(#s8,#s8)
+  return isInt<8>(Lo_32(V)) && isInt<8>(Hi_32(V));
+}
+
+
+// Check whether "DstR = insert(SrcR, InsR, #L, #S)" is expressible given
+// the register classes of the three registers involved.
+bool HexagonGenInsert::isValidInsertForm(unsigned DstR, unsigned SrcR,
+      unsigned InsR, uint16_t L, uint16_t S) const {
+  const TargetRegisterClass *DstRC = MRI->getRegClass(DstR);
+  const TargetRegisterClass *SrcRC = MRI->getRegClass(SrcR);
+  const TargetRegisterClass *InsRC = MRI->getRegClass(InsR);
+  // Only integer (32-/64-bit) register classes.
+  if (!isIntClass(DstRC) || !isIntClass(SrcRC) || !isIntClass(InsRC))
+    return false;
+  // The "source" register must be of the same class as DstR.
+  if (DstRC != SrcRC)
+    return false;
+  if (DstRC == InsRC)
+    return true;
+  // A 64-bit register can only be generated from other 64-bit registers.
+  if (DstRC == &Hexagon::DoubleRegsRegClass)
+    return false;
+  // Otherwise, the inserted bit range [S, S+L) cannot cross the 32-bit
+  // word boundary.
+ if (S < 32 && S+L > 32) + return false; + return true; +} + + +bool HexagonGenInsert::findSelfReference(unsigned VR) const { + const BitTracker::RegisterCell &RC = CMS->lookup(VR); + for (uint16_t i = 0, w = RC.width(); i < w; ++i) { + const BitTracker::BitValue &V = RC[i]; + if (V.Type == BitTracker::BitValue::Ref && V.RefI.Reg == VR) + return true; + } + return false; +} + + +bool HexagonGenInsert::findNonSelfReference(unsigned VR) const { + BitTracker::RegisterCell RC = CMS->lookup(VR); + for (uint16_t i = 0, w = RC.width(); i < w; ++i) { + const BitTracker::BitValue &V = RC[i]; + if (V.Type == BitTracker::BitValue::Ref && V.RefI.Reg != VR) + return true; + } + return false; +} + + +void HexagonGenInsert::getInstrDefs(const MachineInstr *MI, + RegisterSet &Defs) const { + for (unsigned i = 0, n = MI->getNumOperands(); i < n; ++i) { + const MachineOperand &MO = MI->getOperand(i); + if (!MO.isReg() || !MO.isDef()) + continue; + unsigned R = MO.getReg(); + if (!TargetRegisterInfo::isVirtualRegister(R)) + continue; + Defs.insert(R); + } +} + + +void HexagonGenInsert::getInstrUses(const MachineInstr *MI, + RegisterSet &Uses) const { + for (unsigned i = 0, n = MI->getNumOperands(); i < n; ++i) { + const MachineOperand &MO = MI->getOperand(i); + if (!MO.isReg() || !MO.isUse()) + continue; + unsigned R = MO.getReg(); + if (!TargetRegisterInfo::isVirtualRegister(R)) + continue; + Uses.insert(R); + } +} + + +unsigned HexagonGenInsert::distance(const MachineBasicBlock *FromB, + const MachineBasicBlock *ToB, const UnsignedMap &RPO, + PairMapType &M) const { + // Forward distance from the end of a block to the beginning of it does + // not make sense. This function should not be called with FromB == ToB. + assert(FromB != ToB); + + unsigned FromN = FromB->getNumber(), ToN = ToB->getNumber(); + // If we have already computed it, return the cached result. 
+ PairMapType::iterator F = M.find(std::make_pair(FromN, ToN)); + if (F != M.end()) + return F->second; + unsigned ToRPO = RPO.lookup(ToN); + + unsigned MaxD = 0; + typedef MachineBasicBlock::const_pred_iterator pred_iterator; + for (pred_iterator I = ToB->pred_begin(), E = ToB->pred_end(); I != E; ++I) { + const MachineBasicBlock *PB = *I; + // Skip back edges. Also, if FromB is a predecessor of ToB, the distance + // along that path will be 0, and we don't need to do any calculations + // on it. + if (PB == FromB || RPO.lookup(PB->getNumber()) >= ToRPO) + continue; + unsigned D = PB->size() + distance(FromB, PB, RPO, M); + if (D > MaxD) + MaxD = D; + } + + // Memoize the result for later lookup. + M.insert(std::make_pair(std::make_pair(FromN, ToN), MaxD)); + return MaxD; +} + + +unsigned HexagonGenInsert::distance(MachineBasicBlock::const_iterator FromI, + MachineBasicBlock::const_iterator ToI, const UnsignedMap &RPO, + PairMapType &M) const { + const MachineBasicBlock *FB = FromI->getParent(), *TB = ToI->getParent(); + if (FB == TB) + return std::distance(FromI, ToI); + unsigned D1 = std::distance(TB->begin(), ToI); + unsigned D2 = distance(FB, TB, RPO, M); + unsigned D3 = std::distance(FromI, FB->end()); + return D1+D2+D3; +} + + +bool HexagonGenInsert::findRecordInsertForms(unsigned VR, + OrderedRegisterList &AVs) { + if (isDebug()) { + dbgs() << LLVM_FUNCTION_NAME << ": " << PrintReg(VR, HRI) + << " AVs: " << PrintORL(AVs, HRI) << "\n"; + } + if (AVs.size() == 0) + return false; + + typedef OrderedRegisterList::iterator iterator; + BitValueOrdering BVO(BaseOrd); + const BitTracker::RegisterCell &RC = CMS->lookup(VR); + uint16_t W = RC.width(); + + typedef std::pair<unsigned,uint16_t> RSRecord; // (reg,shift) + typedef std::vector<RSRecord> RSListType; + // Have a map, with key being the matching prefix length, and the value + // being the list of pairs (R,S), where R's prefix matches VR at S. + // (DenseMap<uint16_t,RSListType> fails to instantiate.) 
+ typedef DenseMap<unsigned,RSListType> LRSMapType; + LRSMapType LM; + + // Conceptually, rotate the cell RC right (i.e. towards the LSB) by S, + // and find matching prefixes from AVs with the rotated RC. Such a prefix + // would match a string of bits (of length L) in RC starting at S. + for (uint16_t S = 0; S < W; ++S) { + iterator B = AVs.begin(), E = AVs.end(); + // The registers in AVs are ordered according to the lexical order of + // the corresponding register cells. This means that the range of regis- + // ters in AVs that match a prefix of length L+1 will be contained in + // the range that matches a prefix of length L. This means that we can + // keep narrowing the search space as the prefix length goes up. This + // helps reduce the overall complexity of the search. + uint16_t L; + for (L = 0; L < W-S; ++L) { + // Compare against VR's bits starting at S, which emulates rotation + // of VR by S. + RegisterCellBitCompareSel RCB(VR, S+L, L, BVO, *CMS); + iterator NewB = std::lower_bound(B, E, VR, RCB); + iterator NewE = std::upper_bound(NewB, E, VR, RCB); + // For the registers that are eliminated from the next range, L is + // the longest prefix matching VR at position S (their prefixes + // differ from VR at S+L). If L>0, record this information for later + // use. + if (L > 0) { + for (iterator I = B; I != NewB; ++I) + LM[L].push_back(std::make_pair(*I, S)); + for (iterator I = NewE; I != E; ++I) + LM[L].push_back(std::make_pair(*I, S)); + } + B = NewB, E = NewE; + if (B == E) + break; + } + // Record the final register range. If this range is non-empty, then + // L=W-S. + assert(B == E || L == W-S); + if (B != E) { + for (iterator I = B; I != E; ++I) + LM[L].push_back(std::make_pair(*I, S)); + // If B!=E, then we found a range of registers whose prefixes cover the + // rest of VR from position S. There is no need to further advance S. 
+ break; + } + } + + if (isDebug()) { + dbgs() << "Prefixes matching register " << PrintReg(VR, HRI) << "\n"; + for (LRSMapType::iterator I = LM.begin(), E = LM.end(); I != E; ++I) { + dbgs() << " L=" << I->first << ':'; + const RSListType &LL = I->second; + for (unsigned i = 0, n = LL.size(); i < n; ++i) + dbgs() << " (" << PrintReg(LL[i].first, HRI) << ",@" + << LL[i].second << ')'; + dbgs() << '\n'; + } + } + + + bool Recorded = false; + + for (iterator I = AVs.begin(), E = AVs.end(); I != E; ++I) { + unsigned SrcR = *I; + int FDi = -1, LDi = -1; // First/last different bit. + const BitTracker::RegisterCell &AC = CMS->lookup(SrcR); + uint16_t AW = AC.width(); + for (uint16_t i = 0, w = std::min(W, AW); i < w; ++i) { + if (RC[i] == AC[i]) + continue; + if (FDi == -1) + FDi = i; + LDi = i; + } + if (FDi == -1) + continue; // TODO (future): Record identical registers. + // Look for a register whose prefix could patch the range [FD..LD] + // where VR and SrcR differ. + uint16_t FD = FDi, LD = LDi; // Switch to unsigned type. + uint16_t MinL = LD-FD+1; + for (uint16_t L = MinL; L < W; ++L) { + LRSMapType::iterator F = LM.find(L); + if (F == LM.end()) + continue; + RSListType &LL = F->second; + for (unsigned i = 0, n = LL.size(); i < n; ++i) { + uint16_t S = LL[i].second; + // MinL is the minimum length of the prefix. Any length above MinL + // allows some flexibility as to where the prefix can start: + // given the extra length EL=L-MinL, the prefix must start between + // max(0,FD-EL) and FD. + if (S > FD) // Starts too late. + continue; + uint16_t EL = L-MinL; + uint16_t LowS = (EL < FD) ? FD-EL : 0; + if (S < LowS) // Starts too early. 
+ continue; + unsigned InsR = LL[i].first; + if (!isValidInsertForm(VR, SrcR, InsR, L, S)) + continue; + if (isDebug()) { + dbgs() << PrintReg(VR, HRI) << " = insert(" << PrintReg(SrcR, HRI) + << ',' << PrintReg(InsR, HRI) << ",#" << L << ",#" + << S << ")\n"; + } + IFRecordWithRegSet RR(IFRecord(SrcR, InsR, L, S), RegisterSet()); + IFMap[VR].push_back(RR); + Recorded = true; + } + } + } + + return Recorded; +} + + +void HexagonGenInsert::collectInBlock(MachineBasicBlock *B, + OrderedRegisterList &AVs) { + if (isDebug()) + dbgs() << "visiting block BB#" << B->getNumber() << "\n"; + + // First, check if this block is reachable at all. If not, the bit tracker + // will not have any information about registers in it. + if (!CMS->BT.reached(B)) + return; + + bool DoConst = OptConst; + // Keep a separate set of registers defined in this block, so that we + // can remove them from the list of available registers once all DT + // successors have been processed. + RegisterSet BlockDefs, InsDefs; + for (MachineBasicBlock::iterator I = B->begin(), E = B->end(); I != E; ++I) { + MachineInstr *MI = &*I; + InsDefs.clear(); + getInstrDefs(MI, InsDefs); + // Leave those alone. They are more transparent than "insert". + bool Skip = MI->isCopy() || MI->isRegSequence(); + + if (!Skip) { + // Visit all defined registers, and attempt to find the corresponding + // "insert" representations. + for (unsigned VR = InsDefs.find_first(); VR; VR = InsDefs.find_next(VR)) { + // Do not collect registers that are known to be compile-time cons- + // tants, unless requested. + if (!DoConst && isConstant(VR)) + continue; + // If VR's cell contains a reference to VR, then VR cannot be defined + // via "insert". If VR is a constant that can be generated in a single + // instruction (without constant extenders), generating it via insert + // makes no sense. 
+ if (findSelfReference(VR) || isSmallConstant(VR)) + continue; + + findRecordInsertForms(VR, AVs); + } + } + + // Insert the defined registers into the list of available registers + // after they have been processed. + for (unsigned VR = InsDefs.find_first(); VR; VR = InsDefs.find_next(VR)) + AVs.insert(VR); + BlockDefs.insert(InsDefs); + } + + MachineDomTreeNode *N = MDT->getNode(B); + typedef GraphTraits<MachineDomTreeNode*> GTN; + typedef GTN::ChildIteratorType ChildIter; + for (ChildIter I = GTN::child_begin(N), E = GTN::child_end(N); I != E; ++I) { + MachineBasicBlock *SB = (*I)->getBlock(); + collectInBlock(SB, AVs); + } + + for (unsigned VR = BlockDefs.find_first(); VR; VR = BlockDefs.find_next(VR)) + AVs.remove(VR); +} + + +void HexagonGenInsert::findRemovableRegisters(unsigned VR, IFRecord IF, + RegisterSet &RMs) const { + // For a given register VR and a insert form, find the registers that are + // used by the current definition of VR, and which would no longer be + // needed for it after the definition of VR is replaced with the insert + // form. These are the registers that could potentially become dead. + RegisterSet Regs[2]; + + unsigned S = 0; // Register set selector. + Regs[S].insert(VR); + + while (!Regs[S].empty()) { + // Breadth-first search. + unsigned OtherS = 1-S; + Regs[OtherS].clear(); + for (unsigned R = Regs[S].find_first(); R; R = Regs[S].find_next(R)) { + Regs[S].remove(R); + if (R == IF.SrcR || R == IF.InsR) + continue; + // Check if a given register has bits that are references to any other + // registers. This is to detect situations where the instruction that + // defines register R takes register Q as an operand, but R itself does + // not contain any bits from Q. Loads are examples of how this could + // happen: + // R = load Q + // In this case (assuming we do not have any knowledge about the loaded + // value), we must not treat R as a "conveyance" of the bits from Q. 
+      // (The information in BT about R's bits would have them as constants,
+      // in case of zero-extending loads, or refs to R.)
+      if (!findNonSelfReference(R))
+        continue;
+      RMs.insert(R);
+      const MachineInstr *DefI = MRI->getVRegDef(R);
+      assert(DefI);
+      // Do not iterate past PHI nodes to avoid infinite loops. This can
+      // make the final set a bit less accurate, but the removable register
+      // sets are an approximation anyway.
+      if (DefI->isPHI())
+        continue;
+      getInstrUses(DefI, Regs[OtherS]);
+    }
+    S = OtherS;
+  }
+  // The register VR is added to the list as a side-effect of the algorithm,
+  // but it is not "potentially removable". A potentially removable register
+  // is one that may become unused (dead) after conversion to the insert form
+  // IF, and obviously VR (or its replacement) will not become dead by apply-
+  // ing IF.
+  RMs.remove(VR);
+}
+
+
+// For every insert-form candidate recorded in IFMap, compute its set of
+// potentially removable registers (the second member of each list entry)
+// via findRemovableRegisters.
+void HexagonGenInsert::computeRemovableRegisters() {
+  for (IFMapType::iterator I = IFMap.begin(), E = IFMap.end(); I != E; ++I) {
+    IFListType &LL = I->second;
+    for (unsigned i = 0, n = LL.size(); i < n; ++i)
+      findRemovableRegisters(I->first, LL[i].first, LL[i].second);
+  }
+}
+
+
+void HexagonGenInsert::pruneEmptyLists() {
+  // Remove all entries from the map, where the register has no insert forms
+  // associated with it. The iterators are collected first, so that the
+  // erasures do not disturb the iteration over the map itself.
+  typedef SmallVector<IFMapType::iterator,16> IterListType;
+  IterListType Prune;
+  for (IFMapType::iterator I = IFMap.begin(), E = IFMap.end(); I != E; ++I) {
+    if (I->second.size() == 0)
+      Prune.push_back(I);
+  }
+  for (unsigned i = 0, n = Prune.size(); i < n; ++i)
+    IFMap.erase(Prune[i]);
+}
+
+
+void HexagonGenInsert::pruneCoveredSets(unsigned VR) {
+  IFMapType::iterator F = IFMap.find(VR);
+  assert(F != IFMap.end());
+  IFListType &LL = F->second;
+
+  // First, examine the IF candidates for register VR whose removable-regis-
+  // ter sets are empty.
This means that a given candidate will not help eli- + // minate any registers, but since "insert" is not a constant-extendable + // instruction, using such a candidate may reduce code size if the defini- + // tion of VR is constant-extended. + // If there exists a candidate with a non-empty set, the ones with empty + // sets will not be used and can be removed. + MachineInstr *DefVR = MRI->getVRegDef(VR); + bool DefEx = HII->isConstExtended(DefVR); + bool HasNE = false; + for (unsigned i = 0, n = LL.size(); i < n; ++i) { + if (LL[i].second.empty()) + continue; + HasNE = true; + break; + } + if (!DefEx || HasNE) { + // The definition of VR is not constant-extended, or there is a candidate + // with a non-empty set. Remove all candidates with empty sets. + auto IsEmpty = [] (const IFRecordWithRegSet &IR) -> bool { + return IR.second.empty(); + }; + auto End = std::remove_if(LL.begin(), LL.end(), IsEmpty); + if (End != LL.end()) + LL.erase(End, LL.end()); + } else { + // The definition of VR is constant-extended, and all candidates have + // empty removable-register sets. Pick the maximum candidate, and remove + // all others. The "maximum" does not have any special meaning here, it + // is only so that the candidate that will remain on the list is selec- + // ted deterministically. + IFRecord MaxIF = LL[0].first; + for (unsigned i = 1, n = LL.size(); i < n; ++i) { + // If LL[MaxI] < LL[i], then MaxI = i. + const IFRecord &IF = LL[i].first; + unsigned M0 = BaseOrd[MaxIF.SrcR], M1 = BaseOrd[MaxIF.InsR]; + unsigned R0 = BaseOrd[IF.SrcR], R1 = BaseOrd[IF.InsR]; + if (M0 > R0) + continue; + if (M0 == R0) { + if (M1 > R1) + continue; + if (M1 == R1) { + if (MaxIF.Wdh > IF.Wdh) + continue; + if (MaxIF.Wdh == IF.Wdh && MaxIF.Off >= IF.Off) + continue; + } + } + // MaxIF < IF. + MaxIF = IF; + } + // Remove everything except the maximum candidate. All register sets + // are empty, so no need to preserve anything. 
+    LL.clear();
+    LL.push_back(std::make_pair(MaxIF, RegisterSet()));
+  }
+
+  // Now, remove those whose sets of potentially removable registers are
+  // contained in another IF candidate for VR. For example, given these
+  // candidates for vreg45,
+  //   %vreg45:
+  //     (%vreg44,%vreg41,#9,#8), { %vreg42 }
+  //     (%vreg43,%vreg41,#9,#8), { %vreg42 %vreg44 }
+  // remove the first one, since it is contained in the second one.
+  for (unsigned i = 0, n = LL.size(); i < n; ) {
+    const RegisterSet &RMi = LL[i].second;
+    unsigned j = 0;
+    while (j < n) {
+      if (j != i && LL[j].second.includes(RMi))
+        break;
+      j++;
+    }
+    if (j == n) {   // RMi not contained in anything else.
+      i++;
+      continue;
+    }
+    LL.erase(LL.begin()+i);
+    n = LL.size();
+  }
+}
+
+
+// Remove candidates for VR whose source or insert operand is defined "too
+// far away" from VR's definition: a candidate survives only if both of its
+// operand definitions lie within VRegDistCutoff of DefV, as measured by
+// distance() over the RPO block numbering (memoized in M).
+void HexagonGenInsert::pruneUsesTooFar(unsigned VR, const UnsignedMap &RPO,
+      PairMapType &M) {
+  IFMapType::iterator F = IFMap.find(VR);
+  assert(F != IFMap.end());
+  IFListType &LL = F->second;
+  unsigned Cutoff = VRegDistCutoff;
+  const MachineInstr *DefV = MRI->getVRegDef(VR);
+
+  // Iterate backwards so that erasing entry i-1 does not shift the
+  // not-yet-visited entries.
+  for (unsigned i = LL.size(); i > 0; --i) {
+    unsigned SR = LL[i-1].first.SrcR, IR = LL[i-1].first.InsR;
+    const MachineInstr *DefS = MRI->getVRegDef(SR);
+    const MachineInstr *DefI = MRI->getVRegDef(IR);
+    unsigned DSV = distance(DefS, DefV, RPO, M);
+    if (DSV < Cutoff) {
+      unsigned DIV = distance(DefI, DefV, RPO, M);
+      if (DIV < Cutoff)
+        continue;
+    }
+    LL.erase(LL.begin()+(i-1));
+  }
+}
+
+
+// Remove candidates for VR that would overwrite an entire 32-bit lane
+// (width 32 at offset 0 or 32). Such an "insert" is just a register copy
+// of InsR (or one of its halves), so there is no point in generating an
+// insert instruction for it.
+void HexagonGenInsert::pruneRegCopies(unsigned VR) {
+  IFMapType::iterator F = IFMap.find(VR);
+  assert(F != IFMap.end());
+  IFListType &LL = F->second;
+
+  auto IsCopy = [] (const IFRecordWithRegSet &IR) -> bool {
+    return IR.first.Wdh == 32 && (IR.first.Off == 0 || IR.first.Off == 32);
+  };
+  auto End = std::remove_if(LL.begin(), LL.end(), IsCopy);
+  if (End != LL.end())
+    LL.erase(End, LL.end());
+}
+
+
+void HexagonGenInsert::pruneCandidates() {
+  // Remove candidates that are not beneficial, regardless of the final
+  // selection method.
+ // First, remove candidates whose potentially removable set is a subset + // of another candidate's set. + for (IFMapType::iterator I = IFMap.begin(), E = IFMap.end(); I != E; ++I) + pruneCoveredSets(I->first); + + UnsignedMap RPO; + typedef ReversePostOrderTraversal<const MachineFunction*> RPOTType; + RPOTType RPOT(MFN); + unsigned RPON = 0; + for (RPOTType::rpo_iterator I = RPOT.begin(), E = RPOT.end(); I != E; ++I) + RPO[(*I)->getNumber()] = RPON++; + + PairMapType Memo; // Memoization map for distance calculation. + // Remove candidates that would use registers defined too far away. + for (IFMapType::iterator I = IFMap.begin(), E = IFMap.end(); I != E; ++I) + pruneUsesTooFar(I->first, RPO, Memo); + + pruneEmptyLists(); + + for (IFMapType::iterator I = IFMap.begin(), E = IFMap.end(); I != E; ++I) + pruneRegCopies(I->first); +} + + +namespace { + // Class for comparing IF candidates for registers that have multiple of + // them. The smaller the candidate, according to this ordering, the better. + // First, compare the number of zeros in the associated potentially remova- + // ble register sets. "Zero" indicates that the register is very likely to + // become dead after this transformation. + // Second, compare "averages", i.e. use-count per size. The lower wins. + // After that, it does not really matter which one is smaller. Resolve + // the tie in some deterministic way. 
+  struct IFOrdering {
+    IFOrdering(const UnsignedMap &UC, const RegisterOrdering &BO)
+      : UseC(UC), BaseOrd(BO) {}
+    // Strict-weak ordering; "smaller" means "better candidate".
+    bool operator() (const IFRecordWithRegSet &A,
+          const IFRecordWithRegSet &B) const;
+  private:
+    void stats(const RegisterSet &Rs, unsigned &Size, unsigned &Zero,
+          unsigned &Sum) const;
+    const UnsignedMap &UseC;
+    const RegisterOrdering &BaseOrd;
+  };
+}
+
+
+bool IFOrdering::operator() (const IFRecordWithRegSet &A,
+      const IFRecordWithRegSet &B) const {
+  unsigned SizeA = 0, ZeroA = 0, SumA = 0;
+  unsigned SizeB = 0, ZeroB = 0, SumB = 0;
+  stats(A.second, SizeA, ZeroA, SumA);
+  stats(B.second, SizeB, ZeroB, SumB);
+
+  // We will pick the minimum element. The more zeros, the better.
+  if (ZeroA != ZeroB)
+    return ZeroA > ZeroB;
+  // Compare SumA/SizeA with SumB/SizeB, lower is better. Cross-multiply
+  // to avoid division. NOTE(review): the products are formed in 32-bit
+  // unsigned arithmetic before widening to uint64_t — fine for realistic
+  // use counts and set sizes, but worth confirming.
+  uint64_t AvgA = SumA*SizeB, AvgB = SumB*SizeA;
+  if (AvgA != AvgB)
+    return AvgA < AvgB;
+
+  // The sets compare identical so far. Resort to comparing the IF records.
+  // The actual values don't matter, this is only for determinism.
+  unsigned OSA = BaseOrd[A.first.SrcR], OSB = BaseOrd[B.first.SrcR];
+  if (OSA != OSB)
+    return OSA < OSB;
+  unsigned OIA = BaseOrd[A.first.InsR], OIB = BaseOrd[B.first.InsR];
+  if (OIA != OIB)
+    return OIA < OIB;
+  if (A.first.Wdh != B.first.Wdh)
+    return A.first.Wdh < B.first.Wdh;
+  return A.first.Off < B.first.Off;
+}
+
+
+// Tally statistics of the register set Rs: Size = number of registers in
+// the set, Zero = how many of them have a zero use-count in UseC, and
+// Sum = the total of their use-counts. Every register in Rs must have an
+// entry in UseC.
+void IFOrdering::stats(const RegisterSet &Rs, unsigned &Size, unsigned &Zero,
+      unsigned &Sum) const {
+  for (unsigned R = Rs.find_first(); R; R = Rs.find_next(R)) {
+    UnsignedMap::const_iterator F = UseC.find(R);
+    assert(F != UseC.end());
+    unsigned UC = F->second;
+    if (UC == 0)
+      Zero++;
+    Sum += UC;
+    Size++;
+  }
+}
+
+
+void HexagonGenInsert::selectCandidates() {
+  // Some registers may have multiple valid candidates. Pick the best one
+  // (or decide not to use any).
+ + // Compute the "removability" measure of R: + // For each potentially removable register R, record the number of regis- + // ters with IF candidates, where R appears in at least one set. + RegisterSet AllRMs; + UnsignedMap UseC, RemC; + IFMapType::iterator End = IFMap.end(); + + for (IFMapType::iterator I = IFMap.begin(); I != End; ++I) { + const IFListType &LL = I->second; + RegisterSet TT; + for (unsigned i = 0, n = LL.size(); i < n; ++i) + TT.insert(LL[i].second); + for (unsigned R = TT.find_first(); R; R = TT.find_next(R)) + RemC[R]++; + AllRMs.insert(TT); + } + + for (unsigned R = AllRMs.find_first(); R; R = AllRMs.find_next(R)) { + typedef MachineRegisterInfo::use_nodbg_iterator use_iterator; + typedef SmallSet<const MachineInstr*,16> InstrSet; + InstrSet UIs; + // Count as the number of instructions in which R is used, not the + // number of operands. + use_iterator E = MRI->use_nodbg_end(); + for (use_iterator I = MRI->use_nodbg_begin(R); I != E; ++I) + UIs.insert(I->getParent()); + unsigned C = UIs.size(); + // Calculate a measure, which is the number of instructions using R, + // minus the "removability" count computed earlier. + unsigned D = RemC[R]; + UseC[R] = (C > D) ? C-D : 0; // doz + } + + + bool SelectAll0 = OptSelectAll0, SelectHas0 = OptSelectHas0; + if (!SelectAll0 && !SelectHas0) + SelectAll0 = true; + + // The smaller the number UseC for a given register R, the "less used" + // R is aside from the opportunities for removal offered by generating + // "insert" instructions. + // Iterate over the IF map, and for those registers that have multiple + // candidates, pick the minimum one according to IFOrdering. + IFOrdering IFO(UseC, BaseOrd); + for (IFMapType::iterator I = IFMap.begin(); I != End; ++I) { + IFListType &LL = I->second; + if (LL.empty()) + continue; + // Get the minimum element, remember it and clear the list. 
If the + // element found is adequate, we will put it back on the list, other- + // wise the list will remain empty, and the entry for this register + // will be removed (i.e. this register will not be replaced by insert). + IFListType::iterator MinI = std::min_element(LL.begin(), LL.end(), IFO); + assert(MinI != LL.end()); + IFRecordWithRegSet M = *MinI; + LL.clear(); + + // We want to make sure that this replacement will have a chance to be + // beneficial, and that means that we want to have indication that some + // register will be removed. The most likely registers to be eliminated + // are the use operands in the definition of I->first. Accept/reject a + // candidate based on how many of its uses it can potentially eliminate. + + RegisterSet Us; + const MachineInstr *DefI = MRI->getVRegDef(I->first); + getInstrUses(DefI, Us); + bool Accept = false; + + if (SelectAll0) { + bool All0 = true; + for (unsigned R = Us.find_first(); R; R = Us.find_next(R)) { + if (UseC[R] == 0) + continue; + All0 = false; + break; + } + Accept = All0; + } else if (SelectHas0) { + bool Has0 = false; + for (unsigned R = Us.find_first(); R; R = Us.find_next(R)) { + if (UseC[R] != 0) + continue; + Has0 = true; + break; + } + Accept = Has0; + } + if (Accept) + LL.push_back(M); + } + + // Remove candidates that add uses of removable registers, unless the + // removable registers are among replacement candidates. + // Recompute the removable registers, since some candidates may have + // been eliminated. 
+ AllRMs.clear(); + for (IFMapType::iterator I = IFMap.begin(); I != End; ++I) { + const IFListType &LL = I->second; + if (LL.size() > 0) + AllRMs.insert(LL[0].second); + } + for (IFMapType::iterator I = IFMap.begin(); I != End; ++I) { + IFListType &LL = I->second; + if (LL.size() == 0) + continue; + unsigned SR = LL[0].first.SrcR, IR = LL[0].first.InsR; + if (AllRMs[SR] || AllRMs[IR]) + LL.clear(); + } + + pruneEmptyLists(); +} + + +bool HexagonGenInsert::generateInserts() { + // Create a new register for each one from IFMap, and store them in the + // map. + UnsignedMap RegMap; + for (IFMapType::iterator I = IFMap.begin(), E = IFMap.end(); I != E; ++I) { + unsigned VR = I->first; + const TargetRegisterClass *RC = MRI->getRegClass(VR); + unsigned NewVR = MRI->createVirtualRegister(RC); + RegMap[VR] = NewVR; + } + + // We can generate the "insert" instructions using potentially stale re- + // gisters: SrcR and InsR for a given VR may be among other registers that + // are also replaced. This is fine, we will do the mass "rauw" a bit later. + for (IFMapType::iterator I = IFMap.begin(), E = IFMap.end(); I != E; ++I) { + MachineInstr *MI = MRI->getVRegDef(I->first); + MachineBasicBlock &B = *MI->getParent(); + DebugLoc DL = MI->getDebugLoc(); + unsigned NewR = RegMap[I->first]; + bool R32 = MRI->getRegClass(NewR) == &Hexagon::IntRegsRegClass; + const MCInstrDesc &D = R32 ? HII->get(Hexagon::S2_insert) + : HII->get(Hexagon::S2_insertp); + IFRecord IF = I->second[0].first; + unsigned Wdh = IF.Wdh, Off = IF.Off; + unsigned InsS = 0; + if (R32 && MRI->getRegClass(IF.InsR) == &Hexagon::DoubleRegsRegClass) { + InsS = Hexagon::subreg_loreg; + if (Off >= 32) { + InsS = Hexagon::subreg_hireg; + Off -= 32; + } + } + // Advance to the proper location for inserting instructions. This could + // be B.end(). 
+ MachineBasicBlock::iterator At = MI; + if (MI->isPHI()) + At = B.getFirstNonPHI(); + + BuildMI(B, At, DL, D, NewR) + .addReg(IF.SrcR) + .addReg(IF.InsR, 0, InsS) + .addImm(Wdh) + .addImm(Off); + + MRI->clearKillFlags(IF.SrcR); + MRI->clearKillFlags(IF.InsR); + } + + for (IFMapType::iterator I = IFMap.begin(), E = IFMap.end(); I != E; ++I) { + MachineInstr *DefI = MRI->getVRegDef(I->first); + MRI->replaceRegWith(I->first, RegMap[I->first]); + DefI->eraseFromParent(); + } + + return true; +} + + +bool HexagonGenInsert::removeDeadCode(MachineDomTreeNode *N) { + bool Changed = false; + typedef GraphTraits<MachineDomTreeNode*> GTN; + for (auto I = GTN::child_begin(N), E = GTN::child_end(N); I != E; ++I) + Changed |= removeDeadCode(*I); + + MachineBasicBlock *B = N->getBlock(); + std::vector<MachineInstr*> Instrs; + for (auto I = B->rbegin(), E = B->rend(); I != E; ++I) + Instrs.push_back(&*I); + + for (auto I = Instrs.begin(), E = Instrs.end(); I != E; ++I) { + MachineInstr *MI = *I; + unsigned Opc = MI->getOpcode(); + // Do not touch lifetime markers. This is why the target-independent DCE + // cannot be used. 
+ if (Opc == TargetOpcode::LIFETIME_START || + Opc == TargetOpcode::LIFETIME_END) + continue; + bool Store = false; + if (MI->isInlineAsm() || !MI->isSafeToMove(nullptr, Store)) + continue; + + bool AllDead = true; + SmallVector<unsigned,2> Regs; + for (ConstMIOperands Op(MI); Op.isValid(); ++Op) { + if (!Op->isReg() || !Op->isDef()) + continue; + unsigned R = Op->getReg(); + if (!TargetRegisterInfo::isVirtualRegister(R) || + !MRI->use_nodbg_empty(R)) { + AllDead = false; + break; + } + Regs.push_back(R); + } + if (!AllDead) + continue; + + B->erase(MI); + for (unsigned I = 0, N = Regs.size(); I != N; ++I) + MRI->markUsesInDebugValueAsUndef(Regs[I]); + Changed = true; + } + + return Changed; +} + + +bool HexagonGenInsert::runOnMachineFunction(MachineFunction &MF) { + bool Timing = OptTiming, TimingDetail = Timing && OptTimingDetail; + bool Changed = false; + TimerGroup __G("hexinsert"); + NamedRegionTimer __T("hexinsert", Timing && !TimingDetail); + + // Sanity check: one, but not both. + assert(!OptSelectAll0 || !OptSelectHas0); + + IFMap.clear(); + BaseOrd.clear(); + CellOrd.clear(); + + const auto &ST = MF.getSubtarget<HexagonSubtarget>(); + HII = ST.getInstrInfo(); + HRI = ST.getRegisterInfo(); + MFN = &MF; + MRI = &MF.getRegInfo(); + MDT = &getAnalysis<MachineDominatorTree>(); + + // Clean up before any further processing, so that dead code does not + // get used in a newly generated "insert" instruction. Have a custom + // version of DCE that preserves lifetime markers. Without it, merging + // of stack objects can fail to recognize and merge disjoint objects + // leading to unnecessary stack growth. 
+ Changed |= removeDeadCode(MDT->getRootNode()); + + const HexagonEvaluator HE(*HRI, *MRI, *HII, MF); + BitTracker BTLoc(HE, MF); + BTLoc.trace(isDebug()); + BTLoc.run(); + CellMapShadow MS(BTLoc); + CMS = &MS; + + buildOrderingMF(BaseOrd); + buildOrderingBT(BaseOrd, CellOrd); + + if (isDebug()) { + dbgs() << "Cell ordering:\n"; + for (RegisterOrdering::iterator I = CellOrd.begin(), E = CellOrd.end(); + I != E; ++I) { + unsigned VR = I->first, Pos = I->second; + dbgs() << PrintReg(VR, HRI) << " -> " << Pos << "\n"; + } + } + + // Collect candidates for conversion into the insert forms. + MachineBasicBlock *RootB = MDT->getRoot(); + OrderedRegisterList AvailR(CellOrd); + + { + NamedRegionTimer _T("collection", "hexinsert", TimingDetail); + collectInBlock(RootB, AvailR); + // Complete the information gathered in IFMap. + computeRemovableRegisters(); + } + + if (isDebug()) { + dbgs() << "Candidates after collection:\n"; + dump_map(); + } + + if (IFMap.empty()) + return false; + + { + NamedRegionTimer _T("pruning", "hexinsert", TimingDetail); + pruneCandidates(); + } + + if (isDebug()) { + dbgs() << "Candidates after pruning:\n"; + dump_map(); + } + + if (IFMap.empty()) + return false; + + { + NamedRegionTimer _T("selection", "hexinsert", TimingDetail); + selectCandidates(); + } + + if (isDebug()) { + dbgs() << "Candidates after selection:\n"; + dump_map(); + } + + // Filter out vregs beyond the cutoff. 
+ if (VRegIndexCutoff.getPosition()) { + unsigned Cutoff = VRegIndexCutoff; + typedef SmallVector<IFMapType::iterator,16> IterListType; + IterListType Out; + for (IFMapType::iterator I = IFMap.begin(), E = IFMap.end(); I != E; ++I) { + unsigned Idx = TargetRegisterInfo::virtReg2Index(I->first); + if (Idx >= Cutoff) + Out.push_back(I); + } + for (unsigned i = 0, n = Out.size(); i < n; ++i) + IFMap.erase(Out[i]); + } + + { + NamedRegionTimer _T("generation", "hexinsert", TimingDetail); + Changed = generateInserts(); + } + + return Changed; +} + + +FunctionPass *llvm::createHexagonGenInsert() { + return new HexagonGenInsert(); +} + + +//===----------------------------------------------------------------------===// +// Public Constructor Functions +//===----------------------------------------------------------------------===// + +INITIALIZE_PASS_BEGIN(HexagonGenInsert, "hexinsert", + "Hexagon generate \"insert\" instructions", false, false) +INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree) +INITIALIZE_PASS_END(HexagonGenInsert, "hexinsert", + "Hexagon generate \"insert\" instructions", false, false) diff --git a/lib/Target/Hexagon/HexagonGenPredicate.cpp b/lib/Target/Hexagon/HexagonGenPredicate.cpp new file mode 100644 index 000000000000..6905c4f6d125 --- /dev/null +++ b/lib/Target/Hexagon/HexagonGenPredicate.cpp @@ -0,0 +1,525 @@ +//===--- HexagonGenPredicate.cpp ------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// + +#define DEBUG_TYPE "gen-pred" + +#include "llvm/ADT/SetVector.h" +#include "llvm/CodeGen/Passes.h" +#include "llvm/CodeGen/MachineDominators.h" +#include "llvm/CodeGen/MachineFunctionPass.h" +#include "llvm/CodeGen/MachineInstrBuilder.h" +#include "llvm/CodeGen/MachineLoopInfo.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/Support/CommandLine.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/raw_ostream.h" +#include "llvm/Target/TargetMachine.h" +#include "llvm/Target/TargetInstrInfo.h" +#include "HexagonTargetMachine.h" + +#include <functional> +#include <queue> +#include <set> +#include <vector> + +using namespace llvm; + +namespace llvm { + void initializeHexagonGenPredicatePass(PassRegistry& Registry); + FunctionPass *createHexagonGenPredicate(); +} + +namespace { + struct Register { + unsigned R, S; + Register(unsigned r = 0, unsigned s = 0) : R(r), S(s) {} + Register(const MachineOperand &MO) : R(MO.getReg()), S(MO.getSubReg()) {} + bool operator== (const Register &Reg) const { + return R == Reg.R && S == Reg.S; + } + bool operator< (const Register &Reg) const { + return R < Reg.R || (R == Reg.R && S < Reg.S); + } + }; + struct PrintRegister { + PrintRegister(Register R, const TargetRegisterInfo &I) : Reg(R), TRI(I) {} + friend raw_ostream &operator<< (raw_ostream &OS, const PrintRegister &PR); + private: + Register Reg; + const TargetRegisterInfo &TRI; + }; + raw_ostream &operator<< (raw_ostream &OS, const PrintRegister &PR) + LLVM_ATTRIBUTE_UNUSED; + raw_ostream &operator<< (raw_ostream &OS, const PrintRegister &PR) { + return OS << PrintReg(PR.Reg.R, &PR.TRI, PR.Reg.S); + } + + class HexagonGenPredicate : public MachineFunctionPass { + public: + static char ID; + HexagonGenPredicate() : MachineFunctionPass(ID), TII(0), TRI(0), MRI(0) { + initializeHexagonGenPredicatePass(*PassRegistry::getPassRegistry()); + } + virtual const char 
*getPassName() const { + return "Hexagon generate predicate operations"; + } + virtual void getAnalysisUsage(AnalysisUsage &AU) const { + AU.addRequired<MachineDominatorTree>(); + AU.addPreserved<MachineDominatorTree>(); + MachineFunctionPass::getAnalysisUsage(AU); + } + virtual bool runOnMachineFunction(MachineFunction &MF); + + private: + typedef SetVector<MachineInstr*> VectOfInst; + typedef std::set<Register> SetOfReg; + typedef std::map<Register,Register> RegToRegMap; + + const HexagonInstrInfo *TII; + const HexagonRegisterInfo *TRI; + MachineRegisterInfo *MRI; + SetOfReg PredGPRs; + VectOfInst PUsers; + RegToRegMap G2P; + + bool isPredReg(unsigned R); + void collectPredicateGPR(MachineFunction &MF); + void processPredicateGPR(const Register &Reg); + unsigned getPredForm(unsigned Opc); + bool isConvertibleToPredForm(const MachineInstr *MI); + bool isScalarCmp(unsigned Opc); + bool isScalarPred(Register PredReg); + Register getPredRegFor(const Register &Reg); + bool convertToPredForm(MachineInstr *MI); + bool eliminatePredCopies(MachineFunction &MF); + }; + + char HexagonGenPredicate::ID = 0; +} + +INITIALIZE_PASS_BEGIN(HexagonGenPredicate, "hexagon-gen-pred", + "Hexagon generate predicate operations", false, false) +INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree) +INITIALIZE_PASS_END(HexagonGenPredicate, "hexagon-gen-pred", + "Hexagon generate predicate operations", false, false) + +bool HexagonGenPredicate::isPredReg(unsigned R) { + if (!TargetRegisterInfo::isVirtualRegister(R)) + return false; + const TargetRegisterClass *RC = MRI->getRegClass(R); + return RC == &Hexagon::PredRegsRegClass; +} + + +unsigned HexagonGenPredicate::getPredForm(unsigned Opc) { + using namespace Hexagon; + + switch (Opc) { + case A2_and: + case A2_andp: + return C2_and; + case A4_andn: + case A4_andnp: + return C2_andn; + case M4_and_and: + return C4_and_and; + case M4_and_andn: + return C4_and_andn; + case M4_and_or: + return C4_and_or; + + case A2_or: + case A2_orp: + return 
C2_or; + case A4_orn: + case A4_ornp: + return C2_orn; + case M4_or_and: + return C4_or_and; + case M4_or_andn: + return C4_or_andn; + case M4_or_or: + return C4_or_or; + + case A2_xor: + case A2_xorp: + return C2_xor; + + case C2_tfrrp: + return COPY; + } + // The opcode corresponding to 0 is TargetOpcode::PHI. We can use 0 here + // to denote "none", but we need to make sure that none of the valid opcodes + // that we return will ever be 0. + assert(PHI == 0 && "Use different value for <none>"); + return 0; +} + + +bool HexagonGenPredicate::isConvertibleToPredForm(const MachineInstr *MI) { + unsigned Opc = MI->getOpcode(); + if (getPredForm(Opc) != 0) + return true; + + // Comparisons against 0 are also convertible. This does not apply to + // A4_rcmpeqi or A4_rcmpneqi, since they produce values 0 or 1, which + // may not match the value that the predicate register would have if + // it was converted to a predicate form. + switch (Opc) { + case Hexagon::C2_cmpeqi: + case Hexagon::C4_cmpneqi: + if (MI->getOperand(2).isImm() && MI->getOperand(2).getImm() == 0) + return true; + break; + } + return false; +} + + +void HexagonGenPredicate::collectPredicateGPR(MachineFunction &MF) { + for (MachineFunction::iterator A = MF.begin(), Z = MF.end(); A != Z; ++A) { + MachineBasicBlock &B = *A; + for (MachineBasicBlock::iterator I = B.begin(), E = B.end(); I != E; ++I) { + MachineInstr *MI = &*I; + unsigned Opc = MI->getOpcode(); + switch (Opc) { + case Hexagon::C2_tfrpr: + case TargetOpcode::COPY: + if (isPredReg(MI->getOperand(1).getReg())) { + Register RD = MI->getOperand(0); + if (TargetRegisterInfo::isVirtualRegister(RD.R)) + PredGPRs.insert(RD); + } + break; + } + } + } +} + + +void HexagonGenPredicate::processPredicateGPR(const Register &Reg) { + DEBUG(dbgs() << LLVM_FUNCTION_NAME << ": " + << PrintReg(Reg.R, TRI, Reg.S) << "\n"); + typedef MachineRegisterInfo::use_iterator use_iterator; + use_iterator I = MRI->use_begin(Reg.R), E = MRI->use_end(); + if (I == E) { + 
DEBUG(dbgs() << "Dead reg: " << PrintReg(Reg.R, TRI, Reg.S) << '\n'); + MachineInstr *DefI = MRI->getVRegDef(Reg.R); + DefI->eraseFromParent(); + return; + } + + for (; I != E; ++I) { + MachineInstr *UseI = I->getParent(); + if (isConvertibleToPredForm(UseI)) + PUsers.insert(UseI); + } +} + + +Register HexagonGenPredicate::getPredRegFor(const Register &Reg) { + // Create a predicate register for a given Reg. The newly created register + // will have its value copied from Reg, so that it can be later used as + // an operand in other instructions. + assert(TargetRegisterInfo::isVirtualRegister(Reg.R)); + RegToRegMap::iterator F = G2P.find(Reg); + if (F != G2P.end()) + return F->second; + + DEBUG(dbgs() << LLVM_FUNCTION_NAME << ": " << PrintRegister(Reg, *TRI)); + MachineInstr *DefI = MRI->getVRegDef(Reg.R); + assert(DefI); + unsigned Opc = DefI->getOpcode(); + if (Opc == Hexagon::C2_tfrpr || Opc == TargetOpcode::COPY) { + assert(DefI->getOperand(0).isDef() && DefI->getOperand(1).isUse()); + Register PR = DefI->getOperand(1); + G2P.insert(std::make_pair(Reg, PR)); + DEBUG(dbgs() << " -> " << PrintRegister(PR, *TRI) << '\n'); + return PR; + } + + MachineBasicBlock &B = *DefI->getParent(); + DebugLoc DL = DefI->getDebugLoc(); + const TargetRegisterClass *PredRC = &Hexagon::PredRegsRegClass; + unsigned NewPR = MRI->createVirtualRegister(PredRC); + + // For convertible instructions, do not modify them, so that they can + // be coverted later. Generate a copy from Reg to NewPR. + if (isConvertibleToPredForm(DefI)) { + MachineBasicBlock::iterator DefIt = DefI; + BuildMI(B, std::next(DefIt), DL, TII->get(TargetOpcode::COPY), NewPR) + .addReg(Reg.R, 0, Reg.S); + G2P.insert(std::make_pair(Reg, Register(NewPR))); + DEBUG(dbgs() << " -> !" 
<< PrintRegister(Register(NewPR), *TRI) << '\n');
+    return Register(NewPR);
+  }
+
+  llvm_unreachable("Invalid argument");
+}
+
+
+// Return true if Opc is one of the scalar compare instructions. Vector
+// compares are deliberately not listed (see the caller, isScalarPred).
+bool HexagonGenPredicate::isScalarCmp(unsigned Opc) {
+  switch (Opc) {
+    case Hexagon::C2_cmpeq:
+    case Hexagon::C2_cmpgt:
+    case Hexagon::C2_cmpgtu:
+    case Hexagon::C2_cmpeqp:
+    case Hexagon::C2_cmpgtp:
+    case Hexagon::C2_cmpgtup:
+    case Hexagon::C2_cmpeqi:
+    case Hexagon::C2_cmpgti:
+    case Hexagon::C2_cmpgtui:
+    case Hexagon::C2_cmpgei:
+    case Hexagon::C2_cmpgeui:
+    case Hexagon::C4_cmpneqi:
+    case Hexagon::C4_cmpltei:
+    case Hexagon::C4_cmplteui:
+    case Hexagon::C4_cmpneq:
+    case Hexagon::C4_cmplte:
+    case Hexagon::C4_cmplteu:
+    case Hexagon::A4_cmpbeq:
+    case Hexagon::A4_cmpbeqi:
+    case Hexagon::A4_cmpbgtu:
+    case Hexagon::A4_cmpbgtui:
+    case Hexagon::A4_cmpbgt:
+    case Hexagon::A4_cmpbgti:
+    case Hexagon::A4_cmpheq:
+    case Hexagon::A4_cmphgt:
+    case Hexagon::A4_cmphgtu:
+    case Hexagon::A4_cmpheqi:
+    case Hexagon::A4_cmphgti:
+    case Hexagon::A4_cmphgtui:
+      return true;
+  }
+  return false;
+}
+
+
+// Determine (conservatively) whether the value of the predicate register
+// PredReg originates solely from scalar compares combined with predicate
+// logical operations and copies, by walking the SSA def chains with a
+// worklist. Returns false as soon as anything unrecognized is reached.
+bool HexagonGenPredicate::isScalarPred(Register PredReg) {
+  std::queue<Register> WorkQ;
+  WorkQ.push(PredReg);
+
+  while (!WorkQ.empty()) {
+    Register PR = WorkQ.front();
+    WorkQ.pop();
+    const MachineInstr *DefI = MRI->getVRegDef(PR.R);
+    if (!DefI)
+      return false;
+    unsigned DefOpc = DefI->getOpcode();
+    switch (DefOpc) {
+      case TargetOpcode::COPY: {
+        const TargetRegisterClass *PredRC = &Hexagon::PredRegsRegClass;
+        if (MRI->getRegClass(PR.R) != PredRC)
+          return false;
+        // If it is a copy between two predicate registers, fall through.
+        // (The fall-through into the logical-op cases is intentional: a
+        // predicate-to-predicate copy preserves the property, and its
+        // source operand is queued below just like a logical operand.)
+      }
+      case Hexagon::C2_and:
+      case Hexagon::C2_andn:
+      case Hexagon::C4_and_and:
+      case Hexagon::C4_and_andn:
+      case Hexagon::C4_and_or:
+      case Hexagon::C2_or:
+      case Hexagon::C2_orn:
+      case Hexagon::C4_or_and:
+      case Hexagon::C4_or_andn:
+      case Hexagon::C4_or_or:
+      case Hexagon::C4_or_orn:
+      case Hexagon::C2_xor:
+        // Add operands to the queue.
+ for (ConstMIOperands Mo(DefI); Mo.isValid(); ++Mo) + if (Mo->isReg() && Mo->isUse()) + WorkQ.push(Register(Mo->getReg())); + break; + + // All non-vector compares are ok, everything else is bad. + default: + return isScalarCmp(DefOpc); + } + } + + return true; +} + + +bool HexagonGenPredicate::convertToPredForm(MachineInstr *MI) { + DEBUG(dbgs() << LLVM_FUNCTION_NAME << ": " << MI << " " << *MI); + + unsigned Opc = MI->getOpcode(); + assert(isConvertibleToPredForm(MI)); + unsigned NumOps = MI->getNumOperands(); + for (unsigned i = 0; i < NumOps; ++i) { + MachineOperand &MO = MI->getOperand(i); + if (!MO.isReg() || !MO.isUse()) + continue; + Register Reg(MO); + if (Reg.S && Reg.S != Hexagon::subreg_loreg) + return false; + if (!PredGPRs.count(Reg)) + return false; + } + + MachineBasicBlock &B = *MI->getParent(); + DebugLoc DL = MI->getDebugLoc(); + + unsigned NewOpc = getPredForm(Opc); + // Special case for comparisons against 0. + if (NewOpc == 0) { + switch (Opc) { + case Hexagon::C2_cmpeqi: + NewOpc = Hexagon::C2_not; + break; + case Hexagon::C4_cmpneqi: + NewOpc = TargetOpcode::COPY; + break; + default: + return false; + } + + // If it's a scalar predicate register, then all bits in it are + // the same. Otherwise, to determine whether all bits are 0 or not + // we would need to use any8. + Register PR = getPredRegFor(MI->getOperand(1)); + if (!isScalarPred(PR)) + return false; + // This will skip the immediate argument when creating the predicate + // version instruction. + NumOps = 2; + } + + // Some sanity: check that def is in operand #0. + MachineOperand &Op0 = MI->getOperand(0); + assert(Op0.isDef()); + Register OutR(Op0); + + // Don't use getPredRegFor, since it will create an association between + // the argument and a created predicate register (i.e. it will insert a + // copy if a new predicate register is created). 
+ const TargetRegisterClass *PredRC = &Hexagon::PredRegsRegClass; + Register NewPR = MRI->createVirtualRegister(PredRC); + MachineInstrBuilder MIB = BuildMI(B, MI, DL, TII->get(NewOpc), NewPR.R); + + // Add predicate counterparts of the GPRs. + for (unsigned i = 1; i < NumOps; ++i) { + Register GPR = MI->getOperand(i); + Register Pred = getPredRegFor(GPR); + MIB.addReg(Pred.R, 0, Pred.S); + } + DEBUG(dbgs() << "generated: " << *MIB); + + // Generate a copy-out: NewGPR = NewPR, and replace all uses of OutR + // with NewGPR. + const TargetRegisterClass *RC = MRI->getRegClass(OutR.R); + unsigned NewOutR = MRI->createVirtualRegister(RC); + BuildMI(B, MI, DL, TII->get(TargetOpcode::COPY), NewOutR) + .addReg(NewPR.R, 0, NewPR.S); + MRI->replaceRegWith(OutR.R, NewOutR); + MI->eraseFromParent(); + + // If the processed instruction was C2_tfrrp (i.e. Rn = Pm; Pk = Rn), + // then the output will be a predicate register. Do not visit the + // users of it. + if (!isPredReg(NewOutR)) { + Register R(NewOutR); + PredGPRs.insert(R); + processPredicateGPR(R); + } + return true; +} + + +bool HexagonGenPredicate::eliminatePredCopies(MachineFunction &MF) { + DEBUG(dbgs() << LLVM_FUNCTION_NAME << "\n"); + const TargetRegisterClass *PredRC = &Hexagon::PredRegsRegClass; + bool Changed = false; + VectOfInst Erase; + + // First, replace copies + // IntR = PredR1 + // PredR2 = IntR + // with + // PredR2 = PredR1 + // Such sequences can be generated when a copy-into-pred is generated from + // a gpr register holding a result of a convertible instruction. After + // the convertible instruction is converted, its predicate result will be + // copied back into the original gpr. 
+ + for (MachineFunction::iterator A = MF.begin(), Z = MF.end(); A != Z; ++A) { + MachineBasicBlock &B = *A; + for (MachineBasicBlock::iterator I = B.begin(), E = B.end(); I != E; ++I) { + if (I->getOpcode() != TargetOpcode::COPY) + continue; + Register DR = I->getOperand(0); + Register SR = I->getOperand(1); + if (!TargetRegisterInfo::isVirtualRegister(DR.R)) + continue; + if (!TargetRegisterInfo::isVirtualRegister(SR.R)) + continue; + if (MRI->getRegClass(DR.R) != PredRC) + continue; + if (MRI->getRegClass(SR.R) != PredRC) + continue; + assert(!DR.S && !SR.S && "Unexpected subregister"); + MRI->replaceRegWith(DR.R, SR.R); + Erase.insert(I); + Changed = true; + } + } + + for (VectOfInst::iterator I = Erase.begin(), E = Erase.end(); I != E; ++I) + (*I)->eraseFromParent(); + + return Changed; +} + + +bool HexagonGenPredicate::runOnMachineFunction(MachineFunction &MF) { + TII = MF.getSubtarget<HexagonSubtarget>().getInstrInfo(); + TRI = MF.getSubtarget<HexagonSubtarget>().getRegisterInfo(); + MRI = &MF.getRegInfo(); + PredGPRs.clear(); + PUsers.clear(); + G2P.clear(); + + bool Changed = false; + collectPredicateGPR(MF); + for (SetOfReg::iterator I = PredGPRs.begin(), E = PredGPRs.end(); I != E; ++I) + processPredicateGPR(*I); + + bool Again; + do { + Again = false; + VectOfInst Processed, Copy; + + typedef VectOfInst::iterator iterator; + Copy = PUsers; + for (iterator I = Copy.begin(), E = Copy.end(); I != E; ++I) { + MachineInstr *MI = *I; + bool Done = convertToPredForm(MI); + if (Done) { + Processed.insert(MI); + Again = true; + } + } + Changed |= Again; + + auto Done = [Processed] (MachineInstr *MI) -> bool { + return Processed.count(MI); + }; + PUsers.remove_if(Done); + } while (Again); + + Changed |= eliminatePredCopies(MF); + return Changed; +} + + +FunctionPass *llvm::createHexagonGenPredicate() { + return new HexagonGenPredicate(); +} + diff --git a/lib/Target/Hexagon/HexagonISelLowering.cpp b/lib/Target/Hexagon/HexagonISelLowering.cpp index 
6e9e69f5a2c7..c739afb70c15 100644 --- a/lib/Target/Hexagon/HexagonISelLowering.cpp +++ b/lib/Target/Hexagon/HexagonISelLowering.cpp @@ -459,6 +459,7 @@ HexagonTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, bool IsStructRet = (Outs.empty()) ? false : Outs[0].Flags.isSRet(); MachineFunction &MF = DAG.getMachineFunction(); + auto PtrVT = getPointerTy(MF.getDataLayout()); // Check for varargs. int NumNamedVarArgParams = -1; @@ -515,8 +516,8 @@ HexagonTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, SmallVector<SDValue, 8> MemOpChains; auto &HRI = *Subtarget.getRegisterInfo(); - SDValue StackPtr = DAG.getCopyFromReg(Chain, dl, HRI.getStackRegister(), - getPointerTy()); + SDValue StackPtr = + DAG.getCopyFromReg(Chain, dl, HRI.getStackRegister(), PtrVT); // Walk the register/memloc assignments, inserting copies/loads. for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { @@ -574,7 +575,7 @@ HexagonTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains); if (!isTailCall) { - SDValue C = DAG.getConstant(NumBytes, dl, getPointerTy(), true); + SDValue C = DAG.getConstant(NumBytes, dl, PtrVT, true); Chain = DAG.getCALLSEQ_START(Chain, C, dl); } @@ -615,13 +616,13 @@ HexagonTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, if (flag_aligned_memcpy) { const char *MemcpyName = "__hexagon_memcpy_likely_aligned_min32bytes_mult8bytes"; - Callee = DAG.getTargetExternalSymbol(MemcpyName, getPointerTy()); + Callee = DAG.getTargetExternalSymbol(MemcpyName, PtrVT); flag_aligned_memcpy = false; } else if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { - Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, getPointerTy()); + Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, PtrVT); } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) { - Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy()); + Callee 
= DAG.getTargetExternalSymbol(S->getSymbol(), PtrVT); } // Returns a chain & a flag for retval copy to use. @@ -811,8 +812,8 @@ LowerBR_JT(SDValue Op, SelectionDAG &DAG) const BlockAddress::get(const_cast<BasicBlock *>(MBB->getBasicBlock())); } - SDValue JumpTableBase = DAG.getNode(HexagonISD::JT, dl, - getPointerTy(), TargetJT); + SDValue JumpTableBase = DAG.getNode( + HexagonISD::JT, dl, getPointerTy(DAG.getDataLayout()), TargetJT); SDValue ShiftIndex = DAG.getNode(ISD::SHL, dl, MVT::i32, Index, DAG.getConstant(2, dl, MVT::i32)); SDValue JTAddress = DAG.getNode(ISD::ADD, dl, MVT::i32, JumpTableBase, @@ -1231,16 +1232,17 @@ SDValue HexagonTargetLowering::LowerGLOBALADDRESS(SDValue Op, const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); int64_t Offset = cast<GlobalAddressSDNode>(Op)->getOffset(); SDLoc dl(Op); - Result = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), Offset); + auto PtrVT = getPointerTy(DAG.getDataLayout()); + Result = DAG.getTargetGlobalAddress(GV, dl, PtrVT, Offset); const HexagonTargetObjectFile *TLOF = static_cast<const HexagonTargetObjectFile *>( getTargetMachine().getObjFileLowering()); if (TLOF->IsGlobalInSmallSection(GV, getTargetMachine())) { - return DAG.getNode(HexagonISD::CONST32_GP, dl, getPointerTy(), Result); + return DAG.getNode(HexagonISD::CONST32_GP, dl, PtrVT, Result); } - return DAG.getNode(HexagonISD::CONST32, dl, getPointerTy(), Result); + return DAG.getNode(HexagonISD::CONST32, dl, PtrVT, Result); } // Specifies that for loads and stores VT can be promoted to PromotedLdStVT. 
@@ -1261,7 +1263,8 @@ HexagonTargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const { const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress(); SDValue BA_SD = DAG.getTargetBlockAddress(BA, MVT::i32); SDLoc dl(Op); - return DAG.getNode(HexagonISD::CONST32_GP, dl, getPointerTy(), BA_SD); + return DAG.getNode(HexagonISD::CONST32_GP, dl, + getPointerTy(DAG.getDataLayout()), BA_SD); } //===----------------------------------------------------------------------===// @@ -2254,6 +2257,7 @@ HexagonTargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const { SDValue Offset = Op.getOperand(1); SDValue Handler = Op.getOperand(2); SDLoc dl(Op); + auto PtrVT = getPointerTy(DAG.getDataLayout()); // Mark function as containing a call to EH_RETURN. HexagonMachineFunctionInfo *FuncInfo = @@ -2262,9 +2266,9 @@ HexagonTargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const { unsigned OffsetReg = Hexagon::R28; - SDValue StoreAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(), - DAG.getRegister(Hexagon::R30, getPointerTy()), - DAG.getIntPtrConstant(4, dl)); + SDValue StoreAddr = + DAG.getNode(ISD::ADD, dl, PtrVT, DAG.getRegister(Hexagon::R30, PtrVT), + DAG.getIntPtrConstant(4, dl)); Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, MachinePointerInfo(), false, false, 0); Chain = DAG.getCopyToReg(Chain, dl, OffsetReg, Offset); @@ -2338,8 +2342,7 @@ HexagonTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, std::pair<unsigned, const TargetRegisterClass *> HexagonTargetLowering::getRegForInlineAsmConstraint( - const TargetRegisterInfo *TRI, const std::string &Constraint, - MVT VT) const { + const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const { if (Constraint.size() == 1) { switch (Constraint[0]) { case 'r': // R0-R31 @@ -2372,8 +2375,8 @@ bool HexagonTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const { /// isLegalAddressingMode - Return true if the addressing mode represented by /// AM is legal for 
this target, for a load/store of the specified type. -bool HexagonTargetLowering::isLegalAddressingMode(const AddrMode &AM, - Type *Ty, +bool HexagonTargetLowering::isLegalAddressingMode(const DataLayout &DL, + const AddrMode &AM, Type *Ty, unsigned AS) const { // Allows a signed-extended 11-bit immediate field. if (AM.BaseOffs <= -(1LL << 13) || AM.BaseOffs >= (1LL << 13)-1) @@ -2463,3 +2466,45 @@ bool llvm::isPositiveHalfWord(SDNode *N) { return true; } } + +Value *HexagonTargetLowering::emitLoadLinked(IRBuilder<> &Builder, Value *Addr, + AtomicOrdering Ord) const { + BasicBlock *BB = Builder.GetInsertBlock(); + Module *M = BB->getParent()->getParent(); + Type *Ty = cast<PointerType>(Addr->getType())->getElementType(); + unsigned SZ = Ty->getPrimitiveSizeInBits(); + assert((SZ == 32 || SZ == 64) && "Only 32/64-bit atomic loads supported"); + Intrinsic::ID IntID = (SZ == 32) ? Intrinsic::hexagon_L2_loadw_locked + : Intrinsic::hexagon_L4_loadd_locked; + Value *Fn = Intrinsic::getDeclaration(M, IntID); + return Builder.CreateCall(Fn, Addr, "larx"); +} + +/// Perform a store-conditional operation to Addr. Return the status of the +/// store. This should be 0 if the store succeeded, non-zero otherwise. +Value *HexagonTargetLowering::emitStoreConditional(IRBuilder<> &Builder, + Value *Val, Value *Addr, AtomicOrdering Ord) const { + BasicBlock *BB = Builder.GetInsertBlock(); + Module *M = BB->getParent()->getParent(); + Type *Ty = Val->getType(); + unsigned SZ = Ty->getPrimitiveSizeInBits(); + assert((SZ == 32 || SZ == 64) && "Only 32/64-bit atomic stores supported"); + Intrinsic::ID IntID = (SZ == 32) ? 
Intrinsic::hexagon_S2_storew_locked + : Intrinsic::hexagon_S4_stored_locked; + Value *Fn = Intrinsic::getDeclaration(M, IntID); + Value *Call = Builder.CreateCall(Fn, {Addr, Val}, "stcx"); + Value *Cmp = Builder.CreateICmpEQ(Call, Builder.getInt32(0), ""); + Value *Ext = Builder.CreateZExt(Cmp, Type::getInt32Ty(M->getContext())); + return Ext; +} + +bool HexagonTargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const { + // Do not expand loads and stores that don't exceed 64 bits. + return LI->getType()->getPrimitiveSizeInBits() > 64; +} + +bool HexagonTargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const { + // Do not expand loads and stores that don't exceed 64 bits. + return SI->getValueOperand()->getType()->getPrimitiveSizeInBits() > 64; +} + diff --git a/lib/Target/Hexagon/HexagonISelLowering.h b/lib/Target/Hexagon/HexagonISelLowering.h index b80e8477eb7b..2642abffaddd 100644 --- a/lib/Target/Hexagon/HexagonISelLowering.h +++ b/lib/Target/Hexagon/HexagonISelLowering.h @@ -165,7 +165,8 @@ bool isPositiveHalfWord(SDNode *N); SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const; SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const; - EVT getSetCCResultType(LLVMContext &C, EVT VT) const override { + EVT getSetCCResultType(const DataLayout &, LLVMContext &C, + EVT VT) const override { if (!VT.isVector()) return MVT::i1; else @@ -179,11 +180,10 @@ bool isPositiveHalfWord(SDNode *N); std::pair<unsigned, const TargetRegisterClass *> getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, - const std::string &Constraint, - MVT VT) const override; + StringRef Constraint, MVT VT) const override; - unsigned getInlineAsmMemConstraint( - const std::string &ConstraintCode) const override { + unsigned + getInlineAsmMemConstraint(StringRef ConstraintCode) const override { if (ConstraintCode == "o") return InlineAsm::Constraint_o; else if (ConstraintCode == "v") @@ -198,8 +198,8 @@ bool isPositiveHalfWord(SDNode *N); /// The type may be 
VoidTy, in which case only return true if the addressing /// mode is legal for a load/store of any legal type. /// TODO: Handle pre/postinc as well. - bool isLegalAddressingMode(const AddrMode &AM, Type *Ty, - unsigned AS) const override; + bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, + Type *Ty, unsigned AS) const override; bool isFPImmLegal(const APFloat &Imm, EVT VT) const override; /// isLegalICmpImmediate - Return true if the specified immediate is legal @@ -207,6 +207,21 @@ bool isPositiveHalfWord(SDNode *N); /// compare a register against the immediate without having to materialize /// the immediate into a register. bool isLegalICmpImmediate(int64_t Imm) const override; + + // Handling of atomic RMW instructions. + bool hasLoadLinkedStoreConditional() const override { + return true; + } + Value *emitLoadLinked(IRBuilder<> &Builder, Value *Addr, + AtomicOrdering Ord) const override; + Value *emitStoreConditional(IRBuilder<> &Builder, Value *Val, + Value *Addr, AtomicOrdering Ord) const override; + bool shouldExpandAtomicLoadInIR(LoadInst *LI) const override; + bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override; + AtomicRMWExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) + const override { + return AtomicRMWExpansionKind::LLSC; + } }; } // end namespace llvm diff --git a/lib/Target/Hexagon/HexagonRegisterInfo.cpp b/lib/Target/Hexagon/HexagonRegisterInfo.cpp index 8f255a08f534..f6bb4a045438 100644 --- a/lib/Target/Hexagon/HexagonRegisterInfo.cpp +++ b/lib/Target/Hexagon/HexagonRegisterInfo.cpp @@ -221,7 +221,7 @@ unsigned HexagonRegisterInfo::getRARegister() const { unsigned HexagonRegisterInfo::getFrameRegister(const MachineFunction &MF) const { - const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering(); + const HexagonFrameLowering *TFI = getFrameLowering(MF); if (TFI->hasFP(MF)) return Hexagon::R30; return Hexagon::R29; @@ -240,7 +240,8 @@ unsigned HexagonRegisterInfo::getStackRegister() const { bool 
HexagonRegisterInfo::useFPForScavengingIndex(const MachineFunction &MF) const { - return MF.getSubtarget().getFrameLowering()->hasFP(MF); + const HexagonFrameLowering *TFI = getFrameLowering(MF); + return TFI->hasFP(MF); } diff --git a/lib/Target/Hexagon/HexagonSelectionDAGInfo.cpp b/lib/Target/Hexagon/HexagonSelectionDAGInfo.cpp index b5db997eb1b8..276cc69eed0f 100644 --- a/lib/Target/Hexagon/HexagonSelectionDAGInfo.cpp +++ b/lib/Target/Hexagon/HexagonSelectionDAGInfo.cpp @@ -18,12 +18,6 @@ using namespace llvm; bool llvm::flag_aligned_memcpy; -HexagonSelectionDAGInfo::HexagonSelectionDAGInfo(const DataLayout &DL) - : TargetSelectionDAGInfo(&DL) {} - -HexagonSelectionDAGInfo::~HexagonSelectionDAGInfo() { -} - SDValue HexagonSelectionDAGInfo:: EmitTargetCodeForMemcpy(SelectionDAG &DAG, SDLoc dl, SDValue Chain, diff --git a/lib/Target/Hexagon/HexagonSelectionDAGInfo.h b/lib/Target/Hexagon/HexagonSelectionDAGInfo.h index 8ac2e43f9294..80ac5d7bd9e2 100644 --- a/lib/Target/Hexagon/HexagonSelectionDAGInfo.h +++ b/lib/Target/Hexagon/HexagonSelectionDAGInfo.h @@ -20,8 +20,6 @@ namespace llvm { class HexagonSelectionDAGInfo : public TargetSelectionDAGInfo { public: - explicit HexagonSelectionDAGInfo(const DataLayout &DL); - ~HexagonSelectionDAGInfo(); SDValue EmitTargetCodeForMemcpy(SelectionDAG &DAG, SDLoc dl, SDValue Chain, diff --git a/lib/Target/Hexagon/HexagonSubtarget.cpp b/lib/Target/Hexagon/HexagonSubtarget.cpp index fe6c4f4298b5..cd482b3e3af1 100644 --- a/lib/Target/Hexagon/HexagonSubtarget.cpp +++ b/lib/Target/Hexagon/HexagonSubtarget.cpp @@ -74,7 +74,7 @@ HexagonSubtarget::HexagonSubtarget(const Triple &TT, StringRef CPU, StringRef FS, const TargetMachine &TM) : HexagonGenSubtargetInfo(TT, CPU, FS), CPUString(CPU), InstrInfo(initializeSubtargetDependencies(CPU, FS)), TLInfo(TM, *this), - TSInfo(*TM.getDataLayout()), FrameLowering() { + FrameLowering() { // Initialize scheduling itinerary for the specified CPU. 
InstrItins = getInstrItineraryForCPU(CPUString); diff --git a/lib/Target/Hexagon/HexagonTargetMachine.cpp b/lib/Target/Hexagon/HexagonTargetMachine.cpp index a173a8087832..b50442969a29 100644 --- a/lib/Target/Hexagon/HexagonTargetMachine.cpp +++ b/lib/Target/Hexagon/HexagonTargetMachine.cpp @@ -37,6 +37,18 @@ static cl::opt<bool> EnableExpandCondsets("hexagon-expand-condsets", cl::init(true), cl::Hidden, cl::ZeroOrMore, cl::desc("Early expansion of MUX")); +static cl::opt<bool> EnableGenInsert("hexagon-insert", cl::init(true), + cl::Hidden, cl::desc("Generate \"insert\" instructions")); + +static cl::opt<bool> EnableCommGEP("hexagon-commgep", cl::init(true), + cl::Hidden, cl::ZeroOrMore, cl::desc("Enable commoning of GEP instructions")); + +static cl::opt<bool> EnableGenExtract("hexagon-extract", cl::init(true), + cl::Hidden, cl::desc("Generate \"extract\" instructions")); + +static cl::opt<bool> EnableGenPred("hexagon-gen-pred", cl::init(true), + cl::Hidden, cl::desc("Enable conversion of arithmetic operations to " + "predicate instructions")); /// HexagonTargetMachineModule - Note that this is used on hosts that /// cannot link in a library unless there are references into the @@ -60,23 +72,23 @@ SchedCustomRegistry("hexagon", "Run Hexagon's custom scheduler", createVLIWMachineSched); namespace llvm { - FunctionPass *createHexagonExpandCondsets(); - FunctionPass *createHexagonISelDag(HexagonTargetMachine &TM, - CodeGenOpt::Level OptLevel); - FunctionPass *createHexagonDelaySlotFillerPass(const TargetMachine &TM); - FunctionPass *createHexagonFPMoverPass(const TargetMachine &TM); - FunctionPass *createHexagonRemoveExtendArgs(const HexagonTargetMachine &TM); FunctionPass *createHexagonCFGOptimizer(); - - FunctionPass *createHexagonSplitConst32AndConst64(); + FunctionPass *createHexagonCommonGEP(); + FunctionPass *createHexagonCopyToCombine(); + FunctionPass *createHexagonExpandCondsets(); FunctionPass *createHexagonExpandPredSpillCode(); - FunctionPass 
*createHexagonHardwareLoops(); - FunctionPass *createHexagonPeephole(); FunctionPass *createHexagonFixupHwLoops(); + FunctionPass *createHexagonGenExtract(); + FunctionPass *createHexagonGenInsert(); + FunctionPass *createHexagonGenPredicate(); + FunctionPass *createHexagonHardwareLoops(); + FunctionPass *createHexagonISelDag(HexagonTargetMachine &TM, + CodeGenOpt::Level OptLevel); FunctionPass *createHexagonNewValueJump(); - FunctionPass *createHexagonCopyToCombine(); FunctionPass *createHexagonPacketizer(); - FunctionPass *createHexagonNewValueJump(); + FunctionPass *createHexagonPeephole(); + FunctionPass *createHexagonRemoveExtendArgs(const HexagonTargetMachine &TM); + FunctionPass *createHexagonSplitConst32AndConst64(); } // end namespace llvm; /// HexagonTargetMachine ctor - Create an ILP32 architecture model. @@ -122,6 +134,7 @@ public: return createVLIWMachineSched(C); } + void addIRPasses() override; bool addInstSelector() override; void addPreRegAlloc() override; void addPostRegAlloc() override; @@ -134,6 +147,20 @@ TargetPassConfig *HexagonTargetMachine::createPassConfig(PassManagerBase &PM) { return new HexagonPassConfig(this, PM); } +void HexagonPassConfig::addIRPasses() { + TargetPassConfig::addIRPasses(); + bool NoOpt = (getOptLevel() == CodeGenOpt::None); + + addPass(createAtomicExpandPass(TM)); + if (!NoOpt) { + if (EnableCommGEP) + addPass(createHexagonCommonGEP()); + // Replace certain combinations of shifts and ands with extracts. + if (EnableGenExtract) + addPass(createHexagonGenExtract()); + } +} + bool HexagonPassConfig::addInstSelector() { HexagonTargetMachine &TM = getHexagonTargetMachine(); bool NoOpt = (getOptLevel() == CodeGenOpt::None); @@ -144,8 +171,13 @@ bool HexagonPassConfig::addInstSelector() { addPass(createHexagonISelDag(TM, getOptLevel())); if (!NoOpt) { + // Create logical operations on predicate registers. 
+ if (EnableGenPred) + addPass(createHexagonGenPredicate(), false); addPass(createHexagonPeephole()); printAndVerify("After hexagon peephole pass"); + if (EnableGenInsert) + addPass(createHexagonGenInsert(), false); } return false; diff --git a/lib/Target/Hexagon/LLVMBuild.txt b/lib/Target/Hexagon/LLVMBuild.txt index 8259055b3f41..9d288af0214a 100644 --- a/lib/Target/Hexagon/LLVMBuild.txt +++ b/lib/Target/Hexagon/LLVMBuild.txt @@ -39,4 +39,5 @@ required_libraries = SelectionDAG Support Target + TransformUtils add_to_library_groups = Hexagon diff --git a/lib/Target/Hexagon/MCTargetDesc/HexagonMCTargetDesc.cpp b/lib/Target/Hexagon/MCTargetDesc/HexagonMCTargetDesc.cpp index 83ce0abd835e..53305d85fd80 100644 --- a/lib/Target/Hexagon/MCTargetDesc/HexagonMCTargetDesc.cpp +++ b/lib/Target/Hexagon/MCTargetDesc/HexagonMCTargetDesc.cpp @@ -46,7 +46,7 @@ MCInstrInfo *llvm::createHexagonMCInstrInfo() { return X; } -static MCRegisterInfo *createHexagonMCRegisterInfo(StringRef TT) { +static MCRegisterInfo *createHexagonMCRegisterInfo(const Triple &TT) { MCRegisterInfo *X = new MCRegisterInfo(); InitHexagonMCRegisterInfo(X, Hexagon::R0); return X; @@ -54,9 +54,7 @@ static MCRegisterInfo *createHexagonMCRegisterInfo(StringRef TT) { static MCSubtargetInfo * createHexagonMCSubtargetInfo(const Triple &TT, StringRef CPU, StringRef FS) { - MCSubtargetInfo *X = new MCSubtargetInfo(); - InitHexagonMCSubtargetInfo(X, TT, CPU, FS); - return X; + return createHexagonMCSubtargetInfoImpl(TT, CPU, FS); } namespace { @@ -151,7 +149,8 @@ static MCAsmInfo *createHexagonMCAsmInfo(const MCRegisterInfo &MRI, return MAI; } -static MCCodeGenInfo *createHexagonMCCodeGenInfo(StringRef TT, Reloc::Model RM, +static MCCodeGenInfo *createHexagonMCCodeGenInfo(const Triple &TT, + Reloc::Model RM, CodeModel::Model CM, CodeGenOpt::Level OL) { MCCodeGenInfo *X = new MCCodeGenInfo(); |