| | | |
|---|---|---|
| author | Ed Schouten <ed@FreeBSD.org> | 2009-06-02 17:52:33 +0000 |
| committer | Ed Schouten <ed@FreeBSD.org> | 2009-06-02 17:52:33 +0000 |
| commit | 009b1c42aa6266385f2c37e227516b24077e6dd7 (patch) | |
| tree | 64ba909838c23261cace781ece27d106134ea451 /lib/CodeGen/LatencyPriorityQueue.cpp | |
Diffstat (limited to 'lib/CodeGen/LatencyPriorityQueue.cpp')

| | | |
|---|---|---|
| -rw-r--r-- | lib/CodeGen/LatencyPriorityQueue.cpp | 114 |

1 file changed, 114 insertions, 0 deletions
```diff
diff --git a/lib/CodeGen/LatencyPriorityQueue.cpp b/lib/CodeGen/LatencyPriorityQueue.cpp
new file mode 100644
index 000000000000..2e7b89c494f6
--- /dev/null
+++ b/lib/CodeGen/LatencyPriorityQueue.cpp
@@ -0,0 +1,114 @@
+//===---- LatencyPriorityQueue.cpp - A latency-oriented priority queue ----===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the LatencyPriorityQueue class, which is a
+// SchedulingPriorityQueue that schedules using latency information to
+// reduce the length of the critical path through the basic block.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "scheduler"
+#include "llvm/CodeGen/LatencyPriorityQueue.h"
+#include "llvm/Support/Debug.h"
+using namespace llvm;
+
+bool latency_sort::operator()(const SUnit *LHS, const SUnit *RHS) const {
+  // The isScheduleHigh flag allows nodes with wraparound dependencies that
+  // cannot easily be modeled as edges with latencies to be scheduled as
+  // soon as possible in a top-down schedule.
+  if (LHS->isScheduleHigh && !RHS->isScheduleHigh)
+    return false;
+  if (!LHS->isScheduleHigh && RHS->isScheduleHigh)
+    return true;
+
+  unsigned LHSNum = LHS->NodeNum;
+  unsigned RHSNum = RHS->NodeNum;
+
+  // The most important heuristic is scheduling the critical path.
+  unsigned LHSLatency = PQ->getLatency(LHSNum);
+  unsigned RHSLatency = PQ->getLatency(RHSNum);
+  if (LHSLatency < RHSLatency) return true;
+  if (LHSLatency > RHSLatency) return false;
+
+  // After that, if two nodes have identical latencies, look to see if one will
+  // unblock more other nodes than the other.
+  unsigned LHSBlocked = PQ->getNumSolelyBlockNodes(LHSNum);
+  unsigned RHSBlocked = PQ->getNumSolelyBlockNodes(RHSNum);
+  if (LHSBlocked < RHSBlocked) return true;
+  if (LHSBlocked > RHSBlocked) return false;
+
+  // Finally, just to provide a stable ordering, use the node number as a
+  // deciding factor.
+  return LHSNum < RHSNum;
+}
+
+
+/// getSingleUnscheduledPred - If there is exactly one unscheduled predecessor
+/// of SU, return it, otherwise return null.
+SUnit *LatencyPriorityQueue::getSingleUnscheduledPred(SUnit *SU) {
+  SUnit *OnlyAvailablePred = 0;
+  for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
+       I != E; ++I) {
+    SUnit &Pred = *I->getSUnit();
+    if (!Pred.isScheduled) {
+      // We found an available, but not scheduled, predecessor.  If it's the
+      // only one we have found, keep track of it... otherwise give up.
+      if (OnlyAvailablePred && OnlyAvailablePred != &Pred)
+        return 0;
+      OnlyAvailablePred = &Pred;
+    }
+  }
+
+  return OnlyAvailablePred;
+}
+
+void LatencyPriorityQueue::push_impl(SUnit *SU) {
+  // Look at all of the successors of this node.  Count the number of nodes that
+  // this node is the sole unscheduled node for.
+  unsigned NumNodesBlocking = 0;
+  for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
+       I != E; ++I)
+    if (getSingleUnscheduledPred(I->getSUnit()) == SU)
+      ++NumNodesBlocking;
+  NumNodesSolelyBlocking[SU->NodeNum] = NumNodesBlocking;
+
+  Queue.push(SU);
+}
+
+
+// ScheduledNode - As nodes are scheduled, we look to see if there are any
+// successor nodes that have a single unscheduled predecessor.  If so, that
+// single predecessor has a higher priority, since scheduling it will make
+// the node available.
+void LatencyPriorityQueue::ScheduledNode(SUnit *SU) {
+  for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
+       I != E; ++I)
+    AdjustPriorityOfUnscheduledPreds(I->getSUnit());
+}
+
+/// AdjustPriorityOfUnscheduledPreds - One of the predecessors of SU was just
+/// scheduled.  If SU is not itself available, then there is at least one
+/// predecessor node that has not been scheduled yet.  If SU has exactly ONE
+/// unscheduled predecessor, we want to increase its priority: it getting
+/// scheduled will make this node available, so it is better than some other
+/// node of the same priority that will not make a node available.
+void LatencyPriorityQueue::AdjustPriorityOfUnscheduledPreds(SUnit *SU) {
+  if (SU->isAvailable) return;  // All preds scheduled.
+
+  SUnit *OnlyAvailablePred = getSingleUnscheduledPred(SU);
+  if (OnlyAvailablePred == 0 || !OnlyAvailablePred->isAvailable) return;
+
+  // Okay, we found a single predecessor that is available, but not scheduled.
+  // Since it is available, it must be in the priority queue.  First remove it.
+  remove(OnlyAvailablePred);
+
+  // Reinsert the node into the priority queue, which recomputes its
+  // NumNodesSolelyBlocking value.
+  push(OnlyAvailablePred);
+}
```
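
In the comparator above, returning true means LHS ranks below RHS, so the node with the longer critical-path latency is preferred, ties go to the node that solely blocks more other nodes, and node numbers give a stable final order. As a rough illustration of that tie-break structure (not LLVM's actual classes), here is a minimal, self-contained sketch using a hypothetical `Node` struct with a plain `std::priority_queue`, assuming the usual STL convention that the "largest" element is popped first:

```cpp
#include <cstdio>
#include <queue>
#include <vector>

// Hypothetical stand-in for SUnit, holding only what the comparator reads.
struct Node {
  unsigned Num;      // stable node number (final tie-break)
  unsigned Latency;  // critical-path latency through this node
  unsigned Blocked;  // how many nodes this one solely blocks
};

// Same shape as latency_sort::operator(): true means LHS has lower priority.
struct LatencyLess {
  bool operator()(const Node &LHS, const Node &RHS) const {
    if (LHS.Latency != RHS.Latency)
      return LHS.Latency < RHS.Latency;  // longer critical path wins
    if (LHS.Blocked != RHS.Blocked)
      return LHS.Blocked < RHS.Blocked;  // unblocking more nodes wins
    return LHS.Num < RHS.Num;            // stable order by node number
  }
};

int main() {
  std::priority_queue<Node, std::vector<Node>, LatencyLess> Q;
  Q.push({0, 4, 1});
  Q.push({1, 7, 0});  // longest latency: popped first
  Q.push({2, 4, 3});  // same latency as node 0, but blocks more nodes
  while (!Q.empty()) {
    std::printf("node %u (latency %u, blocks %u)\n",
                Q.top().Num, Q.top().Latency, Q.top().Blocked);
    Q.pop();
  }
  return 0;  // prints nodes 1, 2, 0 in that order
}
```

This also shows why `AdjustPriorityOfUnscheduledPreds` removes and re-pushes a node: a `std::priority_queue`-style container only reorders an element when it is reinserted, so updating `NumNodesSolelyBlocking` alone would not change its position.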
