author     Dimitry Andric <dim@FreeBSD.org>   2017-05-17 20:22:39 +0000
committer  Dimitry Andric <dim@FreeBSD.org>   2017-05-17 20:22:39 +0000
commit     7af96fb3afd6725a2824a0a5ca5dad34e5e0b056 (patch)
tree       6661ffbabf869009597684462f5a3df3beccc952 /utils
parent     6b3f41ed88e8e440e11a4fbf20b6600529f80049 (diff)
Diffstat (limited to 'utils')
-rw-r--r--   utils/TableGen/AsmMatcherEmitter.cpp |   4
-rw-r--r--   utils/TableGen/GlobalISelEmitter.cpp | 124
-rwxr-xr-x   utils/lit/lit/main.py                |  12
-rw-r--r--   utils/lit/lit/run.py                 | 103
4 files changed, 79 insertions, 164 deletions
diff --git a/utils/TableGen/AsmMatcherEmitter.cpp b/utils/TableGen/AsmMatcherEmitter.cpp
index a5c2ea6c7acab..264175ae96778 100644
--- a/utils/TableGen/AsmMatcherEmitter.cpp
+++ b/utils/TableGen/AsmMatcherEmitter.cpp
@@ -763,7 +763,8 @@ public:
} // end anonymous namespace
-void MatchableInfo::dump() const {
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+LLVM_DUMP_METHOD void MatchableInfo::dump() const {
errs() << TheDef->getName() << " -- " << "flattened:\"" << AsmString <<"\"\n";
for (unsigned i = 0, e = AsmOperands.size(); i != e; ++i) {
@@ -772,6 +773,7 @@ void MatchableInfo::dump() const {
errs() << '\"' << Op.Token << "\"\n";
}
}
+#endif
static std::pair<StringRef, StringRef>
parseTwoOperandConstraint(StringRef S, ArrayRef<SMLoc> Loc) {
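The AsmMatcherEmitter hunk above applies the usual LLVM debug-dump idiom: the body of dump() is compiled only when assertions are enabled or LLVM_ENABLE_DUMP is defined, and LLVM_DUMP_METHOD marks it noinline/used so it stays callable from a debugger even if nothing in the build references it. A minimal sketch of the same idiom, with a made-up class purely for illustration:

    #include "llvm/Support/Compiler.h"
    #include "llvm/Support/raw_ostream.h"

    namespace {
    struct Example {
      int Value = 0;
      void dump() const; // declaration stays unconditional
    };
    } // end anonymous namespace

    #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
    // Definition is compiled out of release builds unless LLVM_ENABLE_DUMP is set.
    LLVM_DUMP_METHOD void Example::dump() const {
      llvm::errs() << "Example(" << Value << ")\n";
    }
    #endif
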
diff --git a/utils/TableGen/GlobalISelEmitter.cpp b/utils/TableGen/GlobalISelEmitter.cpp
index 65a1ea2f0f21d..dc022fe1ceb25 100644
--- a/utils/TableGen/GlobalISelEmitter.cpp
+++ b/utils/TableGen/GlobalISelEmitter.cpp
@@ -775,6 +775,8 @@ public:
void emitCxxCaptureStmts(raw_ostream &OS, RuleMatcher &Rule,
StringRef OperandExpr) const override {
OS << "if (!" << OperandExpr + ".isReg())\n"
+ << " return false;\n"
+ << "if (TRI.isPhysicalRegister(" << OperandExpr + ".getReg()))\n"
<< " return false;\n";
std::string InsnVarName = Rule.defineInsnVar(
OS, *InsnMatcher,
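The two added stream lines change what the emitted capture code checks: besides rejecting non-register operands, the generated selector now also bails out when the matched operand is a physical register. With OperandExpr expanded, the printed C++ ends up shaped roughly like the following (the concrete operand expression MI0.getOperand(1) is only an illustration, not the emitter's literal output for any particular rule):

    // Illustrative shape of the emitted check for one captured operand:
    if (!MI0.getOperand(1).isReg())
      return false;
    if (TRI.isPhysicalRegister(MI0.getOperand(1).getReg()))
      return false;
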
@@ -1242,6 +1244,8 @@ private:
Error importExplicitUseRenderer(BuildMIAction &DstMIBuilder,
TreePatternNode *DstChild,
const InstructionMatcher &InsnMatcher) const;
+ Error importDefaultOperandRenderers(BuildMIAction &DstMIBuilder,
+ DagInit *DefaultOps) const;
Error
importImplicitDefRenderers(BuildMIAction &DstMIBuilder,
const std::vector<Record *> &ImplicitDefs) const;
@@ -1321,8 +1325,27 @@ Expected<InstructionMatcher &> GlobalISelEmitter::createAndImportSelDAGMatcher(
// Match the used operands (i.e. the children of the operator).
for (unsigned i = 0, e = Src->getNumChildren(); i != e; ++i) {
- if (auto Error = importChildMatcher(InsnMatcher, Src->getChild(i), OpIdx++,
- TempOpIdx))
+ TreePatternNode *SrcChild = Src->getChild(i);
+
+ // For G_INTRINSIC, the operand immediately following the defs is an
+ // intrinsic ID.
+ if (SrcGI.TheDef->getName() == "G_INTRINSIC" && i == 0) {
+ if (!SrcChild->isLeaf())
+ return failedImport("Expected IntInit containing intrinsic ID");
+
+ if (IntInit *SrcChildIntInit =
+ dyn_cast<IntInit>(SrcChild->getLeafValue())) {
+ OperandMatcher &OM =
+ InsnMatcher.addOperand(OpIdx++, SrcChild->getName(), TempOpIdx);
+ OM.addPredicate<IntOperandMatcher>(SrcChildIntInit->getValue());
+ continue;
+ }
+
+ return failedImport("Expected IntInit containing instrinsic ID)");
+ }
+
+ if (auto Error =
+ importChildMatcher(InsnMatcher, SrcChild, OpIdx++, TempOpIdx))
return std::move(Error);
}
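The special case above exists because, in generic MIR, G_INTRINSIC carries the intrinsic ID as the first use operand after its defs (e.g. %r = G_INTRINSIC intrinsic(@llvm.foo), %x) rather than as a register, so the importer records it as an integer predicate instead of recursing into importChildMatcher. For illustration only, and assuming a single def, the kind of check a selector needs for that operand looks like the sketch below; the operand index and @llvm.foo are assumptions, not the emitter's actual output:

    // Operand 0 is the def; operand 1 holds the intrinsic ID, which must be
    // compared against a constant rather than register-matched.
    const MachineOperand &IDOp = I.getOperand(1);
    if (!IDOp.isIntrinsicID() || IDOp.getIntrinsicID() != Intrinsic::foo)
      return false;
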
@@ -1357,7 +1380,7 @@ Error GlobalISelEmitter::importChildMatcher(InstructionMatcher &InsnMatcher,
auto OpTyOrNone = MVTToLLT(ChildTypes.front().getConcrete());
if (!OpTyOrNone)
- return failedImport("Src operand has an unsupported type");
+ return failedImport("Src operand has an unsupported type (" + to_string(*SrcChild) + ")");
OM.addPredicate<LLTOperandMatcher>(*OpTyOrNone);
// Check for nested instructions.
@@ -1509,59 +1532,23 @@ Expected<BuildMIAction &> GlobalISelEmitter::createAndImportInstructionRenderer(
DstMIBuilder.addRenderer<CopyRenderer>(InsnMatcher, DstIOperand.Name);
}
- // Figure out which operands need defaults inserted. Operands that subclass
- // OperandWithDefaultOps are considered from left to right until we have
- // enough operands to render the instruction.
- SmallSet<unsigned, 2> DefaultOperands;
- unsigned DstINumUses = DstI.Operands.size() - DstI.Operands.NumDefs;
- unsigned NumDefaultOperands = 0;
- for (unsigned I = 0; I < DstINumUses &&
- DstINumUses > Dst->getNumChildren() + NumDefaultOperands;
- ++I) {
- const auto &DstIOperand = DstI.Operands[DstI.Operands.NumDefs + I];
- if (DstIOperand.Rec->isSubClassOf("OperandWithDefaultOps")) {
- DefaultOperands.insert(I);
- NumDefaultOperands +=
- DstIOperand.Rec->getValueAsDag("DefaultOps")->getNumArgs();
- }
- }
- if (DstINumUses > Dst->getNumChildren() + DefaultOperands.size())
- return failedImport("Insufficient operands supplied and default ops "
- "couldn't make up the shortfall");
- if (DstINumUses < Dst->getNumChildren() + DefaultOperands.size())
- return failedImport("Too many operands supplied");
-
// Render the explicit uses.
unsigned Child = 0;
+ unsigned DstINumUses = DstI.Operands.size() - DstI.Operands.NumDefs;
+ unsigned NumDefaultOps = 0;
for (unsigned I = 0; I != DstINumUses; ++I) {
- // If we need to insert default ops here, then do so.
- if (DefaultOperands.count(I)) {
- const auto &DstIOperand = DstI.Operands[DstI.Operands.NumDefs + I];
+ const auto &DstIOperand = DstI.Operands[DstI.Operands.NumDefs + I];
+ // If the operand has default values, introduce them now.
+ // FIXME: Until we have a decent test case that dictates we should do
+ // otherwise, we're going to assume that operands with default values cannot
+ // be specified in the patterns. Therefore, adding them will not cause us to
+ // end up with too many rendered operands.
+ if (DstIOperand.Rec->isSubClassOf("OperandWithDefaultOps")) {
DagInit *DefaultOps = DstIOperand.Rec->getValueAsDag("DefaultOps");
- for (const auto *DefaultOp : DefaultOps->args()) {
- // Look through ValueType operators.
- if (const DagInit *DefaultDagOp = dyn_cast<DagInit>(DefaultOp)) {
- if (const DefInit *DefaultDagOperator =
- dyn_cast<DefInit>(DefaultDagOp->getOperator())) {
- if (DefaultDagOperator->getDef()->isSubClassOf("ValueType"))
- DefaultOp = DefaultDagOp->getArg(0);
- }
- }
-
- if (const DefInit *DefaultDefOp = dyn_cast<DefInit>(DefaultOp)) {
- DstMIBuilder.addRenderer<AddRegisterRenderer>(DefaultDefOp->getDef());
- continue;
- }
-
- if (const IntInit *DefaultIntOp = dyn_cast<IntInit>(DefaultOp)) {
- DstMIBuilder.addRenderer<ImmRenderer>(DefaultIntOp->getValue());
- continue;
- }
-
- return failedImport("Could not add default op");
- }
-
+ if (auto Error = importDefaultOperandRenderers(DstMIBuilder, DefaultOps))
+ return std::move(Error);
+ ++NumDefaultOps;
continue;
}
@@ -1571,9 +1558,44 @@ Expected<BuildMIAction &> GlobalISelEmitter::createAndImportInstructionRenderer(
++Child;
}
+ if (NumDefaultOps + Dst->getNumChildren() != DstINumUses)
+ return failedImport("Expected " + llvm::to_string(DstINumUses) +
+ " used operands but found " +
+ llvm::to_string(Dst->getNumChildren()) +
+ " explicit ones and " + llvm::to_string(NumDefaultOps) +
+ " default ones");
+
return DstMIBuilder;
}
+Error GlobalISelEmitter::importDefaultOperandRenderers(
+ BuildMIAction &DstMIBuilder, DagInit *DefaultOps) const {
+ for (const auto *DefaultOp : DefaultOps->args()) {
+ // Look through ValueType operators.
+ if (const DagInit *DefaultDagOp = dyn_cast<DagInit>(DefaultOp)) {
+ if (const DefInit *DefaultDagOperator =
+ dyn_cast<DefInit>(DefaultDagOp->getOperator())) {
+ if (DefaultDagOperator->getDef()->isSubClassOf("ValueType"))
+ DefaultOp = DefaultDagOp->getArg(0);
+ }
+ }
+
+ if (const DefInit *DefaultDefOp = dyn_cast<DefInit>(DefaultOp)) {
+ DstMIBuilder.addRenderer<AddRegisterRenderer>(DefaultDefOp->getDef());
+ continue;
+ }
+
+ if (const IntInit *DefaultIntOp = dyn_cast<IntInit>(DefaultOp)) {
+ DstMIBuilder.addRenderer<ImmRenderer>(DefaultIntOp->getValue());
+ continue;
+ }
+
+ return failedImport("Could not add default op");
+ }
+
+ return Error::success();
+}
+
Error GlobalISelEmitter::importImplicitDefRenderers(
BuildMIAction &DstMIBuilder,
const std::vector<Record *> &ImplicitDefs) const {
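The new importDefaultOperandRenderers() keeps the old rendering rules but moves them out of line: each argument of a DefaultOps dag is first unwrapped from any ValueType operator such as (i32 ...), then a DefInit becomes an AddRegisterRenderer and an IntInit becomes an ImmRenderer, while the caller's single post-loop count check replaces the old up-front shortfall bookkeeping. As a sketch, for a hypothetical operand whose DefaultOps is (ops (i32 0), R0) on a made-up target named MyTarget, the rendered instruction would gain builder calls roughly like these (illustrative only, not the emitter's literal output):

    // ImmRenderer for the unwrapped (i32 0), AddRegisterRenderer for the
    // register default; MyTarget and R0 are hypothetical.
    MIB.addImm(0);
    MIB.addReg(MyTarget::R0);
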
diff --git a/utils/lit/lit/main.py b/utils/lit/lit/main.py
index 10cd7775060f4..a7f407fc210ce 100755
--- a/utils/lit/lit/main.py
+++ b/utils/lit/lit/main.py
@@ -282,15 +282,9 @@ def main_with_tmp(builtinParameters):
debug_group.add_argument("--show-tests", dest="showTests",
help="Show all discovered tests",
action="store_true", default=False)
- debug_group.add_argument("--use-process-pool", dest="executionStrategy",
- help="Run tests in parallel with a process pool",
- action="store_const", const="PROCESS_POOL")
debug_group.add_argument("--use-processes", dest="executionStrategy",
help="Run tests in parallel with processes (not threads)",
action="store_const", const="PROCESSES")
- debug_group.add_argument("--use-threads", dest="executionStrategy",
- help="Run tests in parallel with threads (not processes)",
- action="store_const", const="THREADS")
opts = parser.parse_args()
args = opts.test_paths
@@ -305,9 +299,6 @@ def main_with_tmp(builtinParameters):
if opts.numThreads is None:
opts.numThreads = lit.util.detectCPUs()
- if opts.executionStrategy is None:
- opts.executionStrategy = 'PROCESS_POOL'
-
if opts.maxFailures == 0:
parser.error("Setting --max-failures to 0 does not have any effect.")
@@ -490,8 +481,7 @@ def main_with_tmp(builtinParameters):
startTime = time.time()
display = TestingProgressDisplay(opts, len(run.tests), progressBar)
try:
- run.execute_tests(display, opts.numThreads, opts.maxTime,
- opts.executionStrategy)
+ run.execute_tests(display, opts.numThreads, opts.maxTime)
except KeyboardInterrupt:
sys.exit(2)
display.finish()
diff --git a/utils/lit/lit/run.py b/utils/lit/lit/run.py
index 27c7a9e59f8b1..aa4fdc18b8779 100644
--- a/utils/lit/lit/run.py
+++ b/utils/lit/lit/run.py
@@ -13,11 +13,7 @@ try:
except ImportError:
win32api = None
-try:
- import multiprocessing
-except ImportError:
- multiprocessing = None
-
+import multiprocessing
import lit.Test
def abort_now():
@@ -227,8 +223,7 @@ class Run(object):
def execute_test(self, test):
return execute_test(test, self.lit_config, self.parallelism_semaphores)
- def execute_tests(self, display, jobs, max_time=None,
- execution_strategy=None):
+ def execute_tests(self, display, jobs, max_time=None):
"""
execute_tests(display, jobs, [max_time])
@@ -249,100 +244,6 @@ class Run(object):
computed. Tests which were not actually executed (for any reason) will
be given an UNRESOLVED result.
"""
-
- if execution_strategy == 'PROCESS_POOL':
- self.execute_tests_with_mp_pool(display, jobs, max_time)
- return
- # FIXME: Standardize on the PROCESS_POOL execution strategy and remove
- # the other two strategies.
-
- use_processes = execution_strategy == 'PROCESSES'
-
- # Choose the appropriate parallel execution implementation.
- consumer = None
- if jobs != 1 and use_processes and multiprocessing:
- try:
- task_impl = multiprocessing.Process
- queue_impl = multiprocessing.Queue
- sem_impl = multiprocessing.Semaphore
- canceled_flag = multiprocessing.Value('i', 0)
- consumer = MultiprocessResultsConsumer(self, display, jobs)
- except:
- # multiprocessing fails to initialize with certain OpenBSD and
- # FreeBSD Python versions: http://bugs.python.org/issue3770
- # Unfortunately the error raised also varies by platform.
- self.lit_config.note('failed to initialize multiprocessing')
- consumer = None
- if not consumer:
- task_impl = threading.Thread
- queue_impl = queue.Queue
- sem_impl = threading.Semaphore
- canceled_flag = LockedValue(0)
- consumer = ThreadResultsConsumer(display)
-
- self.parallelism_semaphores = {k: sem_impl(v)
- for k, v in self.lit_config.parallelism_groups.items()}
-
- # Create the test provider.
- provider = TestProvider(queue_impl, canceled_flag)
- handleFailures(provider, consumer, self.lit_config.maxFailures)
-
- # Putting tasks into the threading or multiprocessing Queue may block,
- # so do it in a separate thread.
- # https://docs.python.org/2/library/multiprocessing.html
- # e.g: On Mac OS X, we will hang if we put 2^15 elements in the queue
- # without taking any out.
- queuer = task_impl(target=provider.queue_tests, args=(self.tests, jobs))
- queuer.start()
-
- # Install a console-control signal handler on Windows.
- if win32api is not None:
- def console_ctrl_handler(type):
- provider.cancel()
- return True
- win32api.SetConsoleCtrlHandler(console_ctrl_handler, True)
-
- # Install a timeout handler, if requested.
- if max_time is not None:
- def timeout_handler():
- provider.cancel()
- timeout_timer = threading.Timer(max_time, timeout_handler)
- timeout_timer.start()
-
- # If not using multiple tasks, just run the tests directly.
- if jobs == 1:
- run_one_tester(self, provider, consumer)
- else:
- # Otherwise, execute the tests in parallel
- self._execute_tests_in_parallel(task_impl, provider, consumer, jobs)
-
- queuer.join()
-
- # Cancel the timeout handler.
- if max_time is not None:
- timeout_timer.cancel()
-
- # Update results for any tests which weren't run.
- for test in self.tests:
- if test.result is None:
- test.setResult(lit.Test.Result(lit.Test.UNRESOLVED, '', 0.0))
-
- def _execute_tests_in_parallel(self, task_impl, provider, consumer, jobs):
- # Start all of the tasks.
- tasks = [task_impl(target=run_one_tester,
- args=(self, provider, consumer))
- for i in range(jobs)]
- for t in tasks:
- t.start()
-
- # Allow the consumer to handle results, if necessary.
- consumer.handle_results()
-
- # Wait for all the tasks to complete.
- for t in tasks:
- t.join()
-
- def execute_tests_with_mp_pool(self, display, jobs, max_time=None):
# Don't do anything if we aren't going to run any tests.
if not self.tests or jobs == 0:
return