author    Dimitry Andric <dim@FreeBSD.org>    2013-12-22 00:04:03 +0000
committer Dimitry Andric <dim@FreeBSD.org>    2013-12-22 00:04:03 +0000
commit    f8af5cf600354830d4ccf59732403f0f073eccb9 (patch)
tree      2ba0398b4c42ad4f55561327538044fd2c925a8b /utils/lit
parent    59d6cff90eecf31cb3dd860c4e786674cfdd42eb (diff)
Diffstat (limited to 'utils/lit')
-rw-r--r--  utils/lit/TODO  168
-rw-r--r--  utils/lit/examples/README.txt  7
-rw-r--r--  utils/lit/examples/many-tests/README.txt  10
-rw-r--r--  utils/lit/examples/many-tests/lit.cfg (renamed from utils/lit/lit/ExampleTests/ManyTests/lit.local.cfg)  2
-rw-r--r--  utils/lit/lit/ExampleTests/Clang/fsyntax-only.c  4
-rw-r--r--  utils/lit/lit/ExampleTests/Clang/lit.cfg  47
-rw-r--r--  utils/lit/lit/ExampleTests/LLVM.InTree/test/Bar/data.txt  1
-rw-r--r--  utils/lit/lit/ExampleTests/LLVM.InTree/test/Bar/pct-S.ll  1
-rw-r--r--  utils/lit/lit/ExampleTests/LLVM.InTree/test/lit.cfg  66
-rw-r--r--  utils/lit/lit/ExampleTests/LLVM.InTree/test/lit.site.cfg  7
-rw-r--r--  utils/lit/lit/ExampleTests/LLVM.OutOfTree/lit.local.cfg  1
-rw-r--r--  utils/lit/lit/ExampleTests/LLVM.OutOfTree/obj/test/Foo/lit.local.cfg  0
-rw-r--r--  utils/lit/lit/ExampleTests/LLVM.OutOfTree/obj/test/lit.site.cfg  8
-rw-r--r--  utils/lit/lit/ExampleTests/LLVM.OutOfTree/src/test/Foo/data.txt  1
-rw-r--r--  utils/lit/lit/ExampleTests/LLVM.OutOfTree/src/test/Foo/pct-S.ll  1
-rw-r--r--  utils/lit/lit/ExampleTests/LLVM.OutOfTree/src/test/lit.cfg  66
-rw-r--r--  utils/lit/lit/ExampleTests/ShExternal/lit.local.cfg  6
-rw-r--r--  utils/lit/lit/ExampleTests/ShInternal/lit.local.cfg  6
-rw-r--r--  utils/lit/lit/ExampleTests/fail.c  2
-rw-r--r--  utils/lit/lit/ExampleTests/lit.cfg  26
-rw-r--r--  utils/lit/lit/ExampleTests/pass.c  1
-rw-r--r--  utils/lit/lit/ExampleTests/required-and-missing.c  4
-rw-r--r--  utils/lit/lit/ExampleTests/required-and-present.c  2
-rw-r--r--  utils/lit/lit/ExampleTests/vg-fail.c  4
-rw-r--r--  utils/lit/lit/ExampleTests/xfail-feature.c  4
-rw-r--r--  utils/lit/lit/ExampleTests/xfail.c  2
-rw-r--r--  utils/lit/lit/ExampleTests/xpass.c  2
-rw-r--r--  utils/lit/lit/LitConfig.py  54
-rw-r--r--  utils/lit/lit/LitTestCase.py  22
-rw-r--r--  utils/lit/lit/ProgressBar.py  29
-rw-r--r--  utils/lit/lit/ShCommands.py  42
-rw-r--r--  utils/lit/lit/ShUtil.py  50
-rw-r--r--  utils/lit/lit/Test.py  167
-rw-r--r--  utils/lit/lit/TestFormats.py  226
-rw-r--r--  utils/lit/lit/TestRunner.py  247
-rw-r--r--  utils/lit/lit/TestingConfig.py  184
-rw-r--r--  utils/lit/lit/__init__.py  5
-rw-r--r--  utils/lit/lit/discovery.py  32
-rw-r--r--  utils/lit/lit/formats/__init__.py  4
-rw-r--r--  utils/lit/lit/formats/base.py  118
-rw-r--r--  utils/lit/lit/formats/googletest.py  114
-rw-r--r--  utils/lit/lit/formats/shtest.py  12
-rwxr-xr-x  utils/lit/lit/main.py  391
-rw-r--r--  utils/lit/lit/run.py  277
-rw-r--r--  utils/lit/lit/util.py (renamed from utils/lit/lit/Util.py)  74
-rw-r--r--  utils/lit/setup.py  9
-rw-r--r--  utils/lit/tests/Inputs/discovery/lit.cfg  4
-rw-r--r--  utils/lit/tests/Inputs/discovery/subdir/lit.local.cfg  3
-rw-r--r--  utils/lit/tests/Inputs/discovery/subsuite/lit.cfg  1
-rw-r--r--  utils/lit/tests/Inputs/exec-discovery-in-tree/lit.cfg  4
-rw-r--r--  utils/lit/tests/Inputs/exec-discovery-in-tree/obj/lit.site.cfg  2
-rw-r--r--  utils/lit/tests/Inputs/exec-discovery/lit.site.cfg  2
-rwxr-xr-x  utils/lit/tests/Inputs/googletest-format/DummySubDir/OneTest  34
-rw-r--r--  utils/lit/tests/Inputs/googletest-format/lit.cfg  3
-rw-r--r--  utils/lit/tests/Inputs/progress-bar/lit.cfg  1
-rw-r--r--  utils/lit/tests/Inputs/shtest-format/argv0.txt  6
-rw-r--r--  utils/lit/tests/Inputs/shtest-format/external_shell/fail.txt  2
-rw-r--r--  utils/lit/tests/Inputs/shtest-format/external_shell/fail_with_bad_encoding.txt  5
-rw-r--r--  utils/lit/tests/Inputs/shtest-format/external_shell/lit.local.cfg  1
-rwxr-xr-x  utils/lit/tests/Inputs/shtest-format/external_shell/write-bad-encoding.sh  3
-rw-r--r--  utils/lit/tests/Inputs/shtest-format/fail.txt  1
-rw-r--r--  utils/lit/tests/Inputs/shtest-format/lit.cfg  1
-rw-r--r--  utils/lit/tests/Inputs/shtest-shell/lit.cfg  1
-rw-r--r--  utils/lit/tests/Inputs/test-data/lit.cfg  44
-rw-r--r--  utils/lit/tests/Inputs/test-data/metrics.ini  7
-rw-r--r--  utils/lit/tests/Inputs/unittest-adaptor/lit.cfg  1
-rw-r--r--  utils/lit/tests/discovery.py  51
-rw-r--r--  utils/lit/tests/googletest-format.py  20
-rw-r--r--  utils/lit/tests/lit.cfg  15
-rw-r--r--  utils/lit/tests/shell-parsing.py  2
-rw-r--r--  utils/lit/tests/shtest-encoding.py  3
-rw-r--r--  utils/lit/tests/shtest-format.py  44
-rw-r--r--  utils/lit/tests/test-data.py  12
-rw-r--r--  utils/lit/tests/test-output.py  21
-rw-r--r--  utils/lit/tests/unittest-adaptor.py  2
75 files changed, 1691 insertions, 1111 deletions
diff --git a/utils/lit/TODO b/utils/lit/TODO
index d2ff842f3145f..c1a60c6f4f09f 100644
--- a/utils/lit/TODO
+++ b/utils/lit/TODO
@@ -1,26 +1,166 @@
- - Move temp directory name into local test config.
+================
+ lit TODO Items
+================
- - Add --show-unsupported, don't show by default?
+Infrastructure
+==============
- - Optionally use multiprocessing.
+1. Change to always load suites, then resolve command line arguments?
- - Support valgrind in all configs, and LLVM style valgrind.
+ Currently we expect each input argument to be a path on disk; we do a
+ recursive search to find the test suite for each item, but then we only do a
+ local search based at the input path to find tests. Additionally, for any path
+ that matches a file on disk we explicitly construct a test instance (bypassing
+ the format's discovery implementation).
- - Support a timeout / ulimit.
+ This has a couple of problems:
- - Rename 'lit' injected variable for config to be lit_config.
+ * The test format doesn't have control over the test instances that result
+ from file paths.
- - Allow import of 'lit' in test suite definitions.
+ * It isn't possible to specify virtual tests as inputs. For example, it is not
+ possible to specify an individual subtest to run with the googletest format.
- - Create an explicit test suite object (instead of using the top-level
- TestingConfig object).
+ * The test format doesn't have full control over the discovery of tests in
+ subdirectories.
- - Allow 'lit' driver to cooperate with test suites to add options (or at least
- sanitize accepted params).
+ Instead, we should move to a model whereby first all of the input specifiers
+ are resolved to test suites, and then the resolution of the input specifier is
+ delegated to each test suite. This could take a couple of forms:
- - Consider move to identifying all tests by path-to-test-suite and then path to
+ * We could resolve to test suites, then fully load each test suite, then have
+ a fixed process to map input specifiers to tests in the test suite
+ (presumably based on path-in-suite derivations). This has the benefit of
+ being consistent across all test formats, but the downside of requiring
+ loading the entire test suite.
+
+ * We could delegate all of the resolution of specifiers to the test
+ suite. This would allow formats that anticipate large test suites to manage
+ their own resolution for better performance. We could provide a default
+ resolution strategy that was similar to what we do now (start at subpaths
+ for directories, but allow the test format control over what happens for
+ individual tests).
+
+2. Consider moving to identifying all tests by path-to-test-suite and then path to
subtest, and don't use test suite names.
- - Consider move to change workflow to always load suites, then resolve command
- line arguments.
+ Currently the test suite name is presented as part of test names, but it has
+ no other useful function, and it is something that has to be skipped over to
+ cut-and-paste a name to subsequently use to rerun a test. If we just
+ represented each test suite by the path to its suite, it would be easier to
+ cut-and-paste the test output lines. This has the downside that the
+ lines might get rather long.
+
+3. Allow 'lit' driver to cooperate with test formats and suites to add options
+ (or at least sanitize accepted params).
+
+ We have started to use the --params method more and more extensively, and it is
+ cumbersome and error prone. Additionally, there are currently various options
+ ``lit`` honors that should more correctly be specified as belonging to the
+ ShTest test format.
+
+ It would be really nice if we could allow test formats and test suites to add
+ their own options to be parsed. The difficulty here, of course, is that we
+ don't know what test formats or test suites are in use until we have parsed the
+ input specifiers. For test formats we could ostensibly require all the possible
+ formats to be registered in order to have options, but for test suites we would
+ certainly have to load the suite before we can query it for what options it
+ understands.
+
+ That leaves us with the following options:
+
+ * Currently we could almost get away with parsing the input specifiers without
+ having done option parsing first (the exception is ``--config-prefix``) but
+ that isn't a very extensible design.
+
+ * We could make a distinction in the command line syntax for test format and
+ test suite options. For example, we could require something like::
+
+ lit -j 1 -sv input-specifier -- --some-format-option
+
+ which would be relatively easy to implement with optparse (I think).
+
+ * We could allow fully interspersed arguments by first extracting the options
+ lit knows about and parsing them, then dispatching the remainder to the
+ formats. This seems the most convenient for users, who are unlikely to care
+ about (or even be aware of) the distinction between the generic lit
+ infrastructure and format or suite specific options.
+
+4. Eliminate duplicate execution models for ShTest tests.
+
+ Currently, the ShTest format uses tests written with shell-script like syntax,
+ and executes them in one of two ways. The first way is by converting them into
+ a bash script and literally executing externally them using bash. The second
+ way is through the use of an internal shell parser and shell execution code
+ (built on the subprocess module). The external execution mode is used on most
+ Unix systems that have bash, the internal execution mode is used on Windows.
+
+ Having two ways to do the same thing is error prone and leads to unnecessary
+ complexity in the testing environment. Additionally, because the mode that
+ converts scripts to bash doesn't try to validate the syntax, it is possible
+ to write tests that use bash shell features unsupported by the internal
+ shell. Such tests won't work on Windows but this may not be obvious to the
+ developer writing the test.
+
+ Another limitation is that when executing the scripts externally, the ShTest
+ format has no idea which commands fail, or what output comes from which
+ commands, so this limits how convenient the output of ShTest failures can be
+ and limits other features (for example, knowing what temporary files were
+ written).
+
+ We should eliminate having two ways of executing the same tests to reduce
+ platform differences and make it easier to develop new features in the ShTest
+ module. This is currently blocked on:
+
+ * The external execution mode is faster in some situations, because it avoids
+ being bottlenecked on the GIL. This can hopefully be obviated simply by
+ using --use-processes.
+
+ * Some tests in LLVM/Clang are explicitly disabled with the internal shell
+ (because they use features specific to bash). We would need to rewrite these
+ tests, or add additional features to the internal shell handling to allow
+ them to pass.
+
+5. Consider changing core to support setup vs. execute distinction.
+
+ Many of the existing test formats are cleanly divided into two phases: one
+ parses the test and extracts XFAIL and REQUIRES information, etc., and the
+ other actually executes the test.
+
+ We could make this distinction part of the core infrastructure and that would
+ enable a couple things:
+
+ * The REQUIRES handling could be lifted to the core, which is nice.
+
+ * This would provide a clear place to insert subtest support, because the
+ setup phase could be responsible for providing subtests back to the
+ core. That would provide part of the infrastructure to parallelize them, for
+ example, and would probably interact well with other possible features like
+ parameterized tests.
+
+ * This affords a clean implementation of --no-execute.
+
+ * One possible downside could be for test formats that cannot determine their
+ subtests without having executed the test. Supporting such formats would
+ either force the test to actually be executed in the setup stage (which
+ might be ok, as long as the API was explicitly phrased to support that), or
+ would mean we are forced into supporting subtests as return values from the
+ execute phase.
+
+ Any format can just keep all of its code in execute, presumably, so the only
+ cost of implementing this is its impact on the API and future changes.
+
+
+Miscellaneous
+=============
+
+* Move temp directory name into local test config.
+
+* Add --show-unsupported, don't show by default?
+
+* Support valgrind in all configs, and LLVM style valgrind.
+
+* Support a timeout / ulimit.
+
+* Create an explicit test suite object (instead of using the top-level
+  TestingConfig object).
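
The ``--`` dispatch sketched in item 3 of the new TODO is easy to prototype
with the standard library. A minimal sketch using argparse; the flag set and
helper name below are hypothetical, not lit's actual option handling::

    import argparse

    def parse_lit_args(argv):
        # Everything after a literal "--" is reserved for the test format.
        if '--' in argv:
            split = argv.index('--')
            argv, format_args = argv[:split], argv[split + 1:]
        else:
            format_args = []

        parser = argparse.ArgumentParser(prog='lit')
        parser.add_argument('-j', '--threads', type=int, default=1)
        parser.add_argument('-s', '--succinct', action='store_true')
        parser.add_argument('-v', '--verbose', action='store_true')
        parser.add_argument('inputs', nargs='+')
        return parser.parse_args(argv), format_args

    opts, fmt_args = parse_lit_args(
        ['-j', '1', '-s', '-v', 'input-specifier', '--', '--some-format-option'])
    print(opts.inputs, fmt_args)
    # ['input-specifier'] ['--some-format-option']
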
diff --git a/utils/lit/examples/README.txt b/utils/lit/examples/README.txt
new file mode 100644
index 0000000000000..a59daa8f770a3
--- /dev/null
+++ b/utils/lit/examples/README.txt
@@ -0,0 +1,7 @@
+==============
+ lit Examples
+==============
+
+This directory contains examples of 'lit' test suite configurations. The test
+suites they define can be run with 'lit examples/example-name'; for more
+details, see the README in each example.
diff --git a/utils/lit/examples/many-tests/README.txt b/utils/lit/examples/many-tests/README.txt
new file mode 100644
index 0000000000000..6bffff1468c08
--- /dev/null
+++ b/utils/lit/examples/many-tests/README.txt
@@ -0,0 +1,10 @@
+========================
+ Many Tests lit Example
+========================
+
+This directory contains a trivial lit test suite configuration that defines a
+custom test format which just generates a large (N=10000) number of tests that
+do a small amount of work in the Python test execution code.
+
+This test suite is useful for testing the performance of lit on large numbers of
+tests.
diff --git a/utils/lit/lit/ExampleTests/ManyTests/lit.local.cfg b/utils/lit/examples/many-tests/lit.cfg
index 6cc47522b16c9..8f7b940b6eac6 100644
--- a/utils/lit/lit/ExampleTests/ManyTests/lit.local.cfg
+++ b/utils/lit/examples/many-tests/lit.cfg
@@ -1,6 +1,6 @@
# -*- Python -*-
-Test = lit.Test
+from lit import Test
class ManyTests(object):
def __init__(self, N=10000):
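
The hunk above is truncated; the rest of this example format follows the
discovery and execution hooks of the older format API (see the deleted
TestFormats.py below). A minimal sketch with illustrative names, assuming
lit's package is importable::

    from lit import Test

    class ManyTests(object):
        def __init__(self, N=10000):
            self.N = N

        def getTestsInDirectory(self, testSuite, path_in_suite,
                                litConfig, localConfig):
            # Synthesize N virtual tests rather than scanning the disk.
            for i in range(self.N):
                test_name = 'test-%04d' % i
                yield Test.Test(testSuite, path_in_suite + (test_name,),
                                localConfig)

        def execute(self, test, litConfig):
            # Do a small, fixed amount of work in Python, then pass.
            sum(range(100))
            return Test.PASS, ''
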
diff --git a/utils/lit/lit/ExampleTests/Clang/fsyntax-only.c b/utils/lit/lit/ExampleTests/Clang/fsyntax-only.c
deleted file mode 100644
index a4a064ba0cf1b..0000000000000
--- a/utils/lit/lit/ExampleTests/Clang/fsyntax-only.c
+++ /dev/null
@@ -1,4 +0,0 @@
-// RUN: clang -fsyntax-only -Xclang -verify %s
-
-int f0(void) {} // expected-warning {{control reaches end of non-void function}}
-
diff --git a/utils/lit/lit/ExampleTests/Clang/lit.cfg b/utils/lit/lit/ExampleTests/Clang/lit.cfg
deleted file mode 100644
index 9295bd9ddbb75..0000000000000
--- a/utils/lit/lit/ExampleTests/Clang/lit.cfg
+++ /dev/null
@@ -1,47 +0,0 @@
-# -*- Python -*-
-
-# Configuration file for the 'lit' test runner.
-
-# name: The name of this test suite.
-config.name = 'Clang'
-
-# testFormat: The test format to use to interpret tests.
-#
-# For now we require '&&' between commands, until they get globally killed and
-# the test runner updated.
-config.test_format = lit.formats.ShTest(execute_external = True)
-
-# suffixes: A list of file extensions to treat as test files.
-config.suffixes = ['.c', '.cpp', '.m', '.mm']
-
-# target_triple: Used by ShTest format for XFAIL checks.
-config.target_triple = 'foo'
-
-###
-
-# Discover the 'clang' and 'clangcc' to use.
-
-import os
-
-def inferClang(PATH):
- # Determine which clang to use.
- clang = os.getenv('CLANG')
-
- # If the user set clang in the environment, definitely use that and don't
- # try to validate.
- if clang:
- return clang
-
- # Otherwise look in the path.
- clang = lit.util.which('clang', PATH)
-
- if not clang:
- lit.fatal("couldn't find 'clang' program, try setting "
- "CLANG in your environment")
-
- return clang
-
-clang = inferClang(config.environment['PATH'])
-if not lit.quiet:
- lit.note('using clang: %r' % clang)
-config.substitutions.append( (' clang ', ' ' + clang + ' ') )
diff --git a/utils/lit/lit/ExampleTests/LLVM.InTree/test/Bar/data.txt b/utils/lit/lit/ExampleTests/LLVM.InTree/test/Bar/data.txt
deleted file mode 100644
index 45b983be36b73..0000000000000
--- a/utils/lit/lit/ExampleTests/LLVM.InTree/test/Bar/data.txt
+++ /dev/null
@@ -1 +0,0 @@
-hi
diff --git a/utils/lit/lit/ExampleTests/LLVM.InTree/test/Bar/pct-S.ll b/utils/lit/lit/ExampleTests/LLVM.InTree/test/Bar/pct-S.ll
deleted file mode 100644
index 3ff363315a32e..0000000000000
--- a/utils/lit/lit/ExampleTests/LLVM.InTree/test/Bar/pct-S.ll
+++ /dev/null
@@ -1 +0,0 @@
-; RUN: grep "hi" %S/data.txt
diff --git a/utils/lit/lit/ExampleTests/LLVM.InTree/test/lit.cfg b/utils/lit/lit/ExampleTests/LLVM.InTree/test/lit.cfg
deleted file mode 100644
index 533c44501ff6f..0000000000000
--- a/utils/lit/lit/ExampleTests/LLVM.InTree/test/lit.cfg
+++ /dev/null
@@ -1,66 +0,0 @@
-# -*- Python -*-
-
-# Configuration file for the 'lit' test runner.
-
-import os
-
-# name: The name of this test suite.
-config.name = 'LLVM'
-
-# testFormat: The test format to use to interpret tests.
-config.test_format = lit.formats.ShTest()
-
-# suffixes: A list of file extensions to treat as test files, this is actually
-# set by on_clone().
-config.suffixes = [ '.ll' ]
-
-# test_source_root: The root path where tests are located.
-config.test_source_root = os.path.dirname(__file__)
-
-# test_exec_root: The root path where tests should be run.
-llvm_obj_root = getattr(config, 'llvm_obj_root', None)
-if llvm_obj_root is not None:
- config.test_exec_root = os.path.join(llvm_obj_root, 'test')
-
-###
-
-import os
-
-# Check that the object root is known.
-if config.test_exec_root is None:
- # Otherwise, we haven't loaded the site specific configuration (the user is
- # probably trying to run on a test file directly, and either the site
- # configuration hasn't been created by the build system, or we are in an
- # out-of-tree build situation).
-
- # Try to detect the situation where we are using an out-of-tree build by
- # looking for 'llvm-config'.
- #
- # FIXME: I debated (i.e., wrote and threw away) adding logic to
- # automagically generate the lit.site.cfg if we are in some kind of fresh
- # build situation. This means knowing how to invoke the build system
- # though, and I decided it was too much magic.
-
- llvm_config = lit.util.which('llvm-config', config.environment['PATH'])
- if not llvm_config:
- lit.fatal('No site specific configuration available!')
-
- # Get the source and object roots.
- llvm_src_root = lit.util.capture(['llvm-config', '--src-root']).strip()
- llvm_obj_root = lit.util.capture(['llvm-config', '--obj-root']).strip()
-
- # Validate that we got a tree which points to here.
- this_src_root = os.path.dirname(config.test_source_root)
- if os.path.realpath(llvm_src_root) != os.path.realpath(this_src_root):
- lit.fatal('No site specific configuration available!')
-
- # Check that the site specific configuration exists.
- site_cfg = os.path.join(llvm_obj_root, 'test', 'lit.site.cfg')
- if not os.path.exists(site_cfg):
- lit.fatal('No site specific configuration available!')
-
- # Okay, that worked. Notify the user of the automagic, and reconfigure.
- lit.note('using out-of-tree build at %r' % llvm_obj_root)
- lit.load_config(config, site_cfg)
- raise SystemExit
-
diff --git a/utils/lit/lit/ExampleTests/LLVM.InTree/test/lit.site.cfg b/utils/lit/lit/ExampleTests/LLVM.InTree/test/lit.site.cfg
deleted file mode 100644
index d45f3ac76205f..0000000000000
--- a/utils/lit/lit/ExampleTests/LLVM.InTree/test/lit.site.cfg
+++ /dev/null
@@ -1,7 +0,0 @@
-# -*- Python -*-
-
-# Preserve some key paths for use by main LLVM test suite config.
-config.llvm_obj_root = os.path.dirname(os.path.dirname(__file__))
-
-# Let the main config do the real work.
-lit.load_config(config, os.path.join(config.llvm_obj_root, 'test/lit.cfg'))
diff --git a/utils/lit/lit/ExampleTests/LLVM.OutOfTree/lit.local.cfg b/utils/lit/lit/ExampleTests/LLVM.OutOfTree/lit.local.cfg
deleted file mode 100644
index 80d0c7ead6b7e..0000000000000
--- a/utils/lit/lit/ExampleTests/LLVM.OutOfTree/lit.local.cfg
+++ /dev/null
@@ -1 +0,0 @@
-config.excludes = ['src']
diff --git a/utils/lit/lit/ExampleTests/LLVM.OutOfTree/obj/test/Foo/lit.local.cfg b/utils/lit/lit/ExampleTests/LLVM.OutOfTree/obj/test/Foo/lit.local.cfg
deleted file mode 100644
index e69de29bb2d1d..0000000000000
--- a/utils/lit/lit/ExampleTests/LLVM.OutOfTree/obj/test/Foo/lit.local.cfg
+++ /dev/null
diff --git a/utils/lit/lit/ExampleTests/LLVM.OutOfTree/obj/test/lit.site.cfg b/utils/lit/lit/ExampleTests/LLVM.OutOfTree/obj/test/lit.site.cfg
deleted file mode 100644
index 94a02d8f8532b..0000000000000
--- a/utils/lit/lit/ExampleTests/LLVM.OutOfTree/obj/test/lit.site.cfg
+++ /dev/null
@@ -1,8 +0,0 @@
-# -*- Python -*-
-
-# Preserve some key paths for use by main LLVM test suite config.
-config.llvm_obj_root = os.path.dirname(os.path.dirname(__file__))
-
-# Let the main config do the real work.
-lit.load_config(config, os.path.join(config.llvm_obj_root,
- '../src/test/lit.cfg'))
diff --git a/utils/lit/lit/ExampleTests/LLVM.OutOfTree/src/test/Foo/data.txt b/utils/lit/lit/ExampleTests/LLVM.OutOfTree/src/test/Foo/data.txt
deleted file mode 100644
index 45b983be36b73..0000000000000
--- a/utils/lit/lit/ExampleTests/LLVM.OutOfTree/src/test/Foo/data.txt
+++ /dev/null
@@ -1 +0,0 @@
-hi
diff --git a/utils/lit/lit/ExampleTests/LLVM.OutOfTree/src/test/Foo/pct-S.ll b/utils/lit/lit/ExampleTests/LLVM.OutOfTree/src/test/Foo/pct-S.ll
deleted file mode 100644
index 3ff363315a32e..0000000000000
--- a/utils/lit/lit/ExampleTests/LLVM.OutOfTree/src/test/Foo/pct-S.ll
+++ /dev/null
@@ -1 +0,0 @@
-; RUN: grep "hi" %S/data.txt
diff --git a/utils/lit/lit/ExampleTests/LLVM.OutOfTree/src/test/lit.cfg b/utils/lit/lit/ExampleTests/LLVM.OutOfTree/src/test/lit.cfg
deleted file mode 100644
index 533c44501ff6f..0000000000000
--- a/utils/lit/lit/ExampleTests/LLVM.OutOfTree/src/test/lit.cfg
+++ /dev/null
@@ -1,66 +0,0 @@
-# -*- Python -*-
-
-# Configuration file for the 'lit' test runner.
-
-import os
-
-# name: The name of this test suite.
-config.name = 'LLVM'
-
-# testFormat: The test format to use to interpret tests.
-config.test_format = lit.formats.ShTest()
-
-# suffixes: A list of file extensions to treat as test files, this is actually
-# set by on_clone().
-config.suffixes = [ '.ll' ]
-
-# test_source_root: The root path where tests are located.
-config.test_source_root = os.path.dirname(__file__)
-
-# test_exec_root: The root path where tests should be run.
-llvm_obj_root = getattr(config, 'llvm_obj_root', None)
-if llvm_obj_root is not None:
- config.test_exec_root = os.path.join(llvm_obj_root, 'test')
-
-###
-
-import os
-
-# Check that the object root is known.
-if config.test_exec_root is None:
- # Otherwise, we haven't loaded the site specific configuration (the user is
- # probably trying to run on a test file directly, and either the site
- # configuration hasn't been created by the build system, or we are in an
- # out-of-tree build situation).
-
- # Try to detect the situation where we are using an out-of-tree build by
- # looking for 'llvm-config'.
- #
- # FIXME: I debated (i.e., wrote and threw away) adding logic to
- # automagically generate the lit.site.cfg if we are in some kind of fresh
- # build situation. This means knowing how to invoke the build system
- # though, and I decided it was too much magic.
-
- llvm_config = lit.util.which('llvm-config', config.environment['PATH'])
- if not llvm_config:
- lit.fatal('No site specific configuration available!')
-
- # Get the source and object roots.
- llvm_src_root = lit.util.capture(['llvm-config', '--src-root']).strip()
- llvm_obj_root = lit.util.capture(['llvm-config', '--obj-root']).strip()
-
- # Validate that we got a tree which points to here.
- this_src_root = os.path.dirname(config.test_source_root)
- if os.path.realpath(llvm_src_root) != os.path.realpath(this_src_root):
- lit.fatal('No site specific configuration available!')
-
- # Check that the site specific configuration exists.
- site_cfg = os.path.join(llvm_obj_root, 'test', 'lit.site.cfg')
- if not os.path.exists(site_cfg):
- lit.fatal('No site specific configuration available!')
-
- # Okay, that worked. Notify the user of the automagic, and reconfigure.
- lit.note('using out-of-tree build at %r' % llvm_obj_root)
- lit.load_config(config, site_cfg)
- raise SystemExit
-
diff --git a/utils/lit/lit/ExampleTests/ShExternal/lit.local.cfg b/utils/lit/lit/ExampleTests/ShExternal/lit.local.cfg
deleted file mode 100644
index 1061da62fd343..0000000000000
--- a/utils/lit/lit/ExampleTests/ShExternal/lit.local.cfg
+++ /dev/null
@@ -1,6 +0,0 @@
-# -*- Python -*-
-
-config.test_format = lit.formats.ShTest(execute_external = True)
-
-config.suffixes = ['.c']
-
diff --git a/utils/lit/lit/ExampleTests/ShInternal/lit.local.cfg b/utils/lit/lit/ExampleTests/ShInternal/lit.local.cfg
deleted file mode 100644
index 448eaa4092b63..0000000000000
--- a/utils/lit/lit/ExampleTests/ShInternal/lit.local.cfg
+++ /dev/null
@@ -1,6 +0,0 @@
-# -*- Python -*-
-
-config.test_format = lit.formats.ShTest(execute_external = False)
-
-config.suffixes = ['.c']
-
diff --git a/utils/lit/lit/ExampleTests/fail.c b/utils/lit/lit/ExampleTests/fail.c
deleted file mode 100644
index 84db41a5889ed..0000000000000
--- a/utils/lit/lit/ExampleTests/fail.c
+++ /dev/null
@@ -1,2 +0,0 @@
-// RUN: echo 'I am some stdout'
-// RUN: false
diff --git a/utils/lit/lit/ExampleTests/lit.cfg b/utils/lit/lit/ExampleTests/lit.cfg
deleted file mode 100644
index 164daba90373a..0000000000000
--- a/utils/lit/lit/ExampleTests/lit.cfg
+++ /dev/null
@@ -1,26 +0,0 @@
-# -*- Python -*-
-
-# Configuration file for the 'lit' test runner.
-
-# name: The name of this test suite.
-config.name = 'Examples'
-
-# suffixes: A list of file extensions to treat as test files.
-config.suffixes = ['.c', '.cpp', '.m', '.mm', '.ll']
-
-# testFormat: The test format to use to interpret tests.
-config.test_format = lit.formats.ShTest()
-
-# test_source_root: The path where tests are located (default is the test suite
-# root).
-config.test_source_root = None
-
-# test_exec_root: The path where tests are located (default is the test suite
-# root).
-config.test_exec_root = None
-
-# target_triple: Used by ShTest format for XFAIL checks.
-config.target_triple = 'foo'
-
-# available_features: Used by ShTest format for REQUIRES checks.
-config.available_features.add('some-feature-name')
diff --git a/utils/lit/lit/ExampleTests/pass.c b/utils/lit/lit/ExampleTests/pass.c
deleted file mode 100644
index 5c1031cccc41a..0000000000000
--- a/utils/lit/lit/ExampleTests/pass.c
+++ /dev/null
@@ -1 +0,0 @@
-// RUN: true
diff --git a/utils/lit/lit/ExampleTests/required-and-missing.c b/utils/lit/lit/ExampleTests/required-and-missing.c
deleted file mode 100644
index 47ba72e4a314a..0000000000000
--- a/utils/lit/lit/ExampleTests/required-and-missing.c
+++ /dev/null
@@ -1,4 +0,0 @@
-// This test shouldn't be run, the required feature is missing.
-//
-// RUN: false
-// REQUIRES: some-missing-feature-name
diff --git a/utils/lit/lit/ExampleTests/required-and-present.c b/utils/lit/lit/ExampleTests/required-and-present.c
deleted file mode 100644
index 2a09e08e5ae91..0000000000000
--- a/utils/lit/lit/ExampleTests/required-and-present.c
+++ /dev/null
@@ -1,2 +0,0 @@
-// RUN: true
-// REQUIRES: some-feature-name
diff --git a/utils/lit/lit/ExampleTests/vg-fail.c b/utils/lit/lit/ExampleTests/vg-fail.c
deleted file mode 100644
index e3339ff91aabd..0000000000000
--- a/utils/lit/lit/ExampleTests/vg-fail.c
+++ /dev/null
@@ -1,4 +0,0 @@
-// This test should XPASS, when run without valgrind.
-
-// RUN: true
-// XFAIL: valgrind
diff --git a/utils/lit/lit/ExampleTests/xfail-feature.c b/utils/lit/lit/ExampleTests/xfail-feature.c
deleted file mode 100644
index 3444bf870080a..0000000000000
--- a/utils/lit/lit/ExampleTests/xfail-feature.c
+++ /dev/null
@@ -1,4 +0,0 @@
-// This test should XPASS.
-
-// RUN: true
-// XFAIL: some-feature-name
diff --git a/utils/lit/lit/ExampleTests/xfail.c b/utils/lit/lit/ExampleTests/xfail.c
deleted file mode 100644
index b36cd99a30008..0000000000000
--- a/utils/lit/lit/ExampleTests/xfail.c
+++ /dev/null
@@ -1,2 +0,0 @@
-// RUN: false
-// XFAIL: *
diff --git a/utils/lit/lit/ExampleTests/xpass.c b/utils/lit/lit/ExampleTests/xpass.c
deleted file mode 100644
index ad84990f7e22f..0000000000000
--- a/utils/lit/lit/ExampleTests/xpass.c
+++ /dev/null
@@ -1,2 +0,0 @@
-// RUN: true
-// XFAIL
diff --git a/utils/lit/lit/LitConfig.py b/utils/lit/lit/LitConfig.py
index 9bcf20b2f11ce..b0dde5db86868 100644
--- a/utils/lit/lit/LitConfig.py
+++ b/utils/lit/lit/LitConfig.py
@@ -1,3 +1,13 @@
+from __future__ import absolute_import
+import inspect
+import os
+import sys
+
+import lit.Test
+import lit.formats
+import lit.TestingConfig
+import lit.util
+
class LitConfig:
"""LitConfig - Configuration data for a 'lit' test runner instance, shared
across all tests.
@@ -8,29 +18,19 @@ class LitConfig:
easily.
"""
- # Provide access to Test module.
- import Test
-
- # Provide access to built-in formats.
- import TestFormats as formats
-
- # Provide access to built-in utility functions.
- import Util as util
-
def __init__(self, progname, path, quiet,
useValgrind, valgrindLeakCheck, valgrindArgs,
- noExecute, ignoreStdErr, debug, isWindows,
+ noExecute, debug, isWindows,
params, config_prefix = None):
# The name of the test runner.
self.progname = progname
# The items to add to the PATH environment variable.
- self.path = list(map(str, path))
+ self.path = [str(p) for p in path]
self.quiet = bool(quiet)
self.useValgrind = bool(useValgrind)
self.valgrindLeakCheck = bool(valgrindLeakCheck)
self.valgrindUserArgs = list(valgrindArgs)
self.noExecute = noExecute
- self.ignoreStdErr = ignoreStdErr
self.debug = debug
self.isWindows = bool(isWindows)
self.params = dict(params)
@@ -61,27 +61,19 @@ class LitConfig:
def load_config(self, config, path):
"""load_config(config, path) - Load a config object from an alternate
path."""
- from TestingConfig import TestingConfig
if self.debug:
self.note('load_config from %r' % path)
- return TestingConfig.frompath(path, config.parent, self,
- mustExist = True,
- config = config)
+ config.load_from_path(path, self)
+ return config
def getBashPath(self):
"""getBashPath - Get the path to 'bash'"""
- import os, Util
-
if self.bashPath is not None:
return self.bashPath
- self.bashPath = Util.which('bash', os.pathsep.join(self.path))
+ self.bashPath = lit.util.which('bash', os.pathsep.join(self.path))
if self.bashPath is None:
- # Check some known paths.
- for path in ('/bin/bash', '/usr/bin/bash', '/usr/local/bin/bash'):
- if os.path.exists(path):
- self.bashPath = path
- break
+ self.bashPath = lit.util.which('bash')
if self.bashPath is None:
self.warning("Unable to find 'bash'.")
@@ -90,15 +82,14 @@ class LitConfig:
return self.bashPath
def getToolsPath(self, dir, paths, tools):
- import os, Util
if dir is not None and os.path.isabs(dir) and os.path.isdir(dir):
- if not Util.checkToolsPath(dir, tools):
+ if not lit.util.checkToolsPath(dir, tools):
return None
else:
- dir = Util.whichTools(tools, paths)
+ dir = lit.util.whichTools(tools, paths)
# bash
- self.bashPath = Util.which('bash', dir)
+ self.bashPath = lit.util.which('bash', dir)
if self.bashPath is None:
self.note("Unable to find 'bash.exe'.")
self.bashPath = ''
@@ -106,8 +97,6 @@ class LitConfig:
return dir
def _write_message(self, kind, message):
- import inspect, os, sys
-
# Get the file/line where this message was generated.
f = inspect.currentframe()
# Step out of _write_message, and then out of wrapper.
@@ -115,8 +104,8 @@ class LitConfig:
file,line,_,_,_ = inspect.getframeinfo(f)
location = '%s:%d' % (os.path.basename(file), line)
- print >>sys.stderr, '%s: %s: %s: %s' % (self.progname, location,
- kind, message)
+ sys.stderr.write('%s: %s: %s: %s\n' % (self.progname, location,
+ kind, message))
def note(self, message):
self._write_message('note', message)
@@ -130,6 +119,5 @@ class LitConfig:
self.numErrors += 1
def fatal(self, message):
- import sys
self._write_message('fatal', message)
sys.exit(2)
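
The caller-location trick in ``_write_message`` above stands on its own. A
self-contained sketch of the same frame walking; the ``note`` helper here is
illustrative, not part of the patch::

    import inspect
    import os
    import sys

    def note(message):
        # Step out of note() itself to reach the caller's frame.
        f = inspect.currentframe().f_back
        filename, line, _, _, _ = inspect.getframeinfo(f)
        location = '%s:%d' % (os.path.basename(filename), line)
        sys.stderr.write('lit: %s: note: %s\n' % (location, message))

    note('using clang: /usr/bin/clang')   # reported at this call site
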
diff --git a/utils/lit/lit/LitTestCase.py b/utils/lit/lit/LitTestCase.py
index 8951185843571..e04846c7bd6a4 100644
--- a/utils/lit/lit/LitTestCase.py
+++ b/utils/lit/lit/LitTestCase.py
@@ -1,5 +1,7 @@
+from __future__ import absolute_import
import unittest
-import Test
+
+import lit.Test
"""
TestCase adaptor for providing a 'unittest' compatible interface to 'lit' tests.
@@ -9,10 +11,10 @@ class UnresolvedError(RuntimeError):
pass
class LitTestCase(unittest.TestCase):
- def __init__(self, test, lit_config):
+ def __init__(self, test, run):
unittest.TestCase.__init__(self)
self._test = test
- self._lit_config = lit_config
+ self._run = run
def id(self):
return self._test.getFullName()
@@ -21,10 +23,12 @@ class LitTestCase(unittest.TestCase):
return self._test.getFullName()
def runTest(self):
- tr, output = self._test.config.test_format.execute(
- self._test, self._lit_config)
+ # Run the test.
+ self._run.execute_test(self._test)
- if tr is Test.UNRESOLVED:
- raise UnresolvedError(output)
- elif tr.isFailure:
- self.fail(output)
+ # Adapt the result to unittest.
+ result = self._test.result
+ if result.code is lit.Test.UNRESOLVED:
+ raise UnresolvedError(result.output)
+ elif result.code.isFailure:
+ self.fail(result.output)
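
The reworked adaptor only needs an object with an ``execute_test`` hook and a
test carrying a ``result``. A sketch of driving it from plain ``unittest``,
using stand-ins for lit's real Test and Run objects (all names below are
stubs for illustration)::

    import unittest

    class StubCode(object):
        def __init__(self, name, isFailure):
            self.name, self.isFailure = name, isFailure

    PASS = StubCode('PASS', False)

    class StubResult(object):
        def __init__(self, code, output=''):
            self.code, self.output = code, output

    class StubTest(object):
        result = None
        def getFullName(self):
            return 'Examples :: pass.c'

    class StubRun(object):
        def execute_test(self, test):
            test.result = StubResult(PASS)

    class AdaptedCase(unittest.TestCase):
        def __init__(self, test, run):
            unittest.TestCase.__init__(self)
            self._test, self._run = test, run
        def runTest(self):
            self._run.execute_test(self._test)
            if self._test.result.code.isFailure:
                self.fail(self._test.result.output)

    suite = unittest.TestSuite([AdaptedCase(StubTest(), StubRun())])
    unittest.TextTestRunner(verbosity=0).run(suite)
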
diff --git a/utils/lit/lit/ProgressBar.py b/utils/lit/lit/ProgressBar.py
index 5c85a175c5caa..e3644f1fa634f 100644
--- a/utils/lit/lit/ProgressBar.py
+++ b/utils/lit/lit/ProgressBar.py
@@ -5,6 +5,10 @@
import sys, re, time
+def to_bytes(str):
+ # Encode to Latin1 to get binary data.
+ return str.encode('ISO-8859-1')
+
class TerminalController:
"""
A class that can be used to portably generate formatted output to
@@ -16,13 +20,13 @@ class TerminalController:
output to the terminal:
>>> term = TerminalController()
- >>> print 'This is '+term.GREEN+'green'+term.NORMAL
+ >>> print('This is '+term.GREEN+'green'+term.NORMAL)
Alternatively, the `render()` method can used, which replaces
'${action}' with the string required to perform 'action':
>>> term = TerminalController()
- >>> print term.render('This is ${GREEN}green${NORMAL}')
+ >>> print(term.render('This is ${GREEN}green${NORMAL}'))
If the terminal doesn't support a given action, then the value of
the corresponding instance variable will be set to ''. As a
@@ -34,7 +38,7 @@ class TerminalController:
>>> term = TerminalController()
>>> if term.CLEAR_SCREEN:
- ... print 'This terminal supports clearning the screen.'
+ ... print('This terminal supports clearing the screen.')
Finally, if the width and height of the terminal are known, then
they will be stored in the `COLS` and `LINES` attributes.
@@ -116,26 +120,34 @@ class TerminalController:
set_fg = self._tigetstr('setf')
if set_fg:
for i,color in zip(range(len(self._COLORS)), self._COLORS):
- setattr(self, color, curses.tparm(set_fg, i) or '')
+ setattr(self, color, self._tparm(set_fg, i))
set_fg_ansi = self._tigetstr('setaf')
if set_fg_ansi:
for i,color in zip(range(len(self._ANSICOLORS)), self._ANSICOLORS):
- setattr(self, color, curses.tparm(set_fg_ansi, i) or '')
+ setattr(self, color, self._tparm(set_fg_ansi, i))
set_bg = self._tigetstr('setb')
if set_bg:
for i,color in zip(range(len(self._COLORS)), self._COLORS):
- setattr(self, 'BG_'+color, curses.tparm(set_bg, i) or '')
+ setattr(self, 'BG_'+color, self._tparm(set_bg, i))
set_bg_ansi = self._tigetstr('setab')
if set_bg_ansi:
for i,color in zip(range(len(self._ANSICOLORS)), self._ANSICOLORS):
- setattr(self, 'BG_'+color, curses.tparm(set_bg_ansi, i) or '')
+ setattr(self, 'BG_'+color, self._tparm(set_bg_ansi, i))
+
+ def _tparm(self, arg, index):
+ import curses
+ return curses.tparm(to_bytes(arg), index).decode('ascii') or ''
def _tigetstr(self, cap_name):
# String capabilities can include "delays" of the form "$<2>".
# For any modern terminal, we should be able to just ignore
# these, so strip them out.
import curses
- cap = curses.tigetstr(cap_name) or ''
+ cap = curses.tigetstr(cap_name)
+ if cap is None:
+ cap = ''
+ else:
+ cap = cap.decode('ascii')
return re.sub(r'\$<\d+>[/*]?', '', cap)
def render(self, template):
@@ -269,7 +281,6 @@ class ProgressBar:
self.cleared = 1
def test():
- import time
tc = TerminalController()
p = ProgressBar(tc, 'Tests')
for i in range(101):
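
The byte/string handling this diff adds can be seen in isolation: under
Python 3, ``curses`` returns terminfo capabilities as bytes, so they are
decoded for string formatting and re-encoded for ``tparm``. A standalone
sketch, assuming a real terminal is attached::

    import curses
    import re
    import sys

    def get_capability(cap_name):
        cap = curses.tigetstr(cap_name)
        if cap is None:
            return ''
        if not isinstance(cap, str):      # Python 3 returns bytes
            cap = cap.decode('ascii')
        # Strip terminfo delay markers such as "$<2>".
        return re.sub(r'\$<\d+>[/*]?', '', cap)

    if sys.stdout.isatty():
        curses.setupterm()
        bold, normal = get_capability('bold'), get_capability('sgr0')
        sys.stdout.write(bold + 'bold text' + normal + '\n')
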
diff --git a/utils/lit/lit/ShCommands.py b/utils/lit/lit/ShCommands.py
index 4550437ce2272..9ca9e8c91c0d4 100644
--- a/utils/lit/lit/ShCommands.py
+++ b/utils/lit/lit/ShCommands.py
@@ -6,12 +6,12 @@ class Command:
def __repr__(self):
return 'Command(%r, %r)' % (self.args, self.redirects)
- def __cmp__(self, other):
+ def __eq__(self, other):
if not isinstance(other, Command):
- return -1
+ return False
- return cmp((self.args, self.redirects),
- (other.args, other.redirects))
+ return ((self.args, self.redirects) ==
+ (other.args, other.redirects))
def toShell(self, file):
for arg in self.args:
@@ -20,20 +20,20 @@ class Command:
elif '"' not in arg and '$' not in arg:
quoted = '"%s"' % arg
else:
- raise NotImplementedError,'Unable to quote %r' % arg
- print >>file, quoted,
+ raise NotImplementedError('Unable to quote %r' % arg)
+ file.write(quoted)
# For debugging / validation.
import ShUtil
dequoted = list(ShUtil.ShLexer(quoted).lex())
if dequoted != [arg]:
- raise NotImplementedError,'Unable to quote %r' % arg
+ raise NotImplementedError('Unable to quote %r' % arg)
for r in self.redirects:
if len(r[0]) == 1:
- print >>file, "%s '%s'" % (r[0][0], r[1]),
+ file.write("%s '%s'" % (r[0][0], r[1]))
else:
- print >>file, "%s%s '%s'" % (r[0][1], r[0][0], r[1]),
+ file.write("%s%s '%s'" % (r[0][1], r[0][0], r[1]))
class Pipeline:
def __init__(self, commands, negate=False, pipe_err=False):
@@ -45,22 +45,22 @@ class Pipeline:
return 'Pipeline(%r, %r, %r)' % (self.commands, self.negate,
self.pipe_err)
- def __cmp__(self, other):
+ def __eq__(self, other):
if not isinstance(other, Pipeline):
- return -1
+ return False
- return cmp((self.commands, self.negate, self.pipe_err),
- (other.commands, other.negate, self.pipe_err))
+ return ((self.commands, self.negate, self.pipe_err) ==
+ (other.commands, other.negate, other.pipe_err))
def toShell(self, file, pipefail=False):
if pipefail != self.pipe_err:
- raise ValueError,'Inconsistent "pipefail" attribute!'
+ raise ValueError('Inconsistent "pipefail" attribute!')
if self.negate:
- print >>file, '!',
+ file.write('! ')
for cmd in self.commands:
cmd.toShell(file)
if cmd is not self.commands[-1]:
- print >>file, '|\n ',
+ file.write('|\n ')
class Seq:
def __init__(self, lhs, op, rhs):
@@ -72,14 +72,14 @@ class Seq:
def __repr__(self):
return 'Seq(%r, %r, %r)' % (self.lhs, self.op, self.rhs)
- def __cmp__(self, other):
+ def __eq__(self, other):
if not isinstance(other, Seq):
- return -1
+ return False
- return cmp((self.lhs, self.op, self.rhs),
- (other.lhs, other.op, other.rhs))
+ return ((self.lhs, self.op, self.rhs) ==
+ (other.lhs, other.op, other.rhs))
def toShell(self, file, pipefail=False):
self.lhs.toShell(file, pipefail)
- print >>file, ' %s\n' % self.op
+ file.write(' %s\n' % self.op)
self.rhs.toShell(file, pipefail)
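
The ``__cmp__`` to ``__eq__`` conversions above are forced by Python 3, which
ignores ``__cmp__`` entirely; rich comparisons are the only portable option.
A minimal sketch of the tuple-equality idiom, with the explicit ``__ne__``
that Python 2 still needs::

    class Command(object):
        def __init__(self, args, redirects):
            self.args = args
            self.redirects = redirects

        def __eq__(self, other):
            if not isinstance(other, Command):
                return False
            return ((self.args, self.redirects) ==
                    (other.args, other.redirects))

        def __ne__(self, other):
            return not self.__eq__(other)

    assert Command(['echo', 'hi'], []) == Command(['echo', 'hi'], [])
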
diff --git a/utils/lit/lit/ShUtil.py b/utils/lit/lit/ShUtil.py
index 50f79103199bd..1945ba723bcd6 100644
--- a/utils/lit/lit/ShUtil.py
+++ b/utils/lit/lit/ShUtil.py
@@ -1,7 +1,8 @@
+from __future__ import absolute_import
import itertools
-import Util
-from ShCommands import Command, Pipeline, Seq
+import lit.util
+from lit.ShCommands import Command, Pipeline, Seq
class ShLexer:
def __init__(self, data, win32Escapes = False):
@@ -74,8 +75,8 @@ class ShLexer:
# Outside of a string, '\\' escapes everything.
self.eat()
if self.pos == self.end:
- Util.warning("escape at end of quoted argument in: %r" %
- self.data)
+ lit.util.warning(
+ "escape at end of quoted argument in: %r" % self.data)
return str
str += self.eat()
else:
@@ -92,8 +93,8 @@ class ShLexer:
# Inside a '"' quoted string, '\\' only escapes the quote
# character and backslash, otherwise it is preserved.
if self.pos == self.end:
- Util.warning("escape at end of quoted argument in: %r" %
- self.data)
+ lit.util.warning(
+ "escape at end of quoted argument in: %r" % self.data)
return str
c = self.eat()
if c == '"': #
@@ -104,7 +105,7 @@ class ShLexer:
str += '\\' + c
else:
str += c
- Util.warning("missing quote character in %r" % self.data)
+ lit.util.warning("missing quote character in %r" % self.data)
return str
def lex_arg_checked(self, c):
@@ -116,9 +117,11 @@ class ShLexer:
reference = self.lex_arg_slow(c)
if res is not None:
if res != reference:
- raise ValueError,"Fast path failure: %r != %r" % (res, reference)
+ raise ValueError("Fast path failure: %r != %r" % (
+ res, reference))
if self.pos != end:
- raise ValueError,"Fast path failure: %r != %r" % (self.pos, end)
+ raise ValueError("Fast path failure: %r != %r" % (
+ self.pos, end))
return reference
def lex_arg(self, c):
@@ -166,28 +169,28 @@ class ShLexer:
###
class ShParser:
- def __init__(self, data, win32Escapes = False):
+ def __init__(self, data, win32Escapes = False, pipefail = False):
self.data = data
+ self.pipefail = pipefail
self.tokens = ShLexer(data, win32Escapes = win32Escapes).lex()
def lex(self):
- try:
- return self.tokens.next()
- except StopIteration:
- return None
+ for item in self.tokens:
+ return item
+ return None
def look(self):
- next = self.lex()
- if next is not None:
- self.tokens = itertools.chain([next], self.tokens)
- return next
+ token = self.lex()
+ if token is not None:
+ self.tokens = itertools.chain([token], self.tokens)
+ return token
def parse_command(self):
tok = self.lex()
if not tok:
- raise ValueError,"empty command!"
+ raise ValueError("empty command!")
if isinstance(tok, tuple):
- raise ValueError,"syntax error near unexpected token %r" % tok[0]
+ raise ValueError("syntax error near unexpected token %r" % tok[0])
args = [tok]
redirects = []
@@ -212,7 +215,7 @@ class ShParser:
op = self.lex()
arg = self.lex()
if not arg:
- raise ValueError,"syntax error near token %r" % op[0]
+ raise ValueError("syntax error near token %r" % op[0])
redirects.append((op, arg))
return Command(args, redirects)
@@ -224,7 +227,7 @@ class ShParser:
while self.look() == ('|',):
self.lex()
commands.append(self.parse_command())
- return Pipeline(commands, negate)
+ return Pipeline(commands, negate, self.pipefail)
def parse(self):
lhs = self.parse_pipeline()
@@ -234,7 +237,8 @@ class ShParser:
assert isinstance(operator, tuple) and len(operator) == 1
if not self.look():
- raise ValueError, "missing argument to operator %r" % operator[0]
+ raise ValueError(
+ "missing argument to operator %r" % operator[0])
# FIXME: Operator precedence!!
lhs = Seq(lhs, operator[0], self.parse_pipeline())
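
The ``lex``/``look`` rewrite above avoids ``iterator.next()``, which Python 3
renamed to ``__next__``; the loop form runs unchanged on both versions. Two
equivalent spellings, for illustration::

    def first_or_none_loop(tokens):
        for item in tokens:               # what the patch uses
            return item                   # stops after a single item
        return None

    def first_or_none_builtin(tokens):
        return next(iter(tokens), None)   # builtin form, Python 2.6+

    tokens = iter(['echo', 'hi'])
    assert first_or_none_loop(tokens) == 'echo'
    assert first_or_none_builtin(tokens) == 'hi'
    assert first_or_none_loop(tokens) is None   # iterator exhausted
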
diff --git a/utils/lit/lit/Test.py b/utils/lit/lit/Test.py
index 9471e3a98bf52..b4988f530dbb4 100644
--- a/utils/lit/lit/Test.py
+++ b/utils/lit/lit/Test.py
@@ -1,8 +1,21 @@
import os
-# Test results.
+# Test result codes.
+
+class ResultCode(object):
+ """Test result codes."""
+
+ # We override __new__ and __getnewargs__ to ensure that pickling still
+ # provides unique ResultCode objects in any particular instance.
+ _instances = {}
+ def __new__(cls, name, isFailure):
+ res = cls._instances.get(name)
+ if res is None:
+ cls._instances[name] = res = super(ResultCode, cls).__new__(cls)
+ return res
+ def __getnewargs__(self):
+ return (self.name, self.isFailure)
-class TestResult:
def __init__(self, name, isFailure):
self.name = name
self.isFailure = isFailure
@@ -11,20 +24,87 @@ class TestResult:
return '%s%r' % (self.__class__.__name__,
(self.name, self.isFailure))
-PASS = TestResult('PASS', False)
-XFAIL = TestResult('XFAIL', False)
-FAIL = TestResult('FAIL', True)
-XPASS = TestResult('XPASS', True)
-UNRESOLVED = TestResult('UNRESOLVED', True)
-UNSUPPORTED = TestResult('UNSUPPORTED', False)
+PASS = ResultCode('PASS', False)
+XFAIL = ResultCode('XFAIL', False)
+FAIL = ResultCode('FAIL', True)
+XPASS = ResultCode('XPASS', True)
+UNRESOLVED = ResultCode('UNRESOLVED', True)
+UNSUPPORTED = ResultCode('UNSUPPORTED', False)
-# Test classes.
+# Test metric values.
-class TestFormat:
- """TestFormat - Test information provider."""
+class MetricValue(object):
+ def format(self):
+ """
+ format() -> str
- def __init__(self, name):
- self.name = name
+ Convert this metric to a string suitable for displaying as part of the
+ console output.
+ """
+ raise RuntimeError("abstract method")
+
+ def todata(self):
+ """
+ todata() -> json-serializable data
+
+ Convert this metric to content suitable for serializing in the JSON test
+ output.
+ """
+ raise RuntimeError("abstract method")
+
+class IntMetricValue(MetricValue):
+ def __init__(self, value):
+ self.value = value
+
+ def format(self):
+ return str(self.value)
+
+ def todata(self):
+ return self.value
+
+class RealMetricValue(MetricValue):
+ def __init__(self, value):
+ self.value = value
+
+ def format(self):
+ return '%.4f' % self.value
+
+ def todata(self):
+ return self.value
+
+# Test results.
+
+class Result(object):
+ """Wrapper for the results of executing an individual test."""
+
+ def __init__(self, code, output='', elapsed=None):
+ # The result code.
+ self.code = code
+ # The test output.
+ self.output = output
+ # The wall timing to execute the test, if timing.
+ self.elapsed = elapsed
+ # The metrics reported by this test.
+ self.metrics = {}
+
+ def addMetric(self, name, value):
+ """
+ addMetric(name, value)
+
+ Attach a test metric to the test result, with the given name and
+ value. It is an error to attempt to attach a metric with the same
+ name multiple times.
+
+ Each value must be an instance of a MetricValue subclass.
+ """
+ if name in self.metrics:
+ raise ValueError("result already includes metrics for %r" % (
+ name,))
+ if not isinstance(value, MetricValue):
+ raise TypeError("unexpected metric value: %r" % (value,))
+ self.metrics[name] = value
+
+# Test classes.
class TestSuite:
"""TestSuite - Information on a group of tests.
@@ -52,27 +132,28 @@ class Test:
self.suite = suite
self.path_in_suite = path_in_suite
self.config = config
- # The test result code, once complete.
+ # A list of conditions under which this test is expected to fail. These
+ # can optionally be provided by test format handlers, and will be
+ # honored when the test result is supplied.
+ self.xfails = []
+ # The test result, once complete.
self.result = None
- # Any additional output from the test, once complete.
- self.output = None
- # The wall time to execute this test, if timing and once complete.
- self.elapsed = None
- # The repeat index of this test, or None.
- self.index = None
-
- def copyWithIndex(self, index):
- import copy
- res = copy.copy(self)
- res.index = index
- return res
- def setResult(self, result, output, elapsed):
- assert self.result is None, "Test result already set!"
+ def setResult(self, result):
+ if self.result is not None:
+ raise ValueError("test result already set")
+ if not isinstance(result, Result):
+ raise ValueError("unexpected result type")
+
self.result = result
- self.output = output
- self.elapsed = elapsed
+ # Apply the XFAIL handling to resolve the result exit code.
+ if self.isExpectedToFail():
+ if self.result.code == PASS:
+ self.result.code = XPASS
+ elif self.result.code == FAIL:
+ self.result.code = XFAIL
+
def getFullName(self):
return self.suite.config.name + ' :: ' + '/'.join(self.path_in_suite)
@@ -81,3 +162,29 @@ class Test:
def getExecPath(self):
return self.suite.getExecPath(self.path_in_suite)
+
+ def isExpectedToFail(self):
+ """
+ isExpectedToFail() -> bool
+
+ Check whether this test is expected to fail in the current
+ configuration. This check relies on the test xfails property, which for
+ some test formats may not be computed until the test has first been
+ executed.
+ """
+
+ # Check if any of the xfails match an available feature or the target.
+ for item in self.xfails:
+ # If this is the wildcard, it always fails.
+ if item == '*':
+ return True
+
+ # If this is an exact match for one of the features, it fails.
+ if item in self.config.available_features:
+ return True
+
+ # If this is a part of the target triple, it fails.
+ if item in self.suite.config.target_triple:
+ return True
+
+ return False
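
The interning scheme behind ``ResultCode`` is worth seeing end to end:
``__new__`` returns the cached instance for a given name, and
``__getnewargs__`` routes unpickling back through ``__new__``, so identity
checks such as ``result.code is PASS`` survive a pickle round trip. A
self-contained sketch of the same pattern::

    import pickle

    class ResultCode(object):
        _instances = {}

        def __new__(cls, name, isFailure):
            res = cls._instances.get(name)
            if res is None:
                cls._instances[name] = res = super(ResultCode,
                                                   cls).__new__(cls)
            return res

        def __getnewargs__(self):
            return (self.name, self.isFailure)

        def __init__(self, name, isFailure):
            self.name = name
            self.isFailure = isFailure

    PASS = ResultCode('PASS', False)
    FAIL = ResultCode('FAIL', True)

    assert ResultCode('PASS', False) is PASS
    # Protocol 2+ consults __getnewargs__, so identity is preserved.
    assert pickle.loads(pickle.dumps(FAIL, 2)) is FAIL
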
diff --git a/utils/lit/lit/TestFormats.py b/utils/lit/lit/TestFormats.py
deleted file mode 100644
index 26541f183bf80..0000000000000
--- a/utils/lit/lit/TestFormats.py
+++ /dev/null
@@ -1,226 +0,0 @@
-import os
-import sys
-
-import Test
-import TestRunner
-import Util
-
-kIsWindows = sys.platform in ['win32', 'cygwin']
-
-class GoogleTest(object):
- def __init__(self, test_sub_dir, test_suffix):
- self.test_sub_dir = os.path.normcase(str(test_sub_dir)).split(';')
- self.test_suffix = str(test_suffix)
-
- # On Windows, assume tests will also end in '.exe'.
- if kIsWindows:
- self.test_suffix += '.exe'
-
- def getGTestTests(self, path, litConfig, localConfig):
- """getGTestTests(path) - [name]
-
- Return the tests available in gtest executable.
-
- Args:
- path: String path to a gtest executable
- litConfig: LitConfig instance
- localConfig: TestingConfig instance"""
-
- try:
- lines = Util.capture([path, '--gtest_list_tests'],
- env=localConfig.environment)
- if kIsWindows:
- lines = lines.replace('\r', '')
- lines = lines.split('\n')
- except:
- litConfig.error("unable to discover google-tests in %r" % path)
- raise StopIteration
-
- nested_tests = []
- for ln in lines:
- if not ln.strip():
- continue
-
- prefix = ''
- index = 0
- while ln[index*2:index*2+2] == ' ':
- index += 1
- while len(nested_tests) > index:
- nested_tests.pop()
-
- ln = ln[index*2:]
- if ln.endswith('.'):
- nested_tests.append(ln)
- else:
- yield ''.join(nested_tests) + ln
-
- def getTestsInExecutable(self, testSuite, path_in_suite, execpath,
- litConfig, localConfig):
- if not execpath.endswith(self.test_suffix):
- return
- (dirname, basename) = os.path.split(execpath)
- # Discover the tests in this executable.
- for testname in self.getGTestTests(execpath, litConfig, localConfig):
- testPath = path_in_suite + (dirname, basename, testname)
- yield Test.Test(testSuite, testPath, localConfig)
-
- def getTestsInDirectory(self, testSuite, path_in_suite,
- litConfig, localConfig):
- source_path = testSuite.getSourcePath(path_in_suite)
- for filename in os.listdir(source_path):
- filepath = os.path.join(source_path, filename)
- if os.path.isdir(filepath):
- # Iterate over executables in a directory.
- if not os.path.normcase(filename) in self.test_sub_dir:
- continue
- for subfilename in os.listdir(filepath):
- execpath = os.path.join(filepath, subfilename)
- for test in self.getTestsInExecutable(
- testSuite, path_in_suite, execpath,
- litConfig, localConfig):
- yield test
- elif ('.' in self.test_sub_dir):
- for test in self.getTestsInExecutable(
- testSuite, path_in_suite, filepath,
- litConfig, localConfig):
- yield test
-
- def execute(self, test, litConfig):
- testPath,testName = os.path.split(test.getSourcePath())
- while not os.path.exists(testPath):
- # Handle GTest parametrized and typed tests, whose name includes
- # some '/'s.
- testPath, namePrefix = os.path.split(testPath)
- testName = os.path.join(namePrefix, testName)
-
- cmd = [testPath, '--gtest_filter=' + testName]
- if litConfig.useValgrind:
- cmd = litConfig.valgrindArgs + cmd
-
- if litConfig.noExecute:
- return Test.PASS, ''
-
- out, err, exitCode = TestRunner.executeCommand(
- cmd, env=test.config.environment)
-
- if not exitCode:
- return Test.PASS,''
-
- return Test.FAIL, out + err
-
-###
-
-class FileBasedTest(object):
- def getTestsInDirectory(self, testSuite, path_in_suite,
- litConfig, localConfig):
- source_path = testSuite.getSourcePath(path_in_suite)
- for filename in os.listdir(source_path):
- # Ignore dot files and excluded tests.
- if (filename.startswith('.') or
- filename in localConfig.excludes):
- continue
-
- filepath = os.path.join(source_path, filename)
- if not os.path.isdir(filepath):
- base,ext = os.path.splitext(filename)
- if ext in localConfig.suffixes:
- yield Test.Test(testSuite, path_in_suite + (filename,),
- localConfig)
-
-class ShTest(FileBasedTest):
- def __init__(self, execute_external = False):
- self.execute_external = execute_external
-
- def execute(self, test, litConfig):
- return TestRunner.executeShTest(test, litConfig,
- self.execute_external)
-
-###
-
-import re
-import tempfile
-
-class OneCommandPerFileTest:
- # FIXME: Refactor into generic test for running some command on a directory
- # of inputs.
-
- def __init__(self, command, dir, recursive=False,
- pattern=".*", useTempInput=False):
- if isinstance(command, str):
- self.command = [command]
- else:
- self.command = list(command)
- if dir is not None:
- dir = str(dir)
- self.dir = dir
- self.recursive = bool(recursive)
- self.pattern = re.compile(pattern)
- self.useTempInput = useTempInput
-
- def getTestsInDirectory(self, testSuite, path_in_suite,
- litConfig, localConfig):
- dir = self.dir
- if dir is None:
- dir = testSuite.getSourcePath(path_in_suite)
-
- for dirname,subdirs,filenames in os.walk(dir):
- if not self.recursive:
- subdirs[:] = []
-
- subdirs[:] = [d for d in subdirs
- if (d != '.svn' and
- d not in localConfig.excludes)]
-
- for filename in filenames:
- if (filename.startswith('.') or
- not self.pattern.match(filename) or
- filename in localConfig.excludes):
- continue
-
- path = os.path.join(dirname,filename)
- suffix = path[len(dir):]
- if suffix.startswith(os.sep):
- suffix = suffix[1:]
- test = Test.Test(testSuite,
- path_in_suite + tuple(suffix.split(os.sep)),
- localConfig)
- # FIXME: Hack?
- test.source_path = path
- yield test
-
- def createTempInput(self, tmp, test):
- abstract
-
- def execute(self, test, litConfig):
- if test.config.unsupported:
- return (Test.UNSUPPORTED, 'Test is unsupported')
-
- cmd = list(self.command)
-
- # If using temp input, create a temporary file and hand it to the
- # subclass.
- if self.useTempInput:
- tmp = tempfile.NamedTemporaryFile(suffix='.cpp')
- self.createTempInput(tmp, test)
- tmp.flush()
- cmd.append(tmp.name)
- elif hasattr(test, 'source_path'):
- cmd.append(test.source_path)
- else:
- cmd.append(test.getSourcePath())
-
- out, err, exitCode = TestRunner.executeCommand(cmd)
-
- diags = out + err
- if not exitCode and not diags.strip():
- return Test.PASS,''
-
- # Try to include some useful information.
- report = """Command: %s\n""" % ' '.join(["'%s'" % a
- for a in cmd])
- if self.useTempInput:
- report += """Temporary File: %s\n""" % tmp.name
- report += "--\n%s--\n""" % open(tmp.name).read()
- report += """Output:\n--\n%s--""" % diags
-
- return Test.FAIL, report
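
The indentation-based parsing the deleted ``getGTestTests`` performed on
``--gtest_list_tests`` output can be exercised on its own; the sample lines
below are canned rather than captured from a real gtest binary::

    SAMPLE_LINES = [
        'FirstTest.',
        '  subTestA',
        '  subTestB',
        'ParameterizedTest/0.',
        '  subTest',
    ]

    def parse_gtest_list(lines):
        nested = []
        for ln in lines:
            if not ln.strip():
                continue
            # Two spaces of indentation per nesting level.
            index = 0
            while ln[index * 2:index * 2 + 2] == '  ':
                index += 1
            while len(nested) > index:
                nested.pop()
            ln = ln[index * 2:]
            if ln.endswith('.'):
                nested.append(ln)         # opens a new test group
            else:
                yield ''.join(nested) + ln

    print(list(parse_gtest_list(SAMPLE_LINES)))
    # ['FirstTest.subTestA', 'FirstTest.subTestB',
    #  'ParameterizedTest/0.subTest']
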
diff --git a/utils/lit/lit/TestRunner.py b/utils/lit/lit/TestRunner.py
index 84176996a8c8d..97524179988d2 100644
--- a/utils/lit/lit/TestRunner.py
+++ b/utils/lit/lit/TestRunner.py
@@ -1,14 +1,12 @@
+from __future__ import absolute_import
import os, signal, subprocess, sys
-import StringIO
-
-import ShUtil
-import Test
-import Util
-
+import re
import platform
import tempfile
-import re
+import lit.ShUtil as ShUtil
+import lit.Test as Test
+import lit.util
class InternalShellError(Exception):
def __init__(self, command, message):
@@ -23,25 +21,6 @@ kUseCloseFDs = not kIsWindows
# Use temporary files to replace /dev/null on Windows.
kAvoidDevNull = kIsWindows
-def executeCommand(command, cwd=None, env=None):
- # Close extra file handles on UNIX (on Windows this cannot be done while
- # also redirecting input).
- close_fds = not kIsWindows
-
- p = subprocess.Popen(command, cwd=cwd,
- stdin=subprocess.PIPE,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
- env=env, close_fds=close_fds)
- out,err = p.communicate()
- exitCode = p.wait()
-
- # Detect Ctrl-C in subprocess.
- if exitCode == -signal.SIGINT:
- raise KeyboardInterrupt
-
- return out, err, exitCode
-
def executeShCmd(cmd, cfg, cwd, results):
if isinstance(cmd, ShUtil.Seq):
if cmd.op == ';':
@@ -66,7 +45,7 @@ def executeShCmd(cmd, cfg, cwd, results):
res = executeShCmd(cmd.rhs, cfg, cwd, results)
return res
- raise ValueError,'Unknown shell command: %r' % cmd.op
+ raise ValueError('Unknown shell command: %r' % cmd.op)
assert isinstance(cmd, ShUtil.Pipeline)
procs = []
@@ -152,8 +131,8 @@ def executeShCmd(cmd, cfg, cwd, results):
# Resolve the executable path ourselves.
args = list(j.args)
- args[0] = Util.which(args[0], cfg.environment['PATH'])
- if not args[0]:
+ executable = lit.util.which(args[0], cfg.environment['PATH'])
+ if not executable:
raise InternalShellError(j, '%r: command not found' % j.args[0])
# Replace uses of /dev/null with temporary files.
@@ -166,6 +145,7 @@ def executeShCmd(cmd, cfg, cwd, results):
args[i] = f.name
procs.append(subprocess.Popen(args, cwd=cwd,
+ executable = executable,
stdin = stdin,
stdout = stdout,
stderr = stderr,
@@ -219,10 +199,22 @@ def executeShCmd(cmd, cfg, cwd, results):
if res == -signal.SIGINT:
raise KeyboardInterrupt
+ # Ensure the resulting output is always of string type.
+ try:
+ out = str(out.decode('ascii'))
+ except:
+ out = str(out)
+ try:
+ err = str(err.decode('ascii'))
+ except:
+ err = str(err)
+
results.append((cmd.commands[i], out, err, res))
if cmd.pipe_err:
# Python treats the exit code as a signed char.
- if res < 0:
+ if exitCode is None:
+ exitCode = res
+ elif res < 0:
exitCode = min(exitCode, res)
else:
exitCode = max(exitCode, res)
@@ -245,9 +237,10 @@ def executeScriptInternal(test, litConfig, tmpBase, commands, cwd):
cmds = []
for ln in commands:
try:
- cmds.append(ShUtil.ShParser(ln, litConfig.isWindows).parse())
+ cmds.append(ShUtil.ShParser(ln, litConfig.isWindows,
+ test.config.pipefail).parse())
except:
- return (Test.FAIL, "shell parser error on: %r" % ln)
+ return lit.Test.Result(Test.FAIL, "shell parser error on: %r" % ln)
cmd = cmds[0]
for c in cmds[1:]:
@@ -256,7 +249,8 @@ def executeScriptInternal(test, litConfig, tmpBase, commands, cwd):
results = []
try:
exitCode = executeShCmd(cmd, test.config, cwd, results)
- except InternalShellError,e:
+ except InternalShellError:
+ e = sys.exc_info()[1]
exitCode = 127
results.append((e.command, '', e.message, exitCode))
@@ -284,6 +278,8 @@ def executeScript(test, litConfig, tmpBase, commands, cwd):
if isWin32CMDEXE:
f.write('\nif %ERRORLEVEL% NEQ 0 EXIT\n'.join(commands))
else:
+ if test.config.pipefail:
+ f.write('set -o pipefail;')
f.write('{ ' + '; } &&\n{ '.join(commands) + '; }')
f.write('\n')
f.close()
@@ -300,24 +296,60 @@ def executeScript(test, litConfig, tmpBase, commands, cwd):
# run on clang with no real loss.
command = litConfig.valgrindArgs + command
- return executeCommand(command, cwd=cwd, env=test.config.environment)
-
-def isExpectedFail(test, xfails):
- # Check if any of the xfails match an available feature or the target.
- for item in xfails:
- # If this is the wildcard, it always fails.
- if item == '*':
- return True
+ return lit.util.executeCommand(command, cwd=cwd,
+ env=test.config.environment)
- # If this is an exact match for one of the features, it fails.
- if item in test.config.available_features:
- return True
+def parseIntegratedTestScriptCommands(source_path):
+ """
+ parseIntegratedTestScriptCommands(source_path) -> commands
- # If this is a part of the target triple, it fails.
- if item in test.suite.config.target_triple:
- return True
+    Parse the commands in an integrated test script file, yielding tuples of
+    (line_number, command_type, line).
+ """
- return False
+    # This code is carefully written to be compatible with both Python 2.5+
+    # and Python 3, without requiring input files to always have a valid
+    # encoding. The trick is to open the file in binary mode and use the
+    # regular expression library to find the commands, so that it scans
+    # strings in Python 2 and bytes in Python 3.
+ #
+ # Once we find a match, we do require each script line to be decodable to
+ # ascii, so we convert the outputs to ascii before returning. This way the
+    # remaining code can work with "strings" regardless of the executing
+    # Python version.
+
+ def to_bytes(str):
+ # Encode to Latin1 to get binary data.
+ return str.encode('ISO-8859-1')
+ keywords = ('RUN:', 'XFAIL:', 'REQUIRES:', 'END.')
+ keywords_re = re.compile(
+ to_bytes("(%s)(.*)\n" % ("|".join(k for k in keywords),)))
+
+ f = open(source_path, 'rb')
+ try:
+ # Read the entire file contents.
+ data = f.read()
+
+ # Iterate over the matches.
+ line_number = 1
+ last_match_position = 0
+ for match in keywords_re.finditer(data):
+ # Compute the updated line number by counting the intervening
+ # newlines.
+ match_position = match.start()
+ line_number += data.count(to_bytes('\n'), last_match_position,
+ match_position)
+ last_match_position = match_position
+
+ # Convert the keyword and line to ascii strings and yield the
+ # command. Note that we take care to return regular strings in
+ # Python 2, to avoid other code having to differentiate between the
+ # str and unicode types.
+ keyword,ln = match.groups()
+ yield (line_number, str(keyword[:-1].decode('ascii')),
+ str(ln.decode('ascii')))
+ finally:
+ f.close()
def parseIntegratedTestScript(test, normalize_slashes=False,
extra_substitutions=[]):
@@ -336,8 +368,6 @@ def parseIntegratedTestScript(test, normalize_slashes=False,
execdir,execbase = os.path.split(execpath)
tmpDir = os.path.join(execdir, 'Output')
tmpBase = os.path.join(tmpDir, execbase)
- if test.index is not None:
- tmpBase += '_%d' % test.index
# Normalize slashes, if requested.
if normalize_slashes:
@@ -358,18 +388,21 @@ def parseIntegratedTestScript(test, normalize_slashes=False,
('%T', tmpDir),
('#_MARKER_#', '%')])
+ # "%/[STpst]" should be normalized.
+ substitutions.extend([
+ ('%/s', sourcepath.replace('\\', '/')),
+ ('%/S', sourcedir.replace('\\', '/')),
+ ('%/p', sourcedir.replace('\\', '/')),
+ ('%/t', tmpBase.replace('\\', '/') + '.tmp'),
+ ('%/T', tmpDir.replace('\\', '/')),
+ ])
+
# Collect the test lines from the script.
script = []
- xfails = []
requires = []
- line_number = 0
- for ln in open(sourcepath):
- line_number += 1
- if 'RUN:' in ln:
- # Isolate the command to run.
- index = ln.index('RUN:')
- ln = ln[index+4:]
-
+ for line_number, command_type, ln in \
+ parseIntegratedTestScriptCommands(sourcepath):
+ if command_type == 'RUN':
# Trim trailing whitespace.
ln = ln.rstrip()
@@ -387,16 +420,17 @@ def parseIntegratedTestScript(test, normalize_slashes=False,
script[-1] = script[-1][:-1] + ln
else:
script.append(ln)
- elif 'XFAIL:' in ln:
- items = ln[ln.index('XFAIL:') + 6:].split(',')
- xfails.extend([s.strip() for s in items])
- elif 'REQUIRES:' in ln:
- items = ln[ln.index('REQUIRES:') + 9:].split(',')
- requires.extend([s.strip() for s in items])
- elif 'END.' in ln:
- # Check for END. lines.
- if ln[ln.index('END.'):].strip() == 'END.':
+ elif command_type == 'XFAIL':
+ test.xfails.extend([s.strip() for s in ln.split(',')])
+ elif command_type == 'REQUIRES':
+ requires.extend([s.strip() for s in ln.split(',')])
+ elif command_type == 'END':
+ # END commands are only honored if the rest of the line is empty.
+ if not ln.strip():
break
+ else:
+ raise ValueError("unknown script command type: %r" % (
+ command_type,))
# Apply substitutions to the script. Allow full regular
# expression syntax. Replace each matching occurrence of regular
@@ -410,46 +444,27 @@ def parseIntegratedTestScript(test, normalize_slashes=False,
# Strip the trailing newline and any extra whitespace.
return ln.strip()
- script = map(processLine, script)
+ script = [processLine(ln)
+ for ln in script]
# Verify the script contains a run line.
if not script:
- return (Test.UNRESOLVED, "Test has no run line!")
+ return lit.Test.Result(Test.UNRESOLVED, "Test has no run line!")
# Check for unterminated run lines.
if script[-1][-1] == '\\':
- return (Test.UNRESOLVED, "Test has unterminated run lines (with '\\')")
+ return lit.Test.Result(Test.UNRESOLVED,
+ "Test has unterminated run lines (with '\\')")
# Check that we have the required features:
missing_required_features = [f for f in requires
if f not in test.config.available_features]
if missing_required_features:
msg = ', '.join(missing_required_features)
- return (Test.UNSUPPORTED,
- "Test requires the following features: %s" % msg)
-
- isXFail = isExpectedFail(test, xfails)
- return script,isXFail,tmpBase,execdir
-
-def formatTestOutput(status, out, err, exitCode, script):
- output = StringIO.StringIO()
- print >>output, "Script:"
- print >>output, "--"
- print >>output, '\n'.join(script)
- print >>output, "--"
- print >>output, "Exit Code: %r" % exitCode,
- print >>output
- if out:
- print >>output, "Command Output (stdout):"
- print >>output, "--"
- output.write(out)
- print >>output, "--"
- if err:
- print >>output, "Command Output (stderr):"
- print >>output, "--"
- output.write(err)
- print >>output, "--"
- return (status, output.getvalue())
+ return lit.Test.Result(Test.UNSUPPORTED,
+ "Test requires the following features: %s" % msg)
+
+ return script,tmpBase,execdir
def executeShTest(test, litConfig, useExternalSh,
extra_substitutions=[]):
@@ -457,39 +472,37 @@ def executeShTest(test, litConfig, useExternalSh,
return (Test.UNSUPPORTED, 'Test is unsupported')
res = parseIntegratedTestScript(test, useExternalSh, extra_substitutions)
- if len(res) == 2:
+ if isinstance(res, lit.Test.Result):
return res
-
- script, isXFail, tmpBase, execdir = res
-
if litConfig.noExecute:
- return (Test.PASS, '')
+ return lit.Test.Result(Test.PASS)
+
+ script, tmpBase, execdir = res
# Create the output directory if it does not already exist.
- Util.mkdir_p(os.path.dirname(tmpBase))
+ lit.util.mkdir_p(os.path.dirname(tmpBase))
if useExternalSh:
res = executeScript(test, litConfig, tmpBase, script, execdir)
else:
res = executeScriptInternal(test, litConfig, tmpBase, script, execdir)
- if len(res) == 2:
+ if isinstance(res, lit.Test.Result):
return res
out,err,exitCode = res
- if isXFail:
- ok = exitCode != 0
- if ok:
- status = Test.XFAIL
- else:
- status = Test.XPASS
+ if exitCode == 0:
+ status = Test.PASS
else:
- ok = exitCode == 0
- if ok:
- status = Test.PASS
- else:
- status = Test.FAIL
+ status = Test.FAIL
+
+ # Form the output log.
+ output = """Script:\n--\n%s\n--\nExit Code: %d\n\n""" % (
+ '\n'.join(script), exitCode)
- if ok:
- return (status,'')
+ # Append the outputs, if present.
+ if out:
+ output += """Command Output (stdout):\n--\n%s\n--\n""" % (out,)
+ if err:
+ output += """Command Output (stderr):\n--\n%s\n--\n""" % (err,)
- return formatTestOutput(status, out, err, exitCode, script)
+ return lit.Test.Result(status, output)
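
The binary-mode scanning trick described in the comments above can be
illustrated standalone. Below is a minimal sketch of the same technique
(Python 3 only, with re.escape added so the '.' in 'END.' is matched
literally; the scan_keywords name is hypothetical, not part of this patch):

    import re

    def scan_keywords(path, keywords=('RUN:', 'XFAIL:', 'REQUIRES:', 'END.')):
        # Compile the pattern as bytes so files with arbitrary encodings
        # do not break the scan.
        pattern = re.compile(
            ('(%s)(.*)\n' % '|'.join(re.escape(k) for k in keywords))
            .encode('ISO-8859-1'))
        with open(path, 'rb') as f:
            data = f.read()
        line_number = 1
        last = 0
        for match in pattern.finditer(data):
            # Track line numbers by counting the intervening newlines.
            line_number += data.count(b'\n', last, match.start())
            last = match.start()
            keyword, rest = match.groups()
            # Only the matched lines themselves must decode as ascii.
            yield (line_number, keyword[:-1].decode('ascii'),
                   rest.decode('ascii'))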
diff --git a/utils/lit/lit/TestingConfig.py b/utils/lit/lit/TestingConfig.py
index a1f79a3bfc4e2..4a34b77e175b9 100644
--- a/utils/lit/lit/TestingConfig.py
+++ b/utils/lit/lit/TestingConfig.py
@@ -1,85 +1,116 @@
import os
import sys
+PY2 = sys.version_info[0] < 3
+
class TestingConfig:
""""
TestingConfig - Information on the tests inside a suite.
"""
@staticmethod
- def frompath(path, parent, litConfig, mustExist, config = None):
- if config is None:
- # Set the environment based on the command line arguments.
- environment = {
- 'LIBRARY_PATH' : os.environ.get('LIBRARY_PATH',''),
- 'LD_LIBRARY_PATH' : os.environ.get('LD_LIBRARY_PATH',''),
- 'PATH' : os.pathsep.join(litConfig.path +
- [os.environ.get('PATH','')]),
- 'SYSTEMROOT' : os.environ.get('SYSTEMROOT',''),
- 'TERM' : os.environ.get('TERM',''),
- 'LLVM_DISABLE_CRASH_REPORT' : '1',
- }
-
- if sys.platform == 'win32':
- environment.update({
- 'INCLUDE' : os.environ.get('INCLUDE',''),
- 'PATHEXT' : os.environ.get('PATHEXT',''),
- 'PYTHONUNBUFFERED' : '1',
- 'TEMP' : os.environ.get('TEMP',''),
- 'TMP' : os.environ.get('TMP',''),
- })
-
- # Set the default available features based on the LitConfig.
- available_features = []
- if litConfig.useValgrind:
- available_features.append('valgrind')
- if litConfig.valgrindLeakCheck:
- available_features.append('vg_leak')
-
- config = TestingConfig(parent,
- name = '<unnamed>',
- suffixes = set(),
- test_format = None,
- environment = environment,
- substitutions = [],
- unsupported = False,
- on_clone = None,
- test_exec_root = None,
- test_source_root = None,
- excludes = [],
- available_features = available_features)
-
- if os.path.exists(path):
- # FIXME: Improve detection and error reporting of errors in the
- # config file.
- f = open(path)
- cfg_globals = dict(globals())
- cfg_globals['config'] = config
- cfg_globals['lit'] = litConfig
- cfg_globals['__file__'] = path
- try:
- exec f in cfg_globals
- if litConfig.debug:
- litConfig.note('... loaded config %r' % path)
- except SystemExit,status:
- # We allow normal system exit inside a config file to just
- # return control without error.
- if status.args:
- raise
- f.close()
- else:
- if mustExist:
- litConfig.fatal('unable to load config from %r ' % path)
- elif litConfig.debug:
- litConfig.note('... config not found - %r' %path)
+ def fromdefaults(litConfig):
+ """
+ fromdefaults(litConfig) -> TestingConfig
+
+ Create a TestingConfig object with default values.
+ """
+ # Set the environment based on the command line arguments.
+ environment = {
+ 'LIBRARY_PATH' : os.environ.get('LIBRARY_PATH',''),
+ 'LD_LIBRARY_PATH' : os.environ.get('LD_LIBRARY_PATH',''),
+ 'PATH' : os.pathsep.join(litConfig.path +
+ [os.environ.get('PATH','')]),
+ 'SYSTEMROOT' : os.environ.get('SYSTEMROOT',''),
+ 'TERM' : os.environ.get('TERM',''),
+ 'LLVM_DISABLE_CRASH_REPORT' : '1',
+ }
+
+ if sys.platform == 'win32':
+ environment.update({
+ 'INCLUDE' : os.environ.get('INCLUDE',''),
+ 'PATHEXT' : os.environ.get('PATHEXT',''),
+ 'PYTHONUNBUFFERED' : '1',
+ 'TEMP' : os.environ.get('TEMP',''),
+ 'TMP' : os.environ.get('TMP',''),
+ })
+
+    # Optionally preserve TEMP, TMP, and TMPDIR. This is intended to let
+    # automated builders check how many temporary files are generated
+    # (and not cleaned up).
+    if 'LIT_PRESERVES_TMP' in os.environ:
+ environment.update({
+ 'TEMP' : os.environ.get('TEMP',''),
+ 'TMP' : os.environ.get('TMP',''),
+ 'TMPDIR' : os.environ.get('TMPDIR',''),
+ })
+
+ # Set the default available features based on the LitConfig.
+ available_features = []
+ if litConfig.useValgrind:
+ available_features.append('valgrind')
+ if litConfig.valgrindLeakCheck:
+ available_features.append('vg_leak')
- config.finish(litConfig)
- return config
+ return TestingConfig(None,
+ name = '<unnamed>',
+ suffixes = set(),
+ test_format = None,
+ environment = environment,
+ substitutions = [],
+ unsupported = False,
+ test_exec_root = None,
+ test_source_root = None,
+ excludes = [],
+ available_features = available_features,
+ pipefail = True)
+
+ def load_from_path(self, path, litConfig):
+ """
+ load_from_path(path, litConfig)
+
+ Load the configuration module at the provided path into the given config
+ object.
+ """
+
+ # Load the config script data.
+ f = open(path)
+ try:
+ data = f.read()
+ except:
+ litConfig.fatal('unable to load config file: %r' % (path,))
+ f.close()
+
+ # Execute the config script to initialize the object.
+ cfg_globals = dict(globals())
+ cfg_globals['config'] = self
+ cfg_globals['lit_config'] = litConfig
+ cfg_globals['__file__'] = path
+ try:
+ if PY2:
+ exec("exec data in cfg_globals")
+ else:
+ exec(data, cfg_globals)
+ if litConfig.debug:
+ litConfig.note('... loaded config %r' % path)
+ except SystemExit:
+ e = sys.exc_info()[1]
+ # We allow normal system exit inside a config file to just
+ # return control without error.
+ if e.args:
+ raise
+ except:
+ import traceback
+ litConfig.fatal(
+ 'unable to parse config file %r, traceback: %s' % (
+ path, traceback.format_exc()))
+
+ self.finish(litConfig)
def __init__(self, parent, name, suffixes, test_format,
- environment, substitutions, unsupported, on_clone,
+ environment, substitutions, unsupported,
test_exec_root, test_source_root, excludes,
- available_features):
+ available_features, pipefail):
self.parent = parent
self.name = str(name)
self.suffixes = set(suffixes)
@@ -87,24 +118,11 @@ class TestingConfig:
self.environment = dict(environment)
self.substitutions = list(substitutions)
self.unsupported = unsupported
- self.on_clone = on_clone
self.test_exec_root = test_exec_root
self.test_source_root = test_source_root
self.excludes = set(excludes)
self.available_features = set(available_features)
-
- def clone(self, path):
- # FIXME: Chain implementations?
- #
- # FIXME: Allow extra parameters?
- cfg = TestingConfig(self, self.name, self.suffixes, self.test_format,
- self.environment, self.substitutions,
- self.unsupported, self.on_clone,
- self.test_exec_root, self.test_source_root,
- self.excludes, self.available_features)
- if cfg.on_clone:
- cfg.on_clone(self, cfg, path)
- return cfg
+ self.pipefail = pipefail
def finish(self, litConfig):
"""finish() - Finish this config object, after loading is complete."""
diff --git a/utils/lit/lit/__init__.py b/utils/lit/lit/__init__.py
index 3e61bbd770c87..3967fdd020a0c 100644
--- a/utils/lit/lit/__init__.py
+++ b/utils/lit/lit/__init__.py
@@ -1,10 +1,11 @@
"""'lit' Testing Tool"""
-from main import main
+from __future__ import absolute_import
+from .main import main
__author__ = 'Daniel Dunbar'
__email__ = 'daniel@zuster.org'
__versioninfo__ = (0, 3, 0)
-__version__ = '.'.join(map(str, __versioninfo__)) + 'dev'
+__version__ = '.'.join(str(v) for v in __versioninfo__) + 'dev'
__all__ = []
diff --git a/utils/lit/lit/discovery.py b/utils/lit/lit/discovery.py
index 64a9510f955b3..c3c0f283b5582 100644
--- a/utils/lit/lit/discovery.py
+++ b/utils/lit/lit/discovery.py
@@ -2,9 +2,11 @@
Test discovery functions.
"""
+import copy
import os
import sys
+import lit.run
from lit.TestingConfig import TestingConfig
from lit import LitConfig, Test
@@ -38,11 +40,12 @@ def getTestSuite(item, litConfig, cache):
ts, relative = search(parent)
return (ts, relative + (base,))
- # We found a config file, load it.
+    # We found a test suite; create a new config for it and load it.
if litConfig.debug:
litConfig.note('loading suite config %r' % cfgpath)
- cfg = TestingConfig.frompath(cfgpath, None, litConfig, mustExist = True)
+ cfg = TestingConfig.fromdefaults(litConfig)
+ cfg.load_from_path(cfgpath, litConfig)
source_root = os.path.realpath(cfg.test_source_root or path)
exec_root = os.path.realpath(cfg.test_exec_root or path)
return Test.TestSuite(cfg.name, source_root, exec_root, cfg), ()
@@ -78,14 +81,21 @@ def getLocalConfig(ts, path_in_suite, litConfig, cache):
else:
parent = search(path_in_suite[:-1])
- # Load the local configuration.
+ # Check if there is a local configuration file.
source_path = ts.getSourcePath(path_in_suite)
cfgpath = os.path.join(source_path, litConfig.local_config_name)
+
+ # If not, just reuse the parent config.
+ if not os.path.exists(cfgpath):
+ return parent
+
+ # Otherwise, copy the current config and load the local configuration
+ # file into it.
+ config = copy.copy(parent)
if litConfig.debug:
litConfig.note('loading local config %r' % cfgpath)
- return TestingConfig.frompath(cfgpath, parent, litConfig,
- mustExist = False,
- config = parent.clone(cfgpath))
+ config.load_from_path(cfgpath, litConfig)
+ return config
def search(path_in_suite):
key = (ts, path_in_suite)
@@ -215,7 +225,7 @@ def find_tests_for_inputs(lit_config, inputs):
# If there were any errors during test discovery, exit now.
if lit_config.numErrors:
- print >>sys.stderr, '%d errors, exiting.' % lit_config.numErrors
+ sys.stderr.write('%d errors, exiting.\n' % lit_config.numErrors)
sys.exit(2)
return tests
@@ -233,13 +243,13 @@ def load_test_suite(inputs):
valgrindLeakCheck = False,
valgrindArgs = [],
noExecute = False,
- ignoreStdErr = False,
debug = False,
isWindows = (platform.system()=='Windows'),
params = {})
- tests = find_tests_for_inputs(litConfig, inputs)
+ # Perform test discovery.
+ run = lit.run.Run(litConfig, find_tests_for_inputs(litConfig, inputs))
# Return a unittest test suite which just runs the tests in order.
- return unittest.TestSuite([LitTestCase(test, litConfig) for test in tests])
-
+ return unittest.TestSuite([LitTestCase(test, run)
+ for test in run.tests])
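
Local configs now inherit by shallow-copying the parent config object
instead of going through the removed clone()/on_clone machinery, so
arbitrary attributes set in a parent lit.cfg carry over automatically
(this is what the subdir/lit.local.cfg test input below asserts). The
effect, in an illustrative two-level sketch:

    import copy

    class Config(object):
        def __init__(self):
            self.suffixes = ['.txt']
            self.an_extra_variable = False

    parent = Config()
    child = copy.copy(parent)    # start from everything the parent defines...
    child.suffixes = ['.py']     # ...then override per-directory values
    assert hasattr(child, 'an_extra_variable')  # extra attributes carry over
    assert parent.suffixes == ['.txt']          # the parent is left untouched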
diff --git a/utils/lit/lit/formats/__init__.py b/utils/lit/lit/formats/__init__.py
new file mode 100644
index 0000000000000..68627084176a5
--- /dev/null
+++ b/utils/lit/lit/formats/__init__.py
@@ -0,0 +1,4 @@
+from __future__ import absolute_import
+from lit.formats.base import TestFormat, FileBasedTest, OneCommandPerFileTest
+from lit.formats.googletest import GoogleTest
+from lit.formats.shtest import ShTest
diff --git a/utils/lit/lit/formats/base.py b/utils/lit/lit/formats/base.py
new file mode 100644
index 0000000000000..9e5420ba7672c
--- /dev/null
+++ b/utils/lit/lit/formats/base.py
@@ -0,0 +1,118 @@
+from __future__ import absolute_import
+import os
+import sys
+
+import lit.Test
+import lit.util
+
+class TestFormat(object):
+ pass
+
+###
+
+class FileBasedTest(TestFormat):
+ def getTestsInDirectory(self, testSuite, path_in_suite,
+ litConfig, localConfig):
+ source_path = testSuite.getSourcePath(path_in_suite)
+ for filename in os.listdir(source_path):
+ # Ignore dot files and excluded tests.
+ if (filename.startswith('.') or
+ filename in localConfig.excludes):
+ continue
+
+ filepath = os.path.join(source_path, filename)
+ if not os.path.isdir(filepath):
+ base,ext = os.path.splitext(filename)
+ if ext in localConfig.suffixes:
+ yield lit.Test.Test(testSuite, path_in_suite + (filename,),
+ localConfig)
+
+###
+
+import re
+import tempfile
+
+class OneCommandPerFileTest(TestFormat):
+ # FIXME: Refactor into generic test for running some command on a directory
+ # of inputs.
+
+ def __init__(self, command, dir, recursive=False,
+ pattern=".*", useTempInput=False):
+ if isinstance(command, str):
+ self.command = [command]
+ else:
+ self.command = list(command)
+ if dir is not None:
+ dir = str(dir)
+ self.dir = dir
+ self.recursive = bool(recursive)
+ self.pattern = re.compile(pattern)
+ self.useTempInput = useTempInput
+
+ def getTestsInDirectory(self, testSuite, path_in_suite,
+ litConfig, localConfig):
+ dir = self.dir
+ if dir is None:
+ dir = testSuite.getSourcePath(path_in_suite)
+
+ for dirname,subdirs,filenames in os.walk(dir):
+ if not self.recursive:
+ subdirs[:] = []
+
+ subdirs[:] = [d for d in subdirs
+ if (d != '.svn' and
+ d not in localConfig.excludes)]
+
+ for filename in filenames:
+ if (filename.startswith('.') or
+ not self.pattern.match(filename) or
+ filename in localConfig.excludes):
+ continue
+
+ path = os.path.join(dirname,filename)
+ suffix = path[len(dir):]
+ if suffix.startswith(os.sep):
+ suffix = suffix[1:]
+ test = lit.Test.Test(
+ testSuite, path_in_suite + tuple(suffix.split(os.sep)),
+ localConfig)
+ # FIXME: Hack?
+ test.source_path = path
+ yield test
+
+ def createTempInput(self, tmp, test):
+ abstract
+
+ def execute(self, test, litConfig):
+ if test.config.unsupported:
+ return (lit.Test.UNSUPPORTED, 'Test is unsupported')
+
+ cmd = list(self.command)
+
+ # If using temp input, create a temporary file and hand it to the
+ # subclass.
+ if self.useTempInput:
+ tmp = tempfile.NamedTemporaryFile(suffix='.cpp')
+ self.createTempInput(tmp, test)
+ tmp.flush()
+ cmd.append(tmp.name)
+ elif hasattr(test, 'source_path'):
+ cmd.append(test.source_path)
+ else:
+ cmd.append(test.getSourcePath())
+
+ out, err, exitCode = lit.util.executeCommand(cmd)
+
+ diags = out + err
+ if not exitCode and not diags.strip():
+ return lit.Test.PASS,''
+
+ # Try to include some useful information.
+ report = """Command: %s\n""" % ' '.join(["'%s'" % a
+ for a in cmd])
+ if self.useTempInput:
+ report += """Temporary File: %s\n""" % tmp.name
+ report += "--\n%s--\n""" % open(tmp.name).read()
+ report += """Output:\n--\n%s--""" % diags
+
+ return lit.Test.FAIL, report
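
A suite selects one of these formats from its lit.cfg, which lit executes
with a config object already in scope. An illustrative lit.cfg sketch for
the OneCommandPerFileTest format above (the suite name, suffix, and command
are made up for the example):

    import lit.formats

    config.name = 'one-command-suite'
    config.suffixes = ['.ll']
    # Run a single command on every file matching the pattern; passing
    # dir=None means the suite's own source directory is walked.
    config.test_format = lit.formats.OneCommandPerFileTest(
        command=['llvm-as'], dir=None, recursive=True, pattern=r'.*\.ll$')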
diff --git a/utils/lit/lit/formats/googletest.py b/utils/lit/lit/formats/googletest.py
new file mode 100644
index 0000000000000..b77e184d2f6dc
--- /dev/null
+++ b/utils/lit/lit/formats/googletest.py
@@ -0,0 +1,114 @@
+from __future__ import absolute_import
+import os
+import sys
+
+import lit.Test
+import lit.TestRunner
+import lit.util
+from .base import TestFormat
+
+kIsWindows = sys.platform in ['win32', 'cygwin']
+
+class GoogleTest(TestFormat):
+ def __init__(self, test_sub_dir, test_suffix):
+ self.test_sub_dir = os.path.normcase(str(test_sub_dir)).split(';')
+ self.test_suffix = str(test_suffix)
+
+ # On Windows, assume tests will also end in '.exe'.
+ if kIsWindows:
+ self.test_suffix += '.exe'
+
+ def getGTestTests(self, path, litConfig, localConfig):
+ """getGTestTests(path) - [name]
+
+ Return the tests available in gtest executable.
+
+ Args:
+ path: String path to a gtest executable
+ litConfig: LitConfig instance
+ localConfig: TestingConfig instance"""
+
+ try:
+ lines = lit.util.capture([path, '--gtest_list_tests'],
+ env=localConfig.environment)
+ lines = lines.decode('ascii')
+ if kIsWindows:
+ lines = lines.replace('\r', '')
+ lines = lines.split('\n')
+ except:
+ litConfig.error("unable to discover google-tests in %r" % path)
+ raise StopIteration
+
+ nested_tests = []
+ for ln in lines:
+ if not ln.strip():
+ continue
+
+ prefix = ''
+ index = 0
+ while ln[index*2:index*2+2] == ' ':
+ index += 1
+ while len(nested_tests) > index:
+ nested_tests.pop()
+
+ ln = ln[index*2:]
+ if ln.endswith('.'):
+ nested_tests.append(ln)
+ else:
+ yield ''.join(nested_tests) + ln
+
+ # Note: path_in_suite should not include the executable name.
+ def getTestsInExecutable(self, testSuite, path_in_suite, execpath,
+ litConfig, localConfig):
+ if not execpath.endswith(self.test_suffix):
+ return
+ (dirname, basename) = os.path.split(execpath)
+ # Discover the tests in this executable.
+ for testname in self.getGTestTests(execpath, litConfig, localConfig):
+ testPath = path_in_suite + (basename, testname)
+ yield lit.Test.Test(testSuite, testPath, localConfig)
+
+ def getTestsInDirectory(self, testSuite, path_in_suite,
+ litConfig, localConfig):
+ source_path = testSuite.getSourcePath(path_in_suite)
+ for filename in os.listdir(source_path):
+ filepath = os.path.join(source_path, filename)
+ if os.path.isdir(filepath):
+ # Iterate over executables in a directory.
+ if not os.path.normcase(filename) in self.test_sub_dir:
+ continue
+ dirpath_in_suite = path_in_suite + (filename, )
+ for subfilename in os.listdir(filepath):
+ execpath = os.path.join(filepath, subfilename)
+ for test in self.getTestsInExecutable(
+ testSuite, dirpath_in_suite, execpath,
+ litConfig, localConfig):
+ yield test
+ elif ('.' in self.test_sub_dir):
+ for test in self.getTestsInExecutable(
+ testSuite, path_in_suite, filepath,
+ litConfig, localConfig):
+ yield test
+
+ def execute(self, test, litConfig):
+ testPath,testName = os.path.split(test.getSourcePath())
+ while not os.path.exists(testPath):
+            # Handle GTest parametrized and typed tests, whose names include
+            # some '/'s.
+ testPath, namePrefix = os.path.split(testPath)
+ testName = os.path.join(namePrefix, testName)
+
+ cmd = [testPath, '--gtest_filter=' + testName]
+ if litConfig.useValgrind:
+ cmd = litConfig.valgrindArgs + cmd
+
+ if litConfig.noExecute:
+ return lit.Test.PASS, ''
+
+ out, err, exitCode = lit.util.executeCommand(
+ cmd, env=test.config.environment)
+
+ if not exitCode:
+ return lit.Test.PASS,''
+
+ return lit.Test.FAIL, out + err
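
The indentation walk in getGTestTests rebuilds fully qualified test names
from --gtest_list_tests output, where each nesting level is two spaces and
group lines end in '.'. A standalone sketch of the same walk, checked
against the dummy output used by the googletest-format test input below
(parse_gtest_list is an illustrative name):

    def parse_gtest_list(lines):
        nested = []
        for ln in lines:
            if not ln.strip():
                continue
            # Each two leading spaces is one level of nesting.
            index = 0
            while ln[index*2:index*2+2] == '  ':
                index += 1
            while len(nested) > index:
                nested.pop()
            ln = ln[index*2:]
            if ln.endswith('.'):
                nested.append(ln)    # a test group; prefix for what follows
            else:
                yield ''.join(nested) + ln

    lines = ['FirstTest.', '  subTestA', '  subTestB']
    assert list(parse_gtest_list(lines)) == ['FirstTest.subTestA',
                                             'FirstTest.subTestB']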
diff --git a/utils/lit/lit/formats/shtest.py b/utils/lit/lit/formats/shtest.py
new file mode 100644
index 0000000000000..30a6a3310b011
--- /dev/null
+++ b/utils/lit/lit/formats/shtest.py
@@ -0,0 +1,12 @@
+from __future__ import absolute_import
+
+import lit.TestRunner
+from .base import FileBasedTest
+
+class ShTest(FileBasedTest):
+ def __init__(self, execute_external = False):
+ self.execute_external = execute_external
+
+ def execute(self, test, litConfig):
+ return lit.TestRunner.executeShTest(test, litConfig,
+ self.execute_external)
diff --git a/utils/lit/lit/main.py b/utils/lit/lit/main.py
index de97a8e1aaf68..6f672a01eb3df 100755
--- a/utils/lit/lit/main.py
+++ b/utils/lit/lit/main.py
@@ -6,39 +6,24 @@ lit - LLVM Integrated Tester.
See lit.pod for more information.
"""
-import math, os, platform, random, re, sys, time, threading, traceback
-
-import ProgressBar
-import TestRunner
-import Util
-
-import LitConfig
-import Test
-
+from __future__ import absolute_import
+import math, os, platform, random, re, sys, time
+
+import lit.ProgressBar
+import lit.LitConfig
+import lit.Test
+import lit.run
+import lit.util
import lit.discovery
-class TestingProgressDisplay:
+class TestingProgressDisplay(object):
def __init__(self, opts, numTests, progressBar=None):
self.opts = opts
self.numTests = numTests
self.current = None
- self.lock = threading.Lock()
self.progressBar = progressBar
self.completed = 0
- def update(self, test):
- # Avoid locking overhead in quiet mode
- if self.opts.quiet and not test.result.isFailure:
- self.completed += 1
- return
-
- # Output lock.
- self.lock.acquire()
- try:
- self.handleUpdate(test)
- finally:
- self.lock.release()
-
def finish(self):
if self.progressBar:
self.progressBar.clear()
@@ -47,114 +32,86 @@ class TestingProgressDisplay:
elif self.opts.succinct:
sys.stdout.write('\n')
- def handleUpdate(self, test):
+ def update(self, test):
self.completed += 1
if self.progressBar:
self.progressBar.update(float(self.completed)/self.numTests,
test.getFullName())
- if self.opts.succinct and not test.result.isFailure:
+ if not test.result.code.isFailure and \
+ (self.opts.quiet or self.opts.succinct):
return
if self.progressBar:
self.progressBar.clear()
- print '%s: %s (%d of %d)' % (test.result.name, test.getFullName(),
- self.completed, self.numTests)
-
- if test.result.isFailure and self.opts.showOutput:
- print "%s TEST '%s' FAILED %s" % ('*'*20, test.getFullName(),
- '*'*20)
- print test.output
- print "*" * 20
-
+ # Show the test result line.
+ test_name = test.getFullName()
+ print('%s: %s (%d of %d)' % (test.result.code.name, test_name,
+ self.completed, self.numTests))
+
+ # Show the test failure output, if requested.
+ if test.result.code.isFailure and self.opts.showOutput:
+ print("%s TEST '%s' FAILED %s" % ('*'*20, test.getFullName(),
+ '*'*20))
+ print(test.result.output)
+ print("*" * 20)
+
+ # Report test metrics, if present.
+ if test.result.metrics:
+ print("%s TEST '%s' RESULTS %s" % ('*'*10, test.getFullName(),
+ '*'*10))
+ items = sorted(test.result.metrics.items())
+ for metric_name, value in items:
+ print('%s: %s ' % (metric_name, value.format()))
+ print("*" * 10)
+
+ # Ensure the output is flushed.
sys.stdout.flush()
-class TestProvider:
- def __init__(self, tests, maxTime):
- self.maxTime = maxTime
- self.iter = iter(tests)
- self.lock = threading.Lock()
- self.startTime = time.time()
-
- def get(self):
- # Check if we have run out of time.
- if self.maxTime is not None:
- if time.time() - self.startTime > self.maxTime:
- return None
-
- # Otherwise take the next test.
- self.lock.acquire()
- try:
- item = self.iter.next()
- except StopIteration:
- item = None
- self.lock.release()
- return item
-
-class Tester(threading.Thread):
- def __init__(self, litConfig, provider, display):
- threading.Thread.__init__(self)
- self.litConfig = litConfig
- self.provider = provider
- self.display = display
-
- def run(self):
- while 1:
- item = self.provider.get()
- if item is None:
- break
- self.runTest(item)
-
- def runTest(self, test):
- result = None
- startTime = time.time()
- try:
- result, output = test.config.test_format.execute(test,
- self.litConfig)
- except KeyboardInterrupt:
- # This is a sad hack. Unfortunately subprocess goes
- # bonkers with ctrl-c and we start forking merrily.
- print '\nCtrl-C detected, goodbye.'
- os.kill(0,9)
- except:
- if self.litConfig.debug:
- raise
- result = Test.UNRESOLVED
- output = 'Exception during script execution:\n'
- output += traceback.format_exc()
- output += '\n'
- elapsed = time.time() - startTime
-
- test.setResult(result, output, elapsed)
- self.display.update(test)
-
-def runTests(numThreads, litConfig, provider, display):
- # If only using one testing thread, don't use threads at all; this lets us
- # profile, among other things.
- if numThreads == 1:
- t = Tester(litConfig, provider, display)
- t.run()
- return
-
- # Otherwise spin up the testing threads and wait for them to finish.
- testers = [Tester(litConfig, provider, display)
- for i in range(numThreads)]
- for t in testers:
- t.start()
+def write_test_results(run, lit_config, testing_time, output_path):
try:
- for t in testers:
- t.join()
- except KeyboardInterrupt:
- sys.exit(2)
+ import json
+ except ImportError:
+ lit_config.fatal('test output unsupported with Python 2.5')
+
+ # Construct the data we will write.
+ data = {}
+ # Encode the current lit version as a schema version.
+ data['__version__'] = lit.__versioninfo__
+ data['elapsed'] = testing_time
+ # FIXME: Record some information on the lit configuration used?
+ # FIXME: Record information from the individual test suites?
+
+ # Encode the tests.
+ data['tests'] = tests_data = []
+ for test in run.tests:
+ test_data = {
+ 'name' : test.getFullName(),
+ 'code' : test.result.code.name,
+ 'output' : test.result.output,
+ 'elapsed' : test.result.elapsed }
+
+ # Add test metrics, if present.
+ if test.result.metrics:
+ test_data['metrics'] = metrics_data = {}
+ for key, value in test.result.metrics.items():
+ metrics_data[key] = value.todata()
+
+ tests_data.append(test_data)
+
+ # Write the output.
+ f = open(output_path, 'w')
+ try:
+ json.dump(data, f, indent=2, sort_keys=True)
+ f.write('\n')
+ finally:
+ f.close()
def main(builtinParameters = {}):
- # Bump the GIL check interval, its more important to get any one thread to a
- # blocking operation (hopefully exec) than to try and unblock other threads.
- #
- # FIXME: This is a hack.
- import sys
- sys.setcheckinterval(1000)
+ # Use processes by default on Unix platforms.
+ isWindows = platform.system() == 'Windows'
+ useProcessesIsDefault = not isWindows
global options
from optparse import OptionParser, OptionGroup
@@ -183,6 +140,9 @@ def main(builtinParameters = {}):
group.add_option("-v", "--verbose", dest="showOutput",
help="Show all test output",
action="store_true", default=False)
+ group.add_option("-o", "--output", dest="output_path",
+ help="Write test results to the provided path",
+ action="store", type=str, metavar="PATH")
group.add_option("", "--no-progress-bar", dest="useProgressBar",
help="Do not use curses based progress bar",
action="store_false", default=True)
@@ -232,9 +192,15 @@ def main(builtinParameters = {}):
group.add_option("", "--show-suites", dest="showSuites",
help="Show discovered test suites",
action="store_true", default=False)
- group.add_option("", "--repeat", dest="repeatTests", metavar="N",
- help="Repeat tests N times (for timing)",
- action="store", default=None, type=int)
+ group.add_option("", "--show-tests", dest="showTests",
+ help="Show all discovered tests",
+ action="store_true", default=False)
+ group.add_option("", "--use-processes", dest="useProcesses",
+ help="Run tests in parallel with processes (not threads)",
+ action="store_true", default=useProcessesIsDefault)
+ group.add_option("", "--use-threads", dest="useProcesses",
+ help="Run tests in parallel with threads (not processes)",
+ action="store_false", default=useProcessesIsDefault)
parser.add_option_group(group)
(opts, args) = parser.parse_args()
@@ -248,7 +214,7 @@ def main(builtinParameters = {}):
# I haven't seen this bug occur with 2.5.2 and later, so only enable multiple
# threads by default there.
if sys.hexversion >= 0x2050200:
- opts.numThreads = Util.detectCPUs()
+ opts.numThreads = lit.util.detectCPUs()
else:
opts.numThreads = 1
@@ -264,38 +230,54 @@ def main(builtinParameters = {}):
userParams[name] = val
# Create the global config object.
- litConfig = LitConfig.LitConfig(progname = os.path.basename(sys.argv[0]),
- path = opts.path,
- quiet = opts.quiet,
- useValgrind = opts.useValgrind,
- valgrindLeakCheck = opts.valgrindLeakCheck,
- valgrindArgs = opts.valgrindArgs,
- noExecute = opts.noExecute,
- ignoreStdErr = False,
- debug = opts.debug,
- isWindows = (platform.system()=='Windows'),
- params = userParams,
- config_prefix = opts.configPrefix)
-
- tests = lit.discovery.find_tests_for_inputs(litConfig, inputs)
-
- if opts.showSuites:
+ litConfig = lit.LitConfig.LitConfig(
+ progname = os.path.basename(sys.argv[0]),
+ path = opts.path,
+ quiet = opts.quiet,
+ useValgrind = opts.useValgrind,
+ valgrindLeakCheck = opts.valgrindLeakCheck,
+ valgrindArgs = opts.valgrindArgs,
+ noExecute = opts.noExecute,
+ debug = opts.debug,
+ isWindows = isWindows,
+ params = userParams,
+ config_prefix = opts.configPrefix)
+
+ # Perform test discovery.
+ run = lit.run.Run(litConfig,
+ lit.discovery.find_tests_for_inputs(litConfig, inputs))
+
+ if opts.showSuites or opts.showTests:
+ # Aggregate the tests by suite.
suitesAndTests = {}
- for t in tests:
+ for t in run.tests:
if t.suite not in suitesAndTests:
suitesAndTests[t.suite] = []
suitesAndTests[t.suite].append(t)
-
- print '-- Test Suites --'
- suitesAndTests = suitesAndTests.items()
- suitesAndTests.sort(key = lambda (ts,_): ts.name)
- for ts,ts_tests in suitesAndTests:
- print ' %s - %d tests' %(ts.name, len(ts_tests))
- print ' Source Root: %s' % ts.source_root
- print ' Exec Root : %s' % ts.exec_root
+ suitesAndTests = list(suitesAndTests.items())
+ suitesAndTests.sort(key = lambda item: item[0].name)
+
+ # Show the suites, if requested.
+ if opts.showSuites:
+ print('-- Test Suites --')
+ for ts,ts_tests in suitesAndTests:
+ print(' %s - %d tests' %(ts.name, len(ts_tests)))
+ print(' Source Root: %s' % ts.source_root)
+ print(' Exec Root : %s' % ts.exec_root)
+
+ # Show the tests, if requested.
+ if opts.showTests:
+ print('-- Available Tests --')
+ for ts,ts_tests in suitesAndTests:
+ ts_tests.sort(key = lambda test: test.path_in_suite)
+ for test in ts_tests:
+ print(' %s' % (test.getFullName(),))
+
+ # Exit.
+ sys.exit(0)
# Select and order the tests.
- numTotalTests = len(tests)
+ numTotalTests = len(run.tests)
# First, select based on the filter expression if given.
if opts.filter:
@@ -304,113 +286,106 @@ def main(builtinParameters = {}):
except:
parser.error("invalid regular expression for --filter: %r" % (
opts.filter))
- tests = [t for t in tests
- if rex.search(t.getFullName())]
+ run.tests = [t for t in run.tests
+ if rex.search(t.getFullName())]
# Then select the order.
if opts.shuffle:
- random.shuffle(tests)
+ random.shuffle(run.tests)
else:
- tests.sort(key = lambda t: t.getFullName())
+ run.tests.sort(key = lambda t: t.getFullName())
# Finally limit the number of tests, if desired.
if opts.maxTests is not None:
- tests = tests[:opts.maxTests]
+ run.tests = run.tests[:opts.maxTests]
# Don't create more threads than tests.
- opts.numThreads = min(len(tests), opts.numThreads)
+ opts.numThreads = min(len(run.tests), opts.numThreads)
extra = ''
- if len(tests) != numTotalTests:
+ if len(run.tests) != numTotalTests:
extra = ' of %d' % numTotalTests
- header = '-- Testing: %d%s tests, %d threads --'%(len(tests),extra,
+ header = '-- Testing: %d%s tests, %d threads --'%(len(run.tests), extra,
opts.numThreads)
- if opts.repeatTests:
- tests = [t.copyWithIndex(i)
- for t in tests
- for i in range(opts.repeatTests)]
-
progressBar = None
if not opts.quiet:
if opts.succinct and opts.useProgressBar:
try:
- tc = ProgressBar.TerminalController()
- progressBar = ProgressBar.ProgressBar(tc, header)
+ tc = lit.ProgressBar.TerminalController()
+ progressBar = lit.ProgressBar.ProgressBar(tc, header)
except ValueError:
- print header
- progressBar = ProgressBar.SimpleProgressBar('Testing: ')
+ print(header)
+ progressBar = lit.ProgressBar.SimpleProgressBar('Testing: ')
else:
- print header
+ print(header)
startTime = time.time()
- display = TestingProgressDisplay(opts, len(tests), progressBar)
- provider = TestProvider(tests, opts.maxTime)
- runTests(opts.numThreads, litConfig, provider, display)
+ display = TestingProgressDisplay(opts, len(run.tests), progressBar)
+ try:
+ run.execute_tests(display, opts.numThreads, opts.maxTime,
+ opts.useProcesses)
+ except KeyboardInterrupt:
+ sys.exit(2)
display.finish()
+ testing_time = time.time() - startTime
if not opts.quiet:
- print 'Testing Time: %.2fs'%(time.time() - startTime)
+ print('Testing Time: %.2fs' % (testing_time,))
- # Update results for any tests which weren't run.
- for t in tests:
- if t.result is None:
- t.setResult(Test.UNRESOLVED, '', 0.0)
+ # Write out the test data, if requested.
+ if opts.output_path is not None:
+ write_test_results(run, litConfig, testing_time, opts.output_path)
# List test results organized by kind.
hasFailures = False
byCode = {}
- for t in tests:
- if t.result not in byCode:
- byCode[t.result] = []
- byCode[t.result].append(t)
- if t.result.isFailure:
+ for test in run.tests:
+ if test.result.code not in byCode:
+ byCode[test.result.code] = []
+ byCode[test.result.code].append(test)
+ if test.result.code.isFailure:
hasFailures = True
- # FIXME: Show unresolved and (optionally) unsupported tests.
- for title,code in (('Unexpected Passing Tests', Test.XPASS),
- ('Failing Tests', Test.FAIL)):
+ # Print each test in any of the failing groups.
+ for title,code in (('Unexpected Passing Tests', lit.Test.XPASS),
+ ('Failing Tests', lit.Test.FAIL),
+ ('Unresolved Tests', lit.Test.UNRESOLVED)):
elts = byCode.get(code)
if not elts:
continue
- print '*'*20
- print '%s (%d):' % (title, len(elts))
- for t in elts:
- print ' %s' % t.getFullName()
- print
-
- if opts.timeTests:
- # Collate, in case we repeated tests.
- times = {}
- for t in tests:
- key = t.getFullName()
- times[key] = times.get(key, 0.) + t.elapsed
-
- byTime = list(times.items())
- byTime.sort(key = lambda (name,elapsed): elapsed)
- if byTime:
- Util.printHistogram(byTime, title='Tests')
-
- for name,code in (('Expected Passes ', Test.PASS),
- ('Expected Failures ', Test.XFAIL),
- ('Unsupported Tests ', Test.UNSUPPORTED),
- ('Unresolved Tests ', Test.UNRESOLVED),
- ('Unexpected Passes ', Test.XPASS),
- ('Unexpected Failures', Test.FAIL),):
+ print('*'*20)
+ print('%s (%d):' % (title, len(elts)))
+ for test in elts:
+ print(' %s' % test.getFullName())
+ sys.stdout.write('\n')
+
+ if opts.timeTests and run.tests:
+ # Order by time.
+ test_times = [(test.getFullName(), test.result.elapsed)
+ for test in run.tests]
+ lit.util.printHistogram(test_times, title='Tests')
+
+ for name,code in (('Expected Passes ', lit.Test.PASS),
+ ('Expected Failures ', lit.Test.XFAIL),
+ ('Unsupported Tests ', lit.Test.UNSUPPORTED),
+ ('Unresolved Tests ', lit.Test.UNRESOLVED),
+ ('Unexpected Passes ', lit.Test.XPASS),
+ ('Unexpected Failures', lit.Test.FAIL),):
if opts.quiet and not code.isFailure:
continue
N = len(byCode.get(code,[]))
if N:
- print ' %s: %d' % (name,N)
+ print(' %s: %d' % (name,N))
# If we encountered any additional errors, exit abnormally.
if litConfig.numErrors:
- print >>sys.stderr, '\n%d error(s), exiting.' % litConfig.numErrors
+ sys.stderr.write('\n%d error(s), exiting.\n' % litConfig.numErrors)
sys.exit(2)
# Warn about warnings.
if litConfig.numWarnings:
- print >>sys.stderr, '\n%d warning(s) in tests.' % litConfig.numWarnings
+ sys.stderr.write('\n%d warning(s) in tests.\n' % litConfig.numWarnings)
if hasFailures:
sys.exit(1)
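
The new -o/--output option serializes the run through write_test_results as
JSON. The document it writes is shaped like the following Python literal
(field values here are illustrative, not captured from a real run):

    data = {
        '__version__': [0, 3, 0],    # lit.__versioninfo__, as a JSON array
        'elapsed': 1.23,             # total testing time, in seconds
        'tests': [
            {
                'name': 'top-level-suite :: example.txt',
                'code': 'PASS',
                'output': '',
                'elapsed': 0.01,
                # A 'metrics' mapping appears only for tests that report
                # metrics via test.result.metrics.
            },
        ],
    }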
diff --git a/utils/lit/lit/run.py b/utils/lit/lit/run.py
new file mode 100644
index 0000000000000..27c414d6dd65e
--- /dev/null
+++ b/utils/lit/lit/run.py
@@ -0,0 +1,277 @@
+import os
+import threading
+import time
+import traceback
+try:
+ import Queue as queue
+except ImportError:
+ import queue
+
+try:
+ import win32api
+except ImportError:
+ win32api = None
+
+try:
+ import multiprocessing
+except ImportError:
+ multiprocessing = None
+
+import lit.Test
+
+###
+# Test Execution Implementation
+
+class LockedValue(object):
+ def __init__(self, value):
+ self.lock = threading.Lock()
+ self._value = value
+
+ def _get_value(self):
+ self.lock.acquire()
+ try:
+ return self._value
+ finally:
+ self.lock.release()
+
+ def _set_value(self, value):
+ self.lock.acquire()
+ try:
+ self._value = value
+ finally:
+ self.lock.release()
+
+ value = property(_get_value, _set_value)
+
+class TestProvider(object):
+ def __init__(self, tests, num_jobs, queue_impl, canceled_flag):
+ self.canceled_flag = canceled_flag
+
+ # Create a shared queue to provide the test indices.
+ self.queue = queue_impl()
+ for i in range(len(tests)):
+ self.queue.put(i)
+ for i in range(num_jobs):
+ self.queue.put(None)
+
+ def cancel(self):
+ self.canceled_flag.value = 1
+
+ def get(self):
+ # Check if we are canceled.
+ if self.canceled_flag.value:
+ return None
+
+ # Otherwise take the next test.
+ return self.queue.get()
+
+class Tester(object):
+ def __init__(self, run_instance, provider, consumer):
+ self.run_instance = run_instance
+ self.provider = provider
+ self.consumer = consumer
+
+ def run(self):
+ while True:
+ item = self.provider.get()
+ if item is None:
+ break
+ self.run_test(item)
+ self.consumer.task_finished()
+
+ def run_test(self, test_index):
+ test = self.run_instance.tests[test_index]
+ try:
+ self.run_instance.execute_test(test)
+ except KeyboardInterrupt:
+ # This is a sad hack. Unfortunately subprocess goes
+ # bonkers with ctrl-c and we start forking merrily.
+ print('\nCtrl-C detected, goodbye.')
+ os.kill(0,9)
+ self.consumer.update(test_index, test)
+
+class ThreadResultsConsumer(object):
+ def __init__(self, display):
+ self.display = display
+ self.lock = threading.Lock()
+
+ def update(self, test_index, test):
+ self.lock.acquire()
+ try:
+ self.display.update(test)
+ finally:
+ self.lock.release()
+
+ def task_finished(self):
+ pass
+
+ def handle_results(self):
+ pass
+
+class MultiprocessResultsConsumer(object):
+ def __init__(self, run, display, num_jobs):
+ self.run = run
+ self.display = display
+ self.num_jobs = num_jobs
+ self.queue = multiprocessing.Queue()
+
+ def update(self, test_index, test):
+ # This method is called in the child processes, and communicates the
+ # results to the actual display implementation via an output queue.
+ self.queue.put((test_index, test.result))
+
+ def task_finished(self):
+ # This method is called in the child processes, and communicates that
+ # individual tasks are complete.
+ self.queue.put(None)
+
+ def handle_results(self):
+        # This method is called in the parent; it consumes results from the
+        # output queue and dispatches them to the actual display. The method
+        # returns once each of the num_jobs tasks has signalled completion.
+ completed = 0
+ while completed != self.num_jobs:
+ # Wait for a result item.
+ item = self.queue.get()
+ if item is None:
+ completed += 1
+ continue
+
+ # Update the test result in the parent process.
+ index,result = item
+ test = self.run.tests[index]
+ test.result = result
+
+ self.display.update(test)
+
+def run_one_tester(run, provider, display):
+ tester = Tester(run, provider, display)
+ tester.run()
+
+###
+
+class Run(object):
+ """
+ This class represents a concrete, configured testing run.
+ """
+
+ def __init__(self, lit_config, tests):
+ self.lit_config = lit_config
+ self.tests = tests
+
+ def execute_test(self, test):
+ result = None
+ start_time = time.time()
+ try:
+ result = test.config.test_format.execute(test, self.lit_config)
+
+ # Support deprecated result from execute() which returned the result
+ # code and additional output as a tuple.
+ if isinstance(result, tuple):
+ code, output = result
+ result = lit.Test.Result(code, output)
+ elif not isinstance(result, lit.Test.Result):
+ raise ValueError("unexpected result from test execution")
+ except KeyboardInterrupt:
+ raise
+ except:
+ if self.lit_config.debug:
+ raise
+ output = 'Exception during script execution:\n'
+ output += traceback.format_exc()
+ output += '\n'
+ result = lit.Test.Result(lit.Test.UNRESOLVED, output)
+ result.elapsed = time.time() - start_time
+
+ test.setResult(result)
+
+ def execute_tests(self, display, jobs, max_time=None,
+ use_processes=False):
+ """
+ execute_tests(display, jobs, [max_time])
+
+        Execute each of the tests in the run, using up to jobs parallel
+        tasks, and inform the display of each individual result. Every test
+        held by this run object is scheduled for execution.
+
+ If max_time is non-None, it should be a time in seconds after which to
+ stop executing tests.
+
+ The display object will have its update method called with each test as
+ it is completed. The calls are guaranteed to be locked with respect to
+ one another, but are *not* guaranteed to be called on the same thread as
+ this method was invoked on.
+
+ Upon completion, each test in the run will have its result
+ computed. Tests which were not actually executed (for any reason) will
+ be given an UNRESOLVED result.
+ """
+
+ # Choose the appropriate parallel execution implementation.
+ consumer = None
+ if jobs != 1 and use_processes and multiprocessing:
+ try:
+ task_impl = multiprocessing.Process
+ queue_impl = multiprocessing.Queue
+ canceled_flag = multiprocessing.Value('i', 0)
+ consumer = MultiprocessResultsConsumer(self, display, jobs)
+ except:
+ # multiprocessing fails to initialize with certain OpenBSD and
+ # FreeBSD Python versions: http://bugs.python.org/issue3770
+ # Unfortunately the error raised also varies by platform.
+ self.lit_config.note('failed to initialize multiprocessing')
+ consumer = None
+ if not consumer:
+ task_impl = threading.Thread
+ queue_impl = queue.Queue
+ canceled_flag = LockedValue(0)
+ consumer = ThreadResultsConsumer(display)
+
+ # Create the test provider.
+ provider = TestProvider(self.tests, jobs, queue_impl, canceled_flag)
+
+ # Install a console-control signal handler on Windows.
+ if win32api is not None:
+ def console_ctrl_handler(type):
+ provider.cancel()
+ return True
+ win32api.SetConsoleCtrlHandler(console_ctrl_handler, True)
+
+ # Install a timeout handler, if requested.
+ if max_time is not None:
+ def timeout_handler():
+ provider.cancel()
+ timeout_timer = threading.Timer(max_time, timeout_handler)
+ timeout_timer.start()
+
+ # If not using multiple tasks, just run the tests directly.
+ if jobs == 1:
+ run_one_tester(self, provider, consumer)
+ else:
+ # Otherwise, execute the tests in parallel
+ self._execute_tests_in_parallel(task_impl, provider, consumer, jobs)
+
+ # Cancel the timeout handler.
+ if max_time is not None:
+ timeout_timer.cancel()
+
+ # Update results for any tests which weren't run.
+ for test in self.tests:
+ if test.result is None:
+ test.setResult(lit.Test.Result(lit.Test.UNRESOLVED, '', 0.0))
+
+ def _execute_tests_in_parallel(self, task_impl, provider, consumer, jobs):
+ # Start all of the tasks.
+ tasks = [task_impl(target=run_one_tester,
+ args=(self, provider, consumer))
+ for i in range(jobs)]
+ for t in tasks:
+ t.start()
+
+ # Allow the consumer to handle results, if necessary.
+ consumer.handle_results()
+
+ # Wait for all the tasks to complete.
+ for t in tasks:
+ t.join()
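
A minimal driver sketch for the new Run object (PrintingDisplay is an
illustrative stand-in for TestingProgressDisplay; litConfig and the tests
list would come from lit.LitConfig and lit.discovery as in main above):

    import lit.run

    class PrintingDisplay(object):
        # The display only needs an update() method; execute_tests
        # guarantees the calls are serialized.
        def update(self, test):
            print('%s: %s' % (test.result.code.name, test.getFullName()))

    # run = lit.run.Run(litConfig, tests)
    # run.execute_tests(PrintingDisplay(), jobs=4, max_time=None,
    #                   use_processes=True)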
diff --git a/utils/lit/lit/Util.py b/utils/lit/lit/util.py
index f29480900ce76..2b1010c1870ce 100644
--- a/utils/lit/lit/Util.py
+++ b/utils/lit/lit/util.py
@@ -1,4 +1,11 @@
-import os, sys
+import errno
+import itertools
+import math
+import os
+import platform
+import signal
+import subprocess
+import sys
def detectCPUs():
"""
@@ -6,7 +13,7 @@ def detectCPUs():
"""
# Linux, Unix and MacOS:
if hasattr(os, "sysconf"):
- if os.sysconf_names.has_key("SC_NPROCESSORS_ONLN"):
+ if "SC_NPROCESSORS_ONLN" in os.sysconf_names:
# Linux & Unix:
ncpus = os.sysconf("SC_NPROCESSORS_ONLN")
if isinstance(ncpus, int) and ncpus > 0:
@@ -14,7 +21,7 @@ def detectCPUs():
else: # OSX:
return int(capture(['sysctl', '-n', 'hw.ncpu']))
# Windows:
- if os.environ.has_key("NUMBER_OF_PROCESSORS"):
+ if "NUMBER_OF_PROCESSORS" in os.environ:
ncpus = int(os.environ["NUMBER_OF_PROCESSORS"])
if ncpus > 0:
return ncpus
@@ -23,8 +30,6 @@ def detectCPUs():
def mkdir_p(path):
"""mkdir_p(path) - Make the "path" directory, if it does not exist; this
will also make directories for any missing parent directories."""
- import errno
-
if not path or os.path.exists(path):
return
@@ -34,13 +39,13 @@ def mkdir_p(path):
try:
os.mkdir(path)
- except OSError,e:
+ except OSError:
+ e = sys.exc_info()[1]
# Ignore EEXIST, which may occur during a race condition.
if e.errno != errno.EEXIST:
raise
def capture(args, env=None):
- import subprocess
"""capture(command) - Run the given command (or argv list) in a shell and
return the standard output."""
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
@@ -92,9 +97,7 @@ def whichTools(tools, paths):
return None
def printHistogram(items, title = 'Items'):
- import itertools, math
-
- items.sort(key = lambda (_,v): v)
+ items.sort(key = lambda item: item[1])
maxValue = max([v for _,v in items])
@@ -115,27 +118,52 @@ def printHistogram(items, title = 'Items'):
barW = 40
hr = '-' * (barW + 34)
- print '\nSlowest %s:' % title
- print hr
+ print('\nSlowest %s:' % title)
+ print(hr)
for name,value in items[-20:]:
- print '%.2fs: %s' % (value, name)
- print '\n%s Times:' % title
- print hr
+ print('%.2fs: %s' % (value, name))
+ print('\n%s Times:' % title)
+ print(hr)
pDigits = int(math.ceil(math.log(maxValue, 10)))
pfDigits = max(0, 3-pDigits)
if pfDigits:
pDigits += pfDigits + 1
cDigits = int(math.ceil(math.log(len(items), 10)))
- print "[%s] :: [%s] :: [%s]" % ('Range'.center((pDigits+1)*2 + 3),
+ print("[%s] :: [%s] :: [%s]" % ('Range'.center((pDigits+1)*2 + 3),
'Percentage'.center(barW),
- 'Count'.center(cDigits*2 + 1))
- print hr
+ 'Count'.center(cDigits*2 + 1)))
+ print(hr)
for i,row in enumerate(histo):
pct = float(len(row)) / len(items)
w = int(barW * pct)
- print "[%*.*fs,%*.*fs)" % (pDigits, pfDigits, i*barH,
- pDigits, pfDigits, (i+1)*barH),
- print ":: [%s%s] :: [%*d/%*d]" % ('*'*w, ' '*(barW-w),
- cDigits, len(row),
- cDigits, len(items))
+ print("[%*.*fs,%*.*fs) :: [%s%s] :: [%*d/%*d]" % (
+ pDigits, pfDigits, i*barH, pDigits, pfDigits, (i+1)*barH,
+ '*'*w, ' '*(barW-w), cDigits, len(row), cDigits, len(items)))
+
+# Close extra file handles on UNIX (on Windows this cannot be done while
+# also redirecting input).
+kUseCloseFDs = not (platform.system() == 'Windows')
+def executeCommand(command, cwd=None, env=None):
+ p = subprocess.Popen(command, cwd=cwd,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ env=env, close_fds=kUseCloseFDs)
+ out,err = p.communicate()
+ exitCode = p.wait()
+
+ # Detect Ctrl-C in subprocess.
+ if exitCode == -signal.SIGINT:
+ raise KeyboardInterrupt
+
+ # Ensure the resulting output is always of string type.
+ try:
+ out = str(out.decode('ascii'))
+ except:
+ out = str(out)
+ try:
+ err = str(err.decode('ascii'))
+ except:
+ err = str(err)
+ return out, err, exitCode
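
The relocated executeCommand now also normalizes its output to str, so
callers get decoded text plus the exit code. A minimal usage sketch
(assuming a Unix-like host where 'echo' exists on PATH):

    import lit.util

    out, err, exitCode = lit.util.executeCommand(['echo', 'hello'])
    assert exitCode == 0 and out.strip() == 'hello' and err == ''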
diff --git a/utils/lit/setup.py b/utils/lit/setup.py
index a94e6ea833e96..10de6bb4ee79b 100644
--- a/utils/lit/setup.py
+++ b/utils/lit/setup.py
@@ -1,7 +1,14 @@
import lit
+import os
-# FIXME: Support distutils?
from setuptools import setup, find_packages
+
+# setuptools expects to be invoked from within the directory of setup.py, but it
+# is nice to allow:
+# python path/to/setup.py install
+# to work (for scripts, etc.)
+os.chdir(os.path.dirname(os.path.abspath(__file__)))
+
setup(
name = "lit",
version = lit.__version__,
diff --git a/utils/lit/tests/Inputs/discovery/lit.cfg b/utils/lit/tests/Inputs/discovery/lit.cfg
index 4049ab16f9cce..c48ca0bc03651 100644
--- a/utils/lit/tests/Inputs/discovery/lit.cfg
+++ b/utils/lit/tests/Inputs/discovery/lit.cfg
@@ -1,3 +1,4 @@
+import lit.formats
config.name = 'top-level-suite'
config.suffixes = ['.txt']
config.test_format = lit.formats.ShTest()
@@ -8,3 +9,6 @@ config.test_format = lit.formats.ShTest()
#
#config.test_source_root = None
#config.test_exec_root = None
+
+# Check that arbitrary config values are copied (tested by subdir/lit.local.cfg).
+config.an_extra_variable = False
diff --git a/utils/lit/tests/Inputs/discovery/subdir/lit.local.cfg b/utils/lit/tests/Inputs/discovery/subdir/lit.local.cfg
index 5ae6b3cd017d9..631cb602b0d9b 100644
--- a/utils/lit/tests/Inputs/discovery/subdir/lit.local.cfg
+++ b/utils/lit/tests/Inputs/discovery/subdir/lit.local.cfg
@@ -1 +1,4 @@
config.suffixes = ['.py']
+
+# Check that the arbitrary config values in our parent were inherited.
+assert hasattr(config, 'an_extra_variable')
diff --git a/utils/lit/tests/Inputs/discovery/subsuite/lit.cfg b/utils/lit/tests/Inputs/discovery/subsuite/lit.cfg
index 0c2979d74adcf..b49329abfde61 100644
--- a/utils/lit/tests/Inputs/discovery/subsuite/lit.cfg
+++ b/utils/lit/tests/Inputs/discovery/subsuite/lit.cfg
@@ -1,3 +1,4 @@
+import lit.formats
config.name = 'sub-suite'
config.suffixes = ['.txt']
config.test_format = lit.formats.ShTest()
diff --git a/utils/lit/tests/Inputs/exec-discovery-in-tree/lit.cfg b/utils/lit/tests/Inputs/exec-discovery-in-tree/lit.cfg
index 342b2fdd3c896..ae25b4f4acb42 100644
--- a/utils/lit/tests/Inputs/exec-discovery-in-tree/lit.cfg
+++ b/utils/lit/tests/Inputs/exec-discovery-in-tree/lit.cfg
@@ -1,6 +1,8 @@
+import lit.formats
+
# Verify that the site configuration was loaded.
if config.test_source_root is None or config.test_exec_root is None:
- lit.fatal("No site specific configuration")
+ lit_config.fatal("No site specific configuration")
config.name = 'exec-discovery-in-tree-suite'
config.suffixes = ['.txt']
diff --git a/utils/lit/tests/Inputs/exec-discovery-in-tree/obj/lit.site.cfg b/utils/lit/tests/Inputs/exec-discovery-in-tree/obj/lit.site.cfg
index de9a3d0c6dfd6..4061c894072ff 100644
--- a/utils/lit/tests/Inputs/exec-discovery-in-tree/obj/lit.site.cfg
+++ b/utils/lit/tests/Inputs/exec-discovery-in-tree/obj/lit.site.cfg
@@ -1,4 +1,4 @@
import os
config.test_exec_root = os.path.dirname(__file__)
config.test_source_root = os.path.dirname(config.test_exec_root)
-lit.load_config(config, os.path.join(config.test_source_root, "lit.cfg")) \ No newline at end of file
+lit_config.load_config(config, os.path.join(config.test_source_root, "lit.cfg")) \ No newline at end of file
diff --git a/utils/lit/tests/Inputs/exec-discovery/lit.site.cfg b/utils/lit/tests/Inputs/exec-discovery/lit.site.cfg
index 796569a301b88..ac273c797c5f3 100644
--- a/utils/lit/tests/Inputs/exec-discovery/lit.site.cfg
+++ b/utils/lit/tests/Inputs/exec-discovery/lit.site.cfg
@@ -2,4 +2,4 @@
import os
config.test_exec_root = os.path.dirname(__file__)
config.test_source_root = os.path.join(os.path.dirname(config.test_exec_root), "discovery")
-lit.load_config(config, os.path.join(config.test_source_root, "lit.cfg"))
+lit_config.load_config(config, os.path.join(config.test_source_root, "lit.cfg"))
diff --git a/utils/lit/tests/Inputs/googletest-format/DummySubDir/OneTest b/utils/lit/tests/Inputs/googletest-format/DummySubDir/OneTest
new file mode 100755
index 0000000000000..9dff137f4dec6
--- /dev/null
+++ b/utils/lit/tests/Inputs/googletest-format/DummySubDir/OneTest
@@ -0,0 +1,34 @@
+#!/usr/bin/env python
+
+import sys
+
+if len(sys.argv) != 2:
+ raise ValueError("unexpected number of args")
+
+if sys.argv[1] == "--gtest_list_tests":
+ print("""\
+FirstTest.
+ subTestA
+ subTestB
+ParameterizedTest/0.
+ subTest
+ParameterizedTest/1.
+ subTest""")
+ sys.exit(0)
+elif not sys.argv[1].startswith("--gtest_filter="):
+ raise ValueError("unexpected argument: %r" % (sys.argv[1]))
+
+test_name = sys.argv[1].split('=',1)[1]
+if test_name == 'FirstTest.subTestA':
+ print('I am subTest A, I PASS')
+ sys.exit(0)
+elif test_name == 'FirstTest.subTestB':
+ print('I am subTest B, I FAIL')
+ print('And I have two lines of output')
+ sys.exit(1)
+elif test_name in ('ParameterizedTest/0.subTest',
+ 'ParameterizedTest/1.subTest'):
+ print('I am a parameterized test, I also PASS')
+ sys.exit(0)
+else:
+ raise SystemExit("error: invalid test name: %r" % (test_name,))
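The dummy emulates the two-phase protocol the GoogleTest format drives: one invocation with --gtest_list_tests to enumerate the tests, then one invocation per test with --gtest_filter=. A sketch exercising it by hand (the path is an assumption for illustration):

    import subprocess

    exe = 'tests/Inputs/googletest-format/DummySubDir/OneTest'

    # Phase 1: discovery.
    print(subprocess.check_output([exe, '--gtest_list_tests']).decode())

    # Phase 2: run one discovered test in isolation.
    rc = subprocess.call([exe, '--gtest_filter=FirstTest.subTestB'])
    assert rc == 1   # subTestB is the deliberate failure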
diff --git a/utils/lit/tests/Inputs/googletest-format/lit.cfg b/utils/lit/tests/Inputs/googletest-format/lit.cfg
new file mode 100644
index 0000000000000..f2f6cda8db6c0
--- /dev/null
+++ b/utils/lit/tests/Inputs/googletest-format/lit.cfg
@@ -0,0 +1,3 @@
+import lit.formats
+config.name = 'googletest-format'
+config.test_format = lit.formats.GoogleTest('DummySubDir', 'Test')
diff --git a/utils/lit/tests/Inputs/progress-bar/lit.cfg b/utils/lit/tests/Inputs/progress-bar/lit.cfg
index 4878b65609681..7f31129ad114b 100644
--- a/utils/lit/tests/Inputs/progress-bar/lit.cfg
+++ b/utils/lit/tests/Inputs/progress-bar/lit.cfg
@@ -1,3 +1,4 @@
+import lit.formats
config.name = 'shtest-shell'
config.suffixes = ['.txt']
config.test_format = lit.formats.ShTest()
diff --git a/utils/lit/tests/Inputs/shtest-format/argv0.txt b/utils/lit/tests/Inputs/shtest-format/argv0.txt
new file mode 100644
index 0000000000000..2ff289014bc09
--- /dev/null
+++ b/utils/lit/tests/Inputs/shtest-format/argv0.txt
@@ -0,0 +1,6 @@
+# Check that we pass argv[0] through as it was written, instead of the
+# resolved path. This is important for some tools; in particular, '[' (at
+# least on OS X) only recognizes that it is in '['-mode when its argv[0] is
+# exactly '['; otherwise it refuses to accept the trailing closing bracket.
+#
+# RUN: [ "A" = "A" ]
diff --git a/utils/lit/tests/Inputs/shtest-format/external_shell/fail.txt b/utils/lit/tests/Inputs/shtest-format/external_shell/fail.txt
index 1e74be5dbd4bc..069e37619e79f 100644
--- a/utils/lit/tests/Inputs/shtest-format/external_shell/fail.txt
+++ b/utils/lit/tests/Inputs/shtest-format/external_shell/fail.txt
@@ -1,3 +1,5 @@
# Run a command that fails with error on stdout.
#
+# RUN: echo "line 1: failed test output on stdout"
+# RUN: echo "line 2: failed test output on stdout"
# RUN: cat "does-not-exist"
diff --git a/utils/lit/tests/Inputs/shtest-format/external_shell/fail_with_bad_encoding.txt b/utils/lit/tests/Inputs/shtest-format/external_shell/fail_with_bad_encoding.txt
new file mode 100644
index 0000000000000..f6157e66c97c7
--- /dev/null
+++ b/utils/lit/tests/Inputs/shtest-format/external_shell/fail_with_bad_encoding.txt
@@ -0,0 +1,5 @@
+# Run a command that fails with error on stdout.
+#
+# RUN: %S/write-bad-encoding.sh
+# RUN: false
+
diff --git a/utils/lit/tests/Inputs/shtest-format/external_shell/lit.local.cfg b/utils/lit/tests/Inputs/shtest-format/external_shell/lit.local.cfg
index d14d1479772da..5e87c72991983 100644
--- a/utils/lit/tests/Inputs/shtest-format/external_shell/lit.local.cfg
+++ b/utils/lit/tests/Inputs/shtest-format/external_shell/lit.local.cfg
@@ -1 +1,2 @@
+import lit.formats
config.test_format = lit.formats.ShTest(execute_external=True)
diff --git a/utils/lit/tests/Inputs/shtest-format/external_shell/write-bad-encoding.sh b/utils/lit/tests/Inputs/shtest-format/external_shell/write-bad-encoding.sh
new file mode 100755
index 0000000000000..6b622cb232e29
--- /dev/null
+++ b/utils/lit/tests/Inputs/shtest-format/external_shell/write-bad-encoding.sh
@@ -0,0 +1,3 @@
+#!/bin/sh
+
+echo "a line with bad encoding: Â."
diff --git a/utils/lit/tests/Inputs/shtest-format/fail.txt b/utils/lit/tests/Inputs/shtest-format/fail.txt
index 49932c3006e15..8c305eb416be0 100644
--- a/utils/lit/tests/Inputs/shtest-format/fail.txt
+++ b/utils/lit/tests/Inputs/shtest-format/fail.txt
@@ -1 +1,2 @@
+# RUN: printf "line 1: failed test output on stdout\nline 2: failed test output on stdout"
# RUN: false
diff --git a/utils/lit/tests/Inputs/shtest-format/lit.cfg b/utils/lit/tests/Inputs/shtest-format/lit.cfg
index 78dd1bfb2e3a2..9b47985a3d868 100644
--- a/utils/lit/tests/Inputs/shtest-format/lit.cfg
+++ b/utils/lit/tests/Inputs/shtest-format/lit.cfg
@@ -1,3 +1,4 @@
+import lit.formats
config.name = 'shtest-format'
config.suffixes = ['.txt']
config.test_format = lit.formats.ShTest()
diff --git a/utils/lit/tests/Inputs/shtest-shell/lit.cfg b/utils/lit/tests/Inputs/shtest-shell/lit.cfg
index 4878b65609681..7f31129ad114b 100644
--- a/utils/lit/tests/Inputs/shtest-shell/lit.cfg
+++ b/utils/lit/tests/Inputs/shtest-shell/lit.cfg
@@ -1,3 +1,4 @@
+import lit.formats
config.name = 'shtest-shell'
config.suffixes = ['.txt']
config.test_format = lit.formats.ShTest()
diff --git a/utils/lit/tests/Inputs/test-data/lit.cfg b/utils/lit/tests/Inputs/test-data/lit.cfg
new file mode 100644
index 0000000000000..f5aba7b217748
--- /dev/null
+++ b/utils/lit/tests/Inputs/test-data/lit.cfg
@@ -0,0 +1,44 @@
+import os
+try:
+ import ConfigParser
+except ImportError:
+ import configparser as ConfigParser
+
+import lit.formats
+import lit.Test
+
+class DummyFormat(lit.formats.FileBasedTest):
+ def execute(self, test, lit_config):
+ # In this dummy format, expect that each test file is actually just a
+ # .ini format dump of the results to report.
+
+ source_path = test.getSourcePath()
+
+ cfg = ConfigParser.ConfigParser()
+ cfg.read(source_path)
+
+ # Create the basic test result.
+ result_code = cfg.get('global', 'result_code')
+ result_output = cfg.get('global', 'result_output')
+ result = lit.Test.Result(getattr(lit.Test, result_code),
+ result_output)
+
+ # Load additional metrics.
+ for key,value_str in cfg.items('results'):
+ value = eval(value_str)
+ if isinstance(value, int):
+ metric = lit.Test.IntMetricValue(value)
+ elif isinstance(value, float):
+ metric = lit.Test.RealMetricValue(value)
+ else:
+ raise RuntimeError("unsupported result type")
+ result.addMetric(key, metric)
+
+ return result
+
+config.name = 'test-data'
+config.suffixes = ['.ini']
+config.test_format = DummyFormat()
+config.test_source_root = None
+config.test_exec_root = None
+config.target_triple = None
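Note the metric values above are parsed with eval(). A sketch of the same int/float dispatch using ast.literal_eval instead, shown purely as an illustration and not as what this patch does:

    import ast
    import lit.Test

    def parse_metric(value_str):
        # literal_eval only accepts Python literals, so arbitrary
        # expressions in the .ini file are rejected.
        value = ast.literal_eval(value_str)
        if isinstance(value, int):
            return lit.Test.IntMetricValue(value)
        elif isinstance(value, float):
            return lit.Test.RealMetricValue(value)
        raise RuntimeError("unsupported result type")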
diff --git a/utils/lit/tests/Inputs/test-data/metrics.ini b/utils/lit/tests/Inputs/test-data/metrics.ini
new file mode 100644
index 0000000000000..01b09c5c77529
--- /dev/null
+++ b/utils/lit/tests/Inputs/test-data/metrics.ini
@@ -0,0 +1,7 @@
+[global]
+result_code = PASS
+result_output = Test passed.
+
+[results]
+value0 = 1
+value1 = 2.3456 \ No newline at end of file
diff --git a/utils/lit/tests/Inputs/unittest-adaptor/lit.cfg b/utils/lit/tests/Inputs/unittest-adaptor/lit.cfg
index 52de70966242e..9e08a8629a435 100644
--- a/utils/lit/tests/Inputs/unittest-adaptor/lit.cfg
+++ b/utils/lit/tests/Inputs/unittest-adaptor/lit.cfg
@@ -1,3 +1,4 @@
+import lit.formats
config.name = 'unittest-adaptor'
config.suffixes = ['.txt']
config.test_format = lit.formats.ShTest()
diff --git a/utils/lit/tests/discovery.py b/utils/lit/tests/discovery.py
index 56d9dd07e841d..28010894cda62 100644
--- a/utils/lit/tests/discovery.py
+++ b/utils/lit/tests/discovery.py
@@ -1,7 +1,8 @@
# Check the basic discovery process, including a sub-suite.
#
# RUN: %{lit} %{inputs}/discovery \
-# RUN: -j 1 --debug --no-execute --show-suites -v > %t.out 2> %t.err
+# RUN: -j 1 --debug --show-tests --show-suites \
+# RUN: -v > %t.out 2> %t.err
# RUN: FileCheck --check-prefix=CHECK-BASIC-OUT < %t.out %s
# RUN: FileCheck --check-prefix=CHECK-BASIC-ERR < %t.err %s
#
@@ -17,12 +18,12 @@
# CHECK-BASIC-OUT: Source Root: {{.*/discovery$}}
# CHECK-BASIC-OUT: Exec Root : {{.*/discovery$}}
#
-# CHECK-BASIC-OUT: -- Testing: 5 tests, 1 threads --
-# CHECK-BASIC-OUT: PASS: sub-suite :: test-one
-# CHECK-BASIC-OUT: PASS: sub-suite :: test-two
-# CHECK-BASIC-OUT: PASS: top-level-suite :: subdir/test-three
-# CHECK-BASIC-OUT: PASS: top-level-suite :: test-one
-# CHECK-BASIC-OUT: PASS: top-level-suite :: test-two
+# CHECK-BASIC-OUT: -- Available Tests --
+# CHECK-BASIC-OUT: sub-suite :: test-one
+# CHECK-BASIC-OUT: sub-suite :: test-two
+# CHECK-BASIC-OUT: top-level-suite :: subdir/test-three
+# CHECK-BASIC-OUT: top-level-suite :: test-one
+# CHECK-BASIC-OUT: top-level-suite :: test-two
# Check discovery when exact test names are given.
@@ -30,18 +31,19 @@
# RUN: %{lit} \
# RUN: %{inputs}/discovery/subdir/test-three.py \
# RUN: %{inputs}/discovery/subsuite/test-one.txt \
-# RUN: -j 1 --no-execute --show-suites -v > %t.out
+# RUN: -j 1 --show-tests --show-suites -v > %t.out
# RUN: FileCheck --check-prefix=CHECK-EXACT-TEST < %t.out %s
#
-# CHECK-EXACT-TEST: -- Testing: 2 tests, 1 threads --
-# CHECK-EXACT-TEST: PASS: sub-suite :: test-one
-# CHECK-EXACT-TEST: PASS: top-level-suite :: subdir/test-three
+# CHECK-EXACT-TEST: -- Available Tests --
+# CHECK-EXACT-TEST: sub-suite :: test-one
+# CHECK-EXACT-TEST: top-level-suite :: subdir/test-three
# Check discovery when using an exec path.
#
# RUN: %{lit} %{inputs}/exec-discovery \
-# RUN: -j 1 --debug --no-execute --show-suites -v > %t.out 2> %t.err
+# RUN: -j 1 --debug --show-tests --show-suites \
+# RUN: -v > %t.out 2> %t.err
# RUN: FileCheck --check-prefix=CHECK-ASEXEC-OUT < %t.out %s
# RUN: FileCheck --check-prefix=CHECK-ASEXEC-ERR < %t.err %s
#
@@ -60,13 +62,12 @@
# CHECK-ASEXEC-OUT: Source Root: {{.*/discovery$}}
# CHECK-ASEXEC-OUT: Exec Root : {{.*/exec-discovery$}}
#
-# CHECK-ASEXEC-OUT: -- Testing: 5 tests, 1 threads --
-# CHECK-ASEXEC-OUT: PASS: sub-suite :: test-one
-# CHECK-ASEXEC-OUT: PASS: sub-suite :: test-two
-# CHECK-ASEXEC-OUT: PASS: top-level-suite :: subdir/test-three
-# CHECK-ASEXEC-OUT: PASS: top-level-suite :: test-one
-# CHECK-ASEXEC-OUT: PASS: top-level-suite :: test-two
-
+# CHECK-ASEXEC-OUT: -- Available Tests --
+# CHECK-ASEXEC-OUT: sub-suite :: test-one
+# CHECK-ASEXEC-OUT: sub-suite :: test-two
+# CHECK-ASEXEC-OUT: top-level-suite :: subdir/test-three
+# CHECK-ASEXEC-OUT: top-level-suite :: test-one
+# CHECK-ASEXEC-OUT: top-level-suite :: test-two
# Check discovery when exact test names are given.
#
@@ -74,11 +75,11 @@
#
# RUN: %{lit} \
# RUN: %{inputs}/exec-discovery/subdir/test-three.py \
-# RUN: -j 1 --no-execute --show-suites -v > %t.out
+# RUN: -j 1 --show-tests --show-suites -v > %t.out
# RUN: FileCheck --check-prefix=CHECK-ASEXEC-EXACT-TEST < %t.out %s
#
-# CHECK-ASEXEC-EXACT-TEST: -- Testing: 1 tests, 1 threads --
-# CHECK-ASEXEC-EXACT-TEST: PASS: top-level-suite :: subdir/test-three
+# CHECK-ASEXEC-EXACT-TEST: -- Available Tests --
+# CHECK-ASEXEC-EXACT-TEST: top-level-suite :: subdir/test-three
# Check that we don't recurse infinitely when loading a site-specific test
@@ -86,11 +87,11 @@
#
# RUN: %{lit} \
# RUN: %{inputs}/exec-discovery-in-tree/obj/ \
-# RUN: -j 1 --no-execute --show-suites -v > %t.out
+# RUN: -j 1 --show-tests --show-suites -v > %t.out
# RUN: FileCheck --check-prefix=CHECK-ASEXEC-INTREE < %t.out %s
#
# CHECK-ASEXEC-INTREE: exec-discovery-in-tree-suite - 1 tests
# CHECK-ASEXEC-INTREE-NEXT: Source Root: {{.*/exec-discovery-in-tree$}}
# CHECK-ASEXEC-INTREE-NEXT: Exec Root : {{.*/exec-discovery-in-tree/obj$}}
-# CHECK-ASEXEC-INTREE-NEXT: -- Testing: 1 tests, 1 threads --
-# CHECK-ASEXEC-INTREE-NEXT: PASS: exec-discovery-in-tree-suite :: test-one
+# CHECK-ASEXEC-INTREE-NEXT: -- Available Tests --
+# CHECK-ASEXEC-INTREE-NEXT: exec-discovery-in-tree-suite :: test-one
diff --git a/utils/lit/tests/googletest-format.py b/utils/lit/tests/googletest-format.py
new file mode 100644
index 0000000000000..a62fd1b3ccaf9
--- /dev/null
+++ b/utils/lit/tests/googletest-format.py
@@ -0,0 +1,20 @@
+# Check the various features of the GoogleTest format.
+#
+# RUN: not %{lit} -j 1 -v %{inputs}/googletest-format > %t.out
+# RUN: FileCheck < %t.out %s
+#
+# END.
+
+# CHECK: -- Testing:
+# CHECK: PASS: googletest-format :: DummySubDir/OneTest/FirstTest.subTestA
+# CHECK: FAIL: googletest-format :: DummySubDir/OneTest/FirstTest.subTestB
+# CHECK-NEXT: *** TEST 'googletest-format :: DummySubDir/OneTest/FirstTest.subTestB' FAILED ***
+# CHECK-NEXT: I am subTest B, I FAIL
+# CHECK-NEXT: And I have two lines of output
+# CHECK: ***
+# CHECK: PASS: googletest-format :: DummySubDir/OneTest/ParameterizedTest/0.subTest
+# CHECK: PASS: googletest-format :: DummySubDir/OneTest/ParameterizedTest/1.subTest
+# CHECK: Failing Tests (1)
+# CHECK: Expected Passes : 3
+# CHECK: Unexpected Failures: 1
+
diff --git a/utils/lit/tests/lit.cfg b/utils/lit/tests/lit.cfg
index 32760ceb27353..2111b72748b59 100644
--- a/utils/lit/tests/lit.cfg
+++ b/utils/lit/tests/lit.cfg
@@ -1,6 +1,9 @@
# -*- Python -*-
import os
+import sys
+
+import lit.formats
# Configuration file for the 'lit' test runner.
@@ -20,17 +23,23 @@ config.excludes = ['Inputs']
config.test_source_root = os.path.dirname(__file__)
config.test_exec_root = config.test_source_root
-config.target_triple = None
+config.target_triple = '(unused)'
src_root = os.path.join(config.test_source_root, '..')
config.environment['PYTHONPATH'] = src_root
config.substitutions.append(('%{src_root}', src_root))
config.substitutions.append(('%{inputs}', os.path.join(
src_root, 'tests', 'Inputs')))
-config.substitutions.append(('%{lit}', os.path.join(src_root, 'lit.py')))
+config.substitutions.append(('%{lit}', "%%{python} %s" % (
+ os.path.join(src_root, 'lit.py'),)))
+config.substitutions.append(('%{python}', sys.executable))
# Enable coverage.py reporting, assuming the coverage module has been installed
# and sitecustomize.py in the virtualenv has been modified appropriately.
-if lit.params.get('check-coverage', None):
+if lit_config.params.get('check-coverage', None):
config.environment['COVERAGE_PROCESS_START'] = os.path.join(
os.path.dirname(__file__), ".coveragerc")
+
+# Add a feature to detect the Python version.
+config.available_features.add("python%d.%d" % (sys.version_info[0],
+ sys.version_info[1]))
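The %{lit} value deliberately contains %{python}, relying on substitutions being applied in order. A simplified model of that expansion with hypothetical paths (lit's real implementation rewrites RUN lines with regular expressions):

    # Ordered (pattern, replacement) pairs, as in config.substitutions.
    subs = [('%{lit}',    '%{python} /src/utils/lit/lit.py'),
            ('%{python}', '/usr/bin/python'),
            ('%{inputs}', '/src/utils/lit/tests/Inputs')]

    line = 'RUN: %{lit} -j 1 -v %{inputs}/discovery'
    for pattern, replacement in subs:
        line = line.replace(pattern, replacement)
    print(line)
    # -> RUN: /usr/bin/python /src/utils/lit/lit.py -j 1 -v ...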
diff --git a/utils/lit/tests/shell-parsing.py b/utils/lit/tests/shell-parsing.py
index f644132f29d3c..a07e988861fad 100644
--- a/utils/lit/tests/shell-parsing.py
+++ b/utils/lit/tests/shell-parsing.py
@@ -1,3 +1,3 @@
# Just run the ShUtil unit tests.
#
-# RUN: python -m lit.ShUtil
+# RUN: %{python} -m lit.ShUtil
diff --git a/utils/lit/tests/shtest-encoding.py b/utils/lit/tests/shtest-encoding.py
new file mode 100644
index 0000000000000..dfc987f6df7ee
--- /dev/null
+++ b/utils/lit/tests/shtest-encoding.py
@@ -0,0 +1,3 @@
+# RUN: true
+
+# Here is a string that cannot be decoded in line mode: Â.
diff --git a/utils/lit/tests/shtest-format.py b/utils/lit/tests/shtest-format.py
index 4b36873a3d7fd..751f0d7080306 100644
--- a/utils/lit/tests/shtest-format.py
+++ b/utils/lit/tests/shtest-format.py
@@ -7,15 +7,43 @@
# CHECK: -- Testing:
+# CHECK: PASS: shtest-format :: argv0.txt
# CHECK: FAIL: shtest-format :: external_shell/fail.txt
-# CHECK: *** TEST 'shtest-format :: external_shell/fail.txt' FAILED ***
+# CHECK-NEXT: *** TEST 'shtest-format :: external_shell/fail.txt' FAILED ***
+# CHECK: Command Output (stdout):
+# CHECK-NEXT: --
+# CHECK-NEXT: line 1: failed test output on stdout
+# CHECK-NEXT: line 2: failed test output on stdout
# CHECK: Command Output (stderr):
-# CHECK: cat: does-not-exist: No such file or directory
+# CHECK-NEXT: --
+# CHECK-NEXT: cat: does-not-exist: No such file or directory
+# CHECK: --
+
+# CHECK: FAIL: shtest-format :: external_shell/fail_with_bad_encoding.txt
+# CHECK-NEXT: *** TEST 'shtest-format :: external_shell/fail_with_bad_encoding.txt' FAILED ***
+# CHECK: Command Output (stdout):
+# CHECK-NEXT: --
+# CHECK-NEXT: a line with bad encoding:
# CHECK: --
# CHECK: PASS: shtest-format :: external_shell/pass.txt
# CHECK: FAIL: shtest-format :: fail.txt
+# CHECK-NEXT: *** TEST 'shtest-format :: fail.txt' FAILED ***
+# CHECK-NEXT: Script:
+# CHECK-NEXT: --
+# CHECK-NEXT: printf "line 1
+# CHECK-NEXT: false
+# CHECK-NEXT: --
+# CHECK-NEXT: Exit Code: 1
+#
+# CHECK: Command Output (stdout):
+# CHECK-NEXT: --
+# CHECK-NEXT: Command 0: "printf"
+# CHECK-NEXT: Command 0 Result: 0
+# CHECK-NEXT: Command 0 Output:
+# CHECK-NEXT: line 1: failed test output on stdout
+# CHECK-NEXT: line 2: failed test output on stdout
# CHECK: UNRESOLVED: shtest-format :: no-test-line.txt
# CHECK: PASS: shtest-format :: pass.txt
@@ -26,18 +54,24 @@
# CHECK: XFAIL: shtest-format :: xfail-target.txt
# CHECK: XFAIL: shtest-format :: xfail.txt
# CHECK: XPASS: shtest-format :: xpass.txt
+# CHECK-NEXT: *** TEST 'shtest-format :: xpass.txt' FAILED ***
+# CHECK-NEXT: Script
+# CHECK-NEXT: --
+# CHECK-NEXT: true
+# CHECK-NEXT: --
# CHECK: Testing Time
# CHECK: Unexpected Passing Tests (1)
# CHECK: shtest-format :: xpass.txt
-# CHECK: Failing Tests (2)
+# CHECK: Failing Tests (3)
# CHECK: shtest-format :: external_shell/fail.txt
+# CHECK: shtest-format :: external_shell/fail_with_bad_encoding.txt
# CHECK: shtest-format :: fail.txt
-# CHECK: Expected Passes : 3
+# CHECK: Expected Passes : 4
# CHECK: Expected Failures : 3
# CHECK: Unsupported Tests : 2
# CHECK: Unresolved Tests : 1
# CHECK: Unexpected Passes : 1
-# CHECK: Unexpected Failures: 2
+# CHECK: Unexpected Failures: 3
diff --git a/utils/lit/tests/test-data.py b/utils/lit/tests/test-data.py
new file mode 100644
index 0000000000000..54909d7338e96
--- /dev/null
+++ b/utils/lit/tests/test-data.py
@@ -0,0 +1,12 @@
+# Test features related to formats which support reporting additional test data.
+
+# RUN: %{lit} -j 1 -v %{inputs}/test-data > %t.out
+# RUN: FileCheck < %t.out %s
+
+# CHECK: -- Testing:
+
+# CHECK: PASS: test-data :: metrics.ini
+# CHECK-NEXT: *** TEST 'test-data :: metrics.ini' RESULTS ***
+# CHECK-NEXT: value0: 1
+# CHECK-NEXT: value1: 2.3456
+# CHECK-NEXT: ***
diff --git a/utils/lit/tests/test-output.py b/utils/lit/tests/test-output.py
new file mode 100644
index 0000000000000..adfbcd88f22af
--- /dev/null
+++ b/utils/lit/tests/test-output.py
@@ -0,0 +1,21 @@
+# XFAIL: python2.5
+
+# RUN: %{lit} -j 1 -v %{inputs}/test-data --output %t.results.out > %t.out
+# RUN: FileCheck < %t.results.out %s
+
+# CHECK: {
+# CHECK: "__version__"
+# CHECK: "elapsed"
+# CHECK-NEXT: "tests": [
+# CHECK-NEXT: {
+# CHECK-NEXT: "code": "PASS",
+# CHECK-NEXT: "elapsed": {{[0-9.]+}},
+# CHECK-NEXT: "metrics": {
+# CHECK-NEXT: "value0": 1,
+# CHECK-NEXT: "value1": 2.3456
+# CHECK-NEXT: }
+# CHECK-NEXT: "name": "test-data :: metrics.ini",
+# CHECK-NEXT: "output": "Test passed."
+# CHECK-NEXT: }
+# CHECK-NEXT: ]
+# CHECK-NEXT: }
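The file checked above is plain JSON, so downstream tooling can consume it directly. A minimal reader sketch (the filename and the exact field set are taken from the CHECK lines, so treat them as assumptions):

    import json

    with open('results.out') as f:   # whatever was passed to --output
        data = json.load(f)

    for test in data['tests']:
        print(test['name'], test['code'], test.get('metrics', {}))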
diff --git a/utils/lit/tests/unittest-adaptor.py b/utils/lit/tests/unittest-adaptor.py
index 243dd4191d0df..7435dda41968b 100644
--- a/utils/lit/tests/unittest-adaptor.py
+++ b/utils/lit/tests/unittest-adaptor.py
@@ -1,6 +1,6 @@
# Check the lit adaptation to run under unittest.
#
-# RUN: python %s %{inputs}/unittest-adaptor 2> %t.err
+# RUN: %{python} %s %{inputs}/unittest-adaptor 2> %t.err
# RUN: FileCheck < %t.err %s
#
# CHECK: unittest-adaptor :: test-one.txt ... ok