Diffstat (limited to 'utils/lit')
-rw-r--r--  utils/lit/lit/BooleanExpression.py                                | 251
-rw-r--r--  utils/lit/lit/LitConfig.py                                        |   4
-rw-r--r--  utils/lit/lit/ShCommands.py                                       |  23
-rw-r--r--  utils/lit/lit/ShUtil.py                                           |  36
-rw-r--r--  utils/lit/lit/Test.py                                             | 118
-rw-r--r--  utils/lit/lit/TestRunner.py                                       | 227
-rw-r--r--  utils/lit/lit/TestingConfig.py                                    |   3
-rwxr-xr-x  utils/lit/lit/main.py                                             |  50
-rw-r--r--  utils/lit/lit/run.py                                              | 224
-rw-r--r--  utils/lit/lit/util.py                                             |   4
-rw-r--r--  utils/lit/tests/Inputs/shtest-format/requires-missing.txt         |   7
-rw-r--r--  utils/lit/tests/Inputs/shtest-format/requires-present.txt         |   4
-rw-r--r--  utils/lit/tests/Inputs/shtest-format/requires-star.txt            |   3
-rw-r--r--  utils/lit/tests/Inputs/shtest-format/requires-triple.txt          |   3
-rw-r--r--  utils/lit/tests/Inputs/shtest-format/unsupported-expr-false.txt   |   9
-rw-r--r--  utils/lit/tests/Inputs/shtest-format/unsupported-expr-true.txt    |   4
-rw-r--r--  utils/lit/tests/Inputs/shtest-format/unsupported-star.txt         |   3
-rw-r--r--  utils/lit/tests/Inputs/shtest-format/xfail-expr-false.txt         |   3
-rw-r--r--  utils/lit/tests/Inputs/shtest-format/xfail-expr-true.txt          |   4
-rw-r--r--  utils/lit/tests/Inputs/test-data/dummy_format.py                  |  38
-rw-r--r--  utils/lit/tests/Inputs/test-data/lit.cfg                          |  42
-rw-r--r--  utils/lit/tests/boolean-parsing.py                                |   4
-rw-r--r--  utils/lit/tests/selecting.py                                      |  90
-rw-r--r--  utils/lit/tests/shtest-format.py                                  |  15
-rw-r--r--  utils/lit/tests/unit/TestRunner.py                                |  61
25 files changed, 1047 insertions, 183 deletions
diff --git a/utils/lit/lit/BooleanExpression.py b/utils/lit/lit/BooleanExpression.py
new file mode 100644
index 0000000000000..3eb5060de3e30
--- /dev/null
+++ b/utils/lit/lit/BooleanExpression.py
@@ -0,0 +1,251 @@
+import re
+
+class BooleanExpression:
+ # A simple evaluator of boolean expressions.
+ #
+ # Grammar:
+ # expr :: or_expr
+ # or_expr :: and_expr ('||' and_expr)*
+ # and_expr :: not_expr ('&&' not_expr)*
+ # not_expr :: '!' not_expr
+ # '(' or_expr ')'
+ # identifier
+ # identifier :: [-+=._a-zA-Z0-9]+
+
+ # Evaluates `string` as a boolean expression.
+ # Returns True or False. Throws a ValueError on syntax error.
+ #
+ # Variables in `variables` are true.
+ # Substrings of `triple` are true.
+ # 'true' is true.
+ # All other identifiers are false.
+ @staticmethod
+ def evaluate(string, variables, triple=""):
+ try:
+ parser = BooleanExpression(string, set(variables), triple)
+ return parser.parseAll()
+ except ValueError as e:
+ raise ValueError(str(e) + ('\nin expression: %r' % string))
+
+ #####
+
+ def __init__(self, string, variables, triple=""):
+ self.tokens = BooleanExpression.tokenize(string)
+ self.variables = variables
+ self.variables.add('true')
+ self.triple = triple
+ self.value = None
+ self.token = None
+
+ # Singleton end-of-expression marker.
+ END = object()
+
+ # Tokenization pattern.
+ Pattern = re.compile(r'\A\s*([()]|[-+=._a-zA-Z0-9]+|&&|\|\||!)\s*(.*)\Z')
+
+ @staticmethod
+ def tokenize(string):
+ while True:
+ m = re.match(BooleanExpression.Pattern, string)
+ if m is None:
+ if string == "":
+ yield BooleanExpression.END
+ return
+ else:
+ raise ValueError("couldn't parse text: %r" % string)
+
+ token = m.group(1)
+ string = m.group(2)
+ yield token
+
+ def quote(self, token):
+ if token is BooleanExpression.END:
+ return '<end of expression>'
+ else:
+ return repr(token)
+
+ def accept(self, t):
+ if self.token == t:
+ self.token = next(self.tokens)
+ return True
+ else:
+ return False
+
+ def expect(self, t):
+ if self.token == t:
+ if self.token != BooleanExpression.END:
+ self.token = next(self.tokens)
+ else:
+ raise ValueError("expected: %s\nhave: %s" %
+ (self.quote(t), self.quote(self.token)))
+
+ def isIdentifier(self, t):
+ if (t is BooleanExpression.END or t == '&&' or t == '||' or
+ t == '!' or t == '(' or t == ')'):
+ return False
+ return True
+
+ def parseNOT(self):
+ if self.accept('!'):
+ self.parseNOT()
+ self.value = not self.value
+ elif self.accept('('):
+ self.parseOR()
+ self.expect(')')
+ elif not self.isIdentifier(self.token):
+ raise ValueError("expected: '!' or '(' or identifier\nhave: %s" %
+ self.quote(self.token))
+ else:
+ self.value = (self.token in self.variables or
+ self.token in self.triple)
+ self.token = next(self.tokens)
+
+ def parseAND(self):
+ self.parseNOT()
+ while self.accept('&&'):
+ left = self.value
+ self.parseNOT()
+ right = self.value
+ # this is technically the wrong associativity, but it
+ # doesn't matter for this limited expression grammar
+ self.value = left and right
+
+ def parseOR(self):
+ self.parseAND()
+ while self.accept('||'):
+ left = self.value
+ self.parseAND()
+ right = self.value
+ # this is technically the wrong associativity, but it
+ # doesn't matter for this limited expression grammar
+ self.value = left or right
+
+ def parseAll(self):
+ self.token = next(self.tokens)
+ self.parseOR()
+ self.expect(BooleanExpression.END)
+ return self.value
+
+
+#######
+# Tests
+
+import unittest
+
+class TestBooleanExpression(unittest.TestCase):
+ def test_variables(self):
+ variables = {'its-true', 'false-lol-true', 'under_score',
+ 'e=quals', 'd1g1ts'}
+ self.assertTrue(BooleanExpression.evaluate('true', variables))
+ self.assertTrue(BooleanExpression.evaluate('its-true', variables))
+ self.assertTrue(BooleanExpression.evaluate('false-lol-true', variables))
+ self.assertTrue(BooleanExpression.evaluate('under_score', variables))
+ self.assertTrue(BooleanExpression.evaluate('e=quals', variables))
+ self.assertTrue(BooleanExpression.evaluate('d1g1ts', variables))
+
+ self.assertFalse(BooleanExpression.evaluate('false', variables))
+ self.assertFalse(BooleanExpression.evaluate('True', variables))
+ self.assertFalse(BooleanExpression.evaluate('true-ish', variables))
+ self.assertFalse(BooleanExpression.evaluate('not_true', variables))
+ self.assertFalse(BooleanExpression.evaluate('tru', variables))
+
+ def test_triple(self):
+ triple = 'arch-vendor-os'
+ self.assertTrue(BooleanExpression.evaluate('arch-', {}, triple))
+ self.assertTrue(BooleanExpression.evaluate('ar', {}, triple))
+ self.assertTrue(BooleanExpression.evaluate('ch-vend', {}, triple))
+ self.assertTrue(BooleanExpression.evaluate('-vendor-', {}, triple))
+ self.assertTrue(BooleanExpression.evaluate('-os', {}, triple))
+ self.assertFalse(BooleanExpression.evaluate('arch-os', {}, triple))
+
+ def test_operators(self):
+ self.assertTrue(BooleanExpression.evaluate('true || true', {}))
+ self.assertTrue(BooleanExpression.evaluate('true || false', {}))
+ self.assertTrue(BooleanExpression.evaluate('false || true', {}))
+ self.assertFalse(BooleanExpression.evaluate('false || false', {}))
+
+ self.assertTrue(BooleanExpression.evaluate('true && true', {}))
+ self.assertFalse(BooleanExpression.evaluate('true && false', {}))
+ self.assertFalse(BooleanExpression.evaluate('false && true', {}))
+ self.assertFalse(BooleanExpression.evaluate('false && false', {}))
+
+ self.assertFalse(BooleanExpression.evaluate('!true', {}))
+ self.assertTrue(BooleanExpression.evaluate('!false', {}))
+
+ self.assertTrue(BooleanExpression.evaluate(' ((!((false) )) ) ', {}))
+ self.assertTrue(BooleanExpression.evaluate('true && (true && (true))', {}))
+ self.assertTrue(BooleanExpression.evaluate('!false && !false && !! !false', {}))
+ self.assertTrue(BooleanExpression.evaluate('false && false || true', {}))
+ self.assertTrue(BooleanExpression.evaluate('(false && false) || true', {}))
+ self.assertFalse(BooleanExpression.evaluate('false && (false || true)', {}))
+
+ # Evaluate boolean expression `expr`.
+ # Fail if it does not throw a ValueError containing the text `error`.
+ def checkException(self, expr, error):
+ try:
+ BooleanExpression.evaluate(expr, {})
+ self.fail("expression %r didn't cause an exception" % expr)
+ except ValueError as e:
+ if -1 == str(e).find(error):
+ self.fail(("expression %r caused the wrong ValueError\n" +
+ "actual error was:\n%s\n" +
+ "expected error was:\n%s\n") % (expr, e, error))
+ except BaseException as e:
+ self.fail(("expression %r caused the wrong exception; actual " +
+ "exception was: \n%r") % (expr, e))
+
+ def test_errors(self):
+ self.checkException("ba#d",
+ "couldn't parse text: '#d'\n" +
+ "in expression: 'ba#d'")
+
+ self.checkException("true and true",
+ "expected: <end of expression>\n" +
+ "have: 'and'\n" +
+ "in expression: 'true and true'")
+
+ self.checkException("|| true",
+ "expected: '!' or '(' or identifier\n" +
+ "have: '||'\n" +
+ "in expression: '|| true'")
+
+ self.checkException("true &&",
+ "expected: '!' or '(' or identifier\n" +
+ "have: <end of expression>\n" +
+ "in expression: 'true &&'")
+
+ self.checkException("",
+ "expected: '!' or '(' or identifier\n" +
+ "have: <end of expression>\n" +
+ "in expression: ''")
+
+ self.checkException("*",
+ "couldn't parse text: '*'\n" +
+ "in expression: '*'")
+
+ self.checkException("no wait stop",
+ "expected: <end of expression>\n" +
+ "have: 'wait'\n" +
+ "in expression: 'no wait stop'")
+
+ self.checkException("no-$-please",
+ "couldn't parse text: '$-please'\n" +
+ "in expression: 'no-$-please'")
+
+ self.checkException("(((true && true) || true)",
+ "expected: ')'\n" +
+ "have: <end of expression>\n" +
+ "in expression: '(((true && true) || true)'")
+
+ self.checkException("true (true)",
+ "expected: <end of expression>\n" +
+ "have: '('\n" +
+ "in expression: 'true (true)'")
+
+ self.checkException("( )",
+ "expected: '!' or '(' or identifier\n" +
+ "have: ')'\n" +
+ "in expression: '( )'")
+
+if __name__ == '__main__':
+ unittest.main()
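For reference, a minimal sketch of the new evaluator's contract (the feature names and triple below are invented for illustration; only 'true', members of the variable set, and substrings of the triple evaluate to true):

    from lit.BooleanExpression import BooleanExpression

    features = {'asserts', 'zlib'}          # hypothetical available features
    triple = 'x86_64-unknown-linux-gnu'     # hypothetical target triple

    BooleanExpression.evaluate('asserts && !zlib', features)      # False
    BooleanExpression.evaluate('asserts || missing', features)    # True
    BooleanExpression.evaluate('x86_64', features, triple)        # True: substring of the triple
    BooleanExpression.evaluate('asserts and zlib', features)      # raises ValueError ('and' is just an identifier)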
diff --git a/utils/lit/lit/LitConfig.py b/utils/lit/lit/LitConfig.py
index fc50ffc1039d2..2b680846e1767 100644
--- a/utils/lit/lit/LitConfig.py
+++ b/utils/lit/lit/LitConfig.py
@@ -24,7 +24,8 @@ class LitConfig(object):
noExecute, debug, isWindows,
params, config_prefix = None,
maxIndividualTestTime = 0,
- maxFailures = None):
+ maxFailures = None,
+ parallelism_groups = []):
# The name of the test runner.
self.progname = progname
# The items to add to the PATH environment variable.
@@ -62,6 +63,7 @@ class LitConfig(object):
self.maxIndividualTestTime = maxIndividualTestTime
self.maxFailures = maxFailures
+ self.parallelism_groups = parallelism_groups
@property
def maxIndividualTestTime(self):
diff --git a/utils/lit/lit/ShCommands.py b/utils/lit/lit/ShCommands.py
index 9ca9e8c91c0d4..01e91c55da989 100644
--- a/utils/lit/lit/ShCommands.py
+++ b/utils/lit/lit/ShCommands.py
@@ -35,6 +35,29 @@ class Command:
else:
file.write("%s%s '%s'" % (r[0][1], r[0][0], r[1]))
+class GlobItem:
+ def __init__(self, pattern):
+ self.pattern = pattern
+
+ def __repr__(self):
+ return self.pattern
+
+ def __eq__(self, other):
+ if not isinstance(other, GlobItem):
+ return False
+
+ return (self.pattern == other.pattern)
+
+ def resolve(self, cwd):
+ import glob
+ import os
+ if os.path.isabs(self.pattern):
+ abspath = self.pattern
+ else:
+ abspath = os.path.join(cwd, self.pattern)
+ results = glob.glob(abspath)
+ return [self.pattern] if len(results) == 0 else results
+
class Pipeline:
def __init__(self, commands, negate=False, pipe_err=False):
self.commands = commands
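For illustration, how the new GlobItem resolves against a working directory (paths are made up; when nothing matches, the literal pattern is returned so the command still sees the original argument):

    from lit.ShCommands import GlobItem

    GlobItem('*.txt').resolve('/tmp/work')          # e.g. ['/tmp/work/a.txt', '/tmp/work/b.txt']
    GlobItem('missing-*.xyz').resolve('/tmp/work')  # ['missing-*.xyz'] -- no matches, pattern kept as-is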
diff --git a/utils/lit/lit/ShUtil.py b/utils/lit/lit/ShUtil.py
index 0b3e0f58c9773..00ec8ab004936 100644
--- a/utils/lit/lit/ShUtil.py
+++ b/utils/lit/lit/ShUtil.py
@@ -2,7 +2,7 @@ from __future__ import absolute_import
import itertools
import lit.util
-from lit.ShCommands import Command, Pipeline, Seq
+from lit.ShCommands import Command, GlobItem, Pipeline, Seq
class ShLexer:
def __init__(self, data, win32Escapes = False):
@@ -40,13 +40,15 @@ class ShLexer:
return None
self.pos = self.pos - 1 + len(chunk)
- return chunk
+ return GlobItem(chunk) if '*' in chunk or '?' in chunk else chunk
def lex_arg_slow(self, c):
if c in "'\"":
str = self.lex_arg_quoted(c)
else:
str = c
+ unquoted_glob_char = False
+ quoted_glob_char = False
while self.pos != self.end:
c = self.look()
if c.isspace() or c in "|&;":
@@ -65,12 +67,12 @@ class ShLexer:
tok = self.lex_one_token()
assert isinstance(tok, tuple) and len(tok) == 1
return (tok[0], num)
- elif c == '"':
+ elif c == '"' or c == "'":
self.eat()
- str += self.lex_arg_quoted('"')
- elif c == "'":
- self.eat()
- str += self.lex_arg_quoted("'")
+ quoted_arg = self.lex_arg_quoted(c)
+ if '*' in quoted_arg or '?' in quoted_arg:
+ quoted_glob_char = True
+ str += quoted_arg
elif not self.win32Escapes and c == '\\':
# Outside of a string, '\\' escapes everything.
self.eat()
@@ -79,9 +81,25 @@ class ShLexer:
"escape at end of quoted argument in: %r" % self.data)
return str
str += self.eat()
+ elif c in '*?':
+ unquoted_glob_char = True
+ str += self.eat()
else:
str += self.eat()
- return str
+ # If a quote character is present, lex_arg_quoted will remove the quotes
+ # and append the argument directly. This causes a problem when the
+ # quoted portion contains a glob character, as the character will no
+ # longer be treated literally. If glob characters occur *only* inside
+ # of quotes, then we can handle this by not globbing at all, and if
+ # glob characters occur *only* outside of quotes, we can still glob just
+ # fine. But if a glob character occurs both inside and outside of
+ # quotes this presents a problem. In practice this is such an obscure
+ # edge case that it doesn't seem worth the added complexity to support.
+ # Adding an assertion here means some bot somewhere will catch the case
+ # and flag the non-portable test (which could almost certainly be
+ # re-written to work correctly without triggering it).
+ assert not (quoted_glob_char and unquoted_glob_char)
+ return GlobItem(str) if unquoted_glob_char else str
def lex_arg_quoted(self, delim):
str = ''
@@ -202,7 +220,7 @@ class ShParser:
break
# If this is an argument, just add it to the current command.
- if isinstance(tok, str):
+ if isinstance(tok, (str, GlobItem)):
args.append(self.lex())
continue
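A sketch of the lexing behaviour this hunk introduces (the call below mirrors how ShUtil's own unit tests drive the lexer; treat it as illustrative):

    from lit.ShUtil import ShLexer

    list(ShLexer("cat *.txt '*.txt'").lex())
    # -> ['cat', GlobItem('*.txt'), '*.txt']
    # The unquoted glob becomes a GlobItem and is expanded by the internal shell;
    # the quoted copy stays a plain string and is passed through literally.
    # Using quoted and unquoted glob characters in the same word trips the
    # assertion added above.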
diff --git a/utils/lit/lit/Test.py b/utils/lit/lit/Test.py
index 657a7e8140d7a..1a9e3fe80fb39 100644
--- a/utils/lit/lit/Test.py
+++ b/utils/lit/lit/Test.py
@@ -2,6 +2,8 @@ import os
from xml.sax.saxutils import escape
from json import JSONEncoder
+from lit.BooleanExpression import BooleanExpression
+
# Test result codes.
class ResultCode(object):
@@ -180,10 +182,24 @@ class Test:
self.path_in_suite = path_in_suite
self.config = config
self.file_path = file_path
- # A list of conditions under which this test is expected to fail. These
- # can optionally be provided by test format handlers, and will be
- # honored when the test result is supplied.
+
+ # A list of conditions under which this test is expected to fail.
+ # Each condition is a boolean expression of features and target
+ # triple parts. These can optionally be provided by test format
+ # handlers, and will be honored when the test result is supplied.
self.xfails = []
+
+ # A list of conditions that must be satisfied before running the test.
+ # Each condition is a boolean expression of features. All of them
+ # must be True for the test to run.
+ # FIXME should target triple parts count here too?
+ self.requires = []
+
+ # A list of conditions that prevent execution of the test.
+ # Each condition is a boolean expression of features and target
+ # triple parts. All of them must be False for the test to run.
+ self.unsupported = []
+
# The test result, once complete.
self.result = None
@@ -196,11 +212,16 @@ class Test:
self.result = result
# Apply the XFAIL handling to resolve the result exit code.
- if self.isExpectedToFail():
- if self.result.code == PASS:
- self.result.code = XPASS
- elif self.result.code == FAIL:
- self.result.code = XFAIL
+ try:
+ if self.isExpectedToFail():
+ if self.result.code == PASS:
+ self.result.code = XPASS
+ elif self.result.code == FAIL:
+ self.result.code = XFAIL
+ except ValueError as e:
+ # Syntax error in an XFAIL line.
+ self.result.code = UNRESOLVED
+ self.result.output = str(e)
def getFullName(self):
return self.suite.config.name + ' :: ' + '/'.join(self.path_in_suite)
@@ -224,24 +245,91 @@ class Test:
configuration. This check relies on the test xfails property which by
some test formats may not be computed until the test has first been
executed.
+ Throws ValueError if an XFAIL line has a syntax error.
"""
+ features = self.config.available_features
+ triple = getattr(self.suite.config, 'target_triple', "")
+
# Check if any of the xfails match an available feature or the target.
for item in self.xfails:
# If this is the wildcard, it always fails.
if item == '*':
return True
- # If this is an exact match for one of the features, it fails.
- if item in self.config.available_features:
- return True
-
- # If this is a part of the target triple, it fails.
- if item and item in self.suite.config.target_triple:
- return True
+ # If this is a True expression of features and target triple parts,
+ # it fails.
+ try:
+ if BooleanExpression.evaluate(item, features, triple):
+ return True
+ except ValueError as e:
+ raise ValueError('Error in XFAIL list:\n%s' % str(e))
return False
+ def isWithinFeatureLimits(self):
+ """
+ isWithinFeatureLimits() -> bool
+
+ A test is within the feature limits set by run_only_tests if
+ 1. the test's requirements ARE satisfied by the available features
+ 2. the test's requirements ARE NOT satisfied after the limiting
+ features are removed from the available features
+
+ Throws ValueError if a REQUIRES line has a syntax error.
+ """
+
+ if not self.config.limit_to_features:
+ return True # No limits. Run it.
+
+ # Check the requirements as-is (#1)
+ if self.getMissingRequiredFeatures():
+ return False
+
+ # Check the requirements after removing the limiting features (#2)
+ featuresMinusLimits = [f for f in self.config.available_features
+ if not f in self.config.limit_to_features]
+ if not self.getMissingRequiredFeaturesFromList(featuresMinusLimits):
+ return False
+
+ return True
+
+ def getMissingRequiredFeaturesFromList(self, features):
+ try:
+ return [item for item in self.requires
+ if not BooleanExpression.evaluate(item, features)]
+ except ValueError as e:
+ raise ValueError('Error in REQUIRES list:\n%s' % str(e))
+
+ def getMissingRequiredFeatures(self):
+ """
+ getMissingRequiredFeatures() -> list of strings
+
+ Returns a list of features from REQUIRES that are not satisfied.
+ Throws ValueError if a REQUIRES line has a syntax error.
+ """
+
+ features = self.config.available_features
+ return self.getMissingRequiredFeaturesFromList(features)
+
+ def getUnsupportedFeatures(self):
+ """
+ getUnsupportedFeatures() -> list of strings
+
+ Returns a list of features from UNSUPPORTED that are present
+ in the test configuration's features or target triple.
+ Throws ValueError if an UNSUPPORTED line has a syntax error.
+ """
+
+ features = self.config.available_features
+ triple = getattr(self.suite.config, 'target_triple', "")
+
+ try:
+ return [item for item in self.unsupported
+ if BooleanExpression.evaluate(item, features, triple)]
+ except ValueError as e:
+ raise ValueError('Error in UNSUPPORTED list:\n%s' % str(e))
+
def isEarlyTest(self):
"""
isEarlyTest() -> bool
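Taken together, these fields give the test directives boolean-expression semantics. A hedged sketch of what a test can now write (feature and triple names are invented):

    # XFAIL: !asserts && windows          -- expected to fail only in that combination
    # REQUIRES: shell, zlib               -- every comma-separated clause must be true to run
    # UNSUPPORTED: arm, powerpc           -- any true clause disables the test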
diff --git a/utils/lit/lit/TestRunner.py b/utils/lit/lit/TestRunner.py
index 942ae38a04c5d..3fb9def26ee88 100644
--- a/utils/lit/lit/TestRunner.py
+++ b/utils/lit/lit/TestRunner.py
@@ -5,10 +5,12 @@ import platform
import tempfile
import threading
+from lit.ShCommands import GlobItem
import lit.ShUtil as ShUtil
import lit.Test as Test
import lit.util
from lit.util import to_bytes, to_string
+from lit.BooleanExpression import BooleanExpression
class InternalShellError(Exception):
def __init__(self, command, message):
@@ -140,6 +142,17 @@ def executeShCmd(cmd, shenv, results, timeout=0):
return (finalExitCode, timeoutInfo)
+def expand_glob(arg, cwd):
+ if isinstance(arg, GlobItem):
+ return arg.resolve(cwd)
+ return [arg]
+
+def expand_glob_expressions(args, cwd):
+ result = [args[0]]
+ for arg in args[1:]:
+ result.extend(expand_glob(arg, cwd))
+ return result
+
def quote_windows_command(seq):
"""
Reimplement Python's private subprocess.list2cmdline for MSys compatibility
@@ -196,6 +209,18 @@ def quote_windows_command(seq):
return ''.join(result)
+# cmd is export or env
+def updateEnv(env, cmd):
+ arg_idx = 1
+ for arg_idx, arg in enumerate(cmd.args[1:]):
+ # Partition the string into KEY=VALUE.
+ key, eq, val = arg.partition('=')
+ # Stop if there was no equals.
+ if eq == '':
+ break
+ env.env[key] = val
+ cmd.args = cmd.args[arg_idx+1:]
+
def _executeShCmd(cmd, shenv, results, timeoutHelper):
if timeoutHelper.timeoutReached():
# Prevent further recursion if the timeout has been hit
@@ -239,11 +264,19 @@ def _executeShCmd(cmd, shenv, results, timeoutHelper):
if os.path.isabs(newdir):
shenv.cwd = newdir
else:
- shenv.cwd = os.path.join(shenv.cwd, newdir)
+ shenv.cwd = os.path.realpath(os.path.join(shenv.cwd, newdir))
# The cd builtin always succeeds. If the directory does not exist, the
# following Popen calls will fail instead.
return 0
+ if cmd.commands[0].args[0] == 'export':
+ if len(cmd.commands) != 1:
+ raise ValueError("'export' cannot be part of a pipeline")
+ if len(cmd.commands[0].args) != 2:
+ raise ValueError("'export' supports only one argument")
+ updateEnv(shenv, cmd.commands[0])
+ return 0
+
procs = []
input = subprocess.PIPE
stderrTempFiles = []
@@ -260,15 +293,7 @@ def _executeShCmd(cmd, shenv, results, timeoutHelper):
# command. There might be multiple envs in a pipeline:
# env FOO=1 llc < %s | env BAR=2 llvm-mc | FileCheck %s
cmd_shenv = ShellEnvironment(shenv.cwd, shenv.env)
- arg_idx = 1
- for arg_idx, arg in enumerate(j.args[1:]):
- # Partition the string into KEY=VALUE.
- key, eq, val = arg.partition('=')
- # Stop if there was no equals.
- if eq == '':
- break
- cmd_shenv.env[key] = val
- j.args = j.args[arg_idx+1:]
+ updateEnv(cmd_shenv, j)
# Apply the redirections, we use (N,) as a sentinel to indicate stdin,
# stdout, stderr for N equal to 0, 1, or 2 respectively. Redirects to or
@@ -312,15 +337,19 @@ def _executeShCmd(cmd, shenv, results, timeoutHelper):
else:
if r[2] is None:
redir_filename = None
- if kAvoidDevNull and r[0] == '/dev/null':
+ name = expand_glob(r[0], cmd_shenv.cwd)
+ if len(name) != 1:
+ raise InternalShellError(j,"Unsupported: glob in redirect expanded to multiple files")
+ name = name[0]
+ if kAvoidDevNull and name == '/dev/null':
r[2] = tempfile.TemporaryFile(mode=r[1])
- elif kIsWindows and r[0] == '/dev/tty':
+ elif kIsWindows and name == '/dev/tty':
# Simulate /dev/tty on Windows.
# "CON" is a special filename for the console.
r[2] = open("CON", r[1])
else:
# Make sure relative paths are relative to the cwd.
- redir_filename = os.path.join(cmd_shenv.cwd, r[0])
+ redir_filename = os.path.join(cmd_shenv.cwd, name)
r[2] = open(redir_filename, r[1])
# Workaround a Win32 and/or subprocess bug when appending.
#
@@ -371,6 +400,9 @@ def _executeShCmd(cmd, shenv, results, timeoutHelper):
named_temp_files.append(f.name)
args[i] = f.name
+ # Expand all glob expressions
+ args = expand_glob_expressions(args, cmd_shenv.cwd)
+
# On Windows, do our own command line quoting for better compatibility
# with some core utility distributions.
if kIsWindows:
@@ -686,11 +718,14 @@ def getDefaultSubstitutions(test, tmpDir, tmpBase, normalize_slashes=False):
substitutions = []
substitutions.extend([('%%', '#_MARKER_#')])
substitutions.extend(test.config.substitutions)
+ tmpName = tmpBase + '.tmp'
+ baseName = os.path.basename(tmpBase)
substitutions.extend([('%s', sourcepath),
('%S', sourcedir),
('%p', sourcedir),
('%{pathsep}', os.pathsep),
- ('%t', tmpBase + '.tmp'),
+ ('%t', tmpName),
+ ('%basename_t', baseName),
('%T', tmpDir),
('#_MARKER_#', '%')])
@@ -746,14 +781,35 @@ class ParserKind(object):
command.
TAG: A keyword taking no value. Ex 'END.'
- COMMAND: A Keyword taking a list of shell commands. Ex 'RUN:'
- LIST: A keyword taking a comma separated list of value. Ex 'XFAIL:'
+ COMMAND: A keyword taking a list of shell commands. Ex 'RUN:'
+ LIST: A keyword taking a comma-separated list of values.
+ BOOLEAN_EXPR: A keyword taking a comma-separated list of
+ boolean expressions. Ex 'XFAIL:'
CUSTOM: A keyword with custom parsing semantics.
"""
TAG = 0
COMMAND = 1
LIST = 2
- CUSTOM = 3
+ BOOLEAN_EXPR = 3
+ CUSTOM = 4
+
+ @staticmethod
+ def allowedKeywordSuffixes(value):
+ return { ParserKind.TAG: ['.'],
+ ParserKind.COMMAND: [':'],
+ ParserKind.LIST: [':'],
+ ParserKind.BOOLEAN_EXPR: [':'],
+ ParserKind.CUSTOM: [':', '.']
+ } [value]
+
+ @staticmethod
+ def str(value):
+ return { ParserKind.TAG: 'TAG',
+ ParserKind.COMMAND: 'COMMAND',
+ ParserKind.LIST: 'LIST',
+ ParserKind.BOOLEAN_EXPR: 'BOOLEAN_EXPR',
+ ParserKind.CUSTOM: 'CUSTOM'
+ } [value]
class IntegratedTestKeywordParser(object):
@@ -765,15 +821,18 @@ class IntegratedTestKeywordParser(object):
ParserKind.CUSTOM.
"""
def __init__(self, keyword, kind, parser=None, initial_value=None):
- if not keyword.endswith('.') and not keyword.endswith(':'):
- raise ValueError("keyword '%s' must end with either '.' or ':' "
- % keyword)
- if keyword.endswith('.') and kind in \
- [ParserKind.LIST, ParserKind.COMMAND]:
- raise ValueError("Keyword '%s' should end in ':'" % keyword)
-
- elif keyword.endswith(':') and kind in [ParserKind.TAG]:
- raise ValueError("Keyword '%s' should end in '.'" % keyword)
+ allowedSuffixes = ParserKind.allowedKeywordSuffixes(kind)
+ if len(keyword) == 0 or keyword[-1] not in allowedSuffixes:
+ if len(allowedSuffixes) == 1:
+ raise ValueError("Keyword '%s' of kind '%s' must end in '%s'"
+ % (keyword, ParserKind.str(kind),
+ allowedSuffixes[0]))
+ else:
+ raise ValueError("Keyword '%s' of kind '%s' must end in "
+ " one of '%s'"
+ % (keyword, ParserKind.str(kind),
+ ' '.join(allowedSuffixes)))
+
if parser is not None and kind != ParserKind.CUSTOM:
raise ValueError("custom parsers can only be specified with "
"ParserKind.CUSTOM")
@@ -787,9 +846,9 @@ class IntegratedTestKeywordParser(object):
self.parser = self._handleCommand
elif kind == ParserKind.LIST:
self.parser = self._handleList
+ elif kind == ParserKind.BOOLEAN_EXPR:
+ self.parser = self._handleBooleanExpr
elif kind == ParserKind.TAG:
- if not keyword.endswith('.'):
- raise ValueError("keyword '%s' should end with '.'" % keyword)
self.parser = self._handleTag
elif kind == ParserKind.CUSTOM:
if parser is None:
@@ -799,8 +858,12 @@ class IntegratedTestKeywordParser(object):
raise ValueError("Unknown kind '%s'" % kind)
def parseLine(self, line_number, line):
- self.parsed_lines += [(line_number, line)]
- self.value = self.parser(line_number, line, self.value)
+ try:
+ self.parsed_lines += [(line_number, line)]
+ self.value = self.parser(line_number, line, self.value)
+ except ValueError as e:
+ raise ValueError(str(e) + ("\nin %s directive on test line %d" %
+ (self.keyword, line_number)))
def getValue(self):
return self.value
@@ -841,12 +904,38 @@ class IntegratedTestKeywordParser(object):
output.extend([s.strip() for s in line.split(',')])
return output
+ @staticmethod
+ def _handleBooleanExpr(line_number, line, output):
+ """A parser for BOOLEAN_EXPR type keywords"""
+ if output is None:
+ output = []
+ output.extend([s.strip() for s in line.split(',')])
+ # Evaluate each expression to verify syntax.
+ # We don't want any results, just the raised ValueError.
+ for s in output:
+ if s != '*':
+ BooleanExpression.evaluate(s, [])
+ return output
+
+ @staticmethod
+ def _handleRequiresAny(line_number, line, output):
+ """A custom parser to transform REQUIRES-ANY: into REQUIRES:"""
+
+ # Extract the conditions specified in REQUIRES-ANY: as written.
+ conditions = []
+ IntegratedTestKeywordParser._handleList(line_number, line, conditions)
+
+ # Output a `REQUIRES: a || b || c` expression in its place.
+ expression = ' || '.join(conditions)
+ IntegratedTestKeywordParser._handleBooleanExpr(line_number,
+ expression, output)
+ return output
def parseIntegratedTestScript(test, additional_parsers=[],
require_script=True):
"""parseIntegratedTestScript - Scan an LLVM/Clang style integrated test
script and extract the lines to 'RUN' as well as 'XFAIL' and 'REQUIRES'
- 'REQUIRES-ANY' and 'UNSUPPORTED' information.
+ and 'UNSUPPORTED' information.
If additional parsers are specified then the test is also scanned for the
keywords they specify and all matches are passed to the custom parser.
@@ -855,26 +944,26 @@ def parseIntegratedTestScript(test, additional_parsers=[],
may be returned. This can be used for test formats where the actual script
is optional or ignored.
"""
- # Collect the test lines from the script.
- sourcepath = test.getSourcePath()
+
+ # Install the built-in keyword parsers.
script = []
- requires = []
- requires_any = []
- unsupported = []
builtin_parsers = [
IntegratedTestKeywordParser('RUN:', ParserKind.COMMAND,
initial_value=script),
- IntegratedTestKeywordParser('XFAIL:', ParserKind.LIST,
+ IntegratedTestKeywordParser('XFAIL:', ParserKind.BOOLEAN_EXPR,
initial_value=test.xfails),
- IntegratedTestKeywordParser('REQUIRES:', ParserKind.LIST,
- initial_value=requires),
- IntegratedTestKeywordParser('REQUIRES-ANY:', ParserKind.LIST,
- initial_value=requires_any),
- IntegratedTestKeywordParser('UNSUPPORTED:', ParserKind.LIST,
- initial_value=unsupported),
+ IntegratedTestKeywordParser('REQUIRES:', ParserKind.BOOLEAN_EXPR,
+ initial_value=test.requires),
+ IntegratedTestKeywordParser('REQUIRES-ANY:', ParserKind.CUSTOM,
+ IntegratedTestKeywordParser._handleRequiresAny,
+ initial_value=test.requires),
+ IntegratedTestKeywordParser('UNSUPPORTED:', ParserKind.BOOLEAN_EXPR,
+ initial_value=test.unsupported),
IntegratedTestKeywordParser('END.', ParserKind.TAG)
]
keyword_parsers = {p.keyword: p for p in builtin_parsers}
+
+ # Install user-defined additional parsers.
for parser in additional_parsers:
if not isinstance(parser, IntegratedTestKeywordParser):
raise ValueError('additional parser must be an instance of '
@@ -883,7 +972,9 @@ def parseIntegratedTestScript(test, additional_parsers=[],
raise ValueError("Parser for keyword '%s' already exists"
% parser.keyword)
keyword_parsers[parser.keyword] = parser
-
+
+ # Collect the test lines from the script.
+ sourcepath = test.getSourcePath()
for line_number, command_type, ln in \
parseIntegratedTestScriptCommands(sourcepath,
keyword_parsers.keys()):
@@ -901,46 +992,30 @@ def parseIntegratedTestScript(test, additional_parsers=[],
return lit.Test.Result(Test.UNRESOLVED,
"Test has unterminated run lines (with '\\')")
- # Check that we have the required features:
- missing_required_features = [f for f in requires
- if f not in test.config.available_features]
+ # Enforce REQUIRES:
+ missing_required_features = test.getMissingRequiredFeatures()
if missing_required_features:
msg = ', '.join(missing_required_features)
return lit.Test.Result(Test.UNSUPPORTED,
- "Test requires the following features: %s"
- % msg)
- requires_any_features = [f for f in requires_any
- if f in test.config.available_features]
- if requires_any and not requires_any_features:
- msg = ' ,'.join(requires_any)
- return lit.Test.Result(Test.UNSUPPORTED,
- "Test requires any of the following features: "
- "%s" % msg)
- unsupported_features = [f for f in unsupported
- if f in test.config.available_features]
+ "Test requires the following unavailable "
+ "features: %s" % msg)
+
+ # Enforce UNSUPPORTED:
+ unsupported_features = test.getUnsupportedFeatures()
if unsupported_features:
msg = ', '.join(unsupported_features)
return lit.Test.Result(
Test.UNSUPPORTED,
- "Test is unsupported with the following features: %s" % msg)
+ "Test does not support the following features "
+ "and/or targets: %s" % msg)
+
+ # Enforce limit_to_features.
+ if not test.isWithinFeatureLimits():
+ msg = ', '.join(test.config.limit_to_features)
+ return lit.Test.Result(Test.UNSUPPORTED,
+ "Test does not require any of the features "
+ "specified in limit_to_features: %s" % msg)
- unsupported_targets = [f for f in unsupported
- if f in test.suite.config.target_triple]
- if unsupported_targets:
- return lit.Test.Result(
- Test.UNSUPPORTED,
- "Test is unsupported with the following triple: %s" % (
- test.suite.config.target_triple,))
-
- if test.config.limit_to_features:
- # Check that we have one of the limit_to_features features in requires.
- limit_to_features_tests = [f for f in test.config.limit_to_features
- if f in requires]
- if not limit_to_features_tests:
- msg = ', '.join(test.config.limit_to_features)
- return lit.Test.Result(
- Test.UNSUPPORTED,
- "Test requires one of the limit_to_features features %s" % msg)
return script
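Concretely, the REQUIRES-ANY handler above rewrites the old list form into a single boolean clause, so (with illustrative feature names):

    # REQUIRES-ANY: arm, aarch64, x86
    # is now equivalent to
    # REQUIRES: arm || aarch64 || x86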
diff --git a/utils/lit/lit/TestingConfig.py b/utils/lit/lit/TestingConfig.py
index 717b53c67fbeb..c729ec060ace7 100644
--- a/utils/lit/lit/TestingConfig.py
+++ b/utils/lit/lit/TestingConfig.py
@@ -106,7 +106,7 @@ class TestingConfig:
environment, substitutions, unsupported,
test_exec_root, test_source_root, excludes,
available_features, pipefail, limit_to_features = [],
- is_early = False):
+ is_early = False, parallelism_group = ""):
self.parent = parent
self.name = str(name)
self.suffixes = set(suffixes)
@@ -125,6 +125,7 @@ class TestingConfig:
self.limit_to_features = set(limit_to_features)
# Whether the suite should be tested early in a given run.
self.is_early = bool(is_early)
+ self.parallelism_group = parallelism_group
def finish(self, litConfig):
"""finish() - Finish this config object, after loading is complete."""
diff --git a/utils/lit/lit/main.py b/utils/lit/lit/main.py
index ac3066eea7252..689a2d55bcea7 100755
--- a/utils/lit/lit/main.py
+++ b/utils/lit/lit/main.py
@@ -259,6 +259,14 @@ def main_with_tmp(builtinParameters):
help=("Only run tests with paths matching the given "
"regular expression"),
action="store", default=None)
+ selection_group.add_argument("--num-shards", dest="numShards", metavar="M",
+ help="Split testsuite into M pieces and only run one",
+ action="store", type=int,
+ default=os.environ.get("LIT_NUM_SHARDS"))
+ selection_group.add_argument("--run-shard", dest="runShard", metavar="N",
+ help="Run shard #N of the testsuite",
+ action="store", type=int,
+ default=os.environ.get("LIT_RUN_SHARD"))
debug_group = parser.add_argument_group("Debug and Experimental Options")
debug_group.add_argument("--debug",
@@ -270,12 +278,15 @@ def main_with_tmp(builtinParameters):
debug_group.add_argument("--show-tests", dest="showTests",
help="Show all discovered tests",
action="store_true", default=False)
- debug_group.add_argument("--use-processes", dest="useProcesses",
+ debug_group.add_argument("--use-process-pool", dest="executionStrategy",
+ help="Run tests in parallel with a process pool",
+ action="store_const", const="PROCESS_POOL")
+ debug_group.add_argument("--use-processes", dest="executionStrategy",
help="Run tests in parallel with processes (not threads)",
- action="store_true", default=True)
- debug_group.add_argument("--use-threads", dest="useProcesses",
+ action="store_const", const="PROCESSES")
+ debug_group.add_argument("--use-threads", dest="executionStrategy",
help="Run tests in parallel with threads (not processes)",
- action="store_false", default=True)
+ action="store_const", const="THREADS")
opts = parser.parse_args()
args = opts.test_paths
@@ -290,6 +301,9 @@ def main_with_tmp(builtinParameters):
if opts.numThreads is None:
opts.numThreads = lit.util.detectCPUs()
+ if opts.executionStrategy is None:
+ opts.executionStrategy = 'PROCESS_POOL'
+
if opts.maxFailures == 0:
parser.error("Setting --max-failures to 0 does not have any effect.")
@@ -327,7 +341,8 @@ def main_with_tmp(builtinParameters):
params = userParams,
config_prefix = opts.configPrefix,
maxIndividualTestTime = maxIndividualTestTime,
- maxFailures = opts.maxFailures)
+ maxFailures = opts.maxFailures,
+ parallelism_groups = {})
# Perform test discovery.
run = lit.run.Run(litConfig,
@@ -399,6 +414,29 @@ def main_with_tmp(builtinParameters):
else:
run.tests.sort(key = lambda t: (not t.isEarlyTest(), t.getFullName()))
+ # Then optionally restrict our attention to a shard of the tests.
+ if (opts.numShards is not None) or (opts.runShard is not None):
+ if (opts.numShards is None) or (opts.runShard is None):
+ parser.error("--num-shards and --run-shard must be used together")
+ if opts.numShards <= 0:
+ parser.error("--num-shards must be positive")
+ if (opts.runShard < 1) or (opts.runShard > opts.numShards):
+ parser.error("--run-shard must be between 1 and --num-shards (inclusive)")
+ num_tests = len(run.tests)
+ # Note: user views tests and shard numbers counting from 1.
+ test_ixs = range(opts.runShard - 1, num_tests, opts.numShards)
+ run.tests = [run.tests[i] for i in test_ixs]
+ # Generate a preview of the first few test indices in the shard
+ # to accompany the arithmetic expression, for clarity.
+ preview_len = 3
+ ix_preview = ", ".join([str(i+1) for i in test_ixs[:preview_len]])
+ if len(test_ixs) > preview_len:
+ ix_preview += ", ..."
+ litConfig.note('Selecting shard %d/%d = size %d/%d = tests #(%d*k)+%d = [%s]' %
+ (opts.runShard, opts.numShards,
+ len(run.tests), num_tests,
+ opts.numShards, opts.runShard, ix_preview))
+
# Finally limit the number of tests, if desired.
if opts.maxTests is not None:
run.tests = run.tests[:opts.maxTests]
@@ -449,7 +487,7 @@ def main_with_tmp(builtinParameters):
display = TestingProgressDisplay(opts, len(run.tests), progressBar)
try:
run.execute_tests(display, opts.numThreads, opts.maxTime,
- opts.useProcesses)
+ opts.executionStrategy)
except KeyboardInterrupt:
sys.exit(2)
display.finish()
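The shard selection above is a plain stride over the (already sorted) test list; a minimal sketch of the arithmetic, using 1-based shard numbers as in the note printed to the user:

    tests = ['t1', 't2', 't3', 't4', 't5']
    num_shards, run_shard = 3, 2
    selected = tests[run_shard - 1::num_shards]   # ['t2', 't5'], i.e. tests #(3*k)+2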
diff --git a/utils/lit/lit/run.py b/utils/lit/lit/run.py
index f7e84d316a7cd..14d8ec98490e8 100644
--- a/utils/lit/lit/run.py
+++ b/utils/lit/lit/run.py
@@ -1,4 +1,5 @@
import os
+import sys
import threading
import time
import traceback
@@ -84,11 +85,13 @@ class Tester(object):
def run_test(self, test_index):
test = self.run_instance.tests[test_index]
try:
- self.run_instance.execute_test(test)
+ execute_test(test, self.run_instance.lit_config,
+ self.run_instance.parallelism_semaphores)
except KeyboardInterrupt:
# This is a sad hack. Unfortunately subprocess goes
# bonkers with ctrl-c and we start forking merrily.
print('\nCtrl-C detected, goodbye.')
+ sys.stdout.flush()
os.kill(0,9)
self.consumer.update(test_index, test)
@@ -167,6 +170,44 @@ class _Display(object):
def handleFailures(provider, consumer, maxFailures):
consumer.display = _Display(consumer.display, provider, maxFailures)
+def execute_test(test, lit_config, parallelism_semaphores):
+ """Execute one test"""
+ pg = test.config.parallelism_group
+ if callable(pg):
+ pg = pg(test)
+
+ result = None
+ semaphore = None
+ try:
+ if pg:
+ semaphore = parallelism_semaphores[pg]
+ if semaphore:
+ semaphore.acquire()
+ start_time = time.time()
+ result = test.config.test_format.execute(test, lit_config)
+ # Support deprecated result from execute() which returned the result
+ # code and additional output as a tuple.
+ if isinstance(result, tuple):
+ code, output = result
+ result = lit.Test.Result(code, output)
+ elif not isinstance(result, lit.Test.Result):
+ raise ValueError("unexpected result from test execution")
+ result.elapsed = time.time() - start_time
+ except KeyboardInterrupt:
+ raise
+ except:
+ if lit_config.debug:
+ raise
+ output = 'Exception during script execution:\n'
+ output += traceback.format_exc()
+ output += '\n'
+ result = lit.Test.Result(lit.Test.UNRESOLVED, output)
+ finally:
+ if semaphore:
+ semaphore.release()
+
+ test.setResult(result)
+
class Run(object):
"""
This class represents a concrete, configured testing run.
@@ -177,33 +218,10 @@ class Run(object):
self.tests = tests
def execute_test(self, test):
- result = None
- start_time = time.time()
- try:
- result = test.config.test_format.execute(test, self.lit_config)
-
- # Support deprecated result from execute() which returned the result
- # code and additional output as a tuple.
- if isinstance(result, tuple):
- code, output = result
- result = lit.Test.Result(code, output)
- elif not isinstance(result, lit.Test.Result):
- raise ValueError("unexpected result from test execution")
- except KeyboardInterrupt:
- raise
- except:
- if self.lit_config.debug:
- raise
- output = 'Exception during script execution:\n'
- output += traceback.format_exc()
- output += '\n'
- result = lit.Test.Result(lit.Test.UNRESOLVED, output)
- result.elapsed = time.time() - start_time
-
- test.setResult(result)
+ return execute_test(test, self.lit_config, self.parallelism_semaphores)
def execute_tests(self, display, jobs, max_time=None,
- use_processes=False):
+ execution_strategy=None):
"""
execute_tests(display, jobs, [max_time])
@@ -225,12 +243,21 @@ class Run(object):
be given an UNRESOLVED result.
"""
+ if execution_strategy == 'PROCESS_POOL':
+ self.execute_tests_with_mp_pool(display, jobs, max_time)
+ return
+ # FIXME: Standardize on the PROCESS_POOL execution strategy and remove
+ # the other two strategies.
+
+ use_processes = execution_strategy == 'PROCESSES'
+
# Choose the appropriate parallel execution implementation.
consumer = None
if jobs != 1 and use_processes and multiprocessing:
try:
task_impl = multiprocessing.Process
queue_impl = multiprocessing.Queue
+ sem_impl = multiprocessing.Semaphore
canceled_flag = multiprocessing.Value('i', 0)
consumer = MultiprocessResultsConsumer(self, display, jobs)
except:
@@ -242,15 +269,19 @@ class Run(object):
if not consumer:
task_impl = threading.Thread
queue_impl = queue.Queue
+ sem_impl = threading.Semaphore
canceled_flag = LockedValue(0)
consumer = ThreadResultsConsumer(display)
+ self.parallelism_semaphores = {k: sem_impl(v)
+ for k, v in self.lit_config.parallelism_groups.items()}
+
# Create the test provider.
provider = TestProvider(queue_impl, canceled_flag)
handleFailures(provider, consumer, self.lit_config.maxFailures)
- # Queue the tests outside the main thread because we can't guarantee
- # that we can put() all the tests without blocking:
+ # Putting tasks into the threading or multiprocessing Queue may block,
+ # so do it in a separate thread.
# https://docs.python.org/2/library/multiprocessing.html
# e.g: On Mac OS X, we will hang if we put 2^15 elements in the queue
# without taking any out.
@@ -303,3 +334,140 @@ class Run(object):
# Wait for all the tasks to complete.
for t in tasks:
t.join()
+
+ def execute_tests_with_mp_pool(self, display, jobs, max_time=None):
+ # Don't do anything if we aren't going to run any tests.
+ if not self.tests or jobs == 0:
+ return
+
+ # Set up semaphores to limit parallelism of certain classes of tests.
+ # For example, some ASan tests require lots of virtual memory and run
+ # faster with less parallelism on OS X.
+ self.parallelism_semaphores = \
+ {k: multiprocessing.Semaphore(v) for k, v in
+ self.lit_config.parallelism_groups.items()}
+
+ # Install a console-control signal handler on Windows.
+ if win32api is not None:
+ def console_ctrl_handler(type):
+ print('\nCtrl-C detected, terminating.')
+ pool.terminate()
+ pool.join()
+ os.kill(0,9)
+ return True
+ win32api.SetConsoleCtrlHandler(console_ctrl_handler, True)
+
+ # Save the display object on the runner so that we can update it from
+ # our task completion callback.
+ self.display = display
+
+ # We need to issue many wait calls, so compute the final deadline and
+ # subtract time.time() from that as we go along.
+ deadline = None
+ if max_time:
+ deadline = time.time() + max_time
+
+ # Start a process pool. Copy over the data shared between all test runs.
+ pool = multiprocessing.Pool(jobs, worker_initializer,
+ (self.lit_config,
+ self.parallelism_semaphores))
+
+ try:
+ self.failure_count = 0
+ self.hit_max_failures = False
+ async_results = [pool.apply_async(worker_run_one_test,
+ args=(test_index, test),
+ callback=self.consume_test_result)
+ for test_index, test in enumerate(self.tests)]
+
+ # Wait for all results to come in. The callback that runs in the
+ # parent process will update the display.
+ for a in async_results:
+ if deadline:
+ a.wait(deadline - time.time())
+ else:
+ # Python condition variables cannot be interrupted unless
+ # they have a timeout. This can make lit unresponsive to
+ # KeyboardInterrupt, so do a busy wait with a timeout.
+ while not a.ready():
+ a.wait(1)
+ if not a.successful():
+ a.get() # Exceptions raised here come from the worker.
+ if self.hit_max_failures:
+ break
+ finally:
+ # Stop the workers and wait for any straggling results to come in
+ # if we exited without waiting on every async result.
+ pool.terminate()
+ pool.join()
+
+ # Mark any tests that weren't run as UNRESOLVED.
+ for test in self.tests:
+ if test.result is None:
+ test.setResult(lit.Test.Result(lit.Test.UNRESOLVED, '', 0.0))
+
+ def consume_test_result(self, pool_result):
+ """Test completion callback for worker_run_one_test
+
+ Updates the test result status in the parent process. Each task in the
+ pool returns the test index and the result, and we use the index to look
+ up the original test object. Also updates the progress bar as tasks
+ complete.
+ """
+ # Don't add any more test results after we've hit the maximum failure
+ # count. Otherwise we're racing with the main thread, which is going
+ # to terminate the process pool soon.
+ if self.hit_max_failures:
+ return
+
+ (test_index, test_with_result) = pool_result
+ # Update the parent process copy of the test. This includes the result,
+ # XFAILS, REQUIRES, and UNSUPPORTED statuses.
+ assert self.tests[test_index].file_path == test_with_result.file_path, \
+ "parent and child disagree on test path"
+ self.tests[test_index] = test_with_result
+ self.display.update(test_with_result)
+
+ # If we've finished all the tests or too many tests have failed, notify
+ # the main thread that we've stopped testing.
+ self.failure_count += (test_with_result.result.code == lit.Test.FAIL)
+ if self.lit_config.maxFailures and \
+ self.failure_count == self.lit_config.maxFailures:
+ self.hit_max_failures = True
+
+child_lit_config = None
+child_parallelism_semaphores = None
+
+def worker_initializer(lit_config, parallelism_semaphores):
+ """Copy expensive repeated data into worker processes"""
+ global child_lit_config
+ child_lit_config = lit_config
+ global child_parallelism_semaphores
+ child_parallelism_semaphores = parallelism_semaphores
+
+def worker_run_one_test(test_index, test):
+ """Run one test in a multiprocessing.Pool
+
+ Side effects in this function and functions it calls are not visible in the
+ main lit process.
+
+ Arguments and results of this function are pickled, so they should be cheap
+ to copy. For efficiency, we copy all data needed to execute all tests into
+ each worker and store it in the child_* global variables. This reduces the
+ cost of each task.
+
+ Returns an index and a Result, which the parent process uses to update
+ the display.
+ """
+ try:
+ execute_test(test, child_lit_config, child_parallelism_semaphores)
+ return (test_index, test)
+ except KeyboardInterrupt as e:
+ # This is a sad hack. Unfortunately subprocess goes
+ # bonkers with ctrl-c and we start forking merrily.
+ print('\nCtrl-C detected, goodbye.')
+ traceback.print_exc()
+ sys.stdout.flush()
+ os.kill(0,9)
+ except:
+ traceback.print_exc()
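A hedged sketch of how a suite might opt into the new parallelism groups (the group name is invented; the mechanism is the semaphore map built above from lit_config.parallelism_groups):

    # In the top-level lit configuration:
    lit_config.parallelism_groups['darwin-asan'] = 3    # at most 3 such tests in flight

    # In a suite's lit.cfg -- either a fixed group name...
    config.parallelism_group = 'darwin-asan'
    # ...or a callable evaluated per test (run.py checks callable(pg)):
    config.parallelism_group = \
        lambda test: 'darwin-asan' if 'asan' in test.getFullName() else None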
diff --git a/utils/lit/lit/util.py b/utils/lit/lit/util.py
index be37998c6f16c..104e9dac464d7 100644
--- a/utils/lit/lit/util.py
+++ b/utils/lit/lit/util.py
@@ -10,6 +10,8 @@ import threading
def to_bytes(str):
# Encode to UTF-8 to get binary data.
+ if isinstance(str, bytes):
+ return str
return str.encode('utf-8')
def to_string(bytes):
@@ -200,6 +202,8 @@ def executeCommand(command, cwd=None, env=None, input=None, timeout=0):
If the timeout is hit an ``ExecuteCommandTimeoutException``
is raised.
"""
+ if input is not None:
+ input = to_bytes(input)
p = subprocess.Popen(command, cwd=cwd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
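With this change to_bytes is a no-op for data that is already bytes, so executeCommand can accept either str or bytes for its input argument; a quick sketch:

    from lit.util import to_bytes

    to_bytes(u'hi')   # b'hi'
    to_bytes(b'hi')   # b'hi' (previously this hit bytes.encode, which fails on Python 3)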
diff --git a/utils/lit/tests/Inputs/shtest-format/requires-missing.txt b/utils/lit/tests/Inputs/shtest-format/requires-missing.txt
index 9e6648d8b8f08..d643e57edcad2 100644
--- a/utils/lit/tests/Inputs/shtest-format/requires-missing.txt
+++ b/utils/lit/tests/Inputs/shtest-format/requires-missing.txt
@@ -1,2 +1,5 @@
-RUN: true
-REQUIRES: a-missing-feature
+# REQUIRES with a false clause. Test should not run.
+REQUIRES: true
+REQUIRES: a-missing-feature, true
+REQUIRES: true
+RUN: false
diff --git a/utils/lit/tests/Inputs/shtest-format/requires-present.txt b/utils/lit/tests/Inputs/shtest-format/requires-present.txt
index 064f7074a76ea..9fcbdca69be35 100644
--- a/utils/lit/tests/Inputs/shtest-format/requires-present.txt
+++ b/utils/lit/tests/Inputs/shtest-format/requires-present.txt
@@ -1,2 +1,4 @@
+# REQUIRES with only true clauses. Test should run.
+REQUIRES: a-present-feature, true, !not-true
+REQUIRES: true
RUN: true
-REQUIRES: a-present-feature
diff --git a/utils/lit/tests/Inputs/shtest-format/requires-star.txt b/utils/lit/tests/Inputs/shtest-format/requires-star.txt
new file mode 100644
index 0000000000000..5566d8b15b074
--- /dev/null
+++ b/utils/lit/tests/Inputs/shtest-format/requires-star.txt
@@ -0,0 +1,3 @@
+# '*' only works in XFAIL
+REQUIRES: *
+RUN: false
diff --git a/utils/lit/tests/Inputs/shtest-format/requires-triple.txt b/utils/lit/tests/Inputs/shtest-format/requires-triple.txt
new file mode 100644
index 0000000000000..6470bf4041457
--- /dev/null
+++ b/utils/lit/tests/Inputs/shtest-format/requires-triple.txt
@@ -0,0 +1,3 @@
+# REQUIRES line that uses the target triple, which doesn't work. Test should not run.
+REQUIRES: x86_64
+RUN: false
diff --git a/utils/lit/tests/Inputs/shtest-format/unsupported-expr-false.txt b/utils/lit/tests/Inputs/shtest-format/unsupported-expr-false.txt
new file mode 100644
index 0000000000000..00c6160a367c1
--- /dev/null
+++ b/utils/lit/tests/Inputs/shtest-format/unsupported-expr-false.txt
@@ -0,0 +1,9 @@
+# UNSUPPORTED with only false clauses. Test should run.
+UNSUPPORTED: false
+UNSUPPORTED: false, not-true
+UNSUPPORTED: false
+UNSUPPORTED: still-not-true
+UNSUPPORTED: false
+UNSUPPORTED: false
+UNSUPPORTED: false
+RUN: true
diff --git a/utils/lit/tests/Inputs/shtest-format/unsupported-expr-true.txt b/utils/lit/tests/Inputs/shtest-format/unsupported-expr-true.txt
new file mode 100644
index 0000000000000..f48ba7b2c2d22
--- /dev/null
+++ b/utils/lit/tests/Inputs/shtest-format/unsupported-expr-true.txt
@@ -0,0 +1,4 @@
+# UNSUPPORTED with a true clause. Test should not run.
+UNSUPPORTED: false
+UNSUPPORTED: false, false, false, _64-unk && a-present-feature, false
+RUN: false
diff --git a/utils/lit/tests/Inputs/shtest-format/unsupported-star.txt b/utils/lit/tests/Inputs/shtest-format/unsupported-star.txt
new file mode 100644
index 0000000000000..16630207dacb9
--- /dev/null
+++ b/utils/lit/tests/Inputs/shtest-format/unsupported-star.txt
@@ -0,0 +1,3 @@
+# '*' only works in XFAIL
+UNSUPPORTED: *
+RUN: false
diff --git a/utils/lit/tests/Inputs/shtest-format/xfail-expr-false.txt b/utils/lit/tests/Inputs/shtest-format/xfail-expr-false.txt
new file mode 100644
index 0000000000000..83b0de1621d08
--- /dev/null
+++ b/utils/lit/tests/Inputs/shtest-format/xfail-expr-false.txt
@@ -0,0 +1,3 @@
+# XFAIL with only false clauses. Test should run.
+XFAIL: false, a-missing-feature || ! a-present-feature || ! x86_64, false
+RUN: true
diff --git a/utils/lit/tests/Inputs/shtest-format/xfail-expr-true.txt b/utils/lit/tests/Inputs/shtest-format/xfail-expr-true.txt
new file mode 100644
index 0000000000000..3c197484897e8
--- /dev/null
+++ b/utils/lit/tests/Inputs/shtest-format/xfail-expr-true.txt
@@ -0,0 +1,4 @@
+# XFAIL with a true clause. Test should not run.
+XFAIL: false
+XFAIL: false, a-present-feature && ! a-missing-feature && x86_64
+RUN: false
diff --git a/utils/lit/tests/Inputs/test-data/dummy_format.py b/utils/lit/tests/Inputs/test-data/dummy_format.py
new file mode 100644
index 0000000000000..93e48eeb83960
--- /dev/null
+++ b/utils/lit/tests/Inputs/test-data/dummy_format.py
@@ -0,0 +1,38 @@
+import os
+try:
+ import ConfigParser
+except ImportError:
+ import configparser as ConfigParser
+
+import lit.formats
+import lit.Test
+
+class DummyFormat(lit.formats.FileBasedTest):
+ def execute(self, test, lit_config):
+ # In this dummy format, expect that each test file is actually just a
+ # .ini format dump of the results to report.
+
+ source_path = test.getSourcePath()
+
+ cfg = ConfigParser.ConfigParser()
+ cfg.read(source_path)
+
+ # Create the basic test result.
+ result_code = cfg.get('global', 'result_code')
+ result_output = cfg.get('global', 'result_output')
+ result = lit.Test.Result(getattr(lit.Test, result_code),
+ result_output)
+
+ # Load additional metrics.
+ for key,value_str in cfg.items('results'):
+ value = eval(value_str)
+ if isinstance(value, int):
+ metric = lit.Test.IntMetricValue(value)
+ elif isinstance(value, float):
+ metric = lit.Test.RealMetricValue(value)
+ else:
+ raise RuntimeError("unsupported result type")
+ result.addMetric(key, metric)
+
+ return result
+
diff --git a/utils/lit/tests/Inputs/test-data/lit.cfg b/utils/lit/tests/Inputs/test-data/lit.cfg
index f5aba7b217748..0191cc2188843 100644
--- a/utils/lit/tests/Inputs/test-data/lit.cfg
+++ b/utils/lit/tests/Inputs/test-data/lit.cfg
@@ -1,44 +1,10 @@
-import os
-try:
- import ConfigParser
-except ImportError:
- import configparser as ConfigParser
-
-import lit.formats
-import lit.Test
-
-class DummyFormat(lit.formats.FileBasedTest):
- def execute(self, test, lit_config):
- # In this dummy format, expect that each test file is actually just a
- # .ini format dump of the results to report.
-
- source_path = test.getSourcePath()
-
- cfg = ConfigParser.ConfigParser()
- cfg.read(source_path)
-
- # Create the basic test result.
- result_code = cfg.get('global', 'result_code')
- result_output = cfg.get('global', 'result_output')
- result = lit.Test.Result(getattr(lit.Test, result_code),
- result_output)
-
- # Load additional metrics.
- for key,value_str in cfg.items('results'):
- value = eval(value_str)
- if isinstance(value, int):
- metric = lit.Test.IntMetricValue(value)
- elif isinstance(value, float):
- metric = lit.Test.RealMetricValue(value)
- else:
- raise RuntimeError("unsupported result type")
- result.addMetric(key, metric)
-
- return result
+import site
+site.addsitedir(os.path.dirname(__file__))
+import dummy_format
config.name = 'test-data'
config.suffixes = ['.ini']
-config.test_format = DummyFormat()
+config.test_format = dummy_format.DummyFormat()
config.test_source_root = None
config.test_exec_root = None
config.target_triple = None
diff --git a/utils/lit/tests/boolean-parsing.py b/utils/lit/tests/boolean-parsing.py
new file mode 100644
index 0000000000000..372a94d233234
--- /dev/null
+++ b/utils/lit/tests/boolean-parsing.py
@@ -0,0 +1,4 @@
+# Test the boolean expression parser
+# used for REQUIRES, UNSUPPORTED, and XFAIL.
+
+# RUN: %{python} -m lit.BooleanExpression
diff --git a/utils/lit/tests/selecting.py b/utils/lit/tests/selecting.py
new file mode 100644
index 0000000000000..72d6fbabdc932
--- /dev/null
+++ b/utils/lit/tests/selecting.py
@@ -0,0 +1,90 @@
+# RUN: %{lit} %{inputs}/discovery | FileCheck --check-prefix=CHECK-BASIC %s
+# CHECK-BASIC: Testing: 5 tests
+
+
+# Check that regex-filtering works
+#
+# RUN: %{lit} --filter 'o[a-z]e' %{inputs}/discovery | FileCheck --check-prefix=CHECK-FILTER %s
+# CHECK-FILTER: Testing: 2 of 5 tests
+
+
+# Check that maximum counts work
+#
+# RUN: %{lit} --max-tests 3 %{inputs}/discovery | FileCheck --check-prefix=CHECK-MAX %s
+# CHECK-MAX: Testing: 3 of 5 tests
+
+
+# Check that sharding partitions the testsuite in a way that distributes the
+# rounding error nicely (i.e. 5/3 => 2 2 1, not 1 1 3 or whatever)
+#
+# RUN: %{lit} --num-shards 3 --run-shard 1 %{inputs}/discovery >%t.out 2>%t.err
+# RUN: FileCheck --check-prefix=CHECK-SHARD0-ERR < %t.err %s
+# RUN: FileCheck --check-prefix=CHECK-SHARD0-OUT < %t.out %s
+# CHECK-SHARD0-ERR: note: Selecting shard 1/3 = size 2/5 = tests #(3*k)+1 = [1, 4]
+# CHECK-SHARD0-OUT: Testing: 2 of 5 tests
+#
+# RUN: %{lit} --num-shards 3 --run-shard 2 %{inputs}/discovery >%t.out 2>%t.err
+# RUN: FileCheck --check-prefix=CHECK-SHARD1-ERR < %t.err %s
+# RUN: FileCheck --check-prefix=CHECK-SHARD1-OUT < %t.out %s
+# CHECK-SHARD1-ERR: note: Selecting shard 2/3 = size 2/5 = tests #(3*k)+2 = [2, 5]
+# CHECK-SHARD1-OUT: Testing: 2 of 5 tests
+#
+# RUN: %{lit} --num-shards 3 --run-shard 3 %{inputs}/discovery >%t.out 2>%t.err
+# RUN: FileCheck --check-prefix=CHECK-SHARD2-ERR < %t.err %s
+# RUN: FileCheck --check-prefix=CHECK-SHARD2-OUT < %t.out %s
+# CHECK-SHARD2-ERR: note: Selecting shard 3/3 = size 1/5 = tests #(3*k)+3 = [3]
+# CHECK-SHARD2-OUT: Testing: 1 of 5 tests
+
+
+# Check that sharding via env vars works.
+#
+# RUN: env LIT_NUM_SHARDS=3 LIT_RUN_SHARD=1 %{lit} %{inputs}/discovery >%t.out 2>%t.err
+# RUN: FileCheck --check-prefix=CHECK-SHARD0-ENV-ERR < %t.err %s
+# RUN: FileCheck --check-prefix=CHECK-SHARD0-ENV-OUT < %t.out %s
+# CHECK-SHARD0-ENV-ERR: note: Selecting shard 1/3 = size 2/5 = tests #(3*k)+1 = [1, 4]
+# CHECK-SHARD0-ENV-OUT: Testing: 2 of 5 tests
+#
+# RUN: env LIT_NUM_SHARDS=3 LIT_RUN_SHARD=2 %{lit} %{inputs}/discovery >%t.out 2>%t.err
+# RUN: FileCheck --check-prefix=CHECK-SHARD1-ENV-ERR < %t.err %s
+# RUN: FileCheck --check-prefix=CHECK-SHARD1-ENV-OUT < %t.out %s
+# CHECK-SHARD1-ENV-ERR: note: Selecting shard 2/3 = size 2/5 = tests #(3*k)+2 = [2, 5]
+# CHECK-SHARD1-ENV-OUT: Testing: 2 of 5 tests
+#
+# RUN: env LIT_NUM_SHARDS=3 LIT_RUN_SHARD=3 %{lit} %{inputs}/discovery >%t.out 2>%t.err
+# RUN: FileCheck --check-prefix=CHECK-SHARD2-ENV-ERR < %t.err %s
+# RUN: FileCheck --check-prefix=CHECK-SHARD2-ENV-OUT < %t.out %s
+# CHECK-SHARD2-ENV-ERR: note: Selecting shard 3/3 = size 1/5 = tests #(3*k)+3 = [3]
+# CHECK-SHARD2-ENV-OUT: Testing: 1 of 5 tests
+
+
+# Check that providing more shards than tests results in 1 test per shard
+# until we run out, then 0.
+#
+# RUN: %{lit} --num-shards 100 --run-shard 2 %{inputs}/discovery >%t.out 2>%t.err
+# RUN: FileCheck --check-prefix=CHECK-SHARD-BIG-ERR1 < %t.err %s
+# RUN: FileCheck --check-prefix=CHECK-SHARD-BIG-OUT1 < %t.out %s
+# CHECK-SHARD-BIG-ERR1: note: Selecting shard 2/100 = size 1/5 = tests #(100*k)+2 = [2]
+# CHECK-SHARD-BIG-OUT1: Testing: 1 of 5 tests
+#
+# RUN: %{lit} --num-shards 100 --run-shard 6 %{inputs}/discovery >%t.out 2>%t.err
+# RUN: FileCheck --check-prefix=CHECK-SHARD-BIG-ERR2 < %t.err %s
+# RUN: FileCheck --check-prefix=CHECK-SHARD-BIG-OUT2 < %t.out %s
+# CHECK-SHARD-BIG-ERR2: note: Selecting shard 6/100 = size 0/5 = tests #(100*k)+6 = []
+# CHECK-SHARD-BIG-OUT2: Testing: 0 of 5 tests
+#
+# RUN: %{lit} --num-shards 100 --run-shard 50 %{inputs}/discovery >%t.out 2>%t.err
+# RUN: FileCheck --check-prefix=CHECK-SHARD-BIG-ERR3 < %t.err %s
+# RUN: FileCheck --check-prefix=CHECK-SHARD-BIG-OUT3 < %t.out %s
+# CHECK-SHARD-BIG-ERR3: note: Selecting shard 50/100 = size 0/5 = tests #(100*k)+50 = []
+# CHECK-SHARD-BIG-OUT3: Testing: 0 of 5 tests
+
+
+# Check that range constraints are enforced
+#
+# RUN: not %{lit} --num-shards 0 --run-shard 2 %{inputs}/discovery >%t.out 2>%t.err
+# RUN: FileCheck --check-prefix=CHECK-SHARD-ERR < %t.err %s
+# CHECK-SHARD-ERR: error: --num-shards must be positive
+#
+# RUN: not %{lit} --num-shards 3 --run-shard 4 %{inputs}/discovery >%t.out 2>%t.err
+# RUN: FileCheck --check-prefix=CHECK-SHARD-ERR2 < %t.err %s
+# CHECK-SHARD-ERR2: error: --run-shard must be between 1 and --num-shards (inclusive)
diff --git a/utils/lit/tests/shtest-format.py b/utils/lit/tests/shtest-format.py
index 20884f8c4854d..37e3e1c12629d 100644
--- a/utils/lit/tests/shtest-format.py
+++ b/utils/lit/tests/shtest-format.py
@@ -50,7 +50,14 @@
# CHECK: PASS: shtest-format :: requires-any-present.txt
# CHECK: UNSUPPORTED: shtest-format :: requires-missing.txt
# CHECK: PASS: shtest-format :: requires-present.txt
+# CHECK: UNRESOLVED: shtest-format :: requires-star.txt
+# CHECK: UNSUPPORTED: shtest-format :: requires-triple.txt
+# CHECK: PASS: shtest-format :: unsupported-expr-false.txt
+# CHECK: UNSUPPORTED: shtest-format :: unsupported-expr-true.txt
+# CHECK: UNRESOLVED: shtest-format :: unsupported-star.txt
# CHECK: UNSUPPORTED: shtest-format :: unsupported_dir/some-test.txt
+# CHECK: PASS: shtest-format :: xfail-expr-false.txt
+# CHECK: XFAIL: shtest-format :: xfail-expr-true.txt
# CHECK: XFAIL: shtest-format :: xfail-feature.txt
# CHECK: XFAIL: shtest-format :: xfail-target.txt
# CHECK: XFAIL: shtest-format :: xfail.txt
@@ -70,9 +77,9 @@
# CHECK: shtest-format :: external_shell/fail_with_bad_encoding.txt
# CHECK: shtest-format :: fail.txt
-# CHECK: Expected Passes : 5
-# CHECK: Expected Failures : 3
-# CHECK: Unsupported Tests : 3
-# CHECK: Unresolved Tests : 1
+# CHECK: Expected Passes : 7
+# CHECK: Expected Failures : 4
+# CHECK: Unsupported Tests : 5
+# CHECK: Unresolved Tests : 3
# CHECK: Unexpected Passes : 1
# CHECK: Unexpected Failures: 3
diff --git a/utils/lit/tests/unit/TestRunner.py b/utils/lit/tests/unit/TestRunner.py
index ff11834fed7ef..79cc10f7e14d6 100644
--- a/utils/lit/tests/unit/TestRunner.py
+++ b/utils/lit/tests/unit/TestRunner.py
@@ -89,7 +89,7 @@ class TestIntegratedTestKeywordParser(unittest.TestCase):
parsers = self.make_parsers()
self.parse_test(parsers)
list_parser = self.get_parser(parsers, 'MY_LIST:')
- self.assertItemsEqual(list_parser.getValue(),
+ self.assertEqual(list_parser.getValue(),
['one', 'two', 'three', 'four'])
def test_commands(self):
@@ -106,8 +106,65 @@ class TestIntegratedTestKeywordParser(unittest.TestCase):
self.parse_test(parsers)
custom_parser = self.get_parser(parsers, 'MY_CUSTOM:')
value = custom_parser.getValue()
- self.assertItemsEqual(value, ['a', 'b', 'c'])
+ self.assertEqual(value, ['a', 'b', 'c'])
+ def test_bad_keywords(self):
+ def custom_parse(line_number, line, output):
+ return output
+
+ try:
+ IntegratedTestKeywordParser("TAG_NO_SUFFIX", ParserKind.TAG),
+ self.fail("TAG_NO_SUFFIX failed to raise an exception")
+ except ValueError as e:
+ pass
+ except BaseException as e:
+ self.fail("TAG_NO_SUFFIX raised the wrong exception: %r" % e)
+
+ try:
+ IntegratedTestKeywordParser("TAG_WITH_COLON:", ParserKind.TAG),
+ self.fail("TAG_WITH_COLON: failed to raise an exception")
+ except ValueError as e:
+ pass
+ except BaseException as e:
+ self.fail("TAG_WITH_COLON: raised the wrong exception: %r" % e)
+
+ try:
+ IntegratedTestKeywordParser("LIST_WITH_DOT.", ParserKind.LIST),
+ self.fail("LIST_WITH_DOT. failed to raise an exception")
+ except ValueError as e:
+ pass
+ except BaseException as e:
+ self.fail("LIST_WITH_DOT. raised the wrong exception: %r" % e)
+
+ try:
+ IntegratedTestKeywordParser("CUSTOM_NO_SUFFIX",
+ ParserKind.CUSTOM, custom_parse),
+ self.fail("CUSTOM_NO_SUFFIX failed to raise an exception")
+ except ValueError as e:
+ pass
+ except BaseException as e:
+ self.fail("CUSTOM_NO_SUFFIX raised the wrong exception: %r" % e)
+
+ # Both '.' and ':' are allowed for CUSTOM keywords.
+ try:
+ IntegratedTestKeywordParser("CUSTOM_WITH_DOT.",
+ ParserKind.CUSTOM, custom_parse),
+ except BaseException as e:
+ self.fail("CUSTOM_WITH_DOT. raised an exception: %r" % e)
+ try:
+ IntegratedTestKeywordParser("CUSTOM_WITH_COLON:",
+ ParserKind.CUSTOM, custom_parse),
+ except BaseException as e:
+ self.fail("CUSTOM_WITH_COLON: raised an exception: %r" % e)
+
+ try:
+ IntegratedTestKeywordParser("CUSTOM_NO_PARSER:",
+ ParserKind.CUSTOM),
+ self.fail("CUSTOM_NO_PARSER: failed to raise an exception")
+ except ValueError as e:
+ pass
+ except BaseException as e:
+ self.fail("CUSTOM_NO_PARSER: raised the wrong exception: %r" % e)
if __name__ == '__main__':
TestIntegratedTestKeywordParser.load_keyword_parser_lit_tests()