path: root/tools/scan-build-py
author    Dimitry Andric <dim@FreeBSD.org>    2017-04-16 16:02:28 +0000
committer Dimitry Andric <dim@FreeBSD.org>    2017-04-16 16:02:28 +0000
commit    7442d6faa2719e4e7d33a7021c406c5a4facd74d (patch)
tree      c72b9241553fc9966179aba84f90f17bfa9235c3 /tools/scan-build-py
parent    b52119637f743680a99710ce5fdb6646da2772af (diff)
Diffstat (limited to 'tools/scan-build-py')
-rw-r--r--  tools/scan-build-py/bin/analyze-build              |    4
-rw-r--r--  tools/scan-build-py/bin/analyze-build.bat          |    1
-rw-r--r--  tools/scan-build-py/bin/analyze-c++                |    4
-rw-r--r--  tools/scan-build-py/bin/analyze-c++.bat            |    1
-rw-r--r--  tools/scan-build-py/bin/analyze-cc                 |    4
-rw-r--r--  tools/scan-build-py/bin/analyze-cc.bat             |    1
-rw-r--r--  tools/scan-build-py/bin/intercept-build            |    4
-rw-r--r--  tools/scan-build-py/bin/intercept-build.bat        |    1
-rw-r--r--  tools/scan-build-py/bin/intercept-c++              |    4
-rw-r--r--  tools/scan-build-py/bin/intercept-c++.bat          |    1
-rw-r--r--  tools/scan-build-py/bin/intercept-cc               |    4
-rw-r--r--  tools/scan-build-py/bin/intercept-cc.bat           |    1
-rw-r--r--  tools/scan-build-py/bin/scan-build                 |    4
-rw-r--r--  tools/scan-build-py/bin/scan-build.bat             |    1
-rw-r--r--  tools/scan-build-py/libear/__init__.py             |    4
-rw-r--r--  tools/scan-build-py/libscanbuild/__init__.py       |  185
-rw-r--r--  tools/scan-build-py/libscanbuild/analyze.py        |  766
-rw-r--r--  tools/scan-build-py/libscanbuild/arguments.py      |  431
-rw-r--r--  tools/scan-build-py/libscanbuild/clang.py          |   17
-rw-r--r--  tools/scan-build-py/libscanbuild/intercept.py      |  212
-rw-r--r--  tools/scan-build-py/libscanbuild/report.py         |   85
-rw-r--r--  tools/scan-build-py/libscanbuild/runner.py         |  302
-rw-r--r--  tools/scan-build-py/tests/unit/__init__.py         |    2
-rw-r--r--  tools/scan-build-py/tests/unit/test_analyze.py     |  328
-rw-r--r--  tools/scan-build-py/tests/unit/test_intercept.py   |   12
-rw-r--r--  tools/scan-build-py/tests/unit/test_report.py      |   15
-rw-r--r--  tools/scan-build-py/tests/unit/test_runner.py      |  322
27 files changed, 1427 insertions(+), 1289 deletions(-)
diff --git a/tools/scan-build-py/bin/analyze-build b/tools/scan-build-py/bin/analyze-build
index 2cc9676fd5462..991cff0658f29 100644
--- a/tools/scan-build-py/bin/analyze-build
+++ b/tools/scan-build-py/bin/analyze-build
@@ -13,5 +13,5 @@ import os.path
this_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.dirname(this_dir))
-from libscanbuild.analyze import analyze_build_main
-sys.exit(analyze_build_main(this_dir, False))
+from libscanbuild.analyze import analyze_build
+sys.exit(analyze_build())
diff --git a/tools/scan-build-py/bin/analyze-build.bat b/tools/scan-build-py/bin/analyze-build.bat
deleted file mode 100644
index 05d81ddfda4d7..0000000000000
--- a/tools/scan-build-py/bin/analyze-build.bat
+++ /dev/null
@@ -1 +0,0 @@
-python %~dp0analyze-build %*
diff --git a/tools/scan-build-py/bin/analyze-c++ b/tools/scan-build-py/bin/analyze-c++
index 15186d89aa3f7..df1012dee57e2 100644
--- a/tools/scan-build-py/bin/analyze-c++
+++ b/tools/scan-build-py/bin/analyze-c++
@@ -10,5 +10,5 @@ import os.path
this_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.dirname(this_dir))
-from libscanbuild.analyze import analyze_build_wrapper
-sys.exit(analyze_build_wrapper(True))
+from libscanbuild.analyze import analyze_compiler_wrapper
+sys.exit(analyze_compiler_wrapper())
diff --git a/tools/scan-build-py/bin/analyze-c++.bat b/tools/scan-build-py/bin/analyze-c++.bat
deleted file mode 100644
index f57032f60bd43..0000000000000
--- a/tools/scan-build-py/bin/analyze-c++.bat
+++ /dev/null
@@ -1 +0,0 @@
-python %~dp0analyze-c++ %*
diff --git a/tools/scan-build-py/bin/analyze-cc b/tools/scan-build-py/bin/analyze-cc
index 55519fb7b11de..df1012dee57e2 100644
--- a/tools/scan-build-py/bin/analyze-cc
+++ b/tools/scan-build-py/bin/analyze-cc
@@ -10,5 +10,5 @@ import os.path
this_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.dirname(this_dir))
-from libscanbuild.analyze import analyze_build_wrapper
-sys.exit(analyze_build_wrapper(False))
+from libscanbuild.analyze import analyze_compiler_wrapper
+sys.exit(analyze_compiler_wrapper())
diff --git a/tools/scan-build-py/bin/analyze-cc.bat b/tools/scan-build-py/bin/analyze-cc.bat
deleted file mode 100644
index 41cd8f622eb2b..0000000000000
--- a/tools/scan-build-py/bin/analyze-cc.bat
+++ /dev/null
@@ -1 +0,0 @@
-python %~dp0analyze-cc %*
diff --git a/tools/scan-build-py/bin/intercept-build b/tools/scan-build-py/bin/intercept-build
index 164f2e68be931..2c3a26ecdddc1 100644
--- a/tools/scan-build-py/bin/intercept-build
+++ b/tools/scan-build-py/bin/intercept-build
@@ -13,5 +13,5 @@ import os.path
this_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.dirname(this_dir))
-from libscanbuild.intercept import intercept_build_main
-sys.exit(intercept_build_main(this_dir))
+from libscanbuild.intercept import intercept_build
+sys.exit(intercept_build())
diff --git a/tools/scan-build-py/bin/intercept-build.bat b/tools/scan-build-py/bin/intercept-build.bat
deleted file mode 100644
index 5c824635dfe46..0000000000000
--- a/tools/scan-build-py/bin/intercept-build.bat
+++ /dev/null
@@ -1 +0,0 @@
-python %~dp0intercept-build %*
diff --git a/tools/scan-build-py/bin/intercept-c++ b/tools/scan-build-py/bin/intercept-c++
index fc422287f84b9..67e076f39e4b9 100644
--- a/tools/scan-build-py/bin/intercept-c++
+++ b/tools/scan-build-py/bin/intercept-c++
@@ -10,5 +10,5 @@ import os.path
this_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.dirname(this_dir))
-from libscanbuild.intercept import intercept_build_wrapper
-sys.exit(intercept_build_wrapper(True))
+from libscanbuild.intercept import intercept_compiler_wrapper
+sys.exit(intercept_compiler_wrapper())
diff --git a/tools/scan-build-py/bin/intercept-c++.bat b/tools/scan-build-py/bin/intercept-c++.bat
deleted file mode 100644
index abbd4b177e0f9..0000000000000
--- a/tools/scan-build-py/bin/intercept-c++.bat
+++ /dev/null
@@ -1 +0,0 @@
-python %~dp0intercept-c++ %*
diff --git a/tools/scan-build-py/bin/intercept-cc b/tools/scan-build-py/bin/intercept-cc
index 69d57aaae1079..67e076f39e4b9 100644
--- a/tools/scan-build-py/bin/intercept-cc
+++ b/tools/scan-build-py/bin/intercept-cc
@@ -10,5 +10,5 @@ import os.path
this_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.dirname(this_dir))
-from libscanbuild.intercept import intercept_build_wrapper
-sys.exit(intercept_build_wrapper(False))
+from libscanbuild.intercept import intercept_compiler_wrapper
+sys.exit(intercept_compiler_wrapper())
diff --git a/tools/scan-build-py/bin/intercept-cc.bat b/tools/scan-build-py/bin/intercept-cc.bat
deleted file mode 100644
index 23cbd8d22ca66..0000000000000
--- a/tools/scan-build-py/bin/intercept-cc.bat
+++ /dev/null
@@ -1 +0,0 @@
-python %~dp0intercept-cc %*
diff --git a/tools/scan-build-py/bin/scan-build b/tools/scan-build-py/bin/scan-build
index 601fe89fc30da..f0f34695b0f1e 100644
--- a/tools/scan-build-py/bin/scan-build
+++ b/tools/scan-build-py/bin/scan-build
@@ -13,5 +13,5 @@ import os.path
this_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.dirname(this_dir))
-from libscanbuild.analyze import analyze_build_main
-sys.exit(analyze_build_main(this_dir, True))
+from libscanbuild.analyze import scan_build
+sys.exit(scan_build())
diff --git a/tools/scan-build-py/bin/scan-build.bat b/tools/scan-build-py/bin/scan-build.bat
deleted file mode 100644
index 8caf240a2f0bd..0000000000000
--- a/tools/scan-build-py/bin/scan-build.bat
+++ /dev/null
@@ -1 +0,0 @@
-python %~dp0scan-build %*
diff --git a/tools/scan-build-py/libear/__init__.py b/tools/scan-build-py/libear/__init__.py
index 3e1c13cf2bfe0..421e2e74f0232 100644
--- a/tools/scan-build-py/libear/__init__.py
+++ b/tools/scan-build-py/libear/__init__.py
@@ -207,9 +207,9 @@ class Configure(object):
if m:
key = m.group(1)
if key not in definitions or not definitions[key]:
- return '/* #undef {} */\n'.format(key)
+ return '/* #undef {0} */{1}'.format(key, os.linesep)
else:
- return '#define {}\n'.format(key)
+ return '#define {0}{1}'.format(key, os.linesep)
return line
with open(template, 'r') as src_handle:
diff --git a/tools/scan-build-py/libscanbuild/__init__.py b/tools/scan-build-py/libscanbuild/__init__.py
index c020b4e4345d6..800926ebb6f2f 100644
--- a/tools/scan-build-py/libscanbuild/__init__.py
+++ b/tools/scan-build-py/libscanbuild/__init__.py
@@ -3,10 +3,21 @@
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
-"""
-This module responsible to run the Clang static analyzer against any build
-and generate reports.
-"""
+""" This module is a collection of methods commonly used in this project. """
+import collections
+import functools
+import json
+import logging
+import os
+import os.path
+import re
+import shlex
+import subprocess
+import sys
+
+ENVIRONMENT_KEY = 'INTERCEPT_BUILD'
+
+Execution = collections.namedtuple('Execution', ['pid', 'cwd', 'cmd'])
def duplicate_check(method):
@@ -30,44 +41,89 @@ def duplicate_check(method):
return predicate
-def tempdir():
- """ Return the default temorary directory. """
-
- from os import getenv
- return getenv('TMPDIR', getenv('TEMP', getenv('TMP', '/tmp')))
-
-
-def initialize_logging(verbose_level):
- """ Output content controlled by the verbosity level. """
-
- import sys
- import os.path
- import logging
+def run_build(command, *args, **kwargs):
+ """ Run and report build command execution
+
+ :param command: array of tokens
+ :return: exit code of the process
+ """
+ environment = kwargs.get('env', os.environ)
+ logging.debug('run build %s, in environment: %s', command, environment)
+ exit_code = subprocess.call(command, *args, **kwargs)
+ logging.debug('build finished with exit code: %d', exit_code)
+ return exit_code
+
+
+def run_command(command, cwd=None):
+ """ Run a given command and report the execution.
+
+ :param command: array of tokens
+ :param cwd: the working directory where the command will be executed
+ :return: output of the command
+ """
+ def decode_when_needed(result):
+ """ check_output returns bytes or string depend on python version """
+ return result.decode('utf-8') if isinstance(result, bytes) else result
+
+ try:
+ directory = os.path.abspath(cwd) if cwd else os.getcwd()
+ logging.debug('exec command %s in %s', command, directory)
+ output = subprocess.check_output(command,
+ cwd=directory,
+ stderr=subprocess.STDOUT)
+ return decode_when_needed(output).splitlines()
+ except subprocess.CalledProcessError as ex:
+ ex.output = decode_when_needed(ex.output).splitlines()
+ raise ex
+
+
+def reconfigure_logging(verbose_level):
+ """ Reconfigure logging level and format based on the verbose flag.
+
+ :param verbose_level: number of `-v` flags received by the command
+ :return: no return value
+ """
+ # Exit when nothing to do.
+ if verbose_level == 0:
+ return
+
+ root = logging.getLogger()
+ # Tune logging level.
level = logging.WARNING - min(logging.WARNING, (10 * verbose_level))
-
+ root.setLevel(level)
+ # Be verbose with messages.
if verbose_level <= 3:
- fmt_string = '{0}: %(levelname)s: %(message)s'
+ fmt_string = '%(name)s: %(levelname)s: %(message)s'
else:
- fmt_string = '{0}: %(levelname)s: %(funcName)s: %(message)s'
-
- program = os.path.basename(sys.argv[0])
- logging.basicConfig(format=fmt_string.format(program), level=level)
+ fmt_string = '%(name)s: %(levelname)s: %(funcName)s: %(message)s'
+ handler = logging.StreamHandler(sys.stdout)
+ handler.setFormatter(logging.Formatter(fmt=fmt_string))
+ root.handlers = [handler]
def command_entry_point(function):
- """ Decorator for command entry points. """
+ """ Decorator for command entry methods.
+
+    The decorator initializes/shuts down logging and guards against
+    programming errors (catches exceptions).
- import functools
- import logging
+    The decorated method can have arbitrary parameters; the return value will
+ be the exit code of the process. """
@functools.wraps(function)
def wrapper(*args, **kwargs):
+ """ Do housekeeping tasks and execute the wrapped method. """
- exit_code = 127
try:
- exit_code = function(*args, **kwargs)
+ logging.basicConfig(format='%(name)s: %(message)s',
+ level=logging.WARNING,
+ stream=sys.stdout)
+            # This is a hack to get the executable name shown as %(name).
+ logging.getLogger().name = os.path.basename(sys.argv[0])
+ return function(*args, **kwargs)
except KeyboardInterrupt:
- logging.warning('Keyboard interupt')
+ logging.warning('Keyboard interrupt')
+ return 130 # Signal received exit code for bash.
except Exception:
logging.exception('Internal error.')
if logging.getLogger().isEnabledFor(logging.DEBUG):
@@ -75,8 +131,75 @@ def command_entry_point(function):
"to the bug report")
else:
logging.error("Please run this command again and turn on "
- "verbose mode (add '-vvv' as argument).")
+ "verbose mode (add '-vvvv' as argument).")
+            return 64  # An otherwise unused exit code for internal errors.
finally:
- return exit_code
+ logging.shutdown()
return wrapper
+
+
+def compiler_wrapper(function):
+ """ Implements compiler wrapper base functionality.
+
+    A compiler wrapper executes the real compiler, then implements some
+    extra functionality, then returns with the real compiler's exit code.
+
+    :param function: the extra functionality that the wrapper wants to
+    do on top of the compiler call. If it throws an exception, it will
+    be caught and logged.
+ :return: the exit code of the real compiler.
+
+ The :param function: will receive the following arguments:
+
+ :param result: the exit code of the compilation.
+ :param execution: the command executed by the wrapper. """
+
+ def is_cxx_compiler():
+ """ Find out was it a C++ compiler call. Compiler wrapper names
+ contain the compiler type. C++ compiler wrappers ends with `c++`,
+ but might have `.exe` extension on windows. """
+
+ wrapper_command = os.path.basename(sys.argv[0])
+ return re.match(r'(.+)c\+\+(.*)', wrapper_command)
+
+ def run_compiler(executable):
+ """ Execute compilation with the real compiler. """
+
+ command = executable + sys.argv[1:]
+ logging.debug('compilation: %s', command)
+ result = subprocess.call(command)
+ logging.debug('compilation exit code: %d', result)
+ return result
+
+ # Get relevant parameters from environment.
+ parameters = json.loads(os.environ[ENVIRONMENT_KEY])
+ reconfigure_logging(parameters['verbose'])
+ # Execute the requested compilation. Do crash if anything goes wrong.
+ cxx = is_cxx_compiler()
+ compiler = parameters['cxx'] if cxx else parameters['cc']
+ result = run_compiler(compiler)
+    # Call the wrapped method and ignore its return value.
+ try:
+ call = Execution(
+ pid=os.getpid(),
+ cwd=os.getcwd(),
+ cmd=['c++' if cxx else 'cc'] + sys.argv[1:])
+ function(result, call)
+ except:
+        logging.exception('Compiler wrapper failed to complete.')
+ finally:
+ # Always return the real compiler exit code.
+ return result
+
+
+def wrapper_environment(args):
+ """ Set up environment for interpose compiler wrapper."""
+
+ return {
+ ENVIRONMENT_KEY: json.dumps({
+ 'verbose': args.verbose,
+ 'cc': shlex.split(args.cc),
+ 'cxx': shlex.split(args.cxx)
+ })
+ }
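
The compiler_wrapper/wrapper_environment pair above forms a small protocol:
the build driver serializes the verbosity level and the real compiler
commands into the INTERCEPT_BUILD environment variable, and the wrapper
executable deserializes it, runs the real compiler, and hands the exit code
plus an Execution record to the wrapped function. Below is a minimal sketch
of a wrapper built on that contract; the names log_wrapper and
log_compilation are illustrative and not part of this commit.

# Hypothetical wrapper entry point, assuming it is installed as a compiler
# (e.g. CC points at it) and INTERCEPT_BUILD was set by wrapper_environment().
import logging

from libscanbuild import command_entry_point, compiler_wrapper


@command_entry_point
def log_wrapper():
    """ Returns the real compiler's exit code, as compiler_wrapper does. """
    return compiler_wrapper(log_compilation)


def log_compilation(result, execution):
    """ Extra functionality run after the real compiler.

    :param result: exit code of the real compiler
    :param execution: Execution(pid, cwd, cmd) describing the call """
    logging.info('compiler exited with %d in %s: %s',
                 result, execution.cwd, execution.cmd)
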
diff --git a/tools/scan-build-py/libscanbuild/analyze.py b/tools/scan-build-py/libscanbuild/analyze.py
index 244c34b75837d..a09c72389d762 100644
--- a/tools/scan-build-py/libscanbuild/analyze.py
+++ b/tools/scan-build-py/libscanbuild/analyze.py
@@ -11,72 +11,75 @@ To run the static analyzer against a build is done in multiple steps:
-- Analyze: run the analyzer against the captured commands,
-- Report: create a cover report from the analyzer outputs. """
-import sys
import re
import os
import os.path
import json
-import argparse
import logging
-import subprocess
import multiprocessing
-from libscanbuild import initialize_logging, tempdir, command_entry_point
-from libscanbuild.runner import run
+import tempfile
+import functools
+import subprocess
+import contextlib
+import datetime
+
+from libscanbuild import command_entry_point, compiler_wrapper, \
+ wrapper_environment, run_build, run_command
+from libscanbuild.arguments import parse_args_for_scan_build, \
+ parse_args_for_analyze_build
from libscanbuild.intercept import capture
-from libscanbuild.report import report_directory, document
-from libscanbuild.clang import get_checkers
-from libscanbuild.compilation import split_command
+from libscanbuild.report import document
+from libscanbuild.compilation import split_command, classify_source, \
+ compiler_language
+from libscanbuild.clang import get_version, get_arguments
+from libscanbuild.shell import decode
-__all__ = ['analyze_build_main', 'analyze_build_wrapper']
+__all__ = ['scan_build', 'analyze_build', 'analyze_compiler_wrapper']
COMPILER_WRAPPER_CC = 'analyze-cc'
COMPILER_WRAPPER_CXX = 'analyze-c++'
@command_entry_point
-def analyze_build_main(bin_dir, from_build_command):
- """ Entry point for 'analyze-build' and 'scan-build'. """
-
- parser = create_parser(from_build_command)
- args = parser.parse_args()
- validate(parser, args, from_build_command)
-
- # setup logging
- initialize_logging(args.verbose)
- logging.debug('Parsed arguments: %s', args)
-
- with report_directory(args.output, args.keep_empty) as target_dir:
- if not from_build_command:
- # run analyzer only and generate cover report
- run_analyzer(args, target_dir)
- number_of_bugs = document(args, target_dir, True)
- return number_of_bugs if args.status_bugs else 0
- elif args.intercept_first:
- # run build command and capture compiler executions
- exit_code = capture(args, bin_dir)
- # next step to run the analyzer against the captured commands
+def scan_build():
+ """ Entry point for scan-build command. """
+
+ args = parse_args_for_scan_build()
+ # will re-assign the report directory as new output
+ with report_directory(args.output, args.keep_empty) as args.output:
+        # Run against a build command. There are cases when an analyzer run
+        # is not required, but we still need to set up everything for the
+        # wrappers, because 'configure' needs to capture the CC/CXX values
+        # for the Makefile.
+ if args.intercept_first:
+ # Run build command with intercept module.
+ exit_code = capture(args)
+ # Run the analyzer against the captured commands.
if need_analyzer(args.build):
- run_analyzer(args, target_dir)
- # cover report generation and bug counting
- number_of_bugs = document(args, target_dir, True)
- # remove the compilation database when it was not requested
- if os.path.exists(args.cdb):
- os.unlink(args.cdb)
- # set exit status as it was requested
- return number_of_bugs if args.status_bugs else exit_code
- else:
- return exit_code
+ run_analyzer_parallel(args)
else:
- # run the build command with compiler wrappers which
- # execute the analyzer too. (interposition)
- environment = setup_environment(args, target_dir, bin_dir)
- logging.debug('run build in environment: %s', environment)
- exit_code = subprocess.call(args.build, env=environment)
- logging.debug('build finished with exit code: %d', exit_code)
- # cover report generation and bug counting
- number_of_bugs = document(args, target_dir, False)
- # set exit status as it was requested
- return number_of_bugs if args.status_bugs else exit_code
+ # Run build command and analyzer with compiler wrappers.
+ environment = setup_environment(args)
+ exit_code = run_build(args.build, env=environment)
+ # Cover report generation and bug counting.
+ number_of_bugs = document(args)
+ # Set exit status as it was requested.
+ return number_of_bugs if args.status_bugs else exit_code
+
+
+@command_entry_point
+def analyze_build():
+ """ Entry point for analyze-build command. """
+
+ args = parse_args_for_analyze_build()
+ # will re-assign the report directory as new output
+ with report_directory(args.output, args.keep_empty) as args.output:
+ # Run the analyzer against a compilation db.
+ run_analyzer_parallel(args)
+ # Cover report generation and bug counting.
+ number_of_bugs = document(args)
+ # Set exit status as it was requested.
+ return number_of_bugs if args.status_bugs else 0
def need_analyzer(args):
@@ -92,7 +95,7 @@ def need_analyzer(args):
return len(args) and not re.search('configure|autogen', args[0])
-def run_analyzer(args, output_dir):
+def run_analyzer_parallel(args):
""" Runs the analyzer against the given compilation database. """
def exclude(filename):
@@ -102,7 +105,7 @@ def run_analyzer(args, output_dir):
consts = {
'clang': args.clang,
- 'output_dir': output_dir,
+ 'output_dir': args.output,
'output_format': args.output_format,
'output_failures': args.output_failures,
'direct_args': analyzer_params(args),
@@ -124,18 +127,16 @@ def run_analyzer(args, output_dir):
pool.join()
-def setup_environment(args, destination, bin_dir):
+def setup_environment(args):
""" Set up environment for build command to interpose compiler wrapper. """
environment = dict(os.environ)
+ environment.update(wrapper_environment(args))
environment.update({
- 'CC': os.path.join(bin_dir, COMPILER_WRAPPER_CC),
- 'CXX': os.path.join(bin_dir, COMPILER_WRAPPER_CXX),
- 'ANALYZE_BUILD_CC': args.cc,
- 'ANALYZE_BUILD_CXX': args.cxx,
+ 'CC': COMPILER_WRAPPER_CC,
+ 'CXX': COMPILER_WRAPPER_CXX,
'ANALYZE_BUILD_CLANG': args.clang if need_analyzer(args.build) else '',
- 'ANALYZE_BUILD_VERBOSE': 'DEBUG' if args.verbose > 2 else 'WARNING',
- 'ANALYZE_BUILD_REPORT_DIR': destination,
+ 'ANALYZE_BUILD_REPORT_DIR': args.output,
'ANALYZE_BUILD_REPORT_FORMAT': args.output_format,
'ANALYZE_BUILD_REPORT_FAILURES': 'yes' if args.output_failures else '',
'ANALYZE_BUILD_PARAMETERS': ' '.join(analyzer_params(args)),
@@ -144,51 +145,78 @@ def setup_environment(args, destination, bin_dir):
return environment
-def analyze_build_wrapper(cplusplus):
+@command_entry_point
+def analyze_compiler_wrapper():
""" Entry point for `analyze-cc` and `analyze-c++` compiler wrappers. """
- # initialize wrapper logging
- logging.basicConfig(format='analyze: %(levelname)s: %(message)s',
- level=os.getenv('ANALYZE_BUILD_VERBOSE', 'INFO'))
- # execute with real compiler
- compiler = os.getenv('ANALYZE_BUILD_CXX', 'c++') if cplusplus \
- else os.getenv('ANALYZE_BUILD_CC', 'cc')
- compilation = [compiler] + sys.argv[1:]
- logging.info('execute compiler: %s', compilation)
- result = subprocess.call(compilation)
- # exit when it fails, ...
+ return compiler_wrapper(analyze_compiler_wrapper_impl)
+
+
+def analyze_compiler_wrapper_impl(result, execution):
+ """ Implements analyzer compiler wrapper functionality. """
+
+    # Don't run the analyzer when compilation fails or when it's not requested.
if result or not os.getenv('ANALYZE_BUILD_CLANG'):
- return result
- # ... and run the analyzer if all went well.
+ return
+
+    # check whether this is a compilation
+ compilation = split_command(execution.cmd)
+ if compilation is None:
+ return
+ # collect the needed parameters from environment, crash when missing
+ parameters = {
+ 'clang': os.getenv('ANALYZE_BUILD_CLANG'),
+ 'output_dir': os.getenv('ANALYZE_BUILD_REPORT_DIR'),
+ 'output_format': os.getenv('ANALYZE_BUILD_REPORT_FORMAT'),
+ 'output_failures': os.getenv('ANALYZE_BUILD_REPORT_FAILURES'),
+ 'direct_args': os.getenv('ANALYZE_BUILD_PARAMETERS',
+ '').split(' '),
+ 'force_debug': os.getenv('ANALYZE_BUILD_FORCE_DEBUG'),
+ 'directory': execution.cwd,
+ 'command': [execution.cmd[0], '-c'] + compilation.flags
+ }
+ # call static analyzer against the compilation
+ for source in compilation.files:
+ parameters.update({'file': source})
+ logging.debug('analyzer parameters %s', parameters)
+ current = run(parameters)
+ # display error message from the static analyzer
+ if current is not None:
+ for line in current['error_output']:
+ logging.info(line.rstrip())
+
+
+@contextlib.contextmanager
+def report_directory(hint, keep):
+ """ Responsible for the report directory.
+
+ hint -- could specify the parent directory of the output directory.
+ keep -- a boolean value to keep or delete the empty report directory. """
+
+ stamp_format = 'scan-build-%Y-%m-%d-%H-%M-%S-%f-'
+ stamp = datetime.datetime.now().strftime(stamp_format)
+ parent_dir = os.path.abspath(hint)
+ if not os.path.exists(parent_dir):
+ os.makedirs(parent_dir)
+ name = tempfile.mkdtemp(prefix=stamp, dir=parent_dir)
+
+ logging.info('Report directory created: %s', name)
+
try:
- # check is it a compilation
- compilation = split_command(sys.argv)
- if compilation is None:
- return result
- # collect the needed parameters from environment, crash when missing
- parameters = {
- 'clang': os.getenv('ANALYZE_BUILD_CLANG'),
- 'output_dir': os.getenv('ANALYZE_BUILD_REPORT_DIR'),
- 'output_format': os.getenv('ANALYZE_BUILD_REPORT_FORMAT'),
- 'output_failures': os.getenv('ANALYZE_BUILD_REPORT_FAILURES'),
- 'direct_args': os.getenv('ANALYZE_BUILD_PARAMETERS',
- '').split(' '),
- 'force_debug': os.getenv('ANALYZE_BUILD_FORCE_DEBUG'),
- 'directory': os.getcwd(),
- 'command': [sys.argv[0], '-c'] + compilation.flags
- }
- # call static analyzer against the compilation
- for source in compilation.files:
- parameters.update({'file': source})
- logging.debug('analyzer parameters %s', parameters)
- current = run(parameters)
- # display error message from the static analyzer
- if current is not None:
- for line in current['error_output']:
- logging.info(line.rstrip())
- except Exception:
- logging.exception("run analyzer inside compiler wrapper failed.")
- return result
+ yield name
+ finally:
+ if os.listdir(name):
+ msg = "Run 'scan-view %s' to examine bug reports."
+ keep = True
+ else:
+ if keep:
+ msg = "Report directory '%s' contains no report, but kept."
+ else:
+ msg = "Removing directory '%s' because it contains no report."
+ logging.warning(msg, name)
+
+ if not keep:
+ os.rmdir(name)
def analyzer_params(args):
@@ -238,279 +266,275 @@ def analyzer_params(args):
return prefix_with('-Xclang', result)
-def print_active_checkers(checkers):
- """ Print active checkers to stdout. """
+def require(required):
+ """ Decorator for checking the required values in state.
- for name in sorted(name for name, (_, active) in checkers.items()
- if active):
- print(name)
+    It checks the required attributes in the passed state and stops when
+ any of those is missing. """
+ def decorator(function):
+ @functools.wraps(function)
+ def wrapper(*args, **kwargs):
+ for key in required:
+ if key not in args[0]:
+ raise KeyError('{0} not passed to {1}'.format(
+ key, function.__name__))
-def print_checkers(checkers):
- """ Print verbose checker help to stdout. """
+ return function(*args, **kwargs)
- print('')
- print('available checkers:')
- print('')
- for name in sorted(checkers.keys()):
- description, active = checkers[name]
- prefix = '+' if active else ' '
- if len(name) > 30:
- print(' {0} {1}'.format(prefix, name))
- print(' ' * 35 + description)
+ return wrapper
+
+ return decorator
+
+
+@require(['command', # entry from compilation database
+ 'directory', # entry from compilation database
+ 'file', # entry from compilation database
+ 'clang', # clang executable name (and path)
+ 'direct_args', # arguments from command line
+ 'force_debug', # kill non debug macros
+ 'output_dir', # where generated report files shall go
+ 'output_format', # it's 'plist' or 'html' or both
+ 'output_failures']) # generate crash reports or not
+def run(opts):
+ """ Entry point to run (or not) static analyzer against a single entry
+ of the compilation database.
+
+    This complex task is decomposed into smaller methods which call each
+    other in a chain. If the analysis is not possible, the given method
+    just returns and breaks the chain.
+
+    The passed parameter is a python dictionary. Each method first checks
+    that the needed parameters were received. (This is done by the 'require'
+ decorator. It's like an 'assert' to check the contract between the
+ caller and the called method.) """
+
+ try:
+ command = opts.pop('command')
+ command = command if isinstance(command, list) else decode(command)
+ logging.debug("Run analyzer against '%s'", command)
+ opts.update(classify_parameters(command))
+
+ return arch_check(opts)
+ except Exception:
+ logging.error("Problem occured during analyzis.", exc_info=1)
+ return None
+
+
+@require(['clang', 'directory', 'flags', 'file', 'output_dir', 'language',
+ 'error_output', 'exit_code'])
+def report_failure(opts):
+ """ Create report when analyzer failed.
+
+    The major report is the preprocessor output. The output filename is
+    generated randomly. The compiler output is captured into a '.stderr.txt'
+    file, and some execution context is saved into an '.info.txt' file. """
+
+ def extension():
+ """ Generate preprocessor file extension. """
+
+ mapping = {'objective-c++': '.mii', 'objective-c': '.mi', 'c++': '.ii'}
+ return mapping.get(opts['language'], '.i')
+
+ def destination():
+ """ Creates failures directory if not exits yet. """
+
+ failures_dir = os.path.join(opts['output_dir'], 'failures')
+ if not os.path.isdir(failures_dir):
+ os.makedirs(failures_dir)
+ return failures_dir
+
+ # Classify error type: when Clang terminated by a signal it's a 'Crash'.
+ # (python subprocess Popen.returncode is negative when child terminated
+ # by signal.) Everything else is 'Other Error'.
+ error = 'crash' if opts['exit_code'] < 0 else 'other_error'
+ # Create preprocessor output file name. (This is blindly following the
+ # Perl implementation.)
+ (handle, name) = tempfile.mkstemp(suffix=extension(),
+ prefix='clang_' + error + '_',
+ dir=destination())
+ os.close(handle)
+ # Execute Clang again, but run the syntax check only.
+ cwd = opts['directory']
+ cmd = get_arguments(
+ [opts['clang'], '-fsyntax-only', '-E'
+ ] + opts['flags'] + [opts['file'], '-o', name], cwd)
+ run_command(cmd, cwd=cwd)
+ # write general information about the crash
+ with open(name + '.info.txt', 'w') as handle:
+ handle.write(opts['file'] + os.linesep)
+ handle.write(error.title().replace('_', ' ') + os.linesep)
+ handle.write(' '.join(cmd) + os.linesep)
+ handle.write(' '.join(os.uname()) + os.linesep)
+ handle.write(get_version(opts['clang']))
+ handle.close()
+ # write the captured output too
+ with open(name + '.stderr.txt', 'w') as handle:
+ handle.writelines(opts['error_output'])
+ handle.close()
+
+
+@require(['clang', 'directory', 'flags', 'direct_args', 'file', 'output_dir',
+ 'output_format'])
+def run_analyzer(opts, continuation=report_failure):
+ """ It assembles the analysis command line and executes it. Capture the
+ output of the analysis and returns with it. If failure reports are
+ requested, it calls the continuation to generate it. """
+
+ def target():
+ """ Creates output file name for reports. """
+ if opts['output_format'] in {'plist', 'plist-html'}:
+ (handle, name) = tempfile.mkstemp(prefix='report-',
+ suffix='.plist',
+ dir=opts['output_dir'])
+ os.close(handle)
+ return name
+ return opts['output_dir']
+
+ try:
+ cwd = opts['directory']
+ cmd = get_arguments([opts['clang'], '--analyze'] +
+ opts['direct_args'] + opts['flags'] +
+ [opts['file'], '-o', target()],
+ cwd)
+ output = run_command(cmd, cwd=cwd)
+ return {'error_output': output, 'exit_code': 0}
+ except subprocess.CalledProcessError as ex:
+ result = {'error_output': ex.output, 'exit_code': ex.returncode}
+ if opts.get('output_failures', False):
+ opts.update(result)
+ continuation(opts)
+ return result
+
+
+@require(['flags', 'force_debug'])
+def filter_debug_flags(opts, continuation=run_analyzer):
+ """ Filter out nondebug macros when requested. """
+
+ if opts.pop('force_debug'):
+ # lazy implementation just append an undefine macro at the end
+ opts.update({'flags': opts['flags'] + ['-UNDEBUG']})
+
+ return continuation(opts)
+
+
+@require(['language', 'compiler', 'file', 'flags'])
+def language_check(opts, continuation=filter_debug_flags):
+ """ Find out the language from command line parameters or file name
+    extension. The decision is also influenced by the compiler invocation. """
+
+ accepted = frozenset({
+ 'c', 'c++', 'objective-c', 'objective-c++', 'c-cpp-output',
+ 'c++-cpp-output', 'objective-c-cpp-output'
+ })
+
+ # language can be given as a parameter...
+ language = opts.pop('language')
+ compiler = opts.pop('compiler')
+ # ... or find out from source file extension
+ if language is None and compiler is not None:
+ language = classify_source(opts['file'], compiler == 'c')
+
+ if language is None:
+ logging.debug('skip analysis, language not known')
+ return None
+ elif language not in accepted:
+ logging.debug('skip analysis, language not supported')
+ return None
+ else:
+ logging.debug('analysis, language: %s', language)
+ opts.update({'language': language,
+ 'flags': ['-x', language] + opts['flags']})
+ return continuation(opts)
+
+
+@require(['arch_list', 'flags'])
+def arch_check(opts, continuation=language_check):
+ """ Do run analyzer through one of the given architectures. """
+
+ disabled = frozenset({'ppc', 'ppc64'})
+
+ received_list = opts.pop('arch_list')
+ if received_list:
+ # filter out disabled architectures and -arch switches
+ filtered_list = [a for a in received_list if a not in disabled]
+ if filtered_list:
+            # There should be only one arch given (or the same one multiple
+            # times). If multiple, different archs are given, they should
+            # not change the pre-processing step. But that's the only pass
+            # we have before running the analyzer.
+ current = filtered_list.pop()
+ logging.debug('analysis, on arch: %s', current)
+
+ opts.update({'flags': ['-arch', current] + opts['flags']})
+ return continuation(opts)
else:
- print(' {0} {1: <30} {2}'.format(prefix, name, description))
- print('')
- print('NOTE: "+" indicates that an analysis is enabled by default.')
- print('')
-
-
-def validate(parser, args, from_build_command):
- """ Validation done by the parser itself, but semantic check still
- needs to be done. This method is doing that. """
-
- # Make plugins always a list. (It might be None when not specified.)
- args.plugins = args.plugins if args.plugins else []
-
- if args.help_checkers_verbose:
- print_checkers(get_checkers(args.clang, args.plugins))
- parser.exit()
- elif args.help_checkers:
- print_active_checkers(get_checkers(args.clang, args.plugins))
- parser.exit()
-
- if from_build_command and not args.build:
- parser.error('missing build command')
-
-
-def create_parser(from_build_command):
- """ Command line argument parser factory method. """
-
- parser = argparse.ArgumentParser(
- formatter_class=argparse.ArgumentDefaultsHelpFormatter)
-
- parser.add_argument(
- '--verbose', '-v',
- action='count',
- default=0,
- help="""Enable verbose output from '%(prog)s'. A second and third
- flag increases verbosity.""")
- parser.add_argument(
- '--override-compiler',
- action='store_true',
- help="""Always resort to the compiler wrapper even when better
- interposition methods are available.""")
- parser.add_argument(
- '--intercept-first',
- action='store_true',
- help="""Run the build commands only, build a compilation database,
- then run the static analyzer afterwards.
- Generally speaking it has better coverage on build commands.
- With '--override-compiler' it use compiler wrapper, but does
- not run the analyzer till the build is finished. """)
- parser.add_argument(
- '--cdb',
- metavar='<file>',
- default="compile_commands.json",
- help="""The JSON compilation database.""")
-
- parser.add_argument(
- '--output', '-o',
- metavar='<path>',
- default=tempdir(),
- help="""Specifies the output directory for analyzer reports.
- Subdirectory will be created if default directory is targeted.
- """)
- parser.add_argument(
- '--status-bugs',
- action='store_true',
- help="""By default, the exit status of '%(prog)s' is the same as the
- executed build command. Specifying this option causes the exit
- status of '%(prog)s' to be non zero if it found potential bugs
- and zero otherwise.""")
- parser.add_argument(
- '--html-title',
- metavar='<title>',
- help="""Specify the title used on generated HTML pages.
- If not specified, a default title will be used.""")
- parser.add_argument(
- '--analyze-headers',
- action='store_true',
- help="""Also analyze functions in #included files. By default, such
- functions are skipped unless they are called by functions
- within the main source file.""")
- format_group = parser.add_mutually_exclusive_group()
- format_group.add_argument(
- '--plist', '-plist',
- dest='output_format',
- const='plist',
- default='html',
- action='store_const',
- help="""This option outputs the results as a set of .plist files.""")
- format_group.add_argument(
- '--plist-html', '-plist-html',
- dest='output_format',
- const='plist-html',
- default='html',
- action='store_const',
- help="""This option outputs the results as a set of .html and .plist
- files.""")
- # TODO: implement '-view '
-
- advanced = parser.add_argument_group('advanced options')
- advanced.add_argument(
- '--keep-empty',
- action='store_true',
- help="""Don't remove the build results directory even if no issues
- were reported.""")
- advanced.add_argument(
- '--no-failure-reports', '-no-failure-reports',
- dest='output_failures',
- action='store_false',
- help="""Do not create a 'failures' subdirectory that includes analyzer
- crash reports and preprocessed source files.""")
- advanced.add_argument(
- '--stats', '-stats',
- action='store_true',
- help="""Generates visitation statistics for the project being analyzed.
- """)
- advanced.add_argument(
- '--internal-stats',
- action='store_true',
- help="""Generate internal analyzer statistics.""")
- advanced.add_argument(
- '--maxloop', '-maxloop',
- metavar='<loop count>',
- type=int,
- help="""Specifiy the number of times a block can be visited before
- giving up. Increase for more comprehensive coverage at a cost
- of speed.""")
- advanced.add_argument(
- '--store', '-store',
- metavar='<model>',
- dest='store_model',
- choices=['region', 'basic'],
- help="""Specify the store model used by the analyzer.
- 'region' specifies a field- sensitive store model.
- 'basic' which is far less precise but can more quickly
- analyze code. 'basic' was the default store model for
- checker-0.221 and earlier.""")
- advanced.add_argument(
- '--constraints', '-constraints',
- metavar='<model>',
- dest='constraints_model',
- choices=['range', 'basic'],
- help="""Specify the contraint engine used by the analyzer. Specifying
- 'basic' uses a simpler, less powerful constraint model used by
- checker-0.160 and earlier.""")
- advanced.add_argument(
- '--use-analyzer',
- metavar='<path>',
- dest='clang',
- default='clang',
- help="""'%(prog)s' uses the 'clang' executable relative to itself for
- static analysis. One can override this behavior with this
- option by using the 'clang' packaged with Xcode (on OS X) or
- from the PATH.""")
- advanced.add_argument(
- '--use-cc',
- metavar='<path>',
- dest='cc',
- default='cc',
- help="""When '%(prog)s' analyzes a project by interposing a "fake
- compiler", which executes a real compiler for compilation and
- do other tasks (to run the static analyzer or just record the
- compiler invocation). Because of this interposing, '%(prog)s'
- does not know what compiler your project normally uses.
- Instead, it simply overrides the CC environment variable, and
- guesses your default compiler.
-
- If you need '%(prog)s' to use a specific compiler for
- *compilation* then you can use this option to specify a path
- to that compiler.""")
- advanced.add_argument(
- '--use-c++',
- metavar='<path>',
- dest='cxx',
- default='c++',
- help="""This is the same as "--use-cc" but for C++ code.""")
- advanced.add_argument(
- '--analyzer-config', '-analyzer-config',
- metavar='<options>',
- help="""Provide options to pass through to the analyzer's
- -analyzer-config flag. Several options are separated with
- comma: 'key1=val1,key2=val2'
-
- Available options:
- stable-report-filename=true or false (default)
-
- Switch the page naming to:
- report-<filename>-<function/method name>-<id>.html
- instead of report-XXXXXX.html""")
- advanced.add_argument(
- '--exclude',
- metavar='<directory>',
- dest='excludes',
- action='append',
- default=[],
- help="""Do not run static analyzer against files found in this
- directory. (You can specify this option multiple times.)
- Could be usefull when project contains 3rd party libraries.
- The directory path shall be absolute path as file names in
- the compilation database.""")
- advanced.add_argument(
- '--force-analyze-debug-code',
- dest='force_debug',
- action='store_true',
- help="""Tells analyzer to enable assertions in code even if they were
- disabled during compilation, enabling more precise results.""")
-
- plugins = parser.add_argument_group('checker options')
- plugins.add_argument(
- '--load-plugin', '-load-plugin',
- metavar='<plugin library>',
- dest='plugins',
- action='append',
- help="""Loading external checkers using the clang plugin interface.""")
- plugins.add_argument(
- '--enable-checker', '-enable-checker',
- metavar='<checker name>',
- action=AppendCommaSeparated,
- help="""Enable specific checker.""")
- plugins.add_argument(
- '--disable-checker', '-disable-checker',
- metavar='<checker name>',
- action=AppendCommaSeparated,
- help="""Disable specific checker.""")
- plugins.add_argument(
- '--help-checkers',
- action='store_true',
- help="""A default group of checkers is run unless explicitly disabled.
- Exactly which checkers constitute the default group is a
- function of the operating system in use. These can be printed
- with this flag.""")
- plugins.add_argument(
- '--help-checkers-verbose',
- action='store_true',
- help="""Print all available checkers and mark the enabled ones.""")
-
- if from_build_command:
- parser.add_argument(
- dest='build',
- nargs=argparse.REMAINDER,
- help="""Command to run.""")
-
- return parser
-
-
-class AppendCommaSeparated(argparse.Action):
- """ argparse Action class to support multiple comma separated lists. """
-
- def __call__(self, __parser, namespace, values, __option_string):
- # getattr(obj, attr, default) does not really returns default but none
- if getattr(namespace, self.dest, None) is None:
- setattr(namespace, self.dest, [])
- # once it's fixed we can use as expected
- actual = getattr(namespace, self.dest)
- actual.extend(values.split(','))
- setattr(namespace, self.dest, actual)
+ logging.debug('skip analysis, found not supported arch')
+ return None
+ else:
+ logging.debug('analysis, on default arch')
+ return continuation(opts)
+
+# To get good results from the static analyzer, certain compiler options
+# shall be omitted. The flag filtering only affects the static analyzer run.
+#
+# Keys are the option names, values the number of following arguments to skip
+IGNORED_FLAGS = {
+ '-c': 0, # compile option will be overwritten
+ '-fsyntax-only': 0, # static analyzer option will be overwritten
+ '-o': 1, # will set up own output file
+ # flags below are inherited from the perl implementation.
+ '-g': 0,
+ '-save-temps': 0,
+ '-install_name': 1,
+ '-exported_symbols_list': 1,
+ '-current_version': 1,
+ '-compatibility_version': 1,
+ '-init': 1,
+ '-e': 1,
+ '-seg1addr': 1,
+ '-bundle_loader': 1,
+ '-multiply_defined': 1,
+ '-sectorder': 3,
+ '--param': 1,
+ '--serialize-diagnostics': 1
+}
+
+
+def classify_parameters(command):
+ """ Prepare compiler flags (filters some and add others) and take out
+ language (-x) and architecture (-arch) flags for future processing. """
+
+ result = {
+ 'flags': [], # the filtered compiler flags
+ 'arch_list': [], # list of architecture flags
+ 'language': None, # compilation language, None, if not specified
+ 'compiler': compiler_language(command) # 'c' or 'c++'
+ }
+
+ # iterate on the compile options
+ args = iter(command[1:])
+ for arg in args:
+ # take arch flags into a separate basket
+ if arg == '-arch':
+ result['arch_list'].append(next(args))
+ # take language
+ elif arg == '-x':
+ result['language'] = next(args)
+        # parameters which look like source files are not flags
+ elif re.match(r'^[^-].+', arg) and classify_source(arg):
+ pass
+ # ignore some flags
+ elif arg in IGNORED_FLAGS:
+ count = IGNORED_FLAGS[arg]
+ for _ in range(count):
+ next(args)
+ # we don't care about extra warnings, but we should suppress ones
+ # that we don't want to see.
+ elif re.match(r'^-W.+', arg) and not re.match(r'^-Wno-.+', arg):
+ pass
+ # and consider everything else as compilation flag.
+ else:
+ result['flags'].append(arg)
+
+ return result
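
To illustrate the decomposed chain above: run() pops the raw 'command',
merges in classify_parameters(), and then arch_check, language_check,
filter_debug_flags and run_analyzer either bail out with None or call the
next continuation. The sketch below shows what classify_parameters() is
expected to produce for a typical invocation, based on the rules in
IGNORED_FLAGS; the command line itself is an example, not taken from this
commit, and it assumes compiler_language() reports 'c' for a 'cc' call.

# Illustrative expectation only, not a test shipped with this commit.
cmd = ['cc', '-c', 'main.c', '-o', 'main.o', '-Wall', '-Wno-unused', '-g',
       '-x', 'c', '-arch', 'x86_64', '-I.', '-DNDEBUG']
expected = {
    'compiler': 'c',          # assuming compiler_language(cmd) says 'c'
    'language': 'c',          # taken from the '-x' argument
    'arch_list': ['x86_64'],  # '-arch' values are collected separately
    # '-c', '-o main.o' and '-g' are ignored, '-Wall' is dropped as an extra
    # warning, and 'main.c' is recognized as a source file rather than a flag.
    'flags': ['-Wno-unused', '-I.', '-DNDEBUG'],
}
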
diff --git a/tools/scan-build-py/libscanbuild/arguments.py b/tools/scan-build-py/libscanbuild/arguments.py
new file mode 100644
index 0000000000000..2735123f9f163
--- /dev/null
+++ b/tools/scan-build-py/libscanbuild/arguments.py
@@ -0,0 +1,431 @@
+# -*- coding: utf-8 -*-
+# The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+""" This module parses and validates arguments for command-line interfaces.
+
+It uses argparse module to create the command line parser. (This library is
+in the standard python library since 3.2 and backported to 2.7, but not
+earlier.)
+
+It also implements basic validation methods, related to the command.
+Validations are mostly calling specific help methods, or mangling values.
+"""
+
+import os
+import sys
+import argparse
+import logging
+import tempfile
+from libscanbuild import reconfigure_logging
+from libscanbuild.clang import get_checkers
+
+__all__ = ['parse_args_for_intercept_build', 'parse_args_for_analyze_build',
+ 'parse_args_for_scan_build']
+
+
+def parse_args_for_intercept_build():
+ """ Parse and validate command-line arguments for intercept-build. """
+
+ parser = create_intercept_parser()
+ args = parser.parse_args()
+
+ reconfigure_logging(args.verbose)
+ logging.debug('Raw arguments %s', sys.argv)
+
+ # short validation logic
+ if not args.build:
+ parser.error(message='missing build command')
+
+ logging.debug('Parsed arguments: %s', args)
+ return args
+
+
+def parse_args_for_analyze_build():
+ """ Parse and validate command-line arguments for analyze-build. """
+
+ from_build_command = False
+ parser = create_analyze_parser(from_build_command)
+ args = parser.parse_args()
+
+ reconfigure_logging(args.verbose)
+ logging.debug('Raw arguments %s', sys.argv)
+
+ normalize_args_for_analyze(args, from_build_command)
+ validate_args_for_analyze(parser, args, from_build_command)
+ logging.debug('Parsed arguments: %s', args)
+ return args
+
+
+def parse_args_for_scan_build():
+ """ Parse and validate command-line arguments for scan-build. """
+
+ from_build_command = True
+ parser = create_analyze_parser(from_build_command)
+ args = parser.parse_args()
+
+ reconfigure_logging(args.verbose)
+ logging.debug('Raw arguments %s', sys.argv)
+
+ normalize_args_for_analyze(args, from_build_command)
+ validate_args_for_analyze(parser, args, from_build_command)
+ logging.debug('Parsed arguments: %s', args)
+ return args
+
+
+def normalize_args_for_analyze(args, from_build_command):
+ """ Normalize parsed arguments for analyze-build and scan-build.
+
+ :param args: Parsed argument object. (Will be mutated.)
+    :param from_build_command: Boolean that tells whether the command runs
+    the analyzer against a build command or a compilation db. """
+
+ # make plugins always a list. (it might be None when not specified.)
+ if args.plugins is None:
+ args.plugins = []
+
+ # make exclude directory list unique and absolute.
+ uniq_excludes = set(os.path.abspath(entry) for entry in args.excludes)
+ args.excludes = list(uniq_excludes)
+
+    # because the code is shared between all tools, some commonly used
+    # methods expect certain arguments to be present. so, instead of querying
+    # the args object about the presence of the flag, we fake it here to make
+    # those methods more readable. (it's an arguable choice, taken only for
+    # those which have a good default value.)
+ if from_build_command:
+        # add the cdb parameter invisibly to make the report module work.
+ args.cdb = 'compile_commands.json'
+
+
+def validate_args_for_analyze(parser, args, from_build_command):
+ """ Command line parsing is done by the argparse module, but semantic
+ validation still needs to be done. This method is doing it for
+ analyze-build and scan-build commands.
+
+ :param parser: The command line parser object.
+ :param args: Parsed argument object.
+    :param from_build_command: Boolean that tells whether the command runs
+    the analyzer against a build command or a compilation db.
+ :return: No return value, but this call might throw when validation
+ fails. """
+
+ if args.help_checkers_verbose:
+ print_checkers(get_checkers(args.clang, args.plugins))
+ parser.exit(status=0)
+ elif args.help_checkers:
+ print_active_checkers(get_checkers(args.clang, args.plugins))
+ parser.exit(status=0)
+ elif from_build_command and not args.build:
+ parser.error(message='missing build command')
+ elif not from_build_command and not os.path.exists(args.cdb):
+ parser.error(message='compilation database is missing')
+
+
+def create_intercept_parser():
+ """ Creates a parser for command-line arguments to 'intercept'. """
+
+ parser = create_default_parser()
+ parser_add_cdb(parser)
+
+ parser_add_prefer_wrapper(parser)
+ parser_add_compilers(parser)
+
+ advanced = parser.add_argument_group('advanced options')
+ group = advanced.add_mutually_exclusive_group()
+ group.add_argument(
+ '--append',
+ action='store_true',
+ help="""Extend existing compilation database with new entries.
+ Duplicate entries are detected and not present in the final output.
+        The output is not continuously updated; it is written when the build
+        command has finished. """)
+
+ parser.add_argument(
+ dest='build', nargs=argparse.REMAINDER, help="""Command to run.""")
+ return parser
+
+
+def create_analyze_parser(from_build_command):
+ """ Creates a parser for command-line arguments to 'analyze'. """
+
+ parser = create_default_parser()
+
+ if from_build_command:
+ parser_add_prefer_wrapper(parser)
+ parser_add_compilers(parser)
+
+ parser.add_argument(
+ '--intercept-first',
+ action='store_true',
+ help="""Run the build commands first, intercept compiler
+ calls and then run the static analyzer afterwards.
+ Generally speaking it has better coverage on build commands.
+        With '--override-compiler' it uses compiler wrappers, but does
+        not run the analyzer until the build is finished.""")
+ else:
+ parser_add_cdb(parser)
+
+ parser.add_argument(
+ '--status-bugs',
+ action='store_true',
+ help="""The exit status of '%(prog)s' is the same as the executed
+        build command. This option ignores the build exit status and sets it
+        to non-zero if potential bugs were found, or zero otherwise.""")
+ parser.add_argument(
+ '--exclude',
+ metavar='<directory>',
+ dest='excludes',
+ action='append',
+ default=[],
+ help="""Do not run static analyzer against files found in this
+ directory. (You can specify this option multiple times.)
+ Could be useful when project contains 3rd party libraries.""")
+
+ output = parser.add_argument_group('output control options')
+ output.add_argument(
+ '--output',
+ '-o',
+ metavar='<path>',
+ default=tempfile.gettempdir(),
+ help="""Specifies the output directory for analyzer reports.
+ Subdirectory will be created if default directory is targeted.""")
+ output.add_argument(
+ '--keep-empty',
+ action='store_true',
+ help="""Don't remove the build results directory even if no issues
+ were reported.""")
+ output.add_argument(
+ '--html-title',
+ metavar='<title>',
+ help="""Specify the title used on generated HTML pages.
+ If not specified, a default title will be used.""")
+ format_group = output.add_mutually_exclusive_group()
+ format_group.add_argument(
+ '--plist',
+ '-plist',
+ dest='output_format',
+ const='plist',
+ default='html',
+ action='store_const',
+ help="""Cause the results as a set of .plist files.""")
+ format_group.add_argument(
+ '--plist-html',
+ '-plist-html',
+ dest='output_format',
+ const='plist-html',
+ default='html',
+ action='store_const',
+ help="""Cause the results as a set of .html and .plist files.""")
+ # TODO: implement '-view '
+
+ advanced = parser.add_argument_group('advanced options')
+ advanced.add_argument(
+ '--use-analyzer',
+ metavar='<path>',
+ dest='clang',
+ default='clang',
+ help="""'%(prog)s' uses the 'clang' executable relative to itself for
+ static analysis. One can override this behavior with this option by
+ using the 'clang' packaged with Xcode (on OS X) or from the PATH.""")
+ advanced.add_argument(
+ '--no-failure-reports',
+ '-no-failure-reports',
+ dest='output_failures',
+ action='store_false',
+ help="""Do not create a 'failures' subdirectory that includes analyzer
+ crash reports and preprocessed source files.""")
+ parser.add_argument(
+ '--analyze-headers',
+ action='store_true',
+ help="""Also analyze functions in #included files. By default, such
+ functions are skipped unless they are called by functions within the
+ main source file.""")
+ advanced.add_argument(
+ '--stats',
+ '-stats',
+ action='store_true',
+ help="""Generates visitation statistics for the project.""")
+ advanced.add_argument(
+ '--internal-stats',
+ action='store_true',
+ help="""Generate internal analyzer statistics.""")
+ advanced.add_argument(
+ '--maxloop',
+ '-maxloop',
+ metavar='<loop count>',
+ type=int,
+ help="""Specifiy the number of times a block can be visited before
+ giving up. Increase for more comprehensive coverage at a cost of
+ speed.""")
+ advanced.add_argument(
+ '--store',
+ '-store',
+ metavar='<model>',
+ dest='store_model',
+ choices=['region', 'basic'],
+ help="""Specify the store model used by the analyzer. 'region'
+ specifies a field- sensitive store model. 'basic' which is far less
+ precise but can more quickly analyze code. 'basic' was the default
+ store model for checker-0.221 and earlier.""")
+ advanced.add_argument(
+ '--constraints',
+ '-constraints',
+ metavar='<model>',
+ dest='constraints_model',
+ choices=['range', 'basic'],
+ help="""Specify the constraint engine used by the analyzer. Specifying
+ 'basic' uses a simpler, less powerful constraint model used by
+ checker-0.160 and earlier.""")
+ advanced.add_argument(
+ '--analyzer-config',
+ '-analyzer-config',
+ metavar='<options>',
+ help="""Provide options to pass through to the analyzer's
+ -analyzer-config flag. Several options are separated with comma:
+ 'key1=val1,key2=val2'
+
+ Available options:
+ stable-report-filename=true or false (default)
+
+ Switch the page naming to:
+ report-<filename>-<function/method name>-<id>.html
+ instead of report-XXXXXX.html""")
+ advanced.add_argument(
+ '--force-analyze-debug-code',
+ dest='force_debug',
+ action='store_true',
+ help="""Tells analyzer to enable assertions in code even if they were
+ disabled during compilation, enabling more precise results.""")
+
+ plugins = parser.add_argument_group('checker options')
+ plugins.add_argument(
+ '--load-plugin',
+ '-load-plugin',
+ metavar='<plugin library>',
+ dest='plugins',
+ action='append',
+ help="""Loading external checkers using the clang plugin interface.""")
+ plugins.add_argument(
+ '--enable-checker',
+ '-enable-checker',
+ metavar='<checker name>',
+ action=AppendCommaSeparated,
+ help="""Enable specific checker.""")
+ plugins.add_argument(
+ '--disable-checker',
+ '-disable-checker',
+ metavar='<checker name>',
+ action=AppendCommaSeparated,
+ help="""Disable specific checker.""")
+ plugins.add_argument(
+ '--help-checkers',
+ action='store_true',
+ help="""A default group of checkers is run unless explicitly disabled.
+ Exactly which checkers constitute the default group is a function of
+ the operating system in use. These can be printed with this flag.""")
+ plugins.add_argument(
+ '--help-checkers-verbose',
+ action='store_true',
+ help="""Print all available checkers and mark the enabled ones.""")
+
+ if from_build_command:
+ parser.add_argument(
+ dest='build', nargs=argparse.REMAINDER, help="""Command to run.""")
+ return parser
+
+
+def create_default_parser():
+ """ Creates command line parser for all build wrapper commands. """
+
+ parser = argparse.ArgumentParser(
+ formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+
+ parser.add_argument(
+ '--verbose',
+ '-v',
+ action='count',
+ default=0,
+ help="""Enable verbose output from '%(prog)s'. A second, third and
+        fourth flag increases verbosity.""")
+ return parser
+
+
+def parser_add_cdb(parser):
+ parser.add_argument(
+ '--cdb',
+ metavar='<file>',
+ default="compile_commands.json",
+ help="""The JSON compilation database.""")
+
+
+def parser_add_prefer_wrapper(parser):
+ parser.add_argument(
+ '--override-compiler',
+ action='store_true',
+ help="""Always resort to the compiler wrapper even when better
+ intercept methods are available.""")
+
+
+def parser_add_compilers(parser):
+ parser.add_argument(
+ '--use-cc',
+ metavar='<path>',
+ dest='cc',
+ default=os.getenv('CC', 'cc'),
+ help="""When '%(prog)s' analyzes a project by interposing a compiler
+        wrapper, which executes a real compiler for compilation and does other
+ tasks (record the compiler invocation). Because of this interposing,
+ '%(prog)s' does not know what compiler your project normally uses.
+ Instead, it simply overrides the CC environment variable, and guesses
+ your default compiler.
+
+ If you need '%(prog)s' to use a specific compiler for *compilation*
+ then you can use this option to specify a path to that compiler.""")
+ parser.add_argument(
+ '--use-c++',
+ metavar='<path>',
+ dest='cxx',
+ default=os.getenv('CXX', 'c++'),
+ help="""This is the same as "--use-cc" but for C++ code.""")
+
+
+class AppendCommaSeparated(argparse.Action):
+ """ argparse Action class to support multiple comma separated lists. """
+
+ def __call__(self, __parser, namespace, values, __option_string):
+        # the destination attribute already exists (argparse initializes it
+        # to None), so the getattr() default is never used; make it a list
+        if getattr(namespace, self.dest, None) is None:
+            setattr(namespace, self.dest, [])
+        # from here on the attribute is guaranteed to be a list
+ actual = getattr(namespace, self.dest)
+ actual.extend(values.split(','))
+ setattr(namespace, self.dest, actual)
+
+
+def print_active_checkers(checkers):
+ """ Print active checkers to stdout. """
+
+ for name in sorted(name for name, (_, active) in checkers.items()
+ if active):
+ print(name)
+
+
+def print_checkers(checkers):
+ """ Print verbose checker help to stdout. """
+
+ print('')
+ print('available checkers:')
+ print('')
+ for name in sorted(checkers.keys()):
+ description, active = checkers[name]
+ prefix = '+' if active else ' '
+ if len(name) > 30:
+ print(' {0} {1}'.format(prefix, name))
+ print(' ' * 35 + description)
+ else:
+ print(' {0} {1: <30} {2}'.format(prefix, name, description))
+ print('')
+ print('NOTE: "+" indicates that an analysis is enabled by default.')
+ print('')
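
As an aside (not part of the patch): the AppendCommaSeparated action defined above merges repeated and comma-separated checker options into one flat list. A minimal, self-contained sketch of that behaviour, using a throwaway parser and made-up checker names:

# Editor's sketch only: exercising AppendCommaSeparated with a throwaway
# parser to show how '--enable-checker' values accumulate into a flat list.
import argparse


class AppendCommaSeparated(argparse.Action):
    """ argparse Action class to support multiple comma separated lists. """

    def __call__(self, __parser, namespace, values, __option_string):
        # argparse initializes the destination to None, so replace it first
        if getattr(namespace, self.dest, None) is None:
            setattr(namespace, self.dest, [])
        getattr(namespace, self.dest).extend(values.split(','))


parser = argparse.ArgumentParser()
parser.add_argument('--enable-checker', action=AppendCommaSeparated)
args = parser.parse_args(['--enable-checker', 'alpha.core,nullability',
                          '--enable-checker', 'security.insecureAPI'])
print(args.enable_checker)
# ['alpha.core', 'nullability', 'security.insecureAPI']
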
diff --git a/tools/scan-build-py/libscanbuild/clang.py b/tools/scan-build-py/libscanbuild/clang.py
index 833e77d28bbe1..192e708782c10 100644
--- a/tools/scan-build-py/libscanbuild/clang.py
+++ b/tools/scan-build-py/libscanbuild/clang.py
@@ -9,8 +9,7 @@ Since Clang command line interface is so rich, but this project is using only
a subset of that, it makes sense to create a function specific wrapper. """
import re
-import subprocess
-import logging
+from libscanbuild import run_command
from libscanbuild.shell import decode
__all__ = ['get_version', 'get_arguments', 'get_checkers']
@@ -25,8 +24,9 @@ def get_version(clang):
:param clang: the compiler we are using
:return: the version string printed to stderr """
- output = subprocess.check_output([clang, '-v'], stderr=subprocess.STDOUT)
- return output.decode('utf-8').splitlines()[0]
+ output = run_command([clang, '-v'])
+ # the relevant version info is in the first line
+ return output[0]
def get_arguments(command, cwd):
@@ -38,12 +38,11 @@ def get_arguments(command, cwd):
cmd = command[:]
cmd.insert(1, '-###')
- logging.debug('exec command in %s: %s', cwd, ' '.join(cmd))
- output = subprocess.check_output(cmd, cwd=cwd, stderr=subprocess.STDOUT)
+ output = run_command(cmd, cwd=cwd)
# The relevant information is in the last line of the output.
# Don't check if finding last line fails, would throw exception anyway.
- last_line = output.decode('utf-8').splitlines()[-1]
+ last_line = output[-1]
if re.search(r'clang(.*): error:', last_line):
raise Exception(last_line)
return decode(last_line)
@@ -141,9 +140,7 @@ def get_checkers(clang, plugins):
load = [elem for plugin in plugins for elem in ['-load', plugin]]
cmd = [clang, '-cc1'] + load + ['-analyzer-checker-help']
- logging.debug('exec command: %s', ' '.join(cmd))
- output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
- lines = output.decode('utf-8').splitlines()
+ lines = run_command(cmd)
is_active_checker = is_active(get_active_checkers(clang, plugins))
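
The clang.py helpers above now delegate process execution to run_command from libscanbuild; that helper is changed in this commit in libscanbuild/__init__.py, which is not shown in this section. Judging only from the call sites (it accepts an optional cwd and returns the decoded output as a list of lines), a rough sketch of such a helper might look like the following; treat it as an assumption, not the actual implementation:

# Assumed shape of libscanbuild.run_command, inferred from its call sites in
# clang.py: run a command, merge stderr into stdout, decode, return the lines.
import logging
import subprocess


def run_command(command, cwd=None):
    """ Run a command and return its output as a list of text lines. """
    logging.debug('exec command %s in %s', command, cwd)
    output = subprocess.check_output(command,
                                     cwd=cwd,
                                     stderr=subprocess.STDOUT)
    return output.decode('utf-8').splitlines()
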
diff --git a/tools/scan-build-py/libscanbuild/intercept.py b/tools/scan-build-py/libscanbuild/intercept.py
index 6a9f75349fb58..b9bf9e917526d 100644
--- a/tools/scan-build-py/libscanbuild/intercept.py
+++ b/tools/scan-build-py/libscanbuild/intercept.py
@@ -27,16 +27,16 @@ import re
import itertools
import json
import glob
-import argparse
import logging
-import subprocess
from libear import build_libear, TemporaryDirectory
-from libscanbuild import command_entry_point
-from libscanbuild import duplicate_check, tempdir, initialize_logging
+from libscanbuild import command_entry_point, compiler_wrapper, \
+ wrapper_environment, run_command, run_build
+from libscanbuild import duplicate_check
from libscanbuild.compilation import split_command
+from libscanbuild.arguments import parse_args_for_intercept_build
from libscanbuild.shell import encode, decode
-__all__ = ['capture', 'intercept_build_main', 'intercept_build_wrapper']
+__all__ = ['capture', 'intercept_build', 'intercept_compiler_wrapper']
GS = chr(0x1d)
RS = chr(0x1e)
@@ -44,26 +44,19 @@ US = chr(0x1f)
COMPILER_WRAPPER_CC = 'intercept-cc'
COMPILER_WRAPPER_CXX = 'intercept-c++'
+TRACE_FILE_EXTENSION = '.cmd' # same as in ear.c
+WRAPPER_ONLY_PLATFORMS = frozenset({'win32', 'cygwin'})
@command_entry_point
-def intercept_build_main(bin_dir):
+def intercept_build():
""" Entry point for 'intercept-build' command. """
- parser = create_parser()
- args = parser.parse_args()
+ args = parse_args_for_intercept_build()
+ return capture(args)
- initialize_logging(args.verbose)
- logging.debug('Parsed arguments: %s', args)
- if not args.build:
- parser.print_help()
- return 0
-
- return capture(args, bin_dir)
-
-
-def capture(args, bin_dir):
+def capture(args):
""" The entry point of build command interception. """
def post_processing(commands):
@@ -91,28 +84,23 @@ def capture(args, bin_dir):
for entry in itertools.chain(previous, current)
if os.path.exists(entry['file']) and not duplicate(entry))
- with TemporaryDirectory(prefix='intercept-', dir=tempdir()) as tmp_dir:
+ with TemporaryDirectory(prefix='intercept-') as tmp_dir:
# run the build command
- environment = setup_environment(args, tmp_dir, bin_dir)
- logging.debug('run build in environment: %s', environment)
- exit_code = subprocess.call(args.build, env=environment)
- logging.info('build finished with exit code: %d', exit_code)
+ environment = setup_environment(args, tmp_dir)
+ exit_code = run_build(args.build, env=environment)
# read the intercepted exec calls
exec_traces = itertools.chain.from_iterable(
parse_exec_trace(os.path.join(tmp_dir, filename))
for filename in sorted(glob.iglob(os.path.join(tmp_dir, '*.cmd'))))
- # do post processing only if that was requested
- if 'raw_entries' not in args or not args.raw_entries:
- entries = post_processing(exec_traces)
- else:
- entries = exec_traces
+ # do post processing
+ entries = post_processing(exec_traces)
# dump the compilation database
with open(args.cdb, 'w+') as handle:
json.dump(list(entries), handle, sort_keys=True, indent=4)
return exit_code
-def setup_environment(args, destination, bin_dir):
+def setup_environment(args, destination):
""" Sets up the environment for the build command.
It sets the required environment variables and execute the given command.
@@ -130,12 +118,10 @@ def setup_environment(args, destination, bin_dir):
if not libear_path:
logging.debug('intercept gonna use compiler wrappers')
+ environment.update(wrapper_environment(args))
environment.update({
- 'CC': os.path.join(bin_dir, COMPILER_WRAPPER_CC),
- 'CXX': os.path.join(bin_dir, COMPILER_WRAPPER_CXX),
- 'INTERCEPT_BUILD_CC': c_compiler,
- 'INTERCEPT_BUILD_CXX': cxx_compiler,
- 'INTERCEPT_BUILD_VERBOSE': 'DEBUG' if args.verbose > 2 else 'INFO'
+ 'CC': COMPILER_WRAPPER_CC,
+ 'CXX': COMPILER_WRAPPER_CXX
})
elif sys.platform == 'darwin':
logging.debug('intercept gonna preload libear on OSX')
@@ -150,42 +136,49 @@ def setup_environment(args, destination, bin_dir):
return environment
-def intercept_build_wrapper(cplusplus):
- """ Entry point for `intercept-cc` and `intercept-c++` compiler wrappers.
+@command_entry_point
+def intercept_compiler_wrapper():
+ """ Entry point for `intercept-cc` and `intercept-c++`. """
+
+ return compiler_wrapper(intercept_compiler_wrapper_impl)
+
+
+def intercept_compiler_wrapper_impl(_, execution):
+ """ Implement intercept compiler wrapper functionality.
- It does generate execution report into target directory. And execute
- the wrapped compilation with the real compiler. The parameters for
- report and execution are from environment variables.
+    It generates an execution report into the target directory.
+    The target directory name comes from an environment variable. """
- Those parameters which for 'libear' library can't have meaningful
- values are faked. """
+ message_prefix = 'execution report might be incomplete: %s'
- # initialize wrapper logging
- logging.basicConfig(format='intercept: %(levelname)s: %(message)s',
- level=os.getenv('INTERCEPT_BUILD_VERBOSE', 'INFO'))
- # write report
+ target_dir = os.getenv('INTERCEPT_BUILD_TARGET_DIR')
+ if not target_dir:
+ logging.warning(message_prefix, 'missing target directory')
+ return
+ # write current execution info to the pid file
try:
- target_dir = os.getenv('INTERCEPT_BUILD_TARGET_DIR')
- if not target_dir:
- raise UserWarning('exec report target directory not found')
- pid = str(os.getpid())
- target_file = os.path.join(target_dir, pid + '.cmd')
- logging.debug('writing exec report to: %s', target_file)
- with open(target_file, 'ab') as handler:
- working_dir = os.getcwd()
- command = US.join(sys.argv) + US
- content = RS.join([pid, pid, 'wrapper', working_dir, command]) + GS
- handler.write(content.encode('utf-8'))
+ target_file_name = str(os.getpid()) + TRACE_FILE_EXTENSION
+ target_file = os.path.join(target_dir, target_file_name)
+ logging.debug('writing execution report to: %s', target_file)
+ write_exec_trace(target_file, execution)
except IOError:
- logging.exception('writing exec report failed')
- except UserWarning as warning:
- logging.warning(warning)
- # execute with real compiler
- compiler = os.getenv('INTERCEPT_BUILD_CXX', 'c++') if cplusplus \
- else os.getenv('INTERCEPT_BUILD_CC', 'cc')
- compilation = [compiler] + sys.argv[1:]
- logging.debug('execute compiler: %s', compilation)
- return subprocess.call(compilation)
+ logging.warning(message_prefix, 'io problem')
+
+
+def write_exec_trace(filename, entry):
+ """ Write execution report file.
+
+    This method shall be kept in sync with the execution report writer in the
+    interception library. Each entry is a record whose fields are joined by
+    the GS/RS/US separators defined above.
+
+ :param filename: path to the output execution trace file,
+ :param entry: the Execution object to append to that file. """
+
+ with open(filename, 'ab') as handler:
+ pid = str(entry.pid)
+ command = US.join(entry.cmd) + US
+ content = RS.join([pid, pid, 'wrapper', entry.cwd, command]) + GS
+ handler.write(content.encode('utf-8'))
def parse_exec_trace(filename):
@@ -238,24 +231,21 @@ def is_preload_disabled(platform):
the path and, if so, (2) whether the output of executing 'csrutil status'
contains 'System Integrity Protection status: enabled'.
- Same problem on linux when SELinux is enabled. The status query program
- 'sestatus' and the output when it's enabled 'SELinux status: enabled'. """
+ :param platform: name of the platform (returned by sys.platform),
+    :return: True if library preload will be blocked by the dynamic linker. """
- if platform == 'darwin':
- pattern = re.compile(r'System Integrity Protection status:\s+enabled')
+ if platform in WRAPPER_ONLY_PLATFORMS:
+ return True
+ elif platform == 'darwin':
command = ['csrutil', 'status']
- elif platform in {'linux', 'linux2'}:
- pattern = re.compile(r'SELinux status:\s+enabled')
- command = ['sestatus']
+ pattern = re.compile(r'System Integrity Protection status:\s+enabled')
+ try:
+ return any(pattern.match(line) for line in run_command(command))
+ except:
+ return False
else:
return False
- try:
- lines = subprocess.check_output(command).decode('utf-8')
- return any((pattern.match(line) for line in lines.splitlines()))
- except:
- return False
-
def entry_hash(entry):
""" Implement unique hash method for compilation database entries. """
@@ -271,69 +261,3 @@ def entry_hash(entry):
command = ' '.join(decode(entry['command'])[1:])
return '<>'.join([filename, directory, command])
-
-
-def create_parser():
- """ Command line argument parser factory method. """
-
- parser = argparse.ArgumentParser(
- formatter_class=argparse.ArgumentDefaultsHelpFormatter)
-
- parser.add_argument(
- '--verbose', '-v',
- action='count',
- default=0,
- help="""Enable verbose output from '%(prog)s'. A second and third
- flag increases verbosity.""")
- parser.add_argument(
- '--cdb',
- metavar='<file>',
- default="compile_commands.json",
- help="""The JSON compilation database.""")
- group = parser.add_mutually_exclusive_group()
- group.add_argument(
- '--append',
- action='store_true',
- help="""Append new entries to existing compilation database.""")
- group.add_argument(
- '--disable-filter', '-n',
- dest='raw_entries',
- action='store_true',
- help="""Intercepted child process creation calls (exec calls) are all
- logged to the output. The output is not a compilation database.
- This flag is for debug purposes.""")
-
- advanced = parser.add_argument_group('advanced options')
- advanced.add_argument(
- '--override-compiler',
- action='store_true',
- help="""Always resort to the compiler wrapper even when better
- intercept methods are available.""")
- advanced.add_argument(
- '--use-cc',
- metavar='<path>',
- dest='cc',
- default='cc',
- help="""When '%(prog)s' analyzes a project by interposing a compiler
- wrapper, which executes a real compiler for compilation and
- do other tasks (record the compiler invocation). Because of
- this interposing, '%(prog)s' does not know what compiler your
- project normally uses. Instead, it simply overrides the CC
- environment variable, and guesses your default compiler.
-
- If you need '%(prog)s' to use a specific compiler for
- *compilation* then you can use this option to specify a path
- to that compiler.""")
- advanced.add_argument(
- '--use-c++',
- metavar='<path>',
- dest='cxx',
- default='c++',
- help="""This is the same as "--use-cc" but for C++ code.""")
-
- parser.add_argument(
- dest='build',
- nargs=argparse.REMAINDER,
- help="""Command to run.""")
-
- return parser
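
For reference, write_exec_trace above emits the record layout that the libear interception library also uses: command arguments joined by US, the five record fields joined by RS, and each record terminated by GS. A small round-trip sketch of that format; the Execution tuple and read_exec_trace below are illustrative stand-ins, not the real types from libscanbuild:

# Editor's sketch of the trace record format used by write_exec_trace above.
# 'Execution' and 'read_exec_trace' are stand-ins for illustration only.
import os
import tempfile
from collections import namedtuple

GS = chr(0x1d)  # terminates one record
RS = chr(0x1e)  # separates the fields of a record
US = chr(0x1f)  # separates the arguments of the command field

Execution = namedtuple('Execution', ['pid', 'cwd', 'cmd'])


def write_exec_trace(filename, entry):
    with open(filename, 'ab') as handler:
        pid = str(entry.pid)
        command = US.join(entry.cmd) + US
        content = RS.join([pid, pid, 'wrapper', entry.cwd, command]) + GS
        handler.write(content.encode('utf-8'))


def read_exec_trace(filename):
    with open(filename, 'rb') as handler:
        raw = handler.read().decode('utf-8')
    for record in filter(None, raw.split(GS)):
        pid, ppid, kind, cwd, command = record.split(RS)
        yield {'pid': pid, 'ppid': ppid, 'kind': kind, 'cwd': cwd,
               'cmd': command.split(US)[:-1]}


trace = os.path.join(tempfile.mkdtemp(), str(os.getpid()) + '.cmd')
write_exec_trace(trace, Execution(os.getpid(), os.getcwd(), ['cc', '-c', 'a.c']))
print(list(read_exec_trace(trace)))
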
diff --git a/tools/scan-build-py/libscanbuild/report.py b/tools/scan-build-py/libscanbuild/report.py
index 766ddef719909..54b9695d927fc 100644
--- a/tools/scan-build-py/libscanbuild/report.py
+++ b/tools/scan-build-py/libscanbuild/report.py
@@ -13,102 +13,65 @@ import os
import os.path
import sys
import shutil
-import time
-import tempfile
import itertools
import plistlib
import glob
import json
import logging
-import contextlib
import datetime
from libscanbuild import duplicate_check
from libscanbuild.clang import get_version
-__all__ = ['report_directory', 'document']
+__all__ = ['document']
-@contextlib.contextmanager
-def report_directory(hint, keep):
- """ Responsible for the report directory.
-
- hint -- could specify the parent directory of the output directory.
- keep -- a boolean value to keep or delete the empty report directory. """
-
- stamp_format = 'scan-build-%Y-%m-%d-%H-%M-%S-%f-'
- stamp = datetime.datetime.now().strftime(stamp_format)
-
- parentdir = os.path.abspath(hint)
- if not os.path.exists(parentdir):
- os.makedirs(parentdir)
-
- name = tempfile.mkdtemp(prefix=stamp, dir=parentdir)
-
- logging.info('Report directory created: %s', name)
-
- try:
- yield name
- finally:
- if os.listdir(name):
- msg = "Run 'scan-view %s' to examine bug reports."
- keep = True
- else:
- if keep:
- msg = "Report directory '%s' contans no report, but kept."
- else:
- msg = "Removing directory '%s' because it contains no report."
- logging.warning(msg, name)
-
- if not keep:
- os.rmdir(name)
-
-
-def document(args, output_dir, use_cdb):
+def document(args):
""" Generates cover report and returns the number of bugs/crashes. """
html_reports_available = args.output_format in {'html', 'plist-html'}
logging.debug('count crashes and bugs')
- crash_count = sum(1 for _ in read_crashes(output_dir))
+ crash_count = sum(1 for _ in read_crashes(args.output))
bug_counter = create_counters()
- for bug in read_bugs(output_dir, html_reports_available):
+ for bug in read_bugs(args.output, html_reports_available):
bug_counter(bug)
result = crash_count + bug_counter.total
if html_reports_available and result:
+ use_cdb = os.path.exists(args.cdb)
+
logging.debug('generate index.html file')
- # common prefix for source files to have sort filenames
+        # common prefix for source files to have shorter paths
prefix = commonprefix_from(args.cdb) if use_cdb else os.getcwd()
# assemble the cover from multiple fragments
+ fragments = []
try:
- fragments = []
if bug_counter.total:
- fragments.append(bug_summary(output_dir, bug_counter))
- fragments.append(bug_report(output_dir, prefix))
+ fragments.append(bug_summary(args.output, bug_counter))
+ fragments.append(bug_report(args.output, prefix))
if crash_count:
- fragments.append(crash_report(output_dir, prefix))
- assemble_cover(output_dir, prefix, args, fragments)
- # copy additinal files to the report
- copy_resource_files(output_dir)
+ fragments.append(crash_report(args.output, prefix))
+ assemble_cover(args, prefix, fragments)
+ # copy additional files to the report
+ copy_resource_files(args.output)
if use_cdb:
- shutil.copy(args.cdb, output_dir)
+ shutil.copy(args.cdb, args.output)
finally:
for fragment in fragments:
os.remove(fragment)
return result
-def assemble_cover(output_dir, prefix, args, fragments):
+def assemble_cover(args, prefix, fragments):
""" Put together the fragments into a final report. """
import getpass
import socket
- import datetime
if args.html_title is None:
args.html_title = os.path.basename(prefix) + ' - analyzer results'
- with open(os.path.join(output_dir, 'index.html'), 'w') as handle:
+ with open(os.path.join(args.output, 'index.html'), 'w') as handle:
indent = 0
handle.write(reindent("""
|<!DOCTYPE html>
@@ -375,11 +338,12 @@ def parse_crash(filename):
match = re.match(r'(.*)\.info\.txt', filename)
name = match.group(1) if match else None
- with open(filename) as handler:
- lines = handler.readlines()
+ with open(filename, mode='rb') as handler:
+        # workaround: Windows files use '\r\n' endings; decode and strip manually
+ lines = [line.decode().rstrip() for line in handler.readlines()]
return {
- 'source': lines[0].rstrip(),
- 'problem': lines[1].rstrip(),
+ 'source': lines[0],
+ 'problem': lines[1],
'file': name,
'info': name + '.info.txt',
'stderr': name + '.stderr.txt'
@@ -519,9 +483,10 @@ def commonprefix_from(filename):
def commonprefix(files):
- """ Fixed version of os.path.commonprefix. Return the longest path prefix
- that is a prefix of all paths in filenames. """
+ """ Fixed version of os.path.commonprefix.
+ :param files: list of file names.
+ :return: the longest path prefix that is a prefix of all files. """
result = None
for current in files:
if result is not None:
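
One note on the parse_crash change above: opening the '.info.txt' file in binary mode and decoding each line manually means rstrip() removes '\r\n' endings written on Windows just as it removes plain '\n'. A tiny standalone sketch of that behaviour, with invented file contents:

# Editor's sketch of the binary-read workaround used in parse_crash above.
import os
import tempfile

handle, name = tempfile.mkstemp(suffix='.info.txt')
with os.fdopen(handle, 'wb') as out:
    out.write(b'/path/to/source.c\r\nOther Error\r\n')

with open(name, mode='rb') as handler:
    lines = [line.decode().rstrip() for line in handler.readlines()]

print(lines)  # ['/path/to/source.c', 'Other Error'] on any platform
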
diff --git a/tools/scan-build-py/libscanbuild/runner.py b/tools/scan-build-py/libscanbuild/runner.py
deleted file mode 100644
index 72d02c85fed15..0000000000000
--- a/tools/scan-build-py/libscanbuild/runner.py
+++ /dev/null
@@ -1,302 +0,0 @@
-# -*- coding: utf-8 -*-
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-""" This module is responsible to run the analyzer commands. """
-
-import re
-import os
-import os.path
-import tempfile
-import functools
-import subprocess
-import logging
-from libscanbuild.compilation import classify_source, compiler_language
-from libscanbuild.clang import get_version, get_arguments
-from libscanbuild.shell import decode
-
-__all__ = ['run']
-
-# To have good results from static analyzer certain compiler options shall be
-# omitted. The compiler flag filtering only affects the static analyzer run.
-#
-# Keys are the option name, value number of options to skip
-IGNORED_FLAGS = {
- '-c': 0, # compile option will be overwritten
- '-fsyntax-only': 0, # static analyzer option will be overwritten
- '-o': 1, # will set up own output file
- # flags below are inherited from the perl implementation.
- '-g': 0,
- '-save-temps': 0,
- '-install_name': 1,
- '-exported_symbols_list': 1,
- '-current_version': 1,
- '-compatibility_version': 1,
- '-init': 1,
- '-e': 1,
- '-seg1addr': 1,
- '-bundle_loader': 1,
- '-multiply_defined': 1,
- '-sectorder': 3,
- '--param': 1,
- '--serialize-diagnostics': 1
-}
-
-
-def require(required):
- """ Decorator for checking the required values in state.
-
- It checks the required attributes in the passed state and stop when
- any of those is missing. """
-
- def decorator(function):
- @functools.wraps(function)
- def wrapper(*args, **kwargs):
- for key in required:
- if key not in args[0]:
- raise KeyError('{0} not passed to {1}'.format(
- key, function.__name__))
-
- return function(*args, **kwargs)
-
- return wrapper
-
- return decorator
-
-
-@require(['command', # entry from compilation database
- 'directory', # entry from compilation database
- 'file', # entry from compilation database
- 'clang', # clang executable name (and path)
- 'direct_args', # arguments from command line
- 'force_debug', # kill non debug macros
- 'output_dir', # where generated report files shall go
- 'output_format', # it's 'plist' or 'html' or both
- 'output_failures']) # generate crash reports or not
-def run(opts):
- """ Entry point to run (or not) static analyzer against a single entry
- of the compilation database.
-
- This complex task is decomposed into smaller methods which are calling
- each other in chain. If the analyzis is not possibe the given method
- just return and break the chain.
-
- The passed parameter is a python dictionary. Each method first check
- that the needed parameters received. (This is done by the 'require'
- decorator. It's like an 'assert' to check the contract between the
- caller and the called method.) """
-
- try:
- command = opts.pop('command')
- command = command if isinstance(command, list) else decode(command)
- logging.debug("Run analyzer against '%s'", command)
- opts.update(classify_parameters(command))
-
- return arch_check(opts)
- except Exception:
- logging.error("Problem occured during analyzis.", exc_info=1)
- return None
-
-
-@require(['clang', 'directory', 'flags', 'file', 'output_dir', 'language',
- 'error_type', 'error_output', 'exit_code'])
-def report_failure(opts):
- """ Create report when analyzer failed.
-
- The major report is the preprocessor output. The output filename generated
- randomly. The compiler output also captured into '.stderr.txt' file.
- And some more execution context also saved into '.info.txt' file. """
-
- def extension(opts):
- """ Generate preprocessor file extension. """
-
- mapping = {'objective-c++': '.mii', 'objective-c': '.mi', 'c++': '.ii'}
- return mapping.get(opts['language'], '.i')
-
- def destination(opts):
- """ Creates failures directory if not exits yet. """
-
- name = os.path.join(opts['output_dir'], 'failures')
- if not os.path.isdir(name):
- os.makedirs(name)
- return name
-
- error = opts['error_type']
- (handle, name) = tempfile.mkstemp(suffix=extension(opts),
- prefix='clang_' + error + '_',
- dir=destination(opts))
- os.close(handle)
- cwd = opts['directory']
- cmd = get_arguments([opts['clang'], '-fsyntax-only', '-E'] +
- opts['flags'] + [opts['file'], '-o', name], cwd)
- logging.debug('exec command in %s: %s', cwd, ' '.join(cmd))
- subprocess.call(cmd, cwd=cwd)
- # write general information about the crash
- with open(name + '.info.txt', 'w') as handle:
- handle.write(opts['file'] + os.linesep)
- handle.write(error.title().replace('_', ' ') + os.linesep)
- handle.write(' '.join(cmd) + os.linesep)
- handle.write(' '.join(os.uname()) + os.linesep)
- handle.write(get_version(opts['clang']))
- handle.close()
- # write the captured output too
- with open(name + '.stderr.txt', 'w') as handle:
- handle.writelines(opts['error_output'])
- handle.close()
- # return with the previous step exit code and output
- return {
- 'error_output': opts['error_output'],
- 'exit_code': opts['exit_code']
- }
-
-
-@require(['clang', 'directory', 'flags', 'direct_args', 'file', 'output_dir',
- 'output_format'])
-def run_analyzer(opts, continuation=report_failure):
- """ It assembles the analysis command line and executes it. Capture the
- output of the analysis and returns with it. If failure reports are
- requested, it calls the continuation to generate it. """
-
- def output():
- """ Creates output file name for reports. """
- if opts['output_format'] in {'plist', 'plist-html'}:
- (handle, name) = tempfile.mkstemp(prefix='report-',
- suffix='.plist',
- dir=opts['output_dir'])
- os.close(handle)
- return name
- return opts['output_dir']
-
- cwd = opts['directory']
- cmd = get_arguments([opts['clang'], '--analyze'] + opts['direct_args'] +
- opts['flags'] + [opts['file'], '-o', output()],
- cwd)
- logging.debug('exec command in %s: %s', cwd, ' '.join(cmd))
- child = subprocess.Popen(cmd,
- cwd=cwd,
- universal_newlines=True,
- stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT)
- output = child.stdout.readlines()
- child.stdout.close()
- # do report details if it were asked
- child.wait()
- if opts.get('output_failures', False) and child.returncode:
- error_type = 'crash' if child.returncode & 127 else 'other_error'
- opts.update({
- 'error_type': error_type,
- 'error_output': output,
- 'exit_code': child.returncode
- })
- return continuation(opts)
- # return the output for logging and exit code for testing
- return {'error_output': output, 'exit_code': child.returncode}
-
-
-@require(['flags', 'force_debug'])
-def filter_debug_flags(opts, continuation=run_analyzer):
- """ Filter out nondebug macros when requested. """
-
- if opts.pop('force_debug'):
- # lazy implementation just append an undefine macro at the end
- opts.update({'flags': opts['flags'] + ['-UNDEBUG']})
-
- return continuation(opts)
-
-
-@require(['language', 'compiler', 'file', 'flags'])
-def language_check(opts, continuation=filter_debug_flags):
- """ Find out the language from command line parameters or file name
- extension. The decision also influenced by the compiler invocation. """
-
- accepted = frozenset({
- 'c', 'c++', 'objective-c', 'objective-c++', 'c-cpp-output',
- 'c++-cpp-output', 'objective-c-cpp-output'
- })
-
- # language can be given as a parameter...
- language = opts.pop('language')
- compiler = opts.pop('compiler')
- # ... or find out from source file extension
- if language is None and compiler is not None:
- language = classify_source(opts['file'], compiler == 'c')
-
- if language is None:
- logging.debug('skip analysis, language not known')
- return None
- elif language not in accepted:
- logging.debug('skip analysis, language not supported')
- return None
- else:
- logging.debug('analysis, language: %s', language)
- opts.update({'language': language,
- 'flags': ['-x', language] + opts['flags']})
- return continuation(opts)
-
-
-@require(['arch_list', 'flags'])
-def arch_check(opts, continuation=language_check):
- """ Do run analyzer through one of the given architectures. """
-
- disabled = frozenset({'ppc', 'ppc64'})
-
- received_list = opts.pop('arch_list')
- if received_list:
- # filter out disabled architectures and -arch switches
- filtered_list = [a for a in received_list if a not in disabled]
- if filtered_list:
- # There should be only one arch given (or the same multiple
- # times). If there are multiple arch are given and are not
- # the same, those should not change the pre-processing step.
- # But that's the only pass we have before run the analyzer.
- current = filtered_list.pop()
- logging.debug('analysis, on arch: %s', current)
-
- opts.update({'flags': ['-arch', current] + opts['flags']})
- return continuation(opts)
- else:
- logging.debug('skip analysis, found not supported arch')
- return None
- else:
- logging.debug('analysis, on default arch')
- return continuation(opts)
-
-
-def classify_parameters(command):
- """ Prepare compiler flags (filters some and add others) and take out
- language (-x) and architecture (-arch) flags for future processing. """
-
- result = {
- 'flags': [], # the filtered compiler flags
- 'arch_list': [], # list of architecture flags
- 'language': None, # compilation language, None, if not specified
- 'compiler': compiler_language(command) # 'c' or 'c++'
- }
-
- # iterate on the compile options
- args = iter(command[1:])
- for arg in args:
- # take arch flags into a separate basket
- if arg == '-arch':
- result['arch_list'].append(next(args))
- # take language
- elif arg == '-x':
- result['language'] = next(args)
- # parameters which looks source file are not flags
- elif re.match(r'^[^-].+', arg) and classify_source(arg):
- pass
- # ignore some flags
- elif arg in IGNORED_FLAGS:
- count = IGNORED_FLAGS[arg]
- for _ in range(count):
- next(args)
- # we don't care about extra warnings, but we should suppress ones
- # that we don't want to see.
- elif re.match(r'^-W.+', arg) and not re.match(r'^-Wno-.+', arg):
- pass
- # and consider everything else as compilation flag.
- else:
- result['flags'].append(arg)
-
- return result
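
The deleted runner.py above builds its analysis as a chain of small steps, each guarded by the require decorator and each calling the next step as a continuation; that logic now lives in libscanbuild/analyze.py. A condensed sketch of the pattern, with made-up step names (the real steps are arch_check, language_check, filter_debug_flags, run_analyzer):

# Condensed editor's sketch of the continuation-chain pattern from runner.py.
# 'final_step' and 'language_step' are illustrative names only.
import functools


def require(required):
    """ Stop the chain early when a needed key is missing from the state. """
    def decorator(function):
        @functools.wraps(function)
        def wrapper(opts, *args, **kwargs):
            for key in required:
                if key not in opts:
                    raise KeyError('{0} not passed to {1}'.format(
                        key, function.__name__))
            return function(opts, *args, **kwargs)
        return wrapper
    return decorator


@require(['file'])
def final_step(opts):
    return {'exit_code': 0, 'file': opts['file']}


@require(['language', 'file'])
def language_step(opts, continuation=final_step):
    if opts['language'] not in {'c', 'c++'}:
        return None  # break the chain, nothing to analyze
    return continuation(opts)


print(language_step({'language': 'c', 'file': 'main.c'}))
print(language_step({'language': 'java', 'file': 'Main.java'}))
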
diff --git a/tools/scan-build-py/tests/unit/__init__.py b/tools/scan-build-py/tests/unit/__init__.py
index dc8bf12eb47c1..6b7fd9fa0d027 100644
--- a/tools/scan-build-py/tests/unit/__init__.py
+++ b/tools/scan-build-py/tests/unit/__init__.py
@@ -7,7 +7,6 @@
from . import test_libear
from . import test_compilation
from . import test_clang
-from . import test_runner
from . import test_report
from . import test_analyze
from . import test_intercept
@@ -18,7 +17,6 @@ def load_tests(loader, suite, _):
suite.addTests(loader.loadTestsFromModule(test_libear))
suite.addTests(loader.loadTestsFromModule(test_compilation))
suite.addTests(loader.loadTestsFromModule(test_clang))
- suite.addTests(loader.loadTestsFromModule(test_runner))
suite.addTests(loader.loadTestsFromModule(test_report))
suite.addTests(loader.loadTestsFromModule(test_analyze))
suite.addTests(loader.loadTestsFromModule(test_intercept))
diff --git a/tools/scan-build-py/tests/unit/test_analyze.py b/tools/scan-build-py/tests/unit/test_analyze.py
index 481cc0c0993b9..a250ff22132cd 100644
--- a/tools/scan-build-py/tests/unit/test_analyze.py
+++ b/tools/scan-build-py/tests/unit/test_analyze.py
@@ -4,4 +4,332 @@
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
+import libear
import libscanbuild.analyze as sut
+import unittest
+import re
+import os
+import os.path
+
+
+class ReportDirectoryTest(unittest.TestCase):
+
+ # Test that successive report directory names ascend in lexicographic
+ # order. This is required so that report directories from two runs of
+ # scan-build can be easily matched up to compare results.
+ def test_directory_name_comparison(self):
+ with libear.TemporaryDirectory() as tmpdir, \
+ sut.report_directory(tmpdir, False) as report_dir1, \
+ sut.report_directory(tmpdir, False) as report_dir2, \
+ sut.report_directory(tmpdir, False) as report_dir3:
+ self.assertLess(report_dir1, report_dir2)
+ self.assertLess(report_dir2, report_dir3)
+
+
+class FilteringFlagsTest(unittest.TestCase):
+
+ def test_language_captured(self):
+ def test(flags):
+ cmd = ['clang', '-c', 'source.c'] + flags
+ opts = sut.classify_parameters(cmd)
+ return opts['language']
+
+ self.assertEqual(None, test([]))
+ self.assertEqual('c', test(['-x', 'c']))
+ self.assertEqual('cpp', test(['-x', 'cpp']))
+
+ def test_arch(self):
+ def test(flags):
+ cmd = ['clang', '-c', 'source.c'] + flags
+ opts = sut.classify_parameters(cmd)
+ return opts['arch_list']
+
+ self.assertEqual([], test([]))
+ self.assertEqual(['mips'], test(['-arch', 'mips']))
+ self.assertEqual(['mips', 'i386'],
+ test(['-arch', 'mips', '-arch', 'i386']))
+
+ def assertFlagsChanged(self, expected, flags):
+ cmd = ['clang', '-c', 'source.c'] + flags
+ opts = sut.classify_parameters(cmd)
+ self.assertEqual(expected, opts['flags'])
+
+ def assertFlagsUnchanged(self, flags):
+ self.assertFlagsChanged(flags, flags)
+
+ def assertFlagsFiltered(self, flags):
+ self.assertFlagsChanged([], flags)
+
+ def test_optimalizations_pass(self):
+ self.assertFlagsUnchanged(['-O'])
+ self.assertFlagsUnchanged(['-O1'])
+ self.assertFlagsUnchanged(['-Os'])
+ self.assertFlagsUnchanged(['-O2'])
+ self.assertFlagsUnchanged(['-O3'])
+
+ def test_include_pass(self):
+ self.assertFlagsUnchanged([])
+ self.assertFlagsUnchanged(['-include', '/usr/local/include'])
+ self.assertFlagsUnchanged(['-I.'])
+ self.assertFlagsUnchanged(['-I', '.'])
+ self.assertFlagsUnchanged(['-I/usr/local/include'])
+ self.assertFlagsUnchanged(['-I', '/usr/local/include'])
+ self.assertFlagsUnchanged(['-I/opt', '-I', '/opt/otp/include'])
+ self.assertFlagsUnchanged(['-isystem', '/path'])
+ self.assertFlagsUnchanged(['-isystem=/path'])
+
+ def test_define_pass(self):
+ self.assertFlagsUnchanged(['-DNDEBUG'])
+ self.assertFlagsUnchanged(['-UNDEBUG'])
+ self.assertFlagsUnchanged(['-Dvar1=val1', '-Dvar2=val2'])
+ self.assertFlagsUnchanged(['-Dvar="val ues"'])
+
+ def test_output_filtered(self):
+ self.assertFlagsFiltered(['-o', 'source.o'])
+
+ def test_some_warning_filtered(self):
+ self.assertFlagsFiltered(['-Wall'])
+ self.assertFlagsFiltered(['-Wnoexcept'])
+ self.assertFlagsFiltered(['-Wreorder', '-Wunused', '-Wundef'])
+ self.assertFlagsUnchanged(['-Wno-reorder', '-Wno-unused'])
+
+ def test_compile_only_flags_pass(self):
+ self.assertFlagsUnchanged(['-std=C99'])
+ self.assertFlagsUnchanged(['-nostdinc'])
+ self.assertFlagsUnchanged(['-isystem', '/image/debian'])
+ self.assertFlagsUnchanged(['-iprefix', '/usr/local'])
+ self.assertFlagsUnchanged(['-iquote=me'])
+ self.assertFlagsUnchanged(['-iquote', 'me'])
+
+ def test_compile_and_link_flags_pass(self):
+ self.assertFlagsUnchanged(['-fsinged-char'])
+ self.assertFlagsUnchanged(['-fPIC'])
+ self.assertFlagsUnchanged(['-stdlib=libc++'])
+ self.assertFlagsUnchanged(['--sysroot', '/'])
+ self.assertFlagsUnchanged(['-isysroot', '/'])
+
+ def test_some_flags_filtered(self):
+ self.assertFlagsFiltered(['-g'])
+ self.assertFlagsFiltered(['-fsyntax-only'])
+ self.assertFlagsFiltered(['-save-temps'])
+ self.assertFlagsFiltered(['-init', 'my_init'])
+ self.assertFlagsFiltered(['-sectorder', 'a', 'b', 'c'])
+
+
+class Spy(object):
+ def __init__(self):
+ self.arg = None
+ self.success = 0
+
+ def call(self, params):
+ self.arg = params
+ return self.success
+
+
+class RunAnalyzerTest(unittest.TestCase):
+
+ @staticmethod
+ def run_analyzer(content, failures_report):
+ with libear.TemporaryDirectory() as tmpdir:
+ filename = os.path.join(tmpdir, 'test.cpp')
+ with open(filename, 'w') as handle:
+ handle.write(content)
+
+ opts = {
+ 'clang': 'clang',
+ 'directory': os.getcwd(),
+ 'flags': [],
+ 'direct_args': [],
+ 'file': filename,
+ 'output_dir': tmpdir,
+ 'output_format': 'plist',
+ 'output_failures': failures_report
+ }
+ spy = Spy()
+ result = sut.run_analyzer(opts, spy.call)
+ return (result, spy.arg)
+
+ def test_run_analyzer(self):
+ content = "int div(int n, int d) { return n / d; }"
+ (result, fwds) = RunAnalyzerTest.run_analyzer(content, False)
+ self.assertEqual(None, fwds)
+ self.assertEqual(0, result['exit_code'])
+
+ def test_run_analyzer_crash(self):
+ content = "int div(int n, int d) { return n / d }"
+ (result, fwds) = RunAnalyzerTest.run_analyzer(content, False)
+ self.assertEqual(None, fwds)
+ self.assertEqual(1, result['exit_code'])
+
+ def test_run_analyzer_crash_and_forwarded(self):
+ content = "int div(int n, int d) { return n / d }"
+ (_, fwds) = RunAnalyzerTest.run_analyzer(content, True)
+ self.assertEqual(1, fwds['exit_code'])
+ self.assertTrue(len(fwds['error_output']) > 0)
+
+
+class ReportFailureTest(unittest.TestCase):
+
+ def assertUnderFailures(self, path):
+ self.assertEqual('failures', os.path.basename(os.path.dirname(path)))
+
+ def test_report_failure_create_files(self):
+ with libear.TemporaryDirectory() as tmpdir:
+ # create input file
+ filename = os.path.join(tmpdir, 'test.c')
+ with open(filename, 'w') as handle:
+ handle.write('int main() { return 0')
+ uname_msg = ' '.join(os.uname()) + os.linesep
+ error_msg = 'this is my error output'
+ # execute test
+ opts = {
+ 'clang': 'clang',
+ 'directory': os.getcwd(),
+ 'flags': [],
+ 'file': filename,
+ 'output_dir': tmpdir,
+ 'language': 'c',
+ 'error_type': 'other_error',
+ 'error_output': error_msg,
+ 'exit_code': 13
+ }
+ sut.report_failure(opts)
+ # verify the result
+ result = dict()
+ pp_file = None
+ for root, _, files in os.walk(tmpdir):
+ keys = [os.path.join(root, name) for name in files]
+ for key in keys:
+ with open(key, 'r') as handle:
+ result[key] = handle.readlines()
+ if re.match(r'^(.*/)+clang(.*)\.i$', key):
+ pp_file = key
+
+            # preprocessor file generated
+ self.assertUnderFailures(pp_file)
+ # info file generated and content dumped
+ info_file = pp_file + '.info.txt'
+ self.assertTrue(info_file in result)
+ self.assertEqual('Other Error\n', result[info_file][1])
+ self.assertEqual(uname_msg, result[info_file][3])
+ # error file generated and content dumped
+ error_file = pp_file + '.stderr.txt'
+ self.assertTrue(error_file in result)
+ self.assertEqual([error_msg], result[error_file])
+
+
+class AnalyzerTest(unittest.TestCase):
+
+ def test_nodebug_macros_appended(self):
+ def test(flags):
+ spy = Spy()
+ opts = {'flags': flags, 'force_debug': True}
+ self.assertEqual(spy.success,
+ sut.filter_debug_flags(opts, spy.call))
+ return spy.arg['flags']
+
+ self.assertEqual(['-UNDEBUG'], test([]))
+ self.assertEqual(['-DNDEBUG', '-UNDEBUG'], test(['-DNDEBUG']))
+ self.assertEqual(['-DSomething', '-UNDEBUG'], test(['-DSomething']))
+
+ def test_set_language_fall_through(self):
+ def language(expected, input):
+ spy = Spy()
+ input.update({'compiler': 'c', 'file': 'test.c'})
+ self.assertEqual(spy.success, sut.language_check(input, spy.call))
+ self.assertEqual(expected, spy.arg['language'])
+
+ language('c', {'language': 'c', 'flags': []})
+ language('c++', {'language': 'c++', 'flags': []})
+
+ def test_set_language_stops_on_not_supported(self):
+ spy = Spy()
+ input = {
+ 'compiler': 'c',
+ 'flags': [],
+ 'file': 'test.java',
+ 'language': 'java'
+ }
+ self.assertIsNone(sut.language_check(input, spy.call))
+ self.assertIsNone(spy.arg)
+
+ def test_set_language_sets_flags(self):
+ def flags(expected, input):
+ spy = Spy()
+ input.update({'compiler': 'c', 'file': 'test.c'})
+ self.assertEqual(spy.success, sut.language_check(input, spy.call))
+ self.assertEqual(expected, spy.arg['flags'])
+
+ flags(['-x', 'c'], {'language': 'c', 'flags': []})
+ flags(['-x', 'c++'], {'language': 'c++', 'flags': []})
+
+ def test_set_language_from_filename(self):
+ def language(expected, input):
+ spy = Spy()
+ input.update({'language': None, 'flags': []})
+ self.assertEqual(spy.success, sut.language_check(input, spy.call))
+ self.assertEqual(expected, spy.arg['language'])
+
+ language('c', {'file': 'file.c', 'compiler': 'c'})
+ language('c++', {'file': 'file.c', 'compiler': 'c++'})
+ language('c++', {'file': 'file.cxx', 'compiler': 'c'})
+ language('c++', {'file': 'file.cxx', 'compiler': 'c++'})
+ language('c++', {'file': 'file.cpp', 'compiler': 'c++'})
+ language('c-cpp-output', {'file': 'file.i', 'compiler': 'c'})
+ language('c++-cpp-output', {'file': 'file.i', 'compiler': 'c++'})
+
+ def test_arch_loop_sets_flags(self):
+ def flags(archs):
+ spy = Spy()
+ input = {'flags': [], 'arch_list': archs}
+ sut.arch_check(input, spy.call)
+ return spy.arg['flags']
+
+ self.assertEqual([], flags([]))
+ self.assertEqual(['-arch', 'i386'], flags(['i386']))
+ self.assertEqual(['-arch', 'i386'], flags(['i386', 'ppc']))
+ self.assertEqual(['-arch', 'sparc'], flags(['i386', 'sparc']))
+
+ def test_arch_loop_stops_on_not_supported(self):
+ def stop(archs):
+ spy = Spy()
+ input = {'flags': [], 'arch_list': archs}
+ self.assertIsNone(sut.arch_check(input, spy.call))
+ self.assertIsNone(spy.arg)
+
+ stop(['ppc'])
+ stop(['ppc64'])
+
+
+@sut.require([])
+def method_without_expecteds(opts):
+ return 0
+
+
+@sut.require(['this', 'that'])
+def method_with_expecteds(opts):
+ return 0
+
+
+@sut.require([])
+def method_exception_from_inside(opts):
+ raise Exception('here is one')
+
+
+class RequireDecoratorTest(unittest.TestCase):
+
+ def test_method_without_expecteds(self):
+ self.assertEqual(method_without_expecteds(dict()), 0)
+ self.assertEqual(method_without_expecteds({}), 0)
+ self.assertEqual(method_without_expecteds({'this': 2}), 0)
+ self.assertEqual(method_without_expecteds({'that': 3}), 0)
+
+ def test_method_with_expecteds(self):
+ self.assertRaises(KeyError, method_with_expecteds, dict())
+ self.assertRaises(KeyError, method_with_expecteds, {})
+ self.assertRaises(KeyError, method_with_expecteds, {'this': 2})
+ self.assertRaises(KeyError, method_with_expecteds, {'that': 3})
+ self.assertEqual(method_with_expecteds({'this': 0, 'that': 3}), 0)
+
+ def test_method_exception_not_caught(self):
+ self.assertRaises(Exception, method_exception_from_inside, dict())
diff --git a/tools/scan-build-py/tests/unit/test_intercept.py b/tools/scan-build-py/tests/unit/test_intercept.py
index 5b6ed2cee1f65..583d1c3da979f 100644
--- a/tools/scan-build-py/tests/unit/test_intercept.py
+++ b/tools/scan-build-py/tests/unit/test_intercept.py
@@ -65,11 +65,10 @@ class InterceptUtilTest(unittest.TestCase):
DISABLED = 'disabled'
OSX = 'darwin'
- LINUX = 'linux'
with libear.TemporaryDirectory() as tmpdir:
+ saved = os.environ['PATH']
try:
- saved = os.environ['PATH']
os.environ['PATH'] = tmpdir + ':' + saved
create_csrutil(tmpdir, ENABLED)
@@ -77,21 +76,14 @@ class InterceptUtilTest(unittest.TestCase):
create_csrutil(tmpdir, DISABLED)
self.assertFalse(sut.is_preload_disabled(OSX))
-
- create_sestatus(tmpdir, ENABLED)
- self.assertTrue(sut.is_preload_disabled(LINUX))
-
- create_sestatus(tmpdir, DISABLED)
- self.assertFalse(sut.is_preload_disabled(LINUX))
finally:
os.environ['PATH'] = saved
+ saved = os.environ['PATH']
try:
- saved = os.environ['PATH']
os.environ['PATH'] = ''
# shall be false when it's not in the path
self.assertFalse(sut.is_preload_disabled(OSX))
- self.assertFalse(sut.is_preload_disabled(LINUX))
self.assertFalse(sut.is_preload_disabled('unix'))
finally:
diff --git a/tools/scan-build-py/tests/unit/test_report.py b/tools/scan-build-py/tests/unit/test_report.py
index c82b5593e0dc7..c9436991561da 100644
--- a/tools/scan-build-py/tests/unit/test_report.py
+++ b/tools/scan-build-py/tests/unit/test_report.py
@@ -75,7 +75,7 @@ class ParseFileTest(unittest.TestCase):
'file.i.stderr.txt')
def test_parse_real_crash(self):
- import libscanbuild.runner as sut2
+ import libscanbuild.analyze as sut2
import re
with libear.TemporaryDirectory() as tmpdir:
filename = os.path.join(tmpdir, 'test.c')
@@ -146,16 +146,3 @@ class GetPrefixFromCompilationDatabaseTest(unittest.TestCase):
def test_empty(self):
self.assertEqual(
sut.commonprefix([]), '')
-
-class ReportDirectoryTest(unittest.TestCase):
-
- # Test that successive report directory names ascend in lexicographic
- # order. This is required so that report directories from two runs of
- # scan-build can be easily matched up to compare results.
- def test_directory_name_comparison(self):
- with libear.TemporaryDirectory() as tmpdir, \
- sut.report_directory(tmpdir, False) as report_dir1, \
- sut.report_directory(tmpdir, False) as report_dir2, \
- sut.report_directory(tmpdir, False) as report_dir3:
- self.assertLess(report_dir1, report_dir2)
- self.assertLess(report_dir2, report_dir3)
diff --git a/tools/scan-build-py/tests/unit/test_runner.py b/tools/scan-build-py/tests/unit/test_runner.py
deleted file mode 100644
index 2d09062233292..0000000000000
--- a/tools/scan-build-py/tests/unit/test_runner.py
+++ /dev/null
@@ -1,322 +0,0 @@
-# -*- coding: utf-8 -*-
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-
-import libear
-import libscanbuild.runner as sut
-import unittest
-import re
-import os
-import os.path
-
-
-class FilteringFlagsTest(unittest.TestCase):
-
- def test_language_captured(self):
- def test(flags):
- cmd = ['clang', '-c', 'source.c'] + flags
- opts = sut.classify_parameters(cmd)
- return opts['language']
-
- self.assertEqual(None, test([]))
- self.assertEqual('c', test(['-x', 'c']))
- self.assertEqual('cpp', test(['-x', 'cpp']))
-
- def test_arch(self):
- def test(flags):
- cmd = ['clang', '-c', 'source.c'] + flags
- opts = sut.classify_parameters(cmd)
- return opts['arch_list']
-
- self.assertEqual([], test([]))
- self.assertEqual(['mips'], test(['-arch', 'mips']))
- self.assertEqual(['mips', 'i386'],
- test(['-arch', 'mips', '-arch', 'i386']))
-
- def assertFlagsChanged(self, expected, flags):
- cmd = ['clang', '-c', 'source.c'] + flags
- opts = sut.classify_parameters(cmd)
- self.assertEqual(expected, opts['flags'])
-
- def assertFlagsUnchanged(self, flags):
- self.assertFlagsChanged(flags, flags)
-
- def assertFlagsFiltered(self, flags):
- self.assertFlagsChanged([], flags)
-
- def test_optimalizations_pass(self):
- self.assertFlagsUnchanged(['-O'])
- self.assertFlagsUnchanged(['-O1'])
- self.assertFlagsUnchanged(['-Os'])
- self.assertFlagsUnchanged(['-O2'])
- self.assertFlagsUnchanged(['-O3'])
-
- def test_include_pass(self):
- self.assertFlagsUnchanged([])
- self.assertFlagsUnchanged(['-include', '/usr/local/include'])
- self.assertFlagsUnchanged(['-I.'])
- self.assertFlagsUnchanged(['-I', '.'])
- self.assertFlagsUnchanged(['-I/usr/local/include'])
- self.assertFlagsUnchanged(['-I', '/usr/local/include'])
- self.assertFlagsUnchanged(['-I/opt', '-I', '/opt/otp/include'])
- self.assertFlagsUnchanged(['-isystem', '/path'])
- self.assertFlagsUnchanged(['-isystem=/path'])
-
- def test_define_pass(self):
- self.assertFlagsUnchanged(['-DNDEBUG'])
- self.assertFlagsUnchanged(['-UNDEBUG'])
- self.assertFlagsUnchanged(['-Dvar1=val1', '-Dvar2=val2'])
- self.assertFlagsUnchanged(['-Dvar="val ues"'])
-
- def test_output_filtered(self):
- self.assertFlagsFiltered(['-o', 'source.o'])
-
- def test_some_warning_filtered(self):
- self.assertFlagsFiltered(['-Wall'])
- self.assertFlagsFiltered(['-Wnoexcept'])
- self.assertFlagsFiltered(['-Wreorder', '-Wunused', '-Wundef'])
- self.assertFlagsUnchanged(['-Wno-reorder', '-Wno-unused'])
-
- def test_compile_only_flags_pass(self):
- self.assertFlagsUnchanged(['-std=C99'])
- self.assertFlagsUnchanged(['-nostdinc'])
- self.assertFlagsUnchanged(['-isystem', '/image/debian'])
- self.assertFlagsUnchanged(['-iprefix', '/usr/local'])
- self.assertFlagsUnchanged(['-iquote=me'])
- self.assertFlagsUnchanged(['-iquote', 'me'])
-
- def test_compile_and_link_flags_pass(self):
- self.assertFlagsUnchanged(['-fsinged-char'])
- self.assertFlagsUnchanged(['-fPIC'])
- self.assertFlagsUnchanged(['-stdlib=libc++'])
- self.assertFlagsUnchanged(['--sysroot', '/'])
- self.assertFlagsUnchanged(['-isysroot', '/'])
-
- def test_some_flags_filtered(self):
- self.assertFlagsFiltered(['-g'])
- self.assertFlagsFiltered(['-fsyntax-only'])
- self.assertFlagsFiltered(['-save-temps'])
- self.assertFlagsFiltered(['-init', 'my_init'])
- self.assertFlagsFiltered(['-sectorder', 'a', 'b', 'c'])
-
-
-class Spy(object):
- def __init__(self):
- self.arg = None
- self.success = 0
-
- def call(self, params):
- self.arg = params
- return self.success
-
-
-class RunAnalyzerTest(unittest.TestCase):
-
- @staticmethod
- def run_analyzer(content, failures_report):
- with libear.TemporaryDirectory() as tmpdir:
- filename = os.path.join(tmpdir, 'test.cpp')
- with open(filename, 'w') as handle:
- handle.write(content)
-
- opts = {
- 'clang': 'clang',
- 'directory': os.getcwd(),
- 'flags': [],
- 'direct_args': [],
- 'file': filename,
- 'output_dir': tmpdir,
- 'output_format': 'plist',
- 'output_failures': failures_report
- }
- spy = Spy()
- result = sut.run_analyzer(opts, spy.call)
- return (result, spy.arg)
-
- def test_run_analyzer(self):
- content = "int div(int n, int d) { return n / d; }"
- (result, fwds) = RunAnalyzerTest.run_analyzer(content, False)
- self.assertEqual(None, fwds)
- self.assertEqual(0, result['exit_code'])
-
- def test_run_analyzer_crash(self):
- content = "int div(int n, int d) { return n / d }"
- (result, fwds) = RunAnalyzerTest.run_analyzer(content, False)
- self.assertEqual(None, fwds)
- self.assertEqual(1, result['exit_code'])
-
- def test_run_analyzer_crash_and_forwarded(self):
- content = "int div(int n, int d) { return n / d }"
- (_, fwds) = RunAnalyzerTest.run_analyzer(content, True)
- self.assertEqual('crash', fwds['error_type'])
- self.assertEqual(1, fwds['exit_code'])
- self.assertTrue(len(fwds['error_output']) > 0)
-
-
-class ReportFailureTest(unittest.TestCase):
-
- def assertUnderFailures(self, path):
- self.assertEqual('failures', os.path.basename(os.path.dirname(path)))
-
- def test_report_failure_create_files(self):
- with libear.TemporaryDirectory() as tmpdir:
- # create input file
- filename = os.path.join(tmpdir, 'test.c')
- with open(filename, 'w') as handle:
- handle.write('int main() { return 0')
- uname_msg = ' '.join(os.uname()) + os.linesep
- error_msg = 'this is my error output'
- # execute test
- opts = {
- 'clang': 'clang',
- 'directory': os.getcwd(),
- 'flags': [],
- 'file': filename,
- 'output_dir': tmpdir,
- 'language': 'c',
- 'error_type': 'other_error',
- 'error_output': error_msg,
- 'exit_code': 13
- }
- sut.report_failure(opts)
- # verify the result
- result = dict()
- pp_file = None
- for root, _, files in os.walk(tmpdir):
- keys = [os.path.join(root, name) for name in files]
- for key in keys:
- with open(key, 'r') as handle:
- result[key] = handle.readlines()
- if re.match(r'^(.*/)+clang(.*)\.i$', key):
- pp_file = key
-
- # prepocessor file generated
- self.assertUnderFailures(pp_file)
- # info file generated and content dumped
- info_file = pp_file + '.info.txt'
- self.assertTrue(info_file in result)
- self.assertEqual('Other Error\n', result[info_file][1])
- self.assertEqual(uname_msg, result[info_file][3])
- # error file generated and content dumped
- error_file = pp_file + '.stderr.txt'
- self.assertTrue(error_file in result)
- self.assertEqual([error_msg], result[error_file])
-
-
-class AnalyzerTest(unittest.TestCase):
-
- def test_nodebug_macros_appended(self):
- def test(flags):
- spy = Spy()
- opts = {'flags': flags, 'force_debug': True}
- self.assertEqual(spy.success,
- sut.filter_debug_flags(opts, spy.call))
- return spy.arg['flags']
-
- self.assertEqual(['-UNDEBUG'], test([]))
- self.assertEqual(['-DNDEBUG', '-UNDEBUG'], test(['-DNDEBUG']))
- self.assertEqual(['-DSomething', '-UNDEBUG'], test(['-DSomething']))
-
- def test_set_language_fall_through(self):
- def language(expected, input):
- spy = Spy()
- input.update({'compiler': 'c', 'file': 'test.c'})
- self.assertEqual(spy.success, sut.language_check(input, spy.call))
- self.assertEqual(expected, spy.arg['language'])
-
- language('c', {'language': 'c', 'flags': []})
- language('c++', {'language': 'c++', 'flags': []})
-
- def test_set_language_stops_on_not_supported(self):
- spy = Spy()
- input = {
- 'compiler': 'c',
- 'flags': [],
- 'file': 'test.java',
- 'language': 'java'
- }
- self.assertIsNone(sut.language_check(input, spy.call))
- self.assertIsNone(spy.arg)
-
- def test_set_language_sets_flags(self):
- def flags(expected, input):
- spy = Spy()
- input.update({'compiler': 'c', 'file': 'test.c'})
- self.assertEqual(spy.success, sut.language_check(input, spy.call))
- self.assertEqual(expected, spy.arg['flags'])
-
- flags(['-x', 'c'], {'language': 'c', 'flags': []})
- flags(['-x', 'c++'], {'language': 'c++', 'flags': []})
-
- def test_set_language_from_filename(self):
- def language(expected, input):
- spy = Spy()
- input.update({'language': None, 'flags': []})
- self.assertEqual(spy.success, sut.language_check(input, spy.call))
- self.assertEqual(expected, spy.arg['language'])
-
- language('c', {'file': 'file.c', 'compiler': 'c'})
- language('c++', {'file': 'file.c', 'compiler': 'c++'})
- language('c++', {'file': 'file.cxx', 'compiler': 'c'})
- language('c++', {'file': 'file.cxx', 'compiler': 'c++'})
- language('c++', {'file': 'file.cpp', 'compiler': 'c++'})
- language('c-cpp-output', {'file': 'file.i', 'compiler': 'c'})
- language('c++-cpp-output', {'file': 'file.i', 'compiler': 'c++'})
-
- def test_arch_loop_sets_flags(self):
- def flags(archs):
- spy = Spy()
- input = {'flags': [], 'arch_list': archs}
- sut.arch_check(input, spy.call)
- return spy.arg['flags']
-
- self.assertEqual([], flags([]))
- self.assertEqual(['-arch', 'i386'], flags(['i386']))
- self.assertEqual(['-arch', 'i386'], flags(['i386', 'ppc']))
- self.assertEqual(['-arch', 'sparc'], flags(['i386', 'sparc']))
-
- def test_arch_loop_stops_on_not_supported(self):
- def stop(archs):
- spy = Spy()
- input = {'flags': [], 'arch_list': archs}
- self.assertIsNone(sut.arch_check(input, spy.call))
- self.assertIsNone(spy.arg)
-
- stop(['ppc'])
- stop(['ppc64'])
-
-
-@sut.require([])
-def method_without_expecteds(opts):
- return 0
-
-
-@sut.require(['this', 'that'])
-def method_with_expecteds(opts):
- return 0
-
-
-@sut.require([])
-def method_exception_from_inside(opts):
- raise Exception('here is one')
-
-
-class RequireDecoratorTest(unittest.TestCase):
-
- def test_method_without_expecteds(self):
- self.assertEqual(method_without_expecteds(dict()), 0)
- self.assertEqual(method_without_expecteds({}), 0)
- self.assertEqual(method_without_expecteds({'this': 2}), 0)
- self.assertEqual(method_without_expecteds({'that': 3}), 0)
-
- def test_method_with_expecteds(self):
- self.assertRaises(KeyError, method_with_expecteds, dict())
- self.assertRaises(KeyError, method_with_expecteds, {})
- self.assertRaises(KeyError, method_with_expecteds, {'this': 2})
- self.assertRaises(KeyError, method_with_expecteds, {'that': 3})
- self.assertEqual(method_with_expecteds({'this': 0, 'that': 3}), 0)
-
- def test_method_exception_not_caught(self):
- self.assertRaises(Exception, method_exception_from_inside, dict())