Diffstat (limited to 'tools/scan-build-py/libscanbuild')
-rw-r--r--  tools/scan-build-py/libscanbuild/analyze.py       39
-rw-r--r--  tools/scan-build-py/libscanbuild/command.py      133
-rw-r--r--  tools/scan-build-py/libscanbuild/compilation.py  141
-rw-r--r--  tools/scan-build-py/libscanbuild/intercept.py     58
-rw-r--r--  tools/scan-build-py/libscanbuild/report.py         7
-rw-r--r--  tools/scan-build-py/libscanbuild/runner.py       217
6 files changed, 328 insertions, 267 deletions
diff --git a/tools/scan-build-py/libscanbuild/analyze.py b/tools/scan-build-py/libscanbuild/analyze.py
index 0d3547befeef4..0ed0aef838737 100644
--- a/tools/scan-build-py/libscanbuild/analyze.py
+++ b/tools/scan-build-py/libscanbuild/analyze.py
@@ -25,8 +25,7 @@ from libscanbuild.runner import run
from libscanbuild.intercept import capture
from libscanbuild.report import report_directory, document
from libscanbuild.clang import get_checkers
-from libscanbuild.runner import action_check
-from libscanbuild.command import classify_parameters, classify_source
+from libscanbuild.compilation import split_command
__all__ = ['analyze_build_main', 'analyze_build_wrapper']
@@ -106,7 +105,8 @@ def run_analyzer(args, output_dir):
'output_dir': output_dir,
'output_format': args.output_format,
'output_failures': args.output_failures,
- 'direct_args': analyzer_params(args)
+ 'direct_args': analyzer_params(args),
+ 'force_debug': args.force_debug
}
logging.debug('run analyzer against compilation database')
@@ -138,7 +138,8 @@ def setup_environment(args, destination, bin_dir):
'ANALYZE_BUILD_REPORT_DIR': destination,
'ANALYZE_BUILD_REPORT_FORMAT': args.output_format,
'ANALYZE_BUILD_REPORT_FAILURES': 'yes' if args.output_failures else '',
- 'ANALYZE_BUILD_PARAMETERS': ' '.join(analyzer_params(args))
+ 'ANALYZE_BUILD_PARAMETERS': ' '.join(analyzer_params(args)),
+ 'ANALYZE_BUILD_FORCE_DEBUG': 'yes' if args.force_debug else ''
})
return environment
@@ -160,30 +161,34 @@ def analyze_build_wrapper(cplusplus):
return result
# ... and run the analyzer if all went well.
try:
+ # check whether it is a compilation
+ compilation = split_command(sys.argv)
+ if compilation is None:
+ return result
# collect the needed parameters from environment, crash when missing
- consts = {
+ parameters = {
'clang': os.getenv('ANALYZE_BUILD_CLANG'),
'output_dir': os.getenv('ANALYZE_BUILD_REPORT_DIR'),
'output_format': os.getenv('ANALYZE_BUILD_REPORT_FORMAT'),
'output_failures': os.getenv('ANALYZE_BUILD_REPORT_FAILURES'),
'direct_args': os.getenv('ANALYZE_BUILD_PARAMETERS',
'').split(' '),
+ 'force_debug': os.getenv('ANALYZE_BUILD_FORCE_DEBUG'),
'directory': os.getcwd(),
+ 'command': [sys.argv[0], '-c'] + compilation.flags
}
- # get relevant parameters from command line arguments
- args = classify_parameters(sys.argv)
- filenames = args.pop('files', [])
- for filename in (name for name in filenames if classify_source(name)):
- parameters = dict(args, file=filename, **consts)
+ # call static analyzer against the compilation
+ for source in compilation.files:
+ parameters.update({'file': source})
logging.debug('analyzer parameters %s', parameters)
- current = action_check(parameters)
+ current = run(parameters)
# display error message from the static analyzer
if current is not None:
for line in current['error_output']:
logging.info(line.rstrip())
except Exception:
logging.exception("run analyzer inside compiler wrapper failed.")
- return 0
+ return result
def analyzer_params(args):
@@ -203,8 +208,8 @@ def analyzer_params(args):
if args.store_model:
result.append('-analyzer-store={0}'.format(args.store_model))
if args.constraints_model:
- result.append(
- '-analyzer-constraints={0}'.format(args.constraints_model))
+ result.append('-analyzer-constraints={0}'.format(
+ args.constraints_model))
if args.internal_stats:
result.append('-analyzer-stats')
if args.analyze_headers:
@@ -450,6 +455,12 @@ def create_parser(from_build_command):
Could be useful when the project contains 3rd party libraries.
The directory path shall be absolute path as file names in
the compilation database.""")
+ advanced.add_argument(
+ '--force-analyze-debug-code',
+ dest='force_debug',
+ action='store_true',
+ help="""Tells analyzer to enable assertions in code even if they were
+ disabled during compilation, enabling more precise results.""")
plugins = parser.add_argument_group('checker options')
plugins.add_argument(
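As a reading aid (not part of the patch): the rewritten analyze_build_wrapper above now asks split_command whether the intercepted invocation is a compilation at all, and only then drives the analyzer per source file via runner.run. A minimal sketch of that flow, assuming the ANALYZE_BUILD_* variables set by setup_environment; the function name and argv value are hypothetical:

    # hedged sketch of the wrapper flow; analyze_after_compile is not a real API
    import os
    from libscanbuild.compilation import split_command
    from libscanbuild.runner import run

    def analyze_after_compile(argv, compiler_exit_code):
        compilation = split_command(argv)
        if compilation is None:           # not a compilation: nothing to analyze
            return compiler_exit_code     # the wrapper keeps the compiler's exit code
        parameters = {
            'clang': os.getenv('ANALYZE_BUILD_CLANG'),
            'output_dir': os.getenv('ANALYZE_BUILD_REPORT_DIR'),
            'output_format': os.getenv('ANALYZE_BUILD_REPORT_FORMAT'),
            'output_failures': os.getenv('ANALYZE_BUILD_REPORT_FAILURES'),
            'direct_args': os.getenv('ANALYZE_BUILD_PARAMETERS', '').split(' '),
            'force_debug': os.getenv('ANALYZE_BUILD_FORCE_DEBUG'),
            'directory': os.getcwd(),
            'command': [argv[0], '-c'] + compilation.flags,
        }
        for source in compilation.files:
            run(dict(parameters, file=source))   # analyze each source file separately
        return compiler_exit_code
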
diff --git a/tools/scan-build-py/libscanbuild/command.py b/tools/scan-build-py/libscanbuild/command.py
deleted file mode 100644
index 69ca3393f955e..0000000000000
--- a/tools/scan-build-py/libscanbuild/command.py
+++ /dev/null
@@ -1,133 +0,0 @@
-# -*- coding: utf-8 -*-
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-""" This module is responsible for to parse a compiler invocation. """
-
-import re
-import os
-
-__all__ = ['Action', 'classify_parameters', 'classify_source']
-
-
-class Action(object):
- """ Enumeration class for compiler action. """
-
- Link, Compile, Ignored = range(3)
-
-
-def classify_parameters(command):
- """ Parses the command line arguments of the given invocation. """
-
- # result value of this method.
- # some value are preset, some will be set only when found.
- result = {
- 'action': Action.Link,
- 'files': [],
- 'output': None,
- 'compile_options': [],
- 'c++': is_cplusplus_compiler(command[0])
- # archs_seen
- # language
- }
-
- # data structure to ignore compiler parameters.
- # key: parameter name, value: number of parameters to ignore afterwards.
- ignored = {
- '-g': 0,
- '-fsyntax-only': 0,
- '-save-temps': 0,
- '-install_name': 1,
- '-exported_symbols_list': 1,
- '-current_version': 1,
- '-compatibility_version': 1,
- '-init': 1,
- '-e': 1,
- '-seg1addr': 1,
- '-bundle_loader': 1,
- '-multiply_defined': 1,
- '-sectorder': 3,
- '--param': 1,
- '--serialize-diagnostics': 1
- }
-
- args = iter(command[1:])
- for arg in args:
- # compiler action parameters are the most important ones...
- if arg in {'-E', '-S', '-cc1', '-M', '-MM', '-###'}:
- result.update({'action': Action.Ignored})
- elif arg == '-c':
- result.update({'action': max(result['action'], Action.Compile)})
- # arch flags are taken...
- elif arg == '-arch':
- archs = result.get('archs_seen', [])
- result.update({'archs_seen': archs + [next(args)]})
- # explicit language option taken...
- elif arg == '-x':
- result.update({'language': next(args)})
- # output flag taken...
- elif arg == '-o':
- result.update({'output': next(args)})
- # warning disable options are taken...
- elif re.match(r'^-Wno-', arg):
- result['compile_options'].append(arg)
- # warning options are ignored...
- elif re.match(r'^-[mW].+', arg):
- pass
- # some preprocessor parameters are ignored...
- elif arg in {'-MD', '-MMD', '-MG', '-MP'}:
- pass
- elif arg in {'-MF', '-MT', '-MQ'}:
- next(args)
- # linker options are ignored...
- elif arg in {'-static', '-shared', '-s', '-rdynamic'} or \
- re.match(r'^-[lL].+', arg):
- pass
- elif arg in {'-l', '-L', '-u', '-z', '-T', '-Xlinker'}:
- next(args)
- # some other options are ignored...
- elif arg in ignored.keys():
- for _ in range(ignored[arg]):
- next(args)
- # parameters which looks source file are taken...
- elif re.match(r'^[^-].+', arg) and classify_source(arg):
- result['files'].append(arg)
- # and consider everything else as compile option.
- else:
- result['compile_options'].append(arg)
-
- return result
-
-
-def classify_source(filename, cplusplus=False):
- """ Return the language from file name extension. """
-
- mapping = {
- '.c': 'c++' if cplusplus else 'c',
- '.i': 'c++-cpp-output' if cplusplus else 'c-cpp-output',
- '.ii': 'c++-cpp-output',
- '.m': 'objective-c',
- '.mi': 'objective-c-cpp-output',
- '.mm': 'objective-c++',
- '.mii': 'objective-c++-cpp-output',
- '.C': 'c++',
- '.cc': 'c++',
- '.CC': 'c++',
- '.cp': 'c++',
- '.cpp': 'c++',
- '.cxx': 'c++',
- '.c++': 'c++',
- '.C++': 'c++',
- '.txx': 'c++'
- }
-
- __, extension = os.path.splitext(os.path.basename(filename))
- return mapping.get(extension)
-
-
-def is_cplusplus_compiler(name):
- """ Returns true when the compiler name refer to a C++ compiler. """
-
- match = re.match(r'^([^/]*/)*(\w*-)*(\w+\+\+)(-(\d+(\.\d+){0,3}))?$', name)
- return False if match is None else True
diff --git a/tools/scan-build-py/libscanbuild/compilation.py b/tools/scan-build-py/libscanbuild/compilation.py
new file mode 100644
index 0000000000000..ef906fa60b9be
--- /dev/null
+++ b/tools/scan-build-py/libscanbuild/compilation.py
@@ -0,0 +1,141 @@
+# -*- coding: utf-8 -*-
+# The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+""" This module is responsible for to parse a compiler invocation. """
+
+import re
+import os
+import collections
+
+__all__ = ['split_command', 'classify_source', 'compiler_language']
+
+# Ignored compiler options map for compilation database creation.
+# The map is used by the `split_command` method, which ignores and classifies
+# parameters. Please note that these are not the only parameters which
+# might be ignored.
+#
+# Keys are the option names; values are the number of following options to skip
+IGNORED_FLAGS = {
+ # compile-only flag, ignored because the creator of the compilation
+ # database will explicitly set it.
+ '-c': 0,
+ # preprocessor macro flags, ignored because they would cause duplicate
+ # entries in the output (the only difference would be these flags). This is
+ # an actual finding from users, who suffered longer execution times caused
+ # by the duplicates.
+ '-MD': 0,
+ '-MMD': 0,
+ '-MG': 0,
+ '-MP': 0,
+ '-MF': 1,
+ '-MT': 1,
+ '-MQ': 1,
+ # linker options, ignored because the compilation database will contain
+ # compilation commands only, so the compiler would ignore these flags
+ # anyway. The benefit of removing them is to make the output more
+ # readable.
+ '-static': 0,
+ '-shared': 0,
+ '-s': 0,
+ '-rdynamic': 0,
+ '-l': 1,
+ '-L': 1,
+ '-u': 1,
+ '-z': 1,
+ '-T': 1,
+ '-Xlinker': 1
+}
+
+# Known C/C++ compiler executable name patterns
+COMPILER_PATTERNS = frozenset([
+ re.compile(r'^(intercept-|analyze-|)c(c|\+\+)$'),
+ re.compile(r'^([^-]*-)*[mg](cc|\+\+)(-\d+(\.\d+){0,2})?$'),
+ re.compile(r'^([^-]*-)*clang(\+\+)?(-\d+(\.\d+){0,2})?$'),
+ re.compile(r'^llvm-g(cc|\+\+)$'),
+])
+
+
+def split_command(command):
+ """ Returns a value when the command is a compilation, None otherwise.
+
+ The value on success is a named tuple with the following attributes:
+
+ files: list of source files
+ flags: list of compile options
+ compiler: string value of 'c' or 'c++' """
+
+ # the result of this method
+ result = collections.namedtuple('Compilation',
+ ['compiler', 'flags', 'files'])
+ result.compiler = compiler_language(command)
+ result.flags = []
+ result.files = []
+ # quit right now, if the program was not a C/C++ compiler
+ if not result.compiler:
+ return None
+ # iterate on the compile options
+ args = iter(command[1:])
+ for arg in args:
+ # quit when no compilation pass is involved
+ if arg in {'-E', '-S', '-cc1', '-M', '-MM', '-###'}:
+ return None
+ # ignore some flags
+ elif arg in IGNORED_FLAGS:
+ count = IGNORED_FLAGS[arg]
+ for _ in range(count):
+ next(args)
+ elif re.match(r'^-(l|L|Wl,).+', arg):
+ pass
+ # some parameters could look like a filename; take them as compile options
+ elif arg in {'-D', '-I'}:
+ result.flags.extend([arg, next(args)])
+ # parameters which look like source files are taken...
+ elif re.match(r'^[^-].+', arg) and classify_source(arg):
+ result.files.append(arg)
+ # and consider everything else as a compile option.
+ else:
+ result.flags.append(arg)
+ # do extra check on number of source files
+ return result if result.files else None
+
+
+def classify_source(filename, c_compiler=True):
+ """ Return the language from file name extension. """
+
+ mapping = {
+ '.c': 'c' if c_compiler else 'c++',
+ '.i': 'c-cpp-output' if c_compiler else 'c++-cpp-output',
+ '.ii': 'c++-cpp-output',
+ '.m': 'objective-c',
+ '.mi': 'objective-c-cpp-output',
+ '.mm': 'objective-c++',
+ '.mii': 'objective-c++-cpp-output',
+ '.C': 'c++',
+ '.cc': 'c++',
+ '.CC': 'c++',
+ '.cp': 'c++',
+ '.cpp': 'c++',
+ '.cxx': 'c++',
+ '.c++': 'c++',
+ '.C++': 'c++',
+ '.txx': 'c++'
+ }
+
+ __, extension = os.path.splitext(os.path.basename(filename))
+ return mapping.get(extension)
+
+
+def compiler_language(command):
+ """ A predicate to decide the command is a compiler call or not.
+
+ Returns 'c' or 'c++' when it match. None otherwise. """
+
+ cplusplus = re.compile(r'^(.+)(\+\+)(-.+|)$')
+
+ if command:
+ executable = os.path.basename(command[0])
+ if any(pattern.match(executable) for pattern in COMPILER_PATTERNS):
+ return 'c++' if cplusplus.match(executable) else 'c'
+ return None
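A rough usage sketch of the new module (the command lines below are made up; the expected values are inferred from the code above and are approximate):

    from libscanbuild.compilation import (split_command, classify_source,
                                          compiler_language)

    # a compile call: compiler is recognized, -c is dropped, the source is split out
    compilation = split_command(['g++', '-c', '-Ivendor/include', 'tool.cpp'])
    # roughly: compiler='c++', flags=['-Ivendor/include'], files=['tool.cpp']
    print(compilation.compiler, compilation.flags, compilation.files)

    print(split_command(['g++', '-E', 'tool.cpp']))      # None: preprocessing only
    print(split_command(['ld', '-o', 'a.out', 'x.o']))   # None: not a compiler call

    print(classify_source('main.c'))                      # 'c'
    print(classify_source('main.c', c_compiler=False))    # 'c++'
    print(compiler_language(['clang++', '--version']))    # 'c++'
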
diff --git a/tools/scan-build-py/libscanbuild/intercept.py b/tools/scan-build-py/libscanbuild/intercept.py
index 6062e2ea8ca9f..6a9f75349fb58 100644
--- a/tools/scan-build-py/libscanbuild/intercept.py
+++ b/tools/scan-build-py/libscanbuild/intercept.py
@@ -31,9 +31,9 @@ import argparse
import logging
import subprocess
from libear import build_libear, TemporaryDirectory
-from libscanbuild import duplicate_check, tempdir, initialize_logging
from libscanbuild import command_entry_point
-from libscanbuild.command import Action, classify_parameters
+from libscanbuild import duplicate_check, tempdir, initialize_logging
+from libscanbuild.compilation import split_command
from libscanbuild.shell import encode, decode
__all__ = ['capture', 'intercept_build_main', 'intercept_build_wrapper']
@@ -72,23 +72,23 @@ def capture(args, bin_dir):
from the arguments. And do shell escaping on the command.
To support incremental builds, it is desired to read elements from
- an existing compilation database from a previous run. These elemets
+ an existing compilation database from a previous run. These elements
shall be merged with the new elements. """
# create entries from the current run
current = itertools.chain.from_iterable(
# creates a sequence of entry generators from an exec,
- # but filter out non compiler calls before.
- (format_entry(x) for x in commands if is_compiler_call(x)))
+ format_entry(command) for command in commands)
# read entries from previous run
- if 'append' in args and args.append and os.path.exists(args.cdb):
+ if 'append' in args and args.append and os.path.isfile(args.cdb):
with open(args.cdb) as handle:
previous = iter(json.load(handle))
else:
previous = iter([])
# filter out duplicate entries from both
duplicate = duplicate_check(entry_hash)
- return (entry for entry in itertools.chain(previous, current)
+ return (entry
+ for entry in itertools.chain(previous, current)
if os.path.exists(entry['file']) and not duplicate(entry))
with TemporaryDirectory(prefix='intercept-', dir=tempdir()) as tmp_dir:
@@ -98,14 +98,14 @@ def capture(args, bin_dir):
exit_code = subprocess.call(args.build, env=environment)
logging.info('build finished with exit code: %d', exit_code)
# read the intercepted exec calls
- commands = itertools.chain.from_iterable(
+ exec_traces = itertools.chain.from_iterable(
parse_exec_trace(os.path.join(tmp_dir, filename))
for filename in sorted(glob.iglob(os.path.join(tmp_dir, '*.cmd'))))
# do post processing only if that was requested
if 'raw_entries' not in args or not args.raw_entries:
- entries = post_processing(commands)
+ entries = post_processing(exec_traces)
else:
- entries = commands
+ entries = exec_traces
# dump the compilation database
with open(args.cdb, 'w+') as handle:
json.dump(list(entries), handle, sort_keys=True, indent=4)
@@ -209,7 +209,7 @@ def parse_exec_trace(filename):
}
-def format_entry(entry):
+def format_entry(exec_trace):
""" Generate the desired fields for compilation database entries. """
def abspath(cwd, name):
@@ -217,40 +217,20 @@ def format_entry(entry):
fullname = name if os.path.isabs(name) else os.path.join(cwd, name)
return os.path.normpath(fullname)
- logging.debug('format this command: %s', entry['command'])
- atoms = classify_parameters(entry['command'])
- if atoms['action'] <= Action.Compile:
- for source in atoms['files']:
- compiler = 'c++' if atoms['c++'] else 'cc'
- flags = atoms['compile_options']
- flags += ['-o', atoms['output']] if atoms['output'] else []
- flags += ['-x', atoms['language']] if 'language' in atoms else []
- flags += [elem
- for arch in atoms.get('archs_seen', [])
- for elem in ['-arch', arch]]
- command = [compiler, '-c'] + flags + [source]
+ logging.debug('format this command: %s', exec_trace['command'])
+ compilation = split_command(exec_trace['command'])
+ if compilation:
+ for source in compilation.files:
+ compiler = 'c++' if compilation.compiler == 'c++' else 'cc'
+ command = [compiler, '-c'] + compilation.flags + [source]
logging.debug('formatted as: %s', command)
yield {
- 'directory': entry['directory'],
+ 'directory': exec_trace['directory'],
'command': encode(command),
- 'file': abspath(entry['directory'], source)
+ 'file': abspath(exec_trace['directory'], source)
}
-def is_compiler_call(entry):
- """ A predicate to decide the entry is a compiler call or not. """
-
- patterns = [
- re.compile(r'^([^/]*/)*intercept-c(c|\+\+)$'),
- re.compile(r'^([^/]*/)*c(c|\+\+)$'),
- re.compile(r'^([^/]*/)*([^-]*-)*[mg](cc|\+\+)(-\d+(\.\d+){0,2})?$'),
- re.compile(r'^([^/]*/)*([^-]*-)*clang(\+\+)?(-\d+(\.\d+){0,2})?$'),
- re.compile(r'^([^/]*/)*llvm-g(cc|\+\+)$'),
- ]
- executable = entry['command'][0]
- return any((pattern.match(executable) for pattern in patterns))
-
-
def is_preload_disabled(platform):
""" Library-based interposition will fail silently if SIP is enabled,
so this should be detected. You can detect whether SIP is enabled on
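With the is_compiler_call predicate gone, format_entry itself decides (via split_command) whether an exec trace is worth an entry. A small sketch, assuming libscanbuild is importable; the trace values are made up:

    from libscanbuild.intercept import format_entry

    trace = {'directory': '/home/user/project',
             'command': ['cc', '-c', '-Iinclude', '-o', 'main.o', 'main.c']}
    for entry in format_entry(trace):
        print(entry)
    # roughly: {'directory': '/home/user/project',
    #           'command': 'cc -c -Iinclude -o main.o main.c',
    #           'file': '/home/user/project/main.c'}

    # non-compiler calls now simply yield no entries
    print(list(format_entry({'directory': '/tmp',
                             'command': ['ld', '-o', 'a.out', 'x.o']})))  # []
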
diff --git a/tools/scan-build-py/libscanbuild/report.py b/tools/scan-build-py/libscanbuild/report.py
index efc0a55de619e..5c33319e206df 100644
--- a/tools/scan-build-py/libscanbuild/report.py
+++ b/tools/scan-build-py/libscanbuild/report.py
@@ -35,7 +35,12 @@ def report_directory(hint, keep):
keep -- a boolean value to keep or delete the empty report directory. """
stamp = time.strftime('scan-build-%Y-%m-%d-%H%M%S-', time.localtime())
- name = tempfile.mkdtemp(prefix=stamp, dir=hint)
+
+ parentdir = os.path.abspath(hint)
+ if not os.path.exists(parentdir):
+ os.makedirs(parentdir)
+
+ name = tempfile.mkdtemp(prefix=stamp, dir=parentdir)
logging.info('Report directory created: %s', name)
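The report.py change makes scan-build create the requested output directory (including parents) before asking tempfile for a unique report directory inside it, instead of letting mkdtemp fail on a missing path. A standalone sketch of the same idea; the default hint path is made up:

    import os
    import time
    import tempfile

    def report_directory(hint='/tmp/scan-build-output'):
        stamp = time.strftime('scan-build-%Y-%m-%d-%H%M%S-', time.localtime())
        parentdir = os.path.abspath(hint)
        if not os.path.exists(parentdir):
            os.makedirs(parentdir)                 # create missing parents first
        return tempfile.mkdtemp(prefix=stamp, dir=parentdir)
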
diff --git a/tools/scan-build-py/libscanbuild/runner.py b/tools/scan-build-py/libscanbuild/runner.py
index 248ca90ad3e62..628ad90d627a4 100644
--- a/tools/scan-build-py/libscanbuild/runner.py
+++ b/tools/scan-build-py/libscanbuild/runner.py
@@ -5,18 +5,44 @@
# License. See LICENSE.TXT for details.
""" This module is responsible to run the analyzer commands. """
+import re
import os
import os.path
import tempfile
import functools
import subprocess
import logging
-from libscanbuild.command import classify_parameters, Action, classify_source
-from libscanbuild.clang import get_arguments, get_version
+from libscanbuild.compilation import classify_source, compiler_language
+from libscanbuild.clang import get_version, get_arguments
from libscanbuild.shell import decode
__all__ = ['run']
+# To get good results from the static analyzer, certain compiler options shall
+# be omitted. The compiler flag filtering only affects the static analyzer run.
+#
+# Keys are the option names; values are the number of following options to skip
+IGNORED_FLAGS = {
+ '-c': 0, # compile option will be overwritten
+ '-fsyntax-only': 0, # static analyzer option will be overwritten
+ '-o': 1, # will set up own output file
+ # flags below are inherited from the perl implementation.
+ '-g': 0,
+ '-save-temps': 0,
+ '-install_name': 1,
+ '-exported_symbols_list': 1,
+ '-current_version': 1,
+ '-compatibility_version': 1,
+ '-init': 1,
+ '-e': 1,
+ '-seg1addr': 1,
+ '-bundle_loader': 1,
+ '-multiply_defined': 1,
+ '-sectorder': 3,
+ '--param': 1,
+ '--serialize-diagnostics': 1
+}
+
def require(required):
""" Decorator for checking the required values in state.
@@ -29,8 +55,8 @@ def require(required):
def wrapper(*args, **kwargs):
for key in required:
if key not in args[0]:
- raise KeyError(
- '{0} not passed to {1}'.format(key, function.__name__))
+ raise KeyError('{0} not passed to {1}'.format(
+ key, function.__name__))
return function(*args, **kwargs)
@@ -39,9 +65,15 @@ def require(required):
return decorator
-@require(['command', 'directory', 'file', # an entry from compilation database
- 'clang', 'direct_args', # compiler name, and arguments from command
- 'output_dir', 'output_format', 'output_failures'])
+@require(['command', # entry from compilation database
+ 'directory', # entry from compilation database
+ 'file', # entry from compilation database
+ 'clang', # clang executable name (and path)
+ 'direct_args', # arguments from command line
+ 'force_debug', # undefine NDEBUG to re-enable assertions
+ 'output_dir', # where generated report files shall go
+ 'output_format', # it's 'plist' or 'html' or both
+ 'output_failures']) # generate crash reports or not
def run(opts):
""" Entry point to run (or not) static analyzer against a single entry
of the compilation database.
@@ -57,16 +89,17 @@ def run(opts):
try:
command = opts.pop('command')
+ command = command if isinstance(command, list) else decode(command)
logging.debug("Run analyzer against '%s'", command)
- opts.update(classify_parameters(decode(command)))
+ opts.update(classify_parameters(command))
- return action_check(opts)
+ return arch_check(opts)
except Exception:
logging.error("Problem occured during analyzis.", exc_info=1)
return None
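run() now accepts the command either as a list or as a shell-encoded string and passes the classified options down the chain of small steps below (arch_check, language_check, and so on). A hedged sketch of calling it for one compilation-database-style entry; all paths and values are made up:

    from libscanbuild.runner import run

    entry = {
        'directory': '/home/user/project',
        'file': 'main.c',
        'command': ['cc', '-c', 'main.c', '-o', 'main.o'],  # list or encoded string
        'clang': 'clang',
        'direct_args': [],            # extra -analyzer-* flags, none here
        'force_debug': False,         # do not append -UNDEBUG
        'output_dir': '/tmp/scan-build-out',
        'output_format': 'html',
        'output_failures': False,
    }
    result = run(entry)
    # a dict with 'error_output' and 'exit_code' when the analyzer ran,
    # or None when the entry was skipped or an error occurred
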
-@require(['report', 'directory', 'clang', 'output_dir', 'language', 'file',
+@require(['clang', 'directory', 'flags', 'file', 'output_dir', 'language',
'error_type', 'error_output', 'exit_code'])
def report_failure(opts):
""" Create report when analyzer failed.
@@ -95,36 +128,49 @@ def report_failure(opts):
dir=destination(opts))
os.close(handle)
cwd = opts['directory']
- cmd = get_arguments([opts['clang']] + opts['report'] + ['-o', name], cwd)
+ cmd = get_arguments([opts['clang'], '-fsyntax-only', '-E'] +
+ opts['flags'] + [opts['file'], '-o', name], cwd)
logging.debug('exec command in %s: %s', cwd, ' '.join(cmd))
subprocess.call(cmd, cwd=cwd)
-
+ # write general information about the crash
with open(name + '.info.txt', 'w') as handle:
handle.write(opts['file'] + os.linesep)
handle.write(error.title().replace('_', ' ') + os.linesep)
handle.write(' '.join(cmd) + os.linesep)
handle.write(' '.join(os.uname()) + os.linesep)
- handle.write(get_version(cmd[0]))
+ handle.write(get_version(opts['clang']))
handle.close()
-
+ # write the captured output too
with open(name + '.stderr.txt', 'w') as handle:
handle.writelines(opts['error_output'])
handle.close()
-
+ # return with the previous step exit code and output
return {
'error_output': opts['error_output'],
'exit_code': opts['exit_code']
}
-@require(['clang', 'analyze', 'directory', 'output'])
+@require(['clang', 'directory', 'flags', 'direct_args', 'file', 'output_dir',
+ 'output_format'])
def run_analyzer(opts, continuation=report_failure):
""" It assembles the analysis command line and executes it. Capture the
output of the analysis and returns with it. If failure reports are
requested, it calls the continuation to generate it. """
+ def output():
+ """ Creates output file name for reports. """
+ if opts['output_format'] in {'plist', 'plist-html'}:
+ (handle, name) = tempfile.mkstemp(prefix='report-',
+ suffix='.plist',
+ dir=opts['output_dir'])
+ os.close(handle)
+ return name
+ return opts['output_dir']
+
cwd = opts['directory']
- cmd = get_arguments([opts['clang']] + opts['analyze'] + opts['output'],
+ cmd = get_arguments([opts['clang'], '--analyze'] + opts['direct_args'] +
+ opts['flags'] + [opts['file'], '-o', output()],
cwd)
logging.debug('exec command in %s: %s', cwd, ' '.join(cmd))
child = subprocess.Popen(cmd,
@@ -144,113 +190,124 @@ def run_analyzer(opts, continuation=report_failure):
'exit_code': child.returncode
})
return continuation(opts)
+ # return the output for logging and exit code for testing
return {'error_output': output, 'exit_code': child.returncode}
-@require(['output_dir'])
-def set_analyzer_output(opts, continuation=run_analyzer):
- """ Create output file if was requested.
-
- This plays a role only if .plist files are requested. """
+@require(['flags', 'force_debug'])
+def filter_debug_flags(opts, continuation=run_analyzer):
+ """ Filter out nondebug macros when requested. """
- if opts.get('output_format') in {'plist', 'plist-html'}:
- with tempfile.NamedTemporaryFile(prefix='report-',
- suffix='.plist',
- delete=False,
- dir=opts['output_dir']) as output:
- opts.update({'output': ['-o', output.name]})
- return continuation(opts)
- else:
- opts.update({'output': ['-o', opts['output_dir']]})
- return continuation(opts)
+ if opts.pop('force_debug'):
+ # lazy implementation: just append an undefine of NDEBUG at the end
+ opts.update({'flags': opts['flags'] + ['-UNDEBUG']})
+ return continuation(opts)
-@require(['file', 'directory', 'clang', 'direct_args', 'language',
- 'output_dir', 'output_format', 'output_failures'])
-def create_commands(opts, continuation=set_analyzer_output):
- """ Create command to run analyzer or failure report generation.
- It generates commands (from compilation database entries) which contains
- enough information to run the analyzer (and the crash report generation
- if that was requested). """
+@require(['file', 'directory'])
+def set_file_path_relative(opts, continuation=filter_debug_flags):
+ """ Set source file path to relative to the working directory.
- common = []
- if 'arch' in opts:
- common.extend(['-arch', opts.pop('arch')])
- common.extend(opts.pop('compile_options', []))
- common.extend(['-x', opts['language']])
- common.append(os.path.relpath(opts['file'], opts['directory']))
+ The only purpose of this function is to pass the SATestBuild.py tests. """
- opts.update({
- 'analyze': ['--analyze'] + opts['direct_args'] + common,
- 'report': ['-fsyntax-only', '-E'] + common
- })
+ opts.update({'file': os.path.relpath(opts['file'], opts['directory'])})
return continuation(opts)
-@require(['file', 'c++'])
-def language_check(opts, continuation=create_commands):
+@require(['language', 'compiler', 'file', 'flags'])
+def language_check(opts, continuation=set_file_path_relative):
""" Find out the language from command line parameters or file name
extension. The decision also influenced by the compiler invocation. """
- accepteds = {
+ accepted = frozenset({
'c', 'c++', 'objective-c', 'objective-c++', 'c-cpp-output',
'c++-cpp-output', 'objective-c-cpp-output'
- }
+ })
- key = 'language'
- language = opts[key] if key in opts else \
- classify_source(opts['file'], opts['c++'])
+ # language can be given as a parameter...
+ language = opts.pop('language')
+ compiler = opts.pop('compiler')
+ # ... or find out from source file extension
+ if language is None and compiler is not None:
+ language = classify_source(opts['file'], compiler == 'c')
if language is None:
logging.debug('skip analysis, language not known')
return None
- elif language not in accepteds:
+ elif language not in accepted:
logging.debug('skip analysis, language not supported')
return None
else:
logging.debug('analysis, language: %s', language)
- opts.update({key: language})
+ opts.update({'language': language,
+ 'flags': ['-x', language] + opts['flags']})
return continuation(opts)
-@require([])
+@require(['arch_list', 'flags'])
def arch_check(opts, continuation=language_check):
""" Do run analyzer through one of the given architectures. """
- disableds = {'ppc', 'ppc64'}
+ disabled = frozenset({'ppc', 'ppc64'})
- key = 'archs_seen'
- if key in opts:
+ received_list = opts.pop('arch_list')
+ if received_list:
# filter out disabled architectures and -arch switches
- archs = [a for a in opts[key] if a not in disableds]
-
- if not archs:
- logging.debug('skip analysis, found not supported arch')
- return None
- else:
+ filtered_list = [a for a in received_list if a not in disabled]
+ if filtered_list:
# There should be only one arch given (or the same multiple
# times). If multiple archs are given and they are not
# the same, those should not change the pre-processing step.
# But that's the only pass we have before running the analyzer.
- arch = archs.pop()
- logging.debug('analysis, on arch: %s', arch)
+ current = filtered_list.pop()
+ logging.debug('analysis, on arch: %s', current)
- opts.update({'arch': arch})
- del opts[key]
+ opts.update({'flags': ['-arch', current] + opts['flags']})
return continuation(opts)
+ else:
+ logging.debug('skip analysis, found no supported arch')
+ return None
else:
logging.debug('analysis, on default arch')
return continuation(opts)
-@require(['action'])
-def action_check(opts, continuation=arch_check):
- """ Continue analysis only if it compilation or link. """
+def classify_parameters(command):
+ """ Prepare compiler flags (filters some and add others) and take out
+ language (-x) and architecture (-arch) flags for future processing. """
- if opts.pop('action') <= Action.Compile:
- return continuation(opts)
- else:
- logging.debug('skip analysis, not compilation nor link')
- return None
+ result = {
+ 'flags': [], # the filtered compiler flags
+ 'arch_list': [], # list of architecture flags
+ 'language': None, # compilation language, None, if not specified
+ 'compiler': compiler_language(command) # 'c' or 'c++'
+ }
+
+ # iterate on the compile options
+ args = iter(command[1:])
+ for arg in args:
+ # take arch flags into a separate basket
+ if arg == '-arch':
+ result['arch_list'].append(next(args))
+ # take language
+ elif arg == '-x':
+ result['language'] = next(args)
+ # parameters which look like source files are not flags
+ elif re.match(r'^[^-].+', arg) and classify_source(arg):
+ pass
+ # ignore some flags
+ elif arg in IGNORED_FLAGS:
+ count = IGNORED_FLAGS[arg]
+ for _ in range(count):
+ next(args)
+ # warning-enabling flags are dropped, but -Wno- flags are kept so that
+ # unwanted warnings stay suppressed.
+ elif re.match(r'^-W.+', arg) and not re.match(r'^-Wno-.+', arg):
+ pass
+ # and consider everything else as a compilation flag.
+ else:
+ result['flags'].append(arg)
+
+ return result
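
To close, a worked illustration of what classify_parameters extracts from one command line (the command is made up): the -arch and -x values are pulled out for later steps, the source file and the -o pair are dropped, warning-enabling flags are discarded while -Wno- flags are kept:

    from libscanbuild.runner import classify_parameters

    cmd = ['cc', '-c', 'main.c', '-o', 'main.o',
           '-x', 'c', '-arch', 'x86_64', '-Wall', '-Wno-unused-variable']
    print(classify_parameters(cmd))
    # roughly: {'flags': ['-Wno-unused-variable'],
    #           'arch_list': ['x86_64'],
    #           'language': 'c',
    #           'compiler': 'c'}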