Diffstat (limited to 'utils/analyzer/SATestBuild.py')
-rw-r--r--  utils/analyzer/SATestBuild.py | 640
1 file changed, 288 insertions, 352 deletions
diff --git a/utils/analyzer/SATestBuild.py b/utils/analyzer/SATestBuild.py
index 18c5393988ae..60c8796e338f 100644
--- a/utils/analyzer/SATestBuild.py
+++ b/utils/analyzer/SATestBuild.py
@@ -3,8 +3,8 @@
 """
 Static Analyzer qualification infrastructure.
 
-The goal is to test the analyzer against different projects, check for failures,
-compare results, and measure performance.
+The goal is to test the analyzer against different projects,
+check for failures, compare results, and measure performance.
 
 Repository Directory will contain sources of the projects as well as the
 information on how to build them and the expected output.
@@ -20,7 +20,8 @@ Note that the build tree must be inside the project dir.
 
 To test the build of the analyzer one would:
   - Copy over a copy of the Repository Directory. (TODO: Prefer to ensure that
-    the build directory does not pollute the repository to min network traffic).
+    the build directory does not pollute the repository to min network
+    traffic).
   - Build all projects, until error. Produce logs to report errors.
   - Compare results.
 
@@ -42,6 +43,7 @@ For testing additional checkers, use the SA_ADDITIONAL_CHECKERS environment
 variable. It should contain a comma separated list.
 """
 import CmpRuns
+import SATestUtils
 
 import os
 import csv
@@ -52,103 +54,53 @@ import shutil
 import time
 import plistlib
 import argparse
-from subprocess import check_call, check_output, CalledProcessError
+from subprocess import check_call, CalledProcessError
+import multiprocessing
 
 #------------------------------------------------------------------------------
 # Helper functions.
 #------------------------------------------------------------------------------
 
-def detectCPUs():
-    """
-    Detects the number of CPUs on a system. Cribbed from pp.
-    """
-    # Linux, Unix and MacOS:
-    if hasattr(os, "sysconf"):
-        if os.sysconf_names.has_key("SC_NPROCESSORS_ONLN"):
-            # Linux & Unix:
-            ncpus = os.sysconf("SC_NPROCESSORS_ONLN")
-            if isinstance(ncpus, int) and ncpus > 0:
-                return ncpus
-        else: # OSX:
-            return int(capture(['sysctl', '-n', 'hw.ncpu']))
-    # Windows:
-    if os.environ.has_key("NUMBER_OF_PROCESSORS"):
-        ncpus = int(os.environ["NUMBER_OF_PROCESSORS"])
-        if ncpus > 0:
-            return ncpus
-    return 1 # Default
-
-def which(command, paths = None):
-    """which(command, [paths]) - Look up the given command in the paths string
-    (or the PATH environment variable, if unspecified)."""
-
-    if paths is None:
-        paths = os.environ.get('PATH','')
-
-    # Check for absolute match first.
-    if os.path.exists(command):
-        return command
-
-    # Would be nice if Python had a lib function for this.
-    if not paths:
-        paths = os.defpath
-
-    # Get suffixes to search.
-    # On Cygwin, 'PATHEXT' may exist but it should not be used.
-    if os.pathsep == ';':
-        pathext = os.environ.get('PATHEXT', '').split(';')
-    else:
-        pathext = ['']
-
-    # Search the paths...
-    for path in paths.split(os.pathsep):
-        for ext in pathext:
-            p = os.path.join(path, command + ext)
-            if os.path.exists(p):
-                return p
-
-    return None
-
-# Make sure we flush the output after every print statement.
-class flushfile(object):
-    def __init__(self, f):
-        self.f = f
-    def write(self, x):
-        self.f.write(x)
-        self.f.flush()
-
-sys.stdout = flushfile(sys.stdout)
+
+sys.stdout = SATestUtils.flushfile(sys.stdout)
+
 
 def getProjectMapPath():
     ProjectMapPath = os.path.join(os.path.abspath(os.curdir), ProjectMapFile)
     if not os.path.exists(ProjectMapPath):
         print "Error: Cannot find the Project Map file " + ProjectMapPath +\
-              "\nRunning script for the wrong directory?"
-        sys.exit(-1)
+              "\nRunning script for the wrong directory?"
+        sys.exit(1)
     return ProjectMapPath
 
+
 def getProjectDir(ID):
     return os.path.join(os.path.abspath(os.curdir), ID)
 
-def getSBOutputDirName(IsReferenceBuild) :
-    if IsReferenceBuild == True :
+
+def getSBOutputDirName(IsReferenceBuild):
+    if IsReferenceBuild:
         return SBOutputDirReferencePrefix + SBOutputDirName
-    else :
+    else:
         return SBOutputDirName
 
 #------------------------------------------------------------------------------
 # Configuration setup.
 #------------------------------------------------------------------------------
+
 # Find Clang for static analysis.
-Clang = which("clang", os.environ['PATH'])
+if 'CC' in os.environ:
+    Clang = os.environ['CC']
+else:
+    Clang = SATestUtils.which("clang", os.environ['PATH'])
 if not Clang:
     print "Error: cannot find 'clang' in PATH"
-    sys.exit(-1)
+    sys.exit(1)
 
 # Number of jobs.
-Jobs = int(math.ceil(detectCPUs() * 0.75))
+Jobs = int(math.ceil(multiprocessing.cpu_count() * 0.75))
 
 # Project map stores info about all the "registered" projects.
ProjectMapFile = "projectMap.csv"
@@ -168,16 +120,15 @@ BuildLogName = "run_static_analyzer.log"
 # displayed when buildbot detects a build failure.
 NumOfFailuresInSummary = 10
 FailuresSummaryFileName = "failures.txt"
-# Summary of the result diffs.
-DiffsSummaryFileName = "diffs.txt"
 
 # The scan-build result directory.
 SBOutputDirName = "ScanBuildResults"
 SBOutputDirReferencePrefix = "Ref"
 
-# The name of the directory storing the cached project source. If this directory
-# does not exist, the download script will be executed. That script should
-# create the "CachedSource" directory and download the project source into it.
+# The name of the directory storing the cached project source. If this
+# directory does not exist, the download script will be executed.
+# That script should create the "CachedSource" directory and download the
+# project source into it.
 CachedSourceDirName = "CachedSource"
 
 # The name of the directory containing the source code that will be analyzed.
@@ -193,7 +144,18 @@ PatchfileName = "changes_for_analyzer.patch"
 # The list of checkers used during analyzes.
 # Currently, consists of all the non-experimental checkers, plus a few alpha
 # checkers we don't want to regress on.
-Checkers="alpha.unix.SimpleStream,alpha.security.taint,cplusplus.NewDeleteLeaks,core,cplusplus,deadcode,security,unix,osx"
+Checkers = ",".join([
+    "alpha.unix.SimpleStream",
+    "alpha.security.taint",
+    "cplusplus.NewDeleteLeaks",
+    "core",
+    "cplusplus",
+    "deadcode",
+    "security",
+    "unix",
+    "osx",
+    "nullability"
+])
 
 Verbose = 1
 
@@ -201,46 +163,38 @@ Verbose = 1
 # Test harness logic.
 #------------------------------------------------------------------------------
 
-# Run pre-processing script if any.
+
 def runCleanupScript(Dir, PBuildLogFile):
+    """
+    Run pre-processing script if any.
+    """
     Cwd = os.path.join(Dir, PatchedSourceDirName)
     ScriptPath = os.path.join(Dir, CleanupScript)
-    runScript(ScriptPath, PBuildLogFile, Cwd)
+    SATestUtils.runScript(ScriptPath, PBuildLogFile, Cwd)
+
 
-# Run the script to download the project, if it exists.
 def runDownloadScript(Dir, PBuildLogFile):
+    """
+    Run the script to download the project, if it exists.
+    """
     ScriptPath = os.path.join(Dir, DownloadScript)
-    runScript(ScriptPath, PBuildLogFile, Dir)
+    SATestUtils.runScript(ScriptPath, PBuildLogFile, Dir)
+
 
-# Run the provided script if it exists.
-def runScript(ScriptPath, PBuildLogFile, Cwd):
-    if os.path.exists(ScriptPath):
-        try:
-            if Verbose == 1:
-                print " Executing: %s" % (ScriptPath,)
-            check_call("chmod +x '%s'" % ScriptPath, cwd = Cwd,
-                       stderr=PBuildLogFile,
-                       stdout=PBuildLogFile,
-                       shell=True)
-            check_call("'%s'" % ScriptPath, cwd = Cwd, stderr=PBuildLogFile,
-                       stdout=PBuildLogFile,
-                       shell=True)
-        except:
-            print "Error: Running %s failed. See %s for details." % (ScriptPath,
-                  PBuildLogFile.name)
-            sys.exit(-1)
-
-# Download the project and apply the local patchfile if it exists.
 def downloadAndPatch(Dir, PBuildLogFile):
+    """
+    Download the project and apply the local patchfile if it exists.
+    """
     CachedSourceDirPath = os.path.join(Dir, CachedSourceDirName)
 
     # If the we don't already have the cached source, run the project's
     # download script to download it.
     if not os.path.exists(CachedSourceDirPath):
-      runDownloadScript(Dir, PBuildLogFile)
-      if not os.path.exists(CachedSourceDirPath):
-        print "Error: '%s' not found after download." % (CachedSourceDirPath)
-        exit(-1)
+        runDownloadScript(Dir, PBuildLogFile)
+        if not os.path.exists(CachedSourceDirPath):
+            print "Error: '%s' not found after download." % (
+                CachedSourceDirPath)
+            exit(1)
 
     PatchedSourceDirPath = os.path.join(Dir, PatchedSourceDirName)
 
@@ -252,6 +206,7 @@ def downloadAndPatch(Dir, PBuildLogFile):
     shutil.copytree(CachedSourceDirPath, PatchedSourceDirPath, symlinks=True)
     applyPatch(Dir, PBuildLogFile)
 
+
 def applyPatch(Dir, PBuildLogFile):
     PatchfilePath = os.path.join(Dir, PatchfileName)
     PatchedSourceDirPath = os.path.join(Dir, PatchedSourceDirName)
@@ -262,30 +217,33 @@ def applyPatch(Dir, PBuildLogFile):
     print " Applying patch."
     try:
         check_call("patch -p1 < '%s'" % (PatchfilePath),
-                   cwd = PatchedSourceDirPath,
-                   stderr=PBuildLogFile,
-                   stdout=PBuildLogFile,
-                   shell=True)
+                   cwd=PatchedSourceDirPath,
+                   stderr=PBuildLogFile,
+                   stdout=PBuildLogFile,
+                   shell=True)
     except:
         print "Error: Patch failed. See %s for details." % (PBuildLogFile.name)
-        sys.exit(-1)
+        sys.exit(1)
+
 
-# Build the project with scan-build by reading in the commands and
-# prefixing them with the scan-build options.
 def runScanBuild(Dir, SBOutputDir, PBuildLogFile):
+    """
+    Build the project with scan-build by reading in the commands and
+    prefixing them with the scan-build options.
+    """
     BuildScriptPath = os.path.join(Dir, BuildScript)
     if not os.path.exists(BuildScriptPath):
         print "Error: build script is not defined: %s" % BuildScriptPath
-        sys.exit(-1)
+        sys.exit(1)
 
     AllCheckers = Checkers
-    if os.environ.has_key('SA_ADDITIONAL_CHECKERS'):
+    if 'SA_ADDITIONAL_CHECKERS' in os.environ:
         AllCheckers = AllCheckers + ',' + os.environ['SA_ADDITIONAL_CHECKERS']
 
     # Run scan-build from within the patched source directory.
     SBCwd = os.path.join(Dir, PatchedSourceDirName)
 
-    SBOptions = "--use-analyzer '%s' " % Clang
+    SBOptions = "--use-analyzer '%s' " % Clang
     SBOptions += "-plist-html -o '%s' " % SBOutputDir
     SBOptions += "-enable-checker " + AllCheckers + " "
     SBOptions += "--keep-empty "
@@ -298,80 +256,63 @@ def runScanBuild(Dir, SBOutputDir, PBuildLogFile):
         for Command in SBCommandFile:
             Command = Command.strip()
             if len(Command) == 0:
-                continue;
+                continue
             # If using 'make', auto imply a -jX argument
             # to speed up analysis. xcodebuild will
             # automatically use the maximum number of cores.
if (Command.startswith("make ") or Command == "make") and \ - "-j" not in Command: + "-j" not in Command: Command += " -j%d" % Jobs SBCommand = SBPrefix + Command if Verbose == 1: print " Executing: %s" % (SBCommand,) - check_call(SBCommand, cwd = SBCwd, stderr=PBuildLogFile, - stdout=PBuildLogFile, - shell=True) - except: - print "Error: scan-build failed. See ",PBuildLogFile.name,\ - " for details." - raise - -def hasNoExtension(FileName): - (Root, Ext) = os.path.splitext(FileName) - if ((Ext == "")) : - return True - return False - -def isValidSingleInputFile(FileName): - (Root, Ext) = os.path.splitext(FileName) - if ((Ext == ".i") | (Ext == ".ii") | - (Ext == ".c") | (Ext == ".cpp") | - (Ext == ".m") | (Ext == "")) : - return True - return False - -# Get the path to the SDK for the given SDK name. Returns None if -# the path cannot be determined. -def getSDKPath(SDKName): - if which("xcrun") is None: - return None - - Cmd = "xcrun --sdk " + SDKName + " --show-sdk-path" - return check_output(Cmd, shell=True).rstrip() - -# Run analysis on a set of preprocessed files. + check_call(SBCommand, cwd=SBCwd, + stderr=PBuildLogFile, + stdout=PBuildLogFile, + shell=True) + except CalledProcessError: + print "Error: scan-build failed. Its output was: " + PBuildLogFile.seek(0) + shutil.copyfileobj(PBuildLogFile, sys.stdout) + sys.exit(1) + + def runAnalyzePreprocessed(Dir, SBOutputDir, Mode): + """ + Run analysis on a set of preprocessed files. + """ if os.path.exists(os.path.join(Dir, BuildScript)): print "Error: The preprocessed files project should not contain %s" % \ - BuildScript + BuildScript raise Exception() CmdPrefix = Clang + " -cc1 " # For now, we assume the preprocessed files should be analyzed # with the OS X SDK. - SDKPath = getSDKPath("macosx") + SDKPath = SATestUtils.getSDKPath("macosx") if SDKPath is not None: - CmdPrefix += "-isysroot " + SDKPath + " " + CmdPrefix += "-isysroot " + SDKPath + " " CmdPrefix += "-analyze -analyzer-output=plist -w " - CmdPrefix += "-analyzer-checker=" + Checkers +" -fcxx-exceptions -fblocks " + CmdPrefix += "-analyzer-checker=" + Checkers + CmdPrefix += " -fcxx-exceptions -fblocks " - if (Mode == 2) : + if (Mode == 2): CmdPrefix += "-std=c++11 " PlistPath = os.path.join(Dir, SBOutputDir, "date") - FailPath = os.path.join(PlistPath, "failures"); - os.makedirs(FailPath); + FailPath = os.path.join(PlistPath, "failures") + os.makedirs(FailPath) for FullFileName in glob.glob(Dir + "/*"): FileName = os.path.basename(FullFileName) Failed = False # Only run the analyzes on supported files. - if (hasNoExtension(FileName)): + if SATestUtils.hasNoExtension(FileName): continue - if (isValidSingleInputFile(FileName) == False): + if not SATestUtils.isValidSingleInputFile(FileName): print "Error: Invalid single input file %s." % (FullFileName,) raise Exception() @@ -382,44 +323,47 @@ def runAnalyzePreprocessed(Dir, SBOutputDir, Mode): try: if Verbose == 1: print " Executing: %s" % (Command,) - check_call(Command, cwd = Dir, stderr=LogFile, - stdout=LogFile, - shell=True) + check_call(Command, cwd=Dir, stderr=LogFile, + stdout=LogFile, + shell=True) except CalledProcessError, e: print "Error: Analyzes of %s failed. See %s for details." \ - "Error code %d." % \ - (FullFileName, LogFile.name, e.returncode) + "Error code %d." % ( + FullFileName, LogFile.name, e.returncode) Failed = True finally: LogFile.close() # If command did not fail, erase the log file. 
-        if Failed == False:
-            os.remove(LogFile.name);
+        if not Failed:
+            os.remove(LogFile.name)
+
 
 def getBuildLogPath(SBOutputDir):
-    return os.path.join(SBOutputDir, LogFolderName, BuildLogName)
+    return os.path.join(SBOutputDir, LogFolderName, BuildLogName)
+
 
 def removeLogFile(SBOutputDir):
-    BuildLogPath = getBuildLogPath(SBOutputDir)
-    # Clean up the log file.
-    if (os.path.exists(BuildLogPath)) :
-        RmCommand = "rm '%s'" % BuildLogPath
-        if Verbose == 1:
-            print " Executing: %s" % (RmCommand,)
-        check_call(RmCommand, shell=True)
+    BuildLogPath = getBuildLogPath(SBOutputDir)
+    # Clean up the log file.
+    if (os.path.exists(BuildLogPath)):
+        RmCommand = "rm '%s'" % BuildLogPath
+        if Verbose == 1:
+            print " Executing: %s" % (RmCommand,)
+        check_call(RmCommand, shell=True)
+
 
 def buildProject(Dir, SBOutputDir, ProjectBuildMode, IsReferenceBuild):
     TBegin = time.time()
 
     BuildLogPath = getBuildLogPath(SBOutputDir)
     print "Log file: %s" % (BuildLogPath,)
-    print "Output directory: %s" %(SBOutputDir, )
+    print "Output directory: %s" % (SBOutputDir, )
 
     removeLogFile(SBOutputDir)
 
     # Clean up scan build results.
-    if (os.path.exists(SBOutputDir)) :
+    if (os.path.exists(SBOutputDir)):
         RmCommand = "rm -r '%s'" % SBOutputDir
         if Verbose == 1:
             print " Executing: %s" % (RmCommand,)
@@ -427,11 +371,8 @@ def buildProject(Dir, SBOutputDir, ProjectBuildMode, IsReferenceBuild):
     assert(not os.path.exists(SBOutputDir))
     os.makedirs(os.path.join(SBOutputDir, LogFolderName))
 
-    # Open the log file.
-    PBuildLogFile = open(BuildLogPath, "wb+")
-    # Build and analyze the project.
-    try:
+    with open(BuildLogPath, "wb+") as PBuildLogFile:
         if (ProjectBuildMode == 1):
             downloadAndPatch(Dir, PBuildLogFile)
             runCleanupScript(Dir, PBuildLogFile)
@@ -439,34 +380,48 @@ def buildProject(Dir, SBOutputDir, ProjectBuildMode, IsReferenceBuild):
         else:
             runAnalyzePreprocessed(Dir, SBOutputDir, ProjectBuildMode)
 
-        if IsReferenceBuild :
+        if IsReferenceBuild:
             runCleanupScript(Dir, PBuildLogFile)
-
-            # Make the absolute paths relative in the reference results.
-            for (DirPath, Dirnames, Filenames) in os.walk(SBOutputDir):
-                for F in Filenames:
-                    if (not F.endswith('plist')):
-                        continue
-                    Plist = os.path.join(DirPath, F)
-                    Data = plistlib.readPlist(Plist)
-                    PathPrefix = Dir
-                    if (ProjectBuildMode == 1):
-                        PathPrefix = os.path.join(Dir, PatchedSourceDirName)
-                    Paths = [SourceFile[len(PathPrefix)+1:]\
-                             if SourceFile.startswith(PathPrefix)\
-                             else SourceFile for SourceFile in Data['files']]
-                    Data['files'] = Paths
-                    plistlib.writePlist(Data, Plist)
-
-    finally:
-        PBuildLogFile.close()
+            normalizeReferenceResults(Dir, SBOutputDir, ProjectBuildMode)
 
     print "Build complete (time: %.2f). See the log for more details: %s" % \
-          ((time.time()-TBegin), BuildLogPath)
+          ((time.time() - TBegin), BuildLogPath)
+
+
+def normalizeReferenceResults(Dir, SBOutputDir, ProjectBuildMode):
+    """
+    Make the absolute paths relative in the reference results.
+    """
+    for (DirPath, Dirnames, Filenames) in os.walk(SBOutputDir):
+        for F in Filenames:
+            if (not F.endswith('plist')):
+                continue
+            Plist = os.path.join(DirPath, F)
+            Data = plistlib.readPlist(Plist)
+            PathPrefix = Dir
+            if (ProjectBuildMode == 1):
+                PathPrefix = os.path.join(Dir, PatchedSourceDirName)
+            Paths = [SourceFile[len(PathPrefix) + 1:]
+                     if SourceFile.startswith(PathPrefix)
                     else SourceFile for SourceFile in Data['files']]
+            Data['files'] = Paths
+
+            # Remove transient fields which change from run to run.
+            for Diag in Data['diagnostics']:
+                if 'HTMLDiagnostics_files' in Diag:
+                    Diag.pop('HTMLDiagnostics_files')
+            if 'clang_version' in Data:
+                Data.pop('clang_version')
+
+            plistlib.writePlist(Data, Plist)
+
-# A plist file is created for each call to the analyzer(each source file).
-# We are only interested on the once that have bug reports, so delete the rest.
 def CleanUpEmptyPlists(SBOutputDir):
+    """
+    A plist file is created for each call to the analyzer(each source file).
+    We are only interested on the once that have bug reports,
+    so delete the rest.
+    """
     for F in glob.glob(SBOutputDir + "/*/*.plist"):
         P = os.path.join(SBOutputDir, F)
 
@@ -476,62 +431,65 @@ def CleanUpEmptyPlists(SBOutputDir):
         os.remove(P)
         continue
 
-# Given the scan-build output directory, checks if the build failed
-# (by searching for the failures directories). If there are failures, it
-# creates a summary file in the output directory.
+
+def CleanUpEmptyFolders(SBOutputDir):
+    """
+    Remove empty folders from results, as git would not store them.
+    """
+    Subfolders = glob.glob(SBOutputDir + "/*")
+    for Folder in Subfolders:
+        if not os.listdir(Folder):
+            os.removedirs(Folder)
+
+
 def checkBuild(SBOutputDir):
+    """
+    Given the scan-build output directory, checks if the build failed
+    (by searching for the failures directories). If there are failures, it
+    creates a summary file in the output directory.
+
+    """
     # Check if there are failures.
     Failures = glob.glob(SBOutputDir + "/*/failures/*.stderr.txt")
-    TotalFailed = len(Failures);
+    TotalFailed = len(Failures)
     if TotalFailed == 0:
         CleanUpEmptyPlists(SBOutputDir)
+        CleanUpEmptyFolders(SBOutputDir)
         Plists = glob.glob(SBOutputDir + "/*/*.plist")
         print "Number of bug reports (non-empty plist files) produced: %d" %\
-            len(Plists)
-        return;
-
-    # Create summary file to display when the build fails.
-    SummaryPath = os.path.join(SBOutputDir, LogFolderName, FailuresSummaryFileName)
-    if (Verbose > 0):
-        print " Creating the failures summary file %s" % (SummaryPath,)
+            len(Plists)
+        return
 
-    SummaryLog = open(SummaryPath, "w+")
-    try:
-        SummaryLog.write("Total of %d failures discovered.\n" % (TotalFailed,))
-        if TotalFailed > NumOfFailuresInSummary:
-            SummaryLog.write("See the first %d below.\n"
-                             % (NumOfFailuresInSummary,))
+    print "Error: analysis failed."
+    print "Total of %d failures discovered." % TotalFailed
+    if TotalFailed > NumOfFailuresInSummary:
+        print "See the first %d below.\n" % NumOfFailuresInSummary
        # TODO: Add a line "See the results folder for more."
 
-        FailuresCopied = NumOfFailuresInSummary
-        Idx = 0
-        for FailLogPathI in Failures:
-            if Idx >= NumOfFailuresInSummary:
-                break;
-            Idx += 1
-            SummaryLog.write("\n-- Error #%d -----------\n" % (Idx,));
-            FailLogI = open(FailLogPathI, "r");
-            try:
-                shutil.copyfileobj(FailLogI, SummaryLog);
-            finally:
-                FailLogI.close()
-    finally:
-        SummaryLog.close()
-
-    print "Error: analysis failed. See ", SummaryPath
-    sys.exit(-1)
-
-# Auxiliary object to discard stdout.
-class Discarder(object):
-    def write(self, text):
-        pass # do nothing
-
-# Compare the warnings produced by scan-build.
-# Strictness defines the success criteria for the test:
-# 0 - success if there are no crashes or analyzer failure.
-# 1 - success if there are no difference in the number of reported bugs.
-# 2 - success if all the bug reports are identical.
-def runCmpResults(Dir, Strictness = 0):
+    Idx = 0
+    for FailLogPathI in Failures:
+        if Idx >= NumOfFailuresInSummary:
+            break
+        Idx += 1
+        print "\n-- Error #%d -----------\n" % Idx
+        with open(FailLogPathI, "r") as FailLogI:
+            shutil.copyfileobj(FailLogI, sys.stdout)
+
+    sys.exit(1)
+
+
+def runCmpResults(Dir, Strictness=0):
+    """
+    Compare the warnings produced by scan-build.
+    Strictness defines the success criteria for the test:
+    0 - success if there are no crashes or analyzer failure.
+    1 - success if there are no difference in the number of reported bugs.
+    2 - success if all the bug reports are identical.
+
+    :return success: Whether tests pass according to the Strictness
+    criteria.
+    """
+    TestsPassed = True
     TBegin = time.time()
 
     RefDir = os.path.join(Dir, SBOutputDirReferencePrefix + SBOutputDirName)
@@ -547,9 +505,10 @@ def runCmpResults(Dir, Strictness = 0):
     RefList.remove(RefLogDir)
     NewList.remove(os.path.join(NewDir, LogFolderName))
 
-    if len(RefList) == 0 or len(NewList) == 0:
-        return False
-    assert(len(RefList) == len(NewList))
+    if len(RefList) != len(NewList):
+        print "Mismatch in number of results folders: %s vs %s" % (
+            RefList, NewList)
+        sys.exit(1)
 
     # There might be more then one folder underneath - one per each scan-build
     # command (Ex: one for configure and one for make).
@@ -569,32 +528,31 @@ def runCmpResults(Dir, Strictness = 0):
        if Verbose == 1:
            print " Comparing Results: %s %s" % (RefDir, NewDir)
 
-        DiffsPath = os.path.join(NewDir, DiffsSummaryFileName)
        PatchedSourceDirPath = os.path.join(Dir, PatchedSourceDirName)
-        Opts = CmpRuns.CmpOptions(DiffsPath, "", PatchedSourceDirPath)
-        # Discard everything coming out of stdout (CmpRun produces a lot of them).
-        OLD_STDOUT = sys.stdout
-        sys.stdout = Discarder()
+        Opts = CmpRuns.CmpOptions(rootA="", rootB=PatchedSourceDirPath)
        # Scan the results, delete empty plist files.
        NumDiffs, ReportsInRef, ReportsInNew = \
            CmpRuns.dumpScanBuildResultsDiff(RefDir, NewDir, Opts, False)
-        sys.stdout = OLD_STDOUT
-        if (NumDiffs > 0) :
-            print "Warning: %r differences in diagnostics. See %s" % \
-                  (NumDiffs, DiffsPath,)
+        if (NumDiffs > 0):
+            print "Warning: %s differences in diagnostics." % NumDiffs
        if Strictness >= 2 and NumDiffs > 0:
            print "Error: Diffs found in strict mode (2)."
-            sys.exit(-1)
+            TestsPassed = False
        elif Strictness >= 1 and ReportsInRef != ReportsInNew:
-            print "Error: The number of results are different in strict mode (1)."
-            sys.exit(-1)
+            print "Error: The number of results are different in "\
+                  "strict mode (1)."
+            TestsPassed = False
+
+    print "Diagnostic comparison complete (time: %.2f)." % (
+        time.time() - TBegin)
+    return TestsPassed
 
-    print "Diagnostic comparison complete (time: %.2f)." % (time.time()-TBegin)
-    return (NumDiffs > 0)
 
 def cleanupReferenceResults(SBOutputDir):
-    # Delete html, css, and js files from reference results. These can
-    # include multiple copies of the benchmark source and so get very large.
+    """
+    Delete html, css, and js files from reference results. These can
+    include multiple copies of the benchmark source and so get very large.
+    """
    Extensions = ["html", "css", "js"]
    for E in Extensions:
        for F in glob.glob("%s/*/*.%s" % (SBOutputDir, E)):
@@ -605,42 +563,18 @@ def cleanupReferenceResults(SBOutputDir):
     # Remove the log file. It leaks absolute path names.
     removeLogFile(SBOutputDir)
 
-def updateSVN(Mode, ProjectsMap):
-    try:
-        ProjectsMap.seek(0)
-        for I in csv.reader(ProjectsMap):
-            ProjName = I[0]
-            Path = os.path.join(ProjName, getSBOutputDirName(True))
-
-            if Mode == "delete":
-                Command = "svn delete '%s'" % (Path,)
-            else:
-                Command = "svn add '%s'" % (Path,)
-
-            if Verbose == 1:
-                print " Executing: %s" % (Command,)
-            check_call(Command, shell=True)
-
-        if Mode == "delete":
-            CommitCommand = "svn commit -m \"[analyzer tests] Remove " \
-                            "reference results.\""
-        else:
-            CommitCommand = "svn commit -m \"[analyzer tests] Add new " \
-                            "reference results.\""
-        if Verbose == 1:
-            print " Executing: %s" % (CommitCommand,)
-        check_call(CommitCommand, shell=True)
-    except:
-        print "Error: SVN update failed."
-        sys.exit(-1)
-def testProject(ID, ProjectBuildMode, IsReferenceBuild=False, Dir=None, Strictness = 0):
+def testProject(ID, ProjectBuildMode, IsReferenceBuild=False, Strictness=0):
+    """
+    Test a given project.
+    :return TestsPassed: Whether tests have passed according
+    to the :param Strictness: criteria.
+    """
     print " \n\n--- Building project %s" % (ID,)
 
     TBegin = time.time()
 
-    if Dir is None :
-        Dir = getProjectDir(ID)
+    Dir = getProjectDir(ID)
     if Verbose == 1:
         print " Build directory: %s." % (Dir,)
 
@@ -652,76 +586,78 @@ def testProject(ID, ProjectBuildMode, IsReferenceBuild=False, Dir=None, Strictne
     checkBuild(SBOutputDir)
 
-    if IsReferenceBuild == False:
-        runCmpResults(Dir, Strictness)
-    else:
+    if IsReferenceBuild:
         cleanupReferenceResults(SBOutputDir)
+        TestsPassed = True
+    else:
+        TestsPassed = runCmpResults(Dir, Strictness)
 
     print "Completed tests for project %s (time: %.2f)." % \
-          (ID, (time.time()-TBegin))
+          (ID, (time.time() - TBegin))
+    return TestsPassed
 
-def isCommentCSVLine(Entries):
-    # Treat CSV lines starting with a '#' as a comment.
-    return len(Entries) > 0 and Entries[0].startswith("#")
-def testAll(IsReferenceBuild = False, UpdateSVN = False, Strictness = 0):
-    PMapFile = open(getProjectMapPath(), "rb")
-    try:
-        # Validate the input.
-        for I in csv.reader(PMapFile):
-            if (isCommentCSVLine(I)):
-                continue
-            if (len(I) != 2) :
-                print "Error: Rows in the ProjectMapFile should have 3 entries."
-                raise Exception()
-            if (not ((I[1] == "0") | (I[1] == "1") | (I[1] == "2"))):
-                print "Error: Second entry in the ProjectMapFile should be 0" \
-                      " (single file), 1 (project), or 2(single file c++11)."
-                raise Exception()
-
-        # When we are regenerating the reference results, we might need to
-        # update svn. Remove reference results from SVN.
-        if UpdateSVN == True:
-            assert(IsReferenceBuild == True);
-            updateSVN("delete", PMapFile);
 
         # Test the projects.
-        PMapFile.seek(0)
-        for I in csv.reader(PMapFile):
-            if isCommentCSVLine(I):
-                continue;
-            testProject(I[0], int(I[1]), IsReferenceBuild, None, Strictness)
 
-        # Add reference results to SVN.
-        if UpdateSVN == True:
-            updateSVN("add", PMapFile);
+def projectFileHandler():
+    return open(getProjectMapPath(), "rb")
+
+
+def iterateOverProjects(PMapFile):
+    """
+    Iterate over all projects defined in the project file handler `PMapFile`
+    from the start.
+    """
+    PMapFile.seek(0)
+    for I in csv.reader(PMapFile):
+        if (SATestUtils.isCommentCSVLine(I)):
+            continue
+        yield I
+
+
+def validateProjectFile(PMapFile):
+    """
+    Validate project file.
+    """
+    for I in iterateOverProjects(PMapFile):
+        if len(I) != 2:
+            print "Error: Rows in the ProjectMapFile should have 2 entries."
+            raise Exception()
+        if I[1] not in ('0', '1', '2'):
+            print "Error: Second entry in the ProjectMapFile should be 0" \
+                  " (single file), 1 (project), or 2(single file c++11)."
+            raise Exception()
+
+
+def testAll(IsReferenceBuild=False, Strictness=0):
+    TestsPassed = True
+    with projectFileHandler() as PMapFile:
+        validateProjectFile(PMapFile)
+
+        # Test the projects.
+        for (ProjName, ProjBuildMode) in iterateOverProjects(PMapFile):
+            TestsPassed &= testProject(
+                ProjName, int(ProjBuildMode), IsReferenceBuild, Strictness)
+    return TestsPassed
 
-    except:
-        print "Error occurred. Premature termination."
-        raise
-    finally:
-        PMapFile.close()
 
 if __name__ == '__main__':
     # Parse command line arguments.
-    Parser = argparse.ArgumentParser(description='Test the Clang Static Analyzer.')
+    Parser = argparse.ArgumentParser(
+        description='Test the Clang Static Analyzer.')
 
     Parser.add_argument('--strictness', dest='strictness', type=int, default=0,
-                        help='0 to fail on runtime errors, 1 to fail when the number\
-                        of found bugs are different from the reference, 2 to \
-                        fail on any difference from the reference. Default is 0.')
-    Parser.add_argument('-r', dest='regenerate', action='store_true', default=False,
-                        help='Regenerate reference output.')
-    Parser.add_argument('-rs', dest='update_reference', action='store_true',
-                        default=False, help='Regenerate reference output and update svn.')
+                        help='0 to fail on runtime errors, 1 to fail when the \
                        number of found bugs are different from the \
                        reference, 2 to fail on any difference from the \
                        reference. Default is 0.')
+    Parser.add_argument('-r', dest='regenerate', action='store_true',
+                        default=False, help='Regenerate reference output.')
     Args = Parser.parse_args()
 
     IsReference = False
-    UpdateSVN = False
     Strictness = Args.strictness
     if Args.regenerate:
        IsReference = True
-    elif Args.update_reference:
-        IsReference = True
-        UpdateSVN = True
-    testAll(IsReference, UpdateSVN, Strictness)
+    TestsPassed = testAll(IsReference, Strictness)
+    if not TestsPassed:
+        print "ERROR: Tests failed."
+        sys.exit(42)
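
The hand-rolled detectCPUs() helper is replaced by the standard library; the job count passed to make is now computed from multiprocessing.cpu_count(). A minimal standalone sketch of that computation (the 0.75 scaling factor is the one used by the script):

    import math
    import multiprocessing

    # Use roughly three quarters of the available cores for the -j value
    # handed to make, as SATestBuild.py now does.
    Jobs = int(math.ceil(multiprocessing.cpu_count() * 0.75))
    print "Using %d jobs" % Jobs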
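The checker list is now assembled with str.join instead of one long string literal, which also makes the newly added nullability package easy to spot. A sketch of how the joined value ends up on the scan-build command line, mirroring runScanBuild(); the output directory below is only a placeholder:

    Checkers = ",".join([
        "alpha.unix.SimpleStream",
        "alpha.security.taint",
        "cplusplus.NewDeleteLeaks",
        "core",
        "cplusplus",
        "deadcode",
        "security",
        "unix",
        "osx",
        "nullability"
    ])

    # scan-build takes the whole list as a single -enable-checker argument.
    SBOptions = "-plist-html -o 'ScanBuildResults' "   # placeholder output dir
    SBOptions += "-enable-checker " + Checkers + " "
    print SBOptions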
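The new normalizeReferenceResults() also strips fields that change from run to run, so reference results produce stable diffs. A self-contained sketch of that normalization step, assuming a hypothetical report.plist in the current directory (plistlib.readPlist and writePlist are the same Python 2 APIs the script uses):

    import plistlib

    Plist = "report.plist"            # hypothetical analyzer output file
    Data = plistlib.readPlist(Plist)

    # Drop transient data: per-report HTML file names and the clang version.
    for Diag in Data['diagnostics']:
        if 'HTMLDiagnostics_files' in Diag:
            Diag.pop('HTMLDiagnostics_files')
    if 'clang_version' in Data:
        Data.pop('clang_version')

    plistlib.writePlist(Data, Plist)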
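Project-map handling is split into projectFileHandler(), iterateOverProjects() and validateProjectFile(), with comment filtering delegated to SATestUtils.isCommentCSVLine(). A rough standalone equivalent, assuming a projectMap.csv in the current directory and assuming the moved helper still treats rows whose first field starts with '#' as comments, which is what the removed in-file isCommentCSVLine() did:

    import csv

    def isCommentCSVLine(Entries):
        # Treat CSV rows whose first field starts with '#' as comments.
        return len(Entries) > 0 and Entries[0].startswith("#")

    def iterateOverProjects(PMapFile):
        # Yield (project name, build mode) rows, skipping comments.
        PMapFile.seek(0)
        for I in csv.reader(PMapFile):
            if isCommentCSVLine(I):
                continue
            yield I

    with open("projectMap.csv", "rb") as PMapFile:
        for (ProjName, ProjBuildMode) in iterateOverProjects(PMapFile):
            print "%s (build mode %s)" % (ProjName, ProjBuildMode)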
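checkBuild() additionally calls the new CleanUpEmptyFolders(), since empty result directories would not survive a git checkout of the reference results. The same idea in isolation; ScanBuildResults is just an example path:

    import glob
    import os

    SBOutputDir = "ScanBuildResults"   # example results directory
    for Folder in glob.glob(SBOutputDir + "/*"):
        # Delete the folder (and any parents that become empty) if it holds
        # no files, mirroring CleanUpEmptyFolders() in the patch.
        if not os.listdir(Folder):
            os.removedirs(Folder)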
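With updateSVN() removed, regenerating references is just the -r flag, and failures are reported through the process exit status rather than an svn commit: testAll() ANDs the per-project results together and the main block exits with code 42 when anything failed. A small sketch of that aggregation; the hard-coded result list stands in for real testProject() calls:

    import sys

    ProjectResults = [True, False, True]   # stand-in for testProject() outcomes

    TestsPassed = True
    for Passed in ProjectResults:
        # A single failing project makes the whole run fail.
        TestsPassed &= Passed

    if not TestsPassed:
        print "ERROR: Tests failed."
        sys.exit(42)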