From 222e2a7620e6520ffaf4fc4e69d79c18da31542e Mon Sep 17 00:00:00 2001
From: "Zancanaro; Carlo" <czan8762@plang3.cs.usyd.edu.au>
Date: Mon, 24 Sep 2012 09:58:17 +1000
Subject: Add the clang library to the repo (with some of my changes, too).

---
 clang/utils/analyzer/CmpRuns.py     | 253 +++++++++++++++++++
 clang/utils/analyzer/SATestAdd.py   |  80 ++++++
 clang/utils/analyzer/SATestBuild.py | 475 ++++++++++++++++++++++++++++++++++++
 clang/utils/analyzer/ubiviz         |  74 ++++++
 4 files changed, 882 insertions(+)
 create mode 100755 clang/utils/analyzer/CmpRuns.py
 create mode 100755 clang/utils/analyzer/SATestAdd.py
 create mode 100755 clang/utils/analyzer/SATestBuild.py
 create mode 100755 clang/utils/analyzer/ubiviz

(limited to 'clang/utils/analyzer')

diff --git a/clang/utils/analyzer/CmpRuns.py b/clang/utils/analyzer/CmpRuns.py
new file mode 100755
index 0000000..e68c45d
--- /dev/null
+++ b/clang/utils/analyzer/CmpRuns.py
@@ -0,0 +1,253 @@
+#!/usr/bin/env python
+
+"""
+CmpRuns - A simple tool for comparing two static analyzer runs to determine
+which reports have been added, removed, or changed.
+
+This is designed to support automated testing using the static analyzer, from
+two perspectives: 
+  1. To monitor changes in the static analyzer's reports on real code bases, for
+     regression testing.
+
+  2. For use by end users who want to integrate regular static analyzer testing
+     into a buildbot-like environment.
+"""
+
+import os
+import plistlib
+
+#
+
+class multidict:
+    def __init__(self, elts=()):
+        self.data = {}
+        for key,value in elts:
+            self[key] = value
+    
+    def __getitem__(self, item):
+        return self.data[item]
+    def __setitem__(self, key, value):
+        if key in self.data:
+            self.data[key].append(value)
+        else:
+            self.data[key] = [value]
+    def items(self):
+        return self.data.items()
+    def values(self):
+        return self.data.values()
+    def keys(self):
+        return self.data.keys()
+    def __len__(self):
+        return len(self.data)
+    def get(self, key, default=None):
+        return self.data.get(key, default)
+    
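+# Usage sketch (hypothetical values): values bound to the same key accumulate,
+# e.g. multidict([('k', 1), ('k', 2)])['k'] == [1, 2].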
+#
+
+class CmpOptions:
+    def __init__(self, verboseLog=None, root=""):
+        self.root = root
+        self.verboseLog = verboseLog
+
+class AnalysisReport:
+    def __init__(self, run, files):
+        self.run = run
+        self.files = files
+
+class AnalysisDiagnostic:
+    def __init__(self, data, report, htmlReport):
+        self.data = data
+        self.report = report
+        self.htmlReport = htmlReport
+
+    def getReadableName(self):
+        loc = self.data['location']
+        filename = self.report.run.getSourceName(self.report.files[loc['file']])
+        line = loc['line']
+        column = loc['col']
+        category = self.data['category']
+        description = self.data['description']
+
+        # FIXME: Get a report number based on this key, to 'distinguish'
+        # reports, or something.
+        
+        return '%s:%d:%d, %s: %s' % (filename, line, column, category, 
+                                   description)
+
+    def getReportData(self):
+        if self.htmlReport is None:
+            return " "
+        return os.path.join(self.report.run.path, self.htmlReport)
+        # We could also dump the report with:
+        # return open(os.path.join(self.report.run.path,
+        #                         self.htmlReport), "rb").read() 
+
+class AnalysisRun:
+    def __init__(self, path, opts):
+        self.path = path
+        self.reports = []
+        self.diagnostics = []
+        self.opts = opts
+
+    def getSourceName(self, path):
+        if path.startswith(self.opts.root):
+            return path[len(self.opts.root):]
+        return path
+
+def loadResults(path, opts, deleteEmpty=True):
+    run = AnalysisRun(path, opts)
+
+    for f in os.listdir(path):
+        if (not f.startswith('report') or
+            not f.endswith('plist')):
+            continue
+
+        p = os.path.join(path, f)
+        data = plistlib.readPlist(p)
+
+        # Ignore/delete empty reports.
+        if not data['files']:
+            if deleteEmpty == True:
+                os.remove(p)
+            continue
+
+        # Extract the HTML reports, if they exist.
+        if 'HTMLDiagnostics_files' in data['diagnostics'][0]:
+            htmlFiles = []
+            for d in data['diagnostics']:
+                # FIXME: Why is this named 'files'? When does it have multiple
+                # files?
+                assert len(d['HTMLDiagnostics_files']) == 1
+                htmlFiles.append(d.pop('HTMLDiagnostics_files')[0])
+        else:
+            htmlFiles = [None] * len(data['diagnostics'])
+            
+        report = AnalysisReport(run, data.pop('files'))
+        diagnostics = [AnalysisDiagnostic(d, report, h) 
+                       for d,h in zip(data.pop('diagnostics'),
+                                      htmlFiles)]
+
+        assert not data
+
+        run.reports.append(report)
+        run.diagnostics.extend(diagnostics)
+
+    return run
+
+def compareResults(A, B):
+    """
+    compareResults - Generate a relation from diagnostics in run A to
+    diagnostics in run B.
+
+    The result is the relation as a list of triples (a, b, confidence) where
+    each element {a,b} is None or an element from the respective run, and
+    confidence is a measure of the match quality (where 0 indicates equality,
+    and None is used if either element is None).
+    """
+
+    res = []
+
+    # Quickly eliminate equal elements.
+    neqA = []
+    neqB = []
+    eltsA = list(A.diagnostics)
+    eltsB = list(B.diagnostics)
+    eltsA.sort(key = lambda d: d.data)
+    eltsB.sort(key = lambda d: d.data)
+    while eltsA and eltsB:
+        a = eltsA.pop()
+        b = eltsB.pop()
+        if a.data['location'] == b.data['location']:
+            res.append((a, b, 0))
+        elif a.data > b.data:
+            neqA.append(a)
+            eltsB.append(b)
+        else:
+            neqB.append(b)
+            eltsA.append(a)
+    neqA.extend(eltsA)
+    neqB.extend(eltsB)
+
+    # FIXME: Add fuzzy matching. One simple and possibly effective idea would be
+    # to bin the diagnostics, print them in a normalized form (based solely on
+    # the structure of the diagnostic), compute the diff, then use that as the
+    # basis for matching. This has the nice property that we don't depend in any
+    # way on the diagnostic format.
+
+    for a in neqA:
+        res.append((a, None, None))
+    for b in neqB:
+        res.append((None, b, None))
+
+    return res
+
+def cmpScanBuildResults(dirA, dirB, opts, deleteEmpty=True):
+    # Load the run results.
+    resultsA = loadResults(dirA, opts, deleteEmpty)
+    resultsB = loadResults(dirB, opts, deleteEmpty)
+    
+    # Open the verbose log, if given.
+    if opts.verboseLog:
+        auxLog = open(opts.verboseLog, "wb")
+    else:
+        auxLog = None
+
+    diff = compareResults(resultsA, resultsB)
+    foundDiffs = 0
+    for res in diff:
+        a,b,confidence = res
+        if a is None:
+            print "ADDED: %r" % b.getReadableName()
+            foundDiffs += 1
+            if auxLog:
+                print >>auxLog, ("('ADDED', %r, %r)" % (b.getReadableName(),
+                                                        b.getReportData()))
+        elif b is None:
+            print "REMOVED: %r" % a.getReadableName()
+            foundDiffs += 1
+            if auxLog:
+                print >>auxLog, ("('REMOVED', %r, %r)" % (a.getReadableName(),
+                                                          a.getReportData()))
+        elif confidence:
+            print "CHANGED: %r to %r" % (a.getReadableName(),
+                                         b.getReadableName())
+            foundDiffs += 1
+            if auxLog:
+                print >>auxLog, ("('CHANGED', %r, %r, %r, %r)" 
+                                 % (a.getReadableName(),
+                                    b.getReadableName(),
+                                    a.getReportData(),
+                                    b.getReportData()))
+        else:
+            pass
+
+    TotalReports = len(resultsB.diagnostics)
+    print "TOTAL REPORTS: %r" % TotalReports
+    print "TOTAL DIFFERENCES: %r" % foundDiffs
+    if auxLog:
+        print >>auxLog, "('TOTAL NEW REPORTS', %r)" % TotalReports
+        print >>auxLog, "('TOTAL DIFFERENCES', %r)" % foundDiffs
+        
+    return foundDiffs    
+
+def main():
+    from optparse import OptionParser
+    parser = OptionParser("usage: %prog [options] [dir A] [dir B]")
+    parser.add_option("", "--root", dest="root",
+                      help="Prefix to ignore on source files",
+                      action="store", type=str, default="")
+    parser.add_option("", "--verbose-log", dest="verboseLog",
+                      help="Write additional information to LOG [default=None]",
+                      action="store", type=str, default=None,
+                      metavar="LOG")
+    (opts, args) = parser.parse_args()
+
+    if len(args) != 2:
+        parser.error("invalid number of arguments")
+
+    dirA,dirB = args
+
+    cmpScanBuildResults(dirA, dirB, opts)    
+
+if __name__ == '__main__':
+    main()
diff --git a/clang/utils/analyzer/SATestAdd.py b/clang/utils/analyzer/SATestAdd.py
new file mode 100755
index 0000000..ce64bc8
--- /dev/null
+++ b/clang/utils/analyzer/SATestAdd.py
@@ -0,0 +1,80 @@
+#!/usr/bin/env python
+
+"""
+Static Analyzer qualification infrastructure: adding a new project to 
+the Repository Directory.
+
+ Add a new project for testing: build it and add to the Project Map file.
+   Assumes it's being run from the Repository Directory.
+   The project directory should be added inside the Repository Directory and 
+   have the same name as the project ID.
+   
+ The project should use the following files for setup:
+      - cleanup_run_static_analyzer.sh - prepares the build environment.
+                                     Ex: make clean can be a part of it.
+      - run_static_analyzer.cmd - a list of commands to run through scan-build.
+                                     Each command should be on a separate line.
+                                     Choose from: configure, make, xcodebuild 
+"""
+import SATestBuild
+
+import os
+import csv
+import sys
+
+def isExistingProject(PMapFile, projectID) :
+    PMapReader = csv.reader(PMapFile)
+    for I in PMapReader:
+        if projectID == I[0]:
+            return True
+    return False    
+
+# Add a new project for testing: build it and add to the Project Map file.
+# Params:
+#   ID is a short string used to identify a project; the project sources are
+#      expected to live in a directory of the same name.
+#   IsScanBuild is 1 for projects built with scan-build, 0 for single file
+#      (preprocessed) projects.
+def addNewProject(ID, IsScanBuild) :
+    CurDir = os.path.abspath(os.curdir)
+    Dir = SATestBuild.getProjectDir(ID)
+    if not os.path.exists(Dir):
+        print "Error: Project directory is missing: %s" % Dir
+        sys.exit(-1)
+        
+    # Build the project.
+    SATestBuild.testProject(ID, IsScanBuild, Dir)
+
+    # Add the project ID to the project map.
+    ProjectMapPath = os.path.join(CurDir, SATestBuild.ProjectMapFile)
+    if os.path.exists(ProjectMapPath):
+        PMapFile = open(ProjectMapPath, "r+b")
+    else:
+        print "Warning: Creating the Project Map file!!"
+        PMapFile = open(ProjectMapPath, "w+b")
+    try:
+        if (isExistingProject(PMapFile, ID)) :        
+            print >> sys.stdout, 'Warning: Project with ID \'', ID, \
+                                 '\' already exists.'
+            print >> sys.stdout, "Reference output has been regenerated."
+        else:                     
+            PMapWriter = csv.writer(PMapFile)
+            PMapWriter.writerow( (ID, int(IsScanBuild)) );
+            print "The project map is updated: ", ProjectMapPath
+    finally:
+        PMapFile.close()
+            
+
+# TODO: Add an option not to build. 
+# TODO: Set the path to the Repository directory.
+if __name__ == '__main__':
+    if len(sys.argv) < 2:
+        print >> sys.stderr, 'Usage: ', sys.argv[0],\
+                             'project_ID <mode>\n' \
+                             'mode - 0 for single file project; 1 for scan_build'
+        sys.exit(-1)
+    
+    IsScanBuild = 1    
+    if (len(sys.argv) >= 3):
+        IsScanBuild = int(sys.argv[2])  
+    assert((IsScanBuild == 0) | (IsScanBuild == 1))
+        
+    addNewProject(sys.argv[1], IsScanBuild)
diff --git a/clang/utils/analyzer/SATestBuild.py b/clang/utils/analyzer/SATestBuild.py
new file mode 100755
index 0000000..3fccb9a
--- /dev/null
+++ b/clang/utils/analyzer/SATestBuild.py
@@ -0,0 +1,475 @@
+#!/usr/bin/env python
+
+"""
+Static Analyzer qualification infrastructure.
+
+The goal is to test the analyzer against different projects, check for failures,
+compare results, and measure performance.
+
+The Repository Directory will contain the sources of the projects as well as
+the information on how to build them and the expected output.
+Repository Directory structure:
+   - ProjectMap file
+   - Historical Performance Data
+   - Project Dir1
+     - ReferenceOutput
+   - Project Dir2
+     - ReferenceOutput
+   ..
+
+To test a build of the analyzer one would:
+   - Copy over a copy of the Repository Directory. (TODO: It would be better to
+     ensure that the build directory does not pollute the repository, to
+     minimize network traffic.)
+   - Build all projects, stopping on error. Produce logs to report errors.
+   - Compare results.
+
+The files which should be kept around for failure investigations: 
+   RepositoryCopy/Project DirI/ScanBuildResults
+   RepositoryCopy/Project DirI/run_static_analyzer.log      
+   
+Assumptions (TODO: we shouldn't need to assume these):
+   The script is being run from the Repository Directory.
+   The compiler used by scan-build, and scan-build itself, are in the PATH.
+   Ex: export PATH=/Users/zaks/workspace/c2llvm/build/Release+Asserts/bin:$PATH
+
+For more logging, set the env variables:
+   zaks:TI zaks$ export CCC_ANALYZER_LOG=1
+   zaks:TI zaks$ export CCC_ANALYZER_VERBOSE=1
+"""
+import CmpRuns
+
+import os
+import csv
+import sys
+import glob
+import shutil
+import time
+import plistlib
+from subprocess import check_call, CalledProcessError
+
+# Project map stores info about all the "registered" projects.
+ProjectMapFile = "projectMap.csv"
+
+# Names of the project specific scripts.
+# The script that needs to be executed before the build can start.
+CleanupScript = "cleanup_run_static_analyzer.sh"
+# This is a file containing commands for scan-build.  
+BuildScript = "run_static_analyzer.cmd"
+
+# The log folder and log file names.
+LogFolderName = "Logs"
+BuildLogName = "run_static_analyzer.log"
+# Summary file - contains the summary of the failures. Ex: This info can be
+# displayed when buildbot detects a build failure.
+NumOfFailuresInSummary = 10
+FailuresSummaryFileName = "failures.txt"
+# Summary of the result diffs.
+DiffsSummaryFileName = "diffs.txt"
+
+# The scan-build result directory.
+SBOutputDirName = "ScanBuildResults"
+SBOutputDirReferencePrefix = "Ref"
+
+# The list of checkers used during analysis.
+# Currently, it consists of all the non-experimental checkers, plus the
+# experimental taint checker.
+Checkers="experimental.security.taint,core,deadcode,cplusplus,security,unix,osx,cocoa"
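+# Commands read from run_static_analyzer.cmd are prefixed with these options by
+# runScanBuild; e.g. a "make" line runs roughly as (illustrative):
+#   scan-build -plist-html -o <output dir> -enable-checker <Checkers> make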
+
+Verbose = 1
+
+IsReferenceBuild = False
+
+# Make sure we flush the output after every print statement.
+class flushfile(object):
+    def __init__(self, f):
+        self.f = f
+    def write(self, x):
+        self.f.write(x)
+        self.f.flush()
+
+sys.stdout = flushfile(sys.stdout)
+
+def getProjectMapPath():
+    ProjectMapPath = os.path.join(os.path.abspath(os.curdir), 
+                                  ProjectMapFile)
+    if not os.path.exists(ProjectMapPath):
+        print "Error: Cannot find the Project Map file " + ProjectMapPath +\
+                "\nRunning script for the wrong directory?"
+        sys.exit(-1)  
+    return ProjectMapPath         
+
+def getProjectDir(ID):
+    return os.path.join(os.path.abspath(os.curdir), ID)        
+
+def getSBOutputDirName() :
+    if IsReferenceBuild == True :
+        return SBOutputDirReferencePrefix + SBOutputDirName
+    else :
+        return SBOutputDirName
+
+# Run pre-processing script if any.
+def runCleanupScript(Dir, PBuildLogFile):
+    ScriptPath = os.path.join(Dir, CleanupScript)
+    if os.path.exists(ScriptPath):
+        try:
+            if Verbose == 1:        
+                print "  Executing: %s" % (ScriptPath,)
+            check_call("chmod +x %s" % ScriptPath, cwd = Dir, 
+                                              stderr=PBuildLogFile,
+                                              stdout=PBuildLogFile, 
+                                              shell=True)    
+            check_call(ScriptPath, cwd = Dir, stderr=PBuildLogFile,
+                                              stdout=PBuildLogFile, 
+                                              shell=True)
+        except:
+            print "Error: The pre-processing step failed. See ", \
+                  PBuildLogFile.name, " for details."
+            sys.exit(-1)
+
+# Build the project with scan-build by reading in the commands and 
+# prefixing them with the scan-build options.
+def runScanBuild(Dir, SBOutputDir, PBuildLogFile):
+    BuildScriptPath = os.path.join(Dir, BuildScript)
+    if not os.path.exists(BuildScriptPath):
+        print "Error: build script is not defined: %s" % BuildScriptPath
+        sys.exit(-1)       
+    SBOptions = "-plist-html -o " + SBOutputDir + " "
+    SBOptions += "-enable-checker " + Checkers + " "  
+    try:
+        SBCommandFile = open(BuildScriptPath, "r")
+        SBPrefix = "scan-build " + SBOptions + " "
+        for Command in SBCommandFile:
+            SBCommand = SBPrefix + Command
+            if Verbose == 1:        
+                print "  Executing: %s" % (SBCommand,)
+            check_call(SBCommand, cwd = Dir, stderr=PBuildLogFile,
+                                             stdout=PBuildLogFile, 
+                                             shell=True)
+    except:
+        print "Error: scan-build failed. See ",PBuildLogFile.name,\
+              " for details."
+        raise
+
+def hasNoExtension(FileName):
+    (Root, Ext) = os.path.splitext(FileName)
+    if ((Ext == "")) :
+        return True
+    return False
+
+def isValidSingleInputFile(FileName):
+    (Root, Ext) = os.path.splitext(FileName)
+    if ((Ext == ".i") | (Ext == ".ii") | 
+        (Ext == ".c") | (Ext == ".cpp") | 
+        (Ext == ".m") | (Ext == "")) :
+        return True
+    return False
+
+# Run analysis on a set of preprocessed files.
+def runAnalyzePreprocessed(Dir, SBOutputDir):
+    if os.path.exists(os.path.join(Dir, BuildScript)):
+        print "Error: The preprocessed files project should not contain %s" % \
+               BuildScript
+        raise Exception()       
+
+    CmdPrefix = "clang -cc1 -analyze -analyzer-output=plist -w "
+    CmdPrefix += "-analyzer-checker=" + Checkers +" -fcxx-exceptions -fblocks "   
+    
+    PlistPath = os.path.join(Dir, SBOutputDir, "date")
+    FailPath = os.path.join(PlistPath, "failures");
+    os.makedirs(FailPath);
+ 
+    for FullFileName in glob.glob(Dir + "/*"):
+        FileName = os.path.basename(FullFileName)
+        Failed = False
+        
+        # Only run the analysis on supported files.
+        if (hasNoExtension(FileName)):
+            continue
+        if (isValidSingleInputFile(FileName) == False):
+            print "Error: Invalid single input file %s." % (FullFileName,)
+            raise Exception()
+        
+        # Build and call the analyzer command.
+        OutputOption = "-o " + os.path.join(PlistPath, FileName) + ".plist "
+        Command = CmdPrefix + OutputOption + os.path.join(Dir, FileName)
+        LogFile = open(os.path.join(FailPath, FileName + ".stderr.txt"), "w+b")
+        try:
+            if Verbose == 1:        
+                print "  Executing: %s" % (Command,)
+            check_call(Command, cwd = Dir, stderr=LogFile,
+                                           stdout=LogFile, 
+                                           shell=True)
+        except CalledProcessError, e:
+            print "Error: Analyzes of %s failed. See %s for details." \
+                  "Error code %d." % \
+                   (FullFileName, LogFile.name, e.returncode)
+            Failed = True       
+        finally:
+            LogFile.close()            
+        
+        # If command did not fail, erase the log file.
+        if Failed == False:
+            os.remove(LogFile.name);
+
+def buildProject(Dir, SBOutputDir, IsScanBuild):
+    TBegin = time.time() 
+
+    BuildLogPath = os.path.join(SBOutputDir, LogFolderName, BuildLogName)
+    print "Log file: %s" % (BuildLogPath,) 
+    print "Output directory: %s" %(SBOutputDir, )
+    
+    # Clean up the log file.
+    if (os.path.exists(BuildLogPath)) :
+        RmCommand = "rm " + BuildLogPath
+        if Verbose == 1:
+            print "  Executing: %s" % (RmCommand,)
+        check_call(RmCommand, shell=True)
+    
+    # Clean up scan build results.
+    if (os.path.exists(SBOutputDir)) :
+        RmCommand = "rm -r " + SBOutputDir
+        if Verbose == 1: 
+            print "  Executing: %s" % (RmCommand,)
+        check_call(RmCommand, shell=True)
+    assert(not os.path.exists(SBOutputDir))
+    os.makedirs(os.path.join(SBOutputDir, LogFolderName))
+        
+    # Open the log file.
+    PBuildLogFile = open(BuildLogPath, "wb+")
+    
+    # Build and analyze the project.
+    try:
+        runCleanupScript(Dir, PBuildLogFile)
+        
+        if IsScanBuild:
+            runScanBuild(Dir, SBOutputDir, PBuildLogFile)
+        else:
+            runAnalyzePreprocessed(Dir, SBOutputDir)
+        
+        if IsReferenceBuild :
+            runCleanupScript(Dir, PBuildLogFile)
+           
+    finally:
+        PBuildLogFile.close()
+        
+    print "Build complete (time: %.2f). See the log for more details: %s" % \
+           ((time.time()-TBegin), BuildLogPath) 
+       
+# A plist file is created for each call to the analyzer (each source file).
+# We are only interested in the ones that have bug reports, so delete the rest.
+def CleanUpEmptyPlists(SBOutputDir):
+    for F in glob.glob(SBOutputDir + "/*/*.plist"):
+        P = os.path.join(SBOutputDir, F)
+        
+        Data = plistlib.readPlist(P)
+        # Delete empty reports.
+        if not Data['files']:
+            os.remove(P)
+            continue
+
+# Given the scan-build output directory, checks if the build failed 
+# (by searching for the failures directories). If there are failures, it 
+# creates a summary file in the output directory.         
+def checkBuild(SBOutputDir):
+    # Check if there are failures.
+    Failures = glob.glob(SBOutputDir + "/*/failures/*.stderr.txt")
+    TotalFailed = len(Failures);
+    if TotalFailed == 0:
+        CleanUpEmptyPlists(SBOutputDir)
+        Plists = glob.glob(SBOutputDir + "/*/*.plist")
+        print "Number of bug reports (non empty plist files) produced: %d" %\
+           len(Plists)
+        return;
+    
+    # Create summary file to display when the build fails.
+    SummaryPath = os.path.join(SBOutputDir, LogFolderName, FailuresSummaryFileName)
+    if (Verbose > 0):
+        print "  Creating the failures summary file %s" % (SummaryPath,)
+    
+    SummaryLog = open(SummaryPath, "w+")
+    try:
+        SummaryLog.write("Total of %d failures discovered.\n" % (TotalFailed,))
+        if TotalFailed > NumOfFailuresInSummary:
+            SummaryLog.write("See the first %d below.\n" 
+                                                   % (NumOfFailuresInSummary,))
+        # TODO: Add a line "See the results folder for more."
+    
+        FailuresCopied = NumOfFailuresInSummary
+        Idx = 0
+        for FailLogPathI in glob.glob(SBOutputDir + "/*/failures/*.stderr.txt"):
+            if Idx >= NumOfFailuresInSummary:
+                break;
+            Idx += 1 
+            SummaryLog.write("\n-- Error #%d -----------\n" % (Idx,));
+            FailLogI = open(FailLogPathI, "r");
+            try: 
+                shutil.copyfileobj(FailLogI, SummaryLog);
+            finally:
+                FailLogI.close()
+    finally:
+        SummaryLog.close()
+    
+    print "Error: analysis failed. See ", SummaryPath
+    sys.exit(-1)       
+
+# Auxiliary object to discard stdout.
+class Discarder(object):
+    def write(self, text):
+        pass # do nothing
+
+# Compare the warnings produced by scan-build.
+def runCmpResults(Dir):   
+    TBegin = time.time() 
+
+    RefDir = os.path.join(Dir, SBOutputDirReferencePrefix + SBOutputDirName)
+    NewDir = os.path.join(Dir, SBOutputDirName)
+    
+    # We have to go one level down the directory tree.
+    RefList = glob.glob(RefDir + "/*") 
+    NewList = glob.glob(NewDir + "/*")
+    
+    # Log folders are also located in the results dir, so ignore them. 
+    RefList.remove(os.path.join(RefDir, LogFolderName))
+    NewList.remove(os.path.join(NewDir, LogFolderName))
+    
+    if len(RefList) == 0 or len(NewList) == 0:
+        return False
+    assert(len(RefList) == len(NewList))
+
+    # There might be more than one folder underneath - one per scan-build
+    # command (Ex: one for configure and one for make).
+    if (len(RefList) > 1):
+        # Assume that the corresponding folders have the same names.
+        RefList.sort()
+        NewList.sort()
+    
+    # Iterate and find the differences.
+    NumDiffs = 0
+    PairList = zip(RefList, NewList)    
+    for P in PairList:    
+        RefDir = P[0] 
+        NewDir = P[1]
+    
+        assert(RefDir != NewDir) 
+        if Verbose == 1:        
+            print "  Comparing Results: %s %s" % (RefDir, NewDir)
+    
+        DiffsPath = os.path.join(NewDir, DiffsSummaryFileName)
+        Opts = CmpRuns.CmpOptions(DiffsPath)
+        # Discard everything coming out of stdout (CmpRuns produces a lot of output).
+        OLD_STDOUT = sys.stdout
+        sys.stdout = Discarder()
+        # Compare the results (empty plist files were already removed by checkBuild).
+        NumDiffs = CmpRuns.cmpScanBuildResults(RefDir, NewDir, Opts, False)
+        sys.stdout = OLD_STDOUT
+        if (NumDiffs > 0) :
+            print "Warning: %r differences in diagnostics. See %s" % \
+                  (NumDiffs, DiffsPath,)
+                    
+    print "Diagnostic comparison complete (time: %.2f)." % (time.time()-TBegin) 
+    return (NumDiffs > 0)
+    
+def updateSVN(Mode, ProjectsMap):
+    try:
+        ProjectsMap.seek(0)    
+        for I in csv.reader(ProjectsMap):
+            ProjName = I[0] 
+            Path = os.path.join(ProjName, getSBOutputDirName())
+    
+            if Mode == "delete":
+                Command = "svn delete %s" % (Path,)
+            else:
+                Command = "svn add %s" % (Path,)
+
+            if Verbose == 1:        
+                print "  Executing: %s" % (Command,)
+            check_call(Command, shell=True)    
+    
+        if Mode == "delete":
+            CommitCommand = "svn commit -m \"[analyzer tests] Remove " \
+                            "reference results.\""     
+        else:
+            CommitCommand = "svn commit -m \"[analyzer tests] Add new " \
+                            "reference results.\""
+        if Verbose == 1:        
+            print "  Executing: %s" % (CommitCommand,)
+        check_call(CommitCommand, shell=True)    
+    except:
+        print "Error: SVN update failed."
+        sys.exit(-1)
+        
+def testProject(ID, IsScanBuild, Dir=None):
+    print " \n\n--- Building project %s" % (ID,)
+
+    TBegin = time.time() 
+
+    if Dir is None :
+        Dir = getProjectDir(ID)        
+    if Verbose == 1:        
+        print "  Build directory: %s." % (Dir,)
+    
+    # Set the build results directory.
+    RelOutputDir = getSBOutputDirName()
+    SBOutputDir = os.path.join(Dir, RelOutputDir)
+                
+    buildProject(Dir, SBOutputDir, IsScanBuild)    
+
+    checkBuild(SBOutputDir)
+    
+    if IsReferenceBuild == False:
+        runCmpResults(Dir)
+        
+    print "Completed tests for project %s (time: %.2f)." % \
+          (ID, (time.time()-TBegin))
+    
+def testAll(InIsReferenceBuild = False, UpdateSVN = False):
+    global IsReferenceBuild
+    IsReferenceBuild = InIsReferenceBuild
+
+    PMapFile = open(getProjectMapPath(), "rb")
+    try:        
+        # Validate the input.
+        for I in csv.reader(PMapFile):
+            if (len(I) != 2) :
+                print "Error: Rows in the ProjectMapFile should have 3 entries."
+                raise Exception()
+            if (not ((I[1] == "1") | (I[1] == "0"))):
+                print "Error: Second entry in the ProjectMapFile should be 0 or 1."
+                raise Exception()              
+
+        # When we are regenerating the reference results, we might need to 
+        # update svn. Remove reference results from SVN.
+        if UpdateSVN == True:
+            assert(InIsReferenceBuild == True);
+            updateSVN("delete",  PMapFile);
+            
+        # Test the projects.
+        PMapFile.seek(0)    
+        for I in csv.reader(PMapFile):
+            testProject(I[0], int(I[1]))
+
+        # Add reference results to SVN.
+        if UpdateSVN == True:
+            updateSVN("add",  PMapFile);
+
+    except:
+        print "Error occurred. Premature termination."
+        raise                            
+    finally:
+        PMapFile.close()    
+            
+if __name__ == '__main__':
+    IsReference = False
+    UpdateSVN = False
+    if len(sys.argv) >= 2:
+        if sys.argv[1] == "-r":
+            IsReference = True
+        elif sys.argv[1] == "-rs":
+            IsReference = True
+            UpdateSVN = True
+        else:     
+            print >> sys.stderr, 'Usage: ', sys.argv[0],\
+                                 '[-r|-rs]\n' \
+                                 'Use -r to regenerate reference output.\n' \
+                                 'Use -rs to regenerate reference output and update svn.'
+
+    testAll(IsReference, UpdateSVN)
diff --git a/clang/utils/analyzer/ubiviz b/clang/utils/analyzer/ubiviz
new file mode 100755
index 0000000..1582797
--- /dev/null
+++ b/clang/utils/analyzer/ubiviz
@@ -0,0 +1,74 @@
+#!/usr/bin/env python
+#
+#                     The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+##===----------------------------------------------------------------------===##
+#
+# This script reads visualization data emitted by the static analyzer for
+# display in Ubigraph.
+#
+##===----------------------------------------------------------------------===##
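+#
+# Each line of an input file is a Python literal describing one drawing action,
+# e.g. (hypothetical values):
+#
+#   ('vertex', 1, ('color', '#ff0000'), ('label', 'node 1'))
+#   ('edge', 1, 2, ('oriented', 'true'))
+#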
+
+import xmlrpclib
+import sys
+
+def Error(message):
+    print >> sys.stderr, 'ubiviz: ' + message
+    sys.exit(1)
+    
+def StreamData(filename):
+  file = open(filename)
+  for ln in file:
+    yield eval(ln)
+  file.close()
+
+def Display(G, data):
+  action = data[0]
+  if action == 'vertex':
+    vertex = data[1]
+    G.new_vertex_w_id(vertex)
+    for attribute in data[2:]:
+      G.set_vertex_attribute(vertex, attribute[0], attribute[1])
+  elif action == 'edge':
+    src = data[1]
+    dst = data[2]
+    edge = G.new_edge(src,dst)
+    for attribute in data[3:]:
+      G.set_edge_attribute(edge, attribute[0], attribute[1])
+  elif action == "vertex_style":
+    style_id = data[1]
+    parent_id = data[2]
+    G.new_vertex_style_w_id(style_id, parent_id)
+    for attribute in data[3:]:
+      G.set_vertex_style_attribute(style_id, attribute[0], attribute[1])
+  elif action == "vertex_style_attribute":
+    style_id = data[1]
+    for attribute in data[2:]:
+      G.set_vertex_style_attribute(style_id, attribute[0], attribute[1])
+  elif action == "change_vertex_style":
+     vertex_id = data[1]
+     style_id = data[2]
+     G.change_vertex_style(vertex_id,style_id)
+
+def main(args):
+  if len(args) == 0:
+    Error('no input files')    
+
+  server = xmlrpclib.Server('http://127.0.0.1:20738/RPC2')
+  G = server.ubigraph
+            
+  for arg in args:
+    G.clear()
+    for x in StreamData(arg):
+      Display(G,x)
+  
+  sys.exit(0)
+  
+
+if __name__ == '__main__':
+    main(sys.argv[1:])
+    
+    
\ No newline at end of file
-- 
cgit v1.2.3